Diffstat (limited to 'drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c')
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c | 462 ++++++++++++++++++++++++++++++++++++++++++++++---
1 file changed, 443 insertions(+), 19 deletions(-)
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
index 6083b1dcf050..1e7de0f03290 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
@@ -23,6 +23,7 @@
*
*/
+#include "dm_services.h"
#include "dc.h"
#include "dc_dmub_srv.h"
#include "../dmub/dmub_srv.h"
@@ -34,6 +35,7 @@
#include "resource.h"
#include "clk_mgr.h"
#include "dc_state_priv.h"
+#include "dc_plane_priv.h"
#define CTX dc_dmub_srv->ctx
#define DC_LOGGER CTX->logger
@@ -198,6 +200,11 @@ bool dc_dmub_srv_wait_for_idle(struct dc_dmub_srv *dc_dmub_srv,
if (status != DMUB_STATUS_OK) {
DC_LOG_DEBUG("No reply for DMUB command: status=%d\n", status);
+ if (!dmub->debug.timeout_occured) {
+ dmub->debug.timeout_occured = true;
+ dmub->debug.timeout_cmd = *cmd_list;
+ dmub->debug.timestamp = dm_get_timestamp(dc_dmub_srv->ctx);
+ }
dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
return false;
}
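A note on the new latch: only the first timeout is recorded, so later failures cannot overwrite the command that originally hung. The same pattern in isolation, with illustrative names (timeout_debug and latch_first_timeout are not the driver's actual symbols):

    struct timeout_debug {
        bool timeout_occured;          /* set once, on the first failure */
        union dmub_rb_cmd timeout_cmd; /* the command that timed out */
        uint64_t timestamp;
    };

    static void latch_first_timeout(struct timeout_debug *dbg,
                                    const union dmub_rb_cmd *cmd, uint64_t now)
    {
        if (dbg->timeout_occured)
            return;                    /* keep only the earliest record */
        dbg->timeout_occured = true;
        dbg->timeout_cmd = *cmd;
        dbg->timestamp = now;
    }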
@@ -238,7 +245,9 @@ bool dc_dmub_srv_cmd_run_list(struct dc_dmub_srv *dc_dmub_srv, unsigned int coun
if (status == DMUB_STATUS_POWER_STATE_D3)
return false;
- dmub_srv_wait_for_idle(dmub, 100000);
+ status = dmub_srv_wait_for_idle(dmub, 100000);
+ if (status != DMUB_STATUS_OK)
+ return false;
/* Requeue the command. */
status = dmub_srv_cmd_queue(dmub, &cmd_list[i]);
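The fix above makes the drain step's failure fatal instead of blindly requeueing into a ring that never emptied. Simplified, the retry flow in this function looks like the following sketch (the real loop also flushes the outbox and bails on the D3 power state, as the context lines show):

    status = dmub_srv_cmd_queue(dmub, &cmd_list[i]);
    if (status == DMUB_STATUS_QUEUE_FULL) {
        /* drain the ring; give up if DMCUB never goes idle */
        status = dmub_srv_wait_for_idle(dmub, 100000 /* us */);
        if (status != DMUB_STATUS_OK)
            return false;
        /* requeue exactly once now that space should exist */
        status = dmub_srv_cmd_queue(dmub, &cmd_list[i]);
    }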
@@ -420,6 +429,7 @@ bool dc_dmub_srv_p_state_delegate(struct dc *dc, bool should_manage_pstate, stru
int ramp_up_num_steps = 1; // TODO: Ramp is currently disabled. Reenable it.
uint8_t visual_confirm_enabled;
int pipe_idx = 0;
+ struct dc_stream_status *stream_status = NULL;
if (dc == NULL)
return false;
@@ -443,7 +453,8 @@ bool dc_dmub_srv_p_state_delegate(struct dc *dc, bool should_manage_pstate, stru
* that does not use FAMS, we are in an FPO + VActive scenario.
* Assign vactive stretch margin in this case.
*/
- if (!pipe->stream->fpo_in_use) {
+ stream_status = dc_state_get_stream_status(context, pipe->stream);
+ if (stream_status && !stream_status->fpo_in_use) {
cmd.fw_assisted_mclk_switch.config_data.vactive_stretch_margin_us = dc->debug.fpo_vactive_margin_us;
break;
}
@@ -454,7 +465,11 @@ bool dc_dmub_srv_p_state_delegate(struct dc *dc, bool should_manage_pstate, stru
for (i = 0, k = 0; context && i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
- if (resource_is_pipe_type(pipe, OTG_MASTER) && pipe->stream->fpo_in_use) {
+ if (!resource_is_pipe_type(pipe, OTG_MASTER))
+ continue;
+
+ stream_status = dc_state_get_stream_status(context, pipe->stream);
+ if (stream_status && stream_status->fpo_in_use) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
uint8_t min_refresh_in_hz = (pipe->stream->timing.min_refresh_in_uhz + 999999) / 1000000;
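The uHz-to-Hz conversion here rounds up: adding 999999 before the integer divide is DIV_ROUND_UP(min_refresh_in_uhz, 1000000) spelled out. A worked check of the arithmetic:

    uint32_t uhz = 47950000;                 /* 47.95 Hz reported in uHz */
    uint8_t hz = (uhz + 999999) / 1000000;   /* == 48; truncation would give 47 */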
@@ -504,7 +519,8 @@ void dc_dmub_srv_get_visual_confirm_color_cmd(struct dc *dc, struct pipe_ctx *pi
union dmub_rb_cmd cmd = { 0 };
unsigned int panel_inst = 0;
- dc_get_edp_link_panel_inst(dc, pipe_ctx->stream->link, &panel_inst);
+ if (!dc_get_edp_link_panel_inst(dc, pipe_ctx->stream->link, &panel_inst))
+ return;
memset(&cmd, 0, sizeof(cmd));
@@ -549,7 +565,7 @@ static void populate_subvp_cmd_drr_info(struct dc *dc,
{
struct dc_stream_state *phantom_stream = dc_state_get_paired_subvp_stream(context, subvp_pipe->stream);
struct dc_crtc_timing *main_timing = &subvp_pipe->stream->timing;
- struct dc_crtc_timing *phantom_timing = &phantom_stream->timing;
+ struct dc_crtc_timing *phantom_timing;
struct dc_crtc_timing *drr_timing = &vblank_pipe->stream->timing;
uint16_t drr_frame_us = 0;
uint16_t min_drr_supported_us = 0;
@@ -563,6 +579,11 @@ static void populate_subvp_cmd_drr_info(struct dc *dc,
uint16_t min_vtotal_supported = 0;
uint16_t max_vtotal_supported = 0;
+ if (!phantom_stream)
+ return;
+
+ phantom_timing = &phantom_stream->timing;
+
pipe_data->pipe_config.vblank_data.drr_info.drr_in_use = true;
pipe_data->pipe_config.vblank_data.drr_info.use_ramping = false; // for now don't use ramping
pipe_data->pipe_config.vblank_data.drr_info.drr_window_size_ms = 4; // hardcode 4ms DRR window for now
@@ -686,7 +707,13 @@ static void update_subvp_prefetch_end_to_mall_start(struct dc *dc,
struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 *pipe_data = NULL;
phantom_stream0 = dc_state_get_paired_subvp_stream(context, subvp_pipes[0]->stream);
+ if (!phantom_stream0)
+ return;
+
phantom_stream1 = dc_state_get_paired_subvp_stream(context, subvp_pipes[1]->stream);
+ if (!phantom_stream1)
+ return;
+
phantom_timing0 = &phantom_stream0->timing;
phantom_timing1 = &phantom_stream1->timing;
@@ -741,9 +768,14 @@ static void populate_subvp_cmd_pipe_info(struct dc *dc,
&cmd->fw_assisted_mclk_switch_v2.config_data.pipe_data[cmd_pipe_index];
struct dc_stream_state *phantom_stream = dc_state_get_paired_subvp_stream(context, subvp_pipe->stream);
struct dc_crtc_timing *main_timing = &subvp_pipe->stream->timing;
- struct dc_crtc_timing *phantom_timing = &phantom_stream->timing;
+ struct dc_crtc_timing *phantom_timing;
uint32_t out_num_stream, out_den_stream, out_num_plane, out_den_plane, out_num, out_den;
+ if (!phantom_stream)
+ return;
+
+ phantom_timing = &phantom_stream->timing;
+
pipe_data->mode = SUBVP;
pipe_data->pipe_config.subvp_data.pix_clk_100hz = subvp_pipe->stream->timing.pix_clk_100hz;
pipe_data->pipe_config.subvp_data.htotal = subvp_pipe->stream->timing.h_total;
@@ -904,12 +936,15 @@ bool dc_dmub_srv_get_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv, struct dmu
void dc_dmub_srv_log_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv)
{
struct dmub_diagnostic_data diag_data = {0};
+ uint32_t i;
if (!dc_dmub_srv || !dc_dmub_srv->dmub) {
DC_LOG_ERROR("%s: invalid parameters.", __func__);
return;
}
+ DC_LOG_ERROR("%s: DMCUB error - collecting diagnostic data\n", __func__);
+
if (!dc_dmub_srv_get_diagnostic_data(dc_dmub_srv, &diag_data)) {
DC_LOG_ERROR("%s: dc_dmub_srv_get_diagnostic_data failed.", __func__);
return;
@@ -933,7 +968,8 @@ void dc_dmub_srv_log_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv)
DC_LOG_DEBUG(" scratch [13] : %08x", diag_data.scratch[13]);
DC_LOG_DEBUG(" scratch [14] : %08x", diag_data.scratch[14]);
DC_LOG_DEBUG(" scratch [15] : %08x", diag_data.scratch[15]);
- DC_LOG_DEBUG(" pc : %08x", diag_data.pc);
+ for (i = 0; i < DMUB_PC_SNAPSHOT_COUNT; i++)
+ DC_LOG_DEBUG(" pc[%d] : %08x", i, diag_data.pc[i]);
DC_LOG_DEBUG(" unk_fault_addr : %08x", diag_data.undefined_address_fault_addr);
DC_LOG_DEBUG(" inst_fault_addr : %08x", diag_data.inst_fetch_fault_addr);
DC_LOG_DEBUG(" data_fault_addr : %08x", diag_data.data_write_fault_addr);
@@ -943,6 +979,9 @@ void dc_dmub_srv_log_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv)
DC_LOG_DEBUG(" inbox0_rptr : %08x", diag_data.inbox0_rptr);
DC_LOG_DEBUG(" inbox0_wptr : %08x", diag_data.inbox0_wptr);
DC_LOG_DEBUG(" inbox0_size : %08x", diag_data.inbox0_size);
+ DC_LOG_DEBUG(" outbox1_rptr : %08x", diag_data.outbox1_rptr);
+ DC_LOG_DEBUG(" outbox1_wptr : %08x", diag_data.outbox1_wptr);
+ DC_LOG_DEBUG(" outbox1_size : %08x", diag_data.outbox1_size);
DC_LOG_DEBUG(" is_enabled : %d", diag_data.is_dmcub_enabled);
DC_LOG_DEBUG(" is_soft_reset : %d", diag_data.is_dmcub_soft_reset);
DC_LOG_DEBUG(" is_secure_reset : %d", diag_data.is_dmcub_secure_reset);
@@ -1199,8 +1238,23 @@ bool dc_dmub_srv_is_hw_pwr_up(struct dc_dmub_srv *dc_dmub_srv, bool wait)
return true;
}
+static int count_active_streams(const struct dc *dc)
+{
+ int i, count = 0;
+
+ for (i = 0; i < dc->current_state->stream_count; ++i) {
+ struct dc_stream_state *stream = dc->current_state->streams[i];
+
+ if (stream && !stream->dpms_off)
+ count += 1;
+ }
+
+ return count;
+}
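count_active_streams() counts only streams that exist and are not dpms_off; the DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF branch below keys off it to choose idle depth. The policy it feeds, restated (illustrative, mirroring the hunk that follows):

    if (count_active_streams(dc) == 0) {
        /* every display off: allow the deepest states (PG, IPS1, IPS2, Z10) */
    } else {
        /* displays still active: refresh clock gating only (IPS1 allow, no IPS2/Z10) */
    }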
+
static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)
{
+ volatile const struct dmub_shared_state_ips_fw *ips_fw;
struct dc_dmub_srv *dc_dmub_srv;
union dmub_rb_cmd cmd = {0};
@@ -1211,6 +1265,7 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)
return;
dc_dmub_srv = dc->ctx->dmub_srv;
+ ips_fw = &dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_FW].data.ips_fw;
memset(&cmd, 0, sizeof(cmd));
cmd.idle_opt_notify_idle.header.type = DMUB_CMD__IDLE_OPT;
@@ -1221,11 +1276,20 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)
cmd.idle_opt_notify_idle.cntl_data.driver_idle = allow_idle;
+ if (dc->work_arounds.skip_psr_ips_crtc_disable)
+ cmd.idle_opt_notify_idle.cntl_data.skip_otg_disable = true;
+
if (allow_idle) {
volatile struct dmub_shared_state_ips_driver *ips_driver =
&dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_DRIVER].data.ips_driver;
union dmub_shared_state_ips_driver_signals new_signals;
+ DC_LOG_IPS(
+ "%s wait idle (ips1_commit=%u ips2_commit=%u)",
+ __func__,
+ ips_fw->signals.bits.ips1_commit,
+ ips_fw->signals.bits.ips2_commit);
+
dc_dmub_srv_wait_idle(dc->ctx->dmub_srv);
memset(&new_signals, 0, sizeof(new_signals));
@@ -1245,19 +1309,47 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)
new_signals.bits.allow_pg = 1;
new_signals.bits.allow_ips1 = 1;
new_signals.bits.allow_ips2 = 1;
+ } else if (dc->config.disable_ips == DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF) {
+ /* TODO: Move this logic out to hwseq */
+ if (count_active_streams(dc) == 0) {
+ /* IPS2 - Display off */
+ new_signals.bits.allow_pg = 1;
+ new_signals.bits.allow_ips1 = 1;
+ new_signals.bits.allow_ips2 = 1;
+ new_signals.bits.allow_z10 = 1;
+ } else {
+ /* RCG only */
+ new_signals.bits.allow_pg = 0;
+ new_signals.bits.allow_ips1 = 1;
+ new_signals.bits.allow_ips2 = 0;
+ new_signals.bits.allow_z10 = 0;
+ }
}
ips_driver->signals = new_signals;
+ dc_dmub_srv->driver_signals = ips_driver->signals;
}
+ DC_LOG_IPS(
+ "%s send allow_idle=%d (ips1_commit=%u ips2_commit=%u)",
+ __func__,
+ allow_idle,
+ ips_fw->signals.bits.ips1_commit,
+ ips_fw->signals.bits.ips2_commit);
+
/* NOTE: This does not use the "wake" interface since this is part of the wake path. */
/* We also do not perform a wait since DMCUB could enter idle after the notification. */
dm_execute_dmub_cmd(dc->ctx, &cmd, allow_idle ? DM_DMUB_WAIT_TYPE_NO_WAIT : DM_DMUB_WAIT_TYPE_WAIT);
+
+ /* Register access should stop at this point. */
+ if (allow_idle)
+ dc_dmub_srv->needs_idle_wake = true;
}
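The tail of dc_dmub_srv_notify_idle() is deliberately asymmetric: the driver publishes its allow bits through shared state, sends the notification without waiting (DMCUB may drop into idle immediately after), and then flags that register access must stop. Condensed:

    ips_driver->signals = new_signals;              /* driver -> FW: allowed states */
    dm_execute_dmub_cmd(dc->ctx, &cmd,
                        DM_DMUB_WAIT_TYPE_NO_WAIT); /* FW may idle right after */
    dc_dmub_srv->needs_idle_wake = true;            /* register access ends here */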
static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
{
struct dc_dmub_srv *dc_dmub_srv;
+ uint32_t rcg_exit_count = 0, ips1_exit_count = 0, ips2_exit_count = 0;
if (dc->debug.dmcub_emulation)
return;
@@ -1274,43 +1366,117 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
&dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_DRIVER].data.ips_driver;
union dmub_shared_state_ips_driver_signals prev_driver_signals = ips_driver->signals;
+ rcg_exit_count = ips_fw->rcg_exit_count;
+ ips1_exit_count = ips_fw->ips1_exit_count;
+ ips2_exit_count = ips_fw->ips2_exit_count;
+
ips_driver->signals.all = 0;
+ dc_dmub_srv->driver_signals = ips_driver->signals;
+
+ DC_LOG_IPS(
+ "%s (allow ips1=%u ips2=%u) (commit ips1=%u ips2=%u) (count rcg=%u ips1=%u ips2=%u)",
+ __func__,
+ ips_driver->signals.bits.allow_ips1,
+ ips_driver->signals.bits.allow_ips2,
+ ips_fw->signals.bits.ips1_commit,
+ ips_fw->signals.bits.ips2_commit,
+ ips_fw->rcg_entry_count,
+ ips_fw->ips1_entry_count,
+ ips_fw->ips2_entry_count);
+
+ /* Note: register access has technically not resumed for DCN here, but we
+ * need to message PMFW through our standard register interface.
+ */
+ dc_dmub_srv->needs_idle_wake = false;
- if (prev_driver_signals.bits.allow_ips2) {
- udelay(dc->debug.ips2_eval_delay_us);
+ if (prev_driver_signals.bits.allow_ips2 &&
+ (!dc->debug.optimize_ips_handshake ||
+ ips_fw->signals.bits.ips2_commit || !ips_fw->signals.bits.in_idle)) {
+ DC_LOG_IPS(
+ "wait IPS2 eval (ips1_commit=%u ips2_commit=%u)",
+ ips_fw->signals.bits.ips1_commit,
+ ips_fw->signals.bits.ips2_commit);
+
+ if (!dc->debug.optimize_ips_handshake || !ips_fw->signals.bits.ips2_commit)
+ udelay(dc->debug.ips2_eval_delay_us);
if (ips_fw->signals.bits.ips2_commit) {
+ DC_LOG_IPS(
+ "exit IPS2 #1 (ips1_commit=%u ips2_commit=%u)",
+ ips_fw->signals.bits.ips1_commit,
+ ips_fw->signals.bits.ips2_commit);
+
// Tell PMFW to exit low power state
dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr);
+ DC_LOG_IPS(
+ "wait IPS2 entry delay (ips1_commit=%u ips2_commit=%u)",
+ ips_fw->signals.bits.ips1_commit,
+ ips_fw->signals.bits.ips2_commit);
+
// Wait for IPS2 entry upper bound
udelay(dc->debug.ips2_entry_delay_us);
+ DC_LOG_IPS(
+ "exit IPS2 #2 (ips1_commit=%u ips2_commit=%u)",
+ ips_fw->signals.bits.ips1_commit,
+ ips_fw->signals.bits.ips2_commit);
+
dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr);
+ DC_LOG_IPS(
+ "wait IPS2 commit clear (ips1_commit=%u ips2_commit=%u)",
+ ips_fw->signals.bits.ips1_commit,
+ ips_fw->signals.bits.ips2_commit);
+
while (ips_fw->signals.bits.ips2_commit)
udelay(1);
+ DC_LOG_IPS(
+ "wait hw_pwr_up (ips1_commit=%u ips2_commit=%u)",
+ ips_fw->signals.bits.ips1_commit,
+ ips_fw->signals.bits.ips2_commit);
+
if (!dc_dmub_srv_is_hw_pwr_up(dc->ctx->dmub_srv, true))
ASSERT(0);
+ DC_LOG_IPS(
+ "resync inbox1 (ips1_commit=%u ips2_commit=%u)",
+ ips_fw->signals.bits.ips1_commit,
+ ips_fw->signals.bits.ips2_commit);
+
dmub_srv_sync_inbox1(dc->ctx->dmub_srv->dmub);
}
}
dc_dmub_srv_notify_idle(dc, false);
if (prev_driver_signals.bits.allow_ips1) {
+ DC_LOG_IPS(
+ "wait for IPS1 commit clear (ips1_commit=%u ips2_commit=%u)",
+ ips_fw->signals.bits.ips1_commit,
+ ips_fw->signals.bits.ips2_commit);
+
while (ips_fw->signals.bits.ips1_commit)
udelay(1);
+ DC_LOG_IPS(
+ "wait for IPS1 commit clear done (ips1_commit=%u ips2_commit=%u)",
+ ips_fw->signals.bits.ips1_commit,
+ ips_fw->signals.bits.ips2_commit);
}
}
if (!dc_dmub_srv_is_hw_pwr_up(dc->ctx->dmub_srv, true))
ASSERT(0);
+
+ DC_LOG_IPS("%s exit (count rcg=%u ips1=%u ips2=%u)",
+ __func__,
+ rcg_exit_count,
+ ips1_exit_count,
+ ips2_exit_count);
}
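The IPS2 exit above follows a strict ordering contract. Condensed to its steps (delay values come from dc->debug; the optimize_ips_handshake short-circuits are omitted):

    udelay(ips2_eval_delay_us);                        /* 1: evaluation window */
    if (ips_fw->signals.bits.ips2_commit) {
        clk_mgr->funcs->exit_low_power_state(clk_mgr); /* 2: ask PMFW to exit */
        udelay(ips2_entry_delay_us);                   /* 3: entry upper bound */
        clk_mgr->funcs->exit_low_power_state(clk_mgr); /* 4: retry, FW may have raced in */
        while (ips_fw->signals.bits.ips2_commit)       /* 5: poll for FW ack */
            udelay(1);
    }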
-void dc_dmub_srv_set_power_state(struct dc_dmub_srv *dc_dmub_srv, enum dc_acpi_cm_power_state powerState)
+void dc_dmub_srv_set_power_state(struct dc_dmub_srv *dc_dmub_srv, enum dc_acpi_cm_power_state power_state)
{
struct dmub_srv *dmub;
@@ -1319,12 +1485,68 @@ void dc_dmub_srv_set_power_state(struct dc_dmub_srv *dc_dmub_srv, enum dc_acpi_c
dmub = dc_dmub_srv->dmub;
- if (powerState == DC_ACPI_CM_POWER_STATE_D0)
+ if (power_state == DC_ACPI_CM_POWER_STATE_D0)
dmub_srv_set_power_state(dmub, DMUB_POWER_STATE_D0);
else
dmub_srv_set_power_state(dmub, DMUB_POWER_STATE_D3);
}
+void dc_dmub_srv_notify_fw_dc_power_state(struct dc_dmub_srv *dc_dmub_srv,
+ enum dc_acpi_cm_power_state power_state)
+{
+ union dmub_rb_cmd cmd;
+
+ if (!dc_dmub_srv)
+ return;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.idle_opt_set_dc_power_state.header.type = DMUB_CMD__IDLE_OPT;
+ cmd.idle_opt_set_dc_power_state.header.sub_type = DMUB_CMD__IDLE_OPT_SET_DC_POWER_STATE;
+ cmd.idle_opt_set_dc_power_state.header.payload_bytes =
+ sizeof(cmd.idle_opt_set_dc_power_state) - sizeof(cmd.idle_opt_set_dc_power_state.header);
+
+ if (power_state == DC_ACPI_CM_POWER_STATE_D0) {
+ cmd.idle_opt_set_dc_power_state.data.power_state = DMUB_IDLE_OPT_DC_POWER_STATE_D0;
+ } else if (power_state == DC_ACPI_CM_POWER_STATE_D3) {
+ cmd.idle_opt_set_dc_power_state.data.power_state = DMUB_IDLE_OPT_DC_POWER_STATE_D3;
+ } else {
+ cmd.idle_opt_set_dc_power_state.data.power_state = DMUB_IDLE_OPT_DC_POWER_STATE_UNKNOWN;
+ }
+
+ dc_wake_and_execute_dmub_cmd(dc_dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+}
+
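dc_dmub_srv_notify_fw_dc_power_state() maps the two known ACPI states onto the DMCUB enum and everything else to UNKNOWN. A hypothetical call site on the suspend/resume path (not part of this patch):

    /* suspend */
    dc_dmub_srv_notify_fw_dc_power_state(dc->ctx->dmub_srv,
                                         DC_ACPI_CM_POWER_STATE_D3);
    /* resume */
    dc_dmub_srv_notify_fw_dc_power_state(dc->ctx->dmub_srv,
                                         DC_ACPI_CM_POWER_STATE_D0);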
+bool dc_dmub_srv_should_detect(struct dc_dmub_srv *dc_dmub_srv)
+{
+ volatile const struct dmub_shared_state_ips_fw *ips_fw;
+ bool reallow_idle = false, should_detect = false;
+
+ if (!dc_dmub_srv || !dc_dmub_srv->dmub)
+ return false;
+
+ if (dc_dmub_srv->dmub->shared_state &&
+ dc_dmub_srv->dmub->meta_info.feature_bits.bits.shared_state_link_detection) {
+ ips_fw = &dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_FW].data.ips_fw;
+ return ips_fw->signals.bits.detection_required;
+ }
+
+ /* Detection may require reading scratch 0 - exit out of idle prior to the read. */
+ if (dc_dmub_srv->idle_allowed) {
+ dc_dmub_srv_apply_idle_power_optimizations(dc_dmub_srv->ctx->dc, false);
+ reallow_idle = true;
+ }
+
+ should_detect = dmub_srv_should_detect(dc_dmub_srv->dmub);
+
+ /* Re-enter idle if we're not about to immediately redetect links. */
+ if (!should_detect && reallow_idle && dc_dmub_srv->idle_exit_counter == 0 &&
+ !dc_dmub_srv->ctx->dc->debug.disable_dmub_reallow_idle)
+ dc_dmub_srv_apply_idle_power_optimizations(dc_dmub_srv->ctx->dc, true);
+
+ return should_detect;
+}
+
void dc_dmub_srv_apply_idle_power_optimizations(const struct dc *dc, bool allow_idle)
{
struct dc_dmub_srv *dc_dmub_srv = dc->ctx->dmub_srv;
@@ -1332,24 +1554,47 @@ void dc_dmub_srv_apply_idle_power_optimizations(const struct dc *dc, bool allow_
if (!dc_dmub_srv || !dc_dmub_srv->dmub)
return;
+ allow_idle &= (!dc->debug.ips_disallow_entry);
+
if (dc_dmub_srv->idle_allowed == allow_idle)
return;
+ DC_LOG_IPS("%s state change: old=%d new=%d", __func__, dc_dmub_srv->idle_allowed, allow_idle);
+
/*
* Entering a low power state requires a driver notification.
* Powering up the hardware requires notifying PMFW and DMCUB.
* Clearing the driver idle allow requires a DMCUB command.
* DMCUB commands requires the DMCUB to be powered up and restored.
- *
- * Exit out early to prevent an infinite loop of DMCUB commands
- * triggering exit low power - use software state to track this.
*/
- dc_dmub_srv->idle_allowed = allow_idle;
- if (!allow_idle)
+ if (!allow_idle) {
+ dc_dmub_srv->idle_exit_counter += 1;
+
dc_dmub_srv_exit_low_power_state(dc);
- else
+ /*
+ * Idle is considered fully exited only after the sequence above
+ * fully completes. If we have a race of two threads exiting
+ * at the same time then it's safe to perform the sequence
+ * twice as long as we're not re-entering.
+ *
+ * Infinite command submission is avoided by using the
+ * dm_execute_dmub_cmd submission instead of the "wake" helpers.
+ */
+ dc_dmub_srv->idle_allowed = false;
+
+ dc_dmub_srv->idle_exit_counter -= 1;
+ if (dc_dmub_srv->idle_exit_counter < 0) {
+ ASSERT(0);
+ dc_dmub_srv->idle_exit_counter = 0;
+ }
+ } else {
+ /* Consider idle as notified prior to the actual submission to
+ * prevent multiple entries. */
+ dc_dmub_srv->idle_allowed = true;
+
dc_dmub_srv_notify_idle(dc, allow_idle);
+ }
}
bool dc_wake_and_execute_dmub_cmd(const struct dc_context *ctx, union dmub_rb_cmd *cmd,
@@ -1384,7 +1629,8 @@ bool dc_wake_and_execute_dmub_cmd_list(const struct dc_context *ctx, unsigned in
else
result = dm_execute_dmub_cmd(ctx, cmd, wait_type);
- if (result && reallow_idle && !ctx->dc->debug.disable_dmub_reallow_idle)
+ if (result && reallow_idle && dc_dmub_srv->idle_exit_counter == 0 &&
+ !ctx->dc->debug.disable_dmub_reallow_idle)
dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, true);
return result;
@@ -1433,8 +1679,186 @@ bool dc_wake_and_execute_gpint(const struct dc_context *ctx, enum dmub_gpint_com
result = dc_dmub_execute_gpint(ctx, command_code, param, response, wait_type);
- if (result && reallow_idle && !ctx->dc->debug.disable_dmub_reallow_idle)
+ if (result && reallow_idle && dc_dmub_srv->idle_exit_counter == 0 &&
+ !ctx->dc->debug.disable_dmub_reallow_idle)
dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, true);
return result;
}
+
+void dc_dmub_srv_fams2_update_config(struct dc *dc,
+ struct dc_state *context,
+ bool enable)
+{
+ uint8_t num_cmds = 1;
+ uint32_t i;
+ union dmub_rb_cmd cmd[MAX_STREAMS + 1];
+ struct dmub_rb_cmd_fams2 *global_cmd = &cmd[0].fams2_config;
+
+ memset(cmd, 0, sizeof(union dmub_rb_cmd) * (MAX_STREAMS + 1));
+ /* fill in generic command header */
+ global_cmd->header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
+ global_cmd->header.sub_type = DMUB_CMD__FAMS2_CONFIG;
+ global_cmd->header.payload_bytes = sizeof(struct dmub_rb_cmd_fams2) - sizeof(struct dmub_cmd_header);
+
+ if (enable) {
+ /* send global configuration parameters */
+ memcpy(&global_cmd->config.global, &context->bw_ctx.bw.dcn.fams2_global_config, sizeof(struct dmub_cmd_fams2_global_config));
+
+ /* copy static feature configuration overrides */
+ global_cmd->config.global.features.bits.enable_stall_recovery = dc->debug.fams2_config.bits.enable_stall_recovery;
+ global_cmd->config.global.features.bits.enable_debug = dc->debug.fams2_config.bits.enable_debug;
+ global_cmd->config.global.features.bits.enable_offload_flip = dc->debug.fams2_config.bits.enable_offload_flip;
+
+ /* construct per-stream configs */
+ for (i = 0; i < context->bw_ctx.bw.dcn.fams2_global_config.num_streams; i++) {
+ struct dmub_rb_cmd_fams2 *stream_cmd = &cmd[i+1].fams2_config;
+
+ /* configure command header */
+ stream_cmd->header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
+ stream_cmd->header.sub_type = DMUB_CMD__FAMS2_CONFIG;
+ stream_cmd->header.payload_bytes = sizeof(struct dmub_rb_cmd_fams2) - sizeof(struct dmub_cmd_header);
+ stream_cmd->header.multi_cmd_pending = 1;
+ /* copy stream static state */
+ memcpy(&stream_cmd->config.stream,
+ &context->bw_ctx.bw.dcn.fams2_stream_params[i],
+ sizeof(struct dmub_fams2_stream_static_state));
+ }
+ }
+
+ /* apply feature configuration based on current driver state */
+ global_cmd->config.global.features.bits.enable_visual_confirm = dc->debug.visual_confirm == VISUAL_CONFIRM_FAMS2;
+ global_cmd->config.global.features.bits.enable = enable;
+
+ if (enable && context->bw_ctx.bw.dcn.fams2_global_config.features.bits.enable) {
+ /* set multi pending for global, and unset for last stream cmd */
+ global_cmd->header.multi_cmd_pending = 1;
+ cmd[context->bw_ctx.bw.dcn.fams2_global_config.num_streams].fams2_config.header.multi_cmd_pending = 0;
+ num_cmds += context->bw_ctx.bw.dcn.fams2_global_config.num_streams;
+ }
+
+ dm_execute_dmub_cmd_list(dc->ctx, num_cmds, cmd, DM_DMUB_WAIT_TYPE_WAIT);
+}
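The burst built above is chained with multi_cmd_pending: the global config and all but the last per-stream config keep it set, so DMCUB treats the whole list as one transaction. The chaining rule, isolated:

    /* every command except the final one in the burst stays "pending" */
    for (i = 0; i < num_cmds; i++)
        cmd[i].fams2_config.header.multi_cmd_pending = (i != num_cmds - 1);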
+
+void dc_dmub_srv_fams2_drr_update(struct dc *dc,
+ uint32_t tg_inst,
+ uint32_t vtotal_min,
+ uint32_t vtotal_max,
+ uint32_t vtotal_mid,
+ uint32_t vtotal_mid_frame_num,
+ bool program_manual_trigger)
+{
+ union dmub_rb_cmd cmd = { 0 };
+
+ cmd.fams2_drr_update.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
+ cmd.fams2_drr_update.header.sub_type = DMUB_CMD__FAMS2_DRR_UPDATE;
+ cmd.fams2_drr_update.dmub_optc_state_req.tg_inst = tg_inst;
+ cmd.fams2_drr_update.dmub_optc_state_req.v_total_max = vtotal_max;
+ cmd.fams2_drr_update.dmub_optc_state_req.v_total_min = vtotal_min;
+ cmd.fams2_drr_update.dmub_optc_state_req.v_total_mid = vtotal_mid;
+ cmd.fams2_drr_update.dmub_optc_state_req.v_total_mid_frame_num = vtotal_mid_frame_num;
+ cmd.fams2_drr_update.dmub_optc_state_req.program_manual_trigger = program_manual_trigger;
+
+ cmd.fams2_drr_update.header.payload_bytes = sizeof(cmd.fams2_drr_update) - sizeof(cmd.fams2_drr_update.header);
+
+ dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+}
+
+void dc_dmub_srv_fams2_passthrough_flip(
+ struct dc *dc,
+ struct dc_state *state,
+ struct dc_stream_state *stream,
+ struct dc_surface_update *srf_updates,
+ int surface_count)
+{
+ int plane_index;
+ union dmub_rb_cmd cmds[MAX_PLANES];
+ struct dc_plane_address *address;
+ struct dc_plane_state *plane_state;
+ int num_cmds = 0;
+ struct dc_stream_status *stream_status = dc_stream_get_status(stream);
+
+ if (surface_count <= 0 || stream_status == NULL)
+ return;
+
+ memset(cmds, 0, sizeof(union dmub_rb_cmd) * MAX_PLANES);
+
+ /* build command for each surface update */
+ for (plane_index = 0; plane_index < surface_count; plane_index++) {
+ plane_state = srf_updates[plane_index].surface;
+ address = &plane_state->address;
+
+ /* skip if there is no address update for plane */
+ if (!srf_updates[plane_index].flip_addr)
+ continue;
+
+ /* build command header */
+ cmds[num_cmds].fams2_flip.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
+ cmds[num_cmds].fams2_flip.header.sub_type = DMUB_CMD__FAMS2_FLIP;
+ cmds[num_cmds].fams2_flip.header.payload_bytes = sizeof(struct dmub_rb_cmd_fams2_flip);
+
+ /* for chaining multiple commands, all but the last command should set this to 1 */
+ cmds[num_cmds].fams2_flip.header.multi_cmd_pending = 1;
+
+ /* set topology info */
+ cmds[num_cmds].fams2_flip.flip_info.pipe_mask = dc_plane_get_pipe_mask(state, plane_state);
+ if (stream_status)
+ cmds[num_cmds].fams2_flip.flip_info.otg_inst = stream_status->primary_otg_inst;
+
+ cmds[num_cmds].fams2_flip.flip_info.config.bits.is_immediate = plane_state->flip_immediate;
+
+ /* build address info for command */
+ switch (address->type) {
+ case PLN_ADDR_TYPE_GRAPHICS:
+ if (address->grph.addr.quad_part == 0) {
+ BREAK_TO_DEBUGGER();
+ break;
+ }
+
+ cmds[num_cmds].fams2_flip.flip_info.addr_info.meta_addr_lo =
+ address->grph.meta_addr.low_part;
+ cmds[num_cmds].fams2_flip.flip_info.addr_info.meta_addr_hi =
+ (uint16_t)address->grph.meta_addr.high_part;
+ cmds[num_cmds].fams2_flip.flip_info.addr_info.surf_addr_lo =
+ address->grph.addr.low_part;
+ cmds[num_cmds].fams2_flip.flip_info.addr_info.surf_addr_hi =
+ (uint16_t)address->grph.addr.high_part;
+ break;
+ case PLN_ADDR_TYPE_VIDEO_PROGRESSIVE:
+ if (address->video_progressive.luma_addr.quad_part == 0 ||
+ address->video_progressive.chroma_addr.quad_part == 0) {
+ BREAK_TO_DEBUGGER();
+ break;
+ }
+
+ cmds[num_cmds].fams2_flip.flip_info.addr_info.meta_addr_lo =
+ address->video_progressive.luma_meta_addr.low_part;
+ cmds[num_cmds].fams2_flip.flip_info.addr_info.meta_addr_hi =
+ (uint16_t)address->video_progressive.luma_meta_addr.high_part;
+ cmds[num_cmds].fams2_flip.flip_info.addr_info.meta_addr_c_lo =
+ address->video_progressive.chroma_meta_addr.low_part;
+ cmds[num_cmds].fams2_flip.flip_info.addr_info.meta_addr_c_hi =
+ (uint16_t)address->video_progressive.chroma_meta_addr.high_part;
+ cmds[num_cmds].fams2_flip.flip_info.addr_info.surf_addr_lo =
+ address->video_progressive.luma_addr.low_part;
+ cmds[num_cmds].fams2_flip.flip_info.addr_info.surf_addr_hi =
+ (uint16_t)address->video_progressive.luma_addr.high_part;
+ cmds[num_cmds].fams2_flip.flip_info.addr_info.surf_addr_c_lo =
+ address->video_progressive.chroma_addr.low_part;
+ cmds[num_cmds].fams2_flip.flip_info.addr_info.surf_addr_c_hi =
+ (uint16_t)address->video_progressive.chroma_addr.high_part;
+ break;
+ default:
+ // Should never be hit
+ BREAK_TO_DEBUGGER();
+ break;
+ }
+
+ num_cmds++;
+ }
+
+ if (num_cmds > 0) {
+ cmds[num_cmds - 1].fams2_flip.header.multi_cmd_pending = 0;
+ dm_execute_dmub_cmd_list(dc->ctx, num_cmds, cmds, DM_DMUB_WAIT_TYPE_WAIT);
+ }
+}
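One detail worth calling out in the flip path: 64-bit surface and meta addresses are packed into the fixed-width payload as a 32-bit low part plus a 16-bit truncated high part, which assumes GPU virtual addresses fit in 48 bits. The packing, isolated from the GRPH case above:

    /* 48-bit VA -> {hi:16, lo:32}; quad_part holds the full 64-bit address */
    flip_info->addr_info.surf_addr_lo = address->grph.addr.low_part;
    flip_info->addr_info.surf_addr_hi = (uint16_t)address->grph.addr.high_part;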