Diffstat (limited to 'drivers/gpu/drm/msm/disp/dpu1')
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_2_sdm660.h    | 291
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_3_sdm630.h    | 225
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h  | 449
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c               | 105
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h               |   7
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h          |  15
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c      |  95
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c      |  60
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c       |  88
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c            |   4
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h            |   3
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c               |  15
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h               |   1
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c                   | 127
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h                   |   1
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c                    | 154
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h                 |  74
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c             |  61
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.h             |   3
19 files changed, 1503 insertions(+), 275 deletions(-)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_2_sdm660.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_2_sdm660.h
new file mode 100644
index 000000000000..424815e7fb7d
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_2_sdm660.h
@@ -0,0 +1,291 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2023. Linaro Inc. All rights reserved.
+ */
+
+#ifndef _DPU_3_2_SDM660_H
+#define _DPU_3_2_SDM660_H
+
+static const struct dpu_caps sdm660_dpu_caps = {
+ .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .max_mixer_blendstages = 0x7,
+ .has_src_split = true,
+ .has_dim_layer = true,
+ .has_idle_pc = true,
+ .has_3d_merge = true,
+ .max_linewidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
+ .max_hdeci_exp = MAX_HORZ_DECIMATION,
+ .max_vdeci_exp = MAX_VERT_DECIMATION,
+};
+
+static const struct dpu_mdp_cfg sdm660_mdp = {
+ .name = "top_0",
+ .base = 0x0, .len = 0x458,
+ .features = BIT(DPU_MDP_VSYNC_SEL),
+ .clk_ctrls = {
+ [DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
+ [DPU_CLK_CTRL_VIG1] = { .reg_off = 0x2b4, .bit_off = 0 },
+ [DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2c4, .bit_off = 8 },
+ [DPU_CLK_CTRL_CURSOR0] = { .reg_off = 0x3a8, .bit_off = 16 },
+ },
+};
+
+static const struct dpu_ctl_cfg sdm660_ctl[] = {
+ {
+ .name = "ctl_0", .id = CTL_0,
+ .base = 0x1000, .len = 0x94,
+ .features = BIT(DPU_CTL_SPLIT_DISPLAY),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
+ }, {
+ .name = "ctl_1", .id = CTL_1,
+ .base = 0x1200, .len = 0x94,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
+ }, {
+ .name = "ctl_2", .id = CTL_2,
+ .base = 0x1400, .len = 0x94,
+ .features = BIT(DPU_CTL_SPLIT_DISPLAY),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
+ }, {
+ .name = "ctl_3", .id = CTL_3,
+ .base = 0x1600, .len = 0x94,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
+ }, {
+ .name = "ctl_4", .id = CTL_4,
+ .base = 0x1800, .len = 0x94,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
+ },
+};
+
+static const struct dpu_sspp_cfg sdm660_sspp[] = {
+ {
+ .name = "sspp_0", .id = SSPP_VIG0,
+ .base = 0x4000, .len = 0x1ac,
+ .features = VIG_MSM8998_MASK,
+ .sblk = &dpu_vig_sblk_qseed3_1_2,
+ .xin_id = 0,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG0,
+ }, {
+ .name = "sspp_1", .id = SSPP_VIG1,
+ .base = 0x6000, .len = 0x1ac,
+ .features = VIG_MSM8998_MASK,
+ .sblk = &dpu_vig_sblk_qseed3_1_2,
+ .xin_id = 4,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG1,
+ }, {
+ .name = "sspp_8", .id = SSPP_DMA0,
+ .base = 0x24000, .len = 0x1ac,
+ .features = DMA_MSM8998_MASK,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 1,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA0,
+ }, {
+ .name = "sspp_9", .id = SSPP_DMA1,
+ .base = 0x26000, .len = 0x1ac,
+ .features = DMA_MSM8998_MASK,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 5,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA1,
+ }, {
+ .name = "sspp_10", .id = SSPP_DMA2,
+ .base = 0x28000, .len = 0x1ac,
+ .features = DMA_CURSOR_MSM8998_MASK,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 9,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA2,
+ },
+};
+
+static const struct dpu_lm_cfg sdm660_lm[] = {
+ {
+ .name = "lm_0", .id = LM_0,
+ .base = 0x44000, .len = 0x320,
+ .features = MIXER_MSM8998_MASK,
+ .sblk = &msm8998_lm_sblk,
+ .lm_pair = LM_1,
+ .pingpong = PINGPONG_0,
+ .dspp = DSPP_0,
+ }, {
+ .name = "lm_1", .id = LM_1,
+ .base = 0x45000, .len = 0x320,
+ .features = MIXER_MSM8998_MASK,
+ .sblk = &msm8998_lm_sblk,
+ .lm_pair = LM_0,
+ .pingpong = PINGPONG_1,
+ .dspp = DSPP_1,
+ }, {
+ .name = "lm_2", .id = LM_2,
+ .base = 0x46000, .len = 0x320,
+ .features = MIXER_MSM8998_MASK,
+ .sblk = &msm8998_lm_sblk,
+ .lm_pair = LM_5,
+ .pingpong = PINGPONG_2,
+ }, {
+ .name = "lm_5", .id = LM_5,
+ .base = 0x49000, .len = 0x320,
+ .features = MIXER_MSM8998_MASK,
+ .sblk = &msm8998_lm_sblk,
+ .lm_pair = LM_2,
+ .pingpong = PINGPONG_3,
+ },
+};
+
+static const struct dpu_pingpong_cfg sdm660_pp[] = {
+ {
+ .name = "pingpong_0", .id = PINGPONG_0,
+ .base = 0x70000, .len = 0xd4,
+ .features = PINGPONG_SDM845_TE2_MASK,
+ .sblk = &sdm845_pp_sblk_te,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
+ .intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12),
+ }, {
+ .name = "pingpong_1", .id = PINGPONG_1,
+ .base = 0x70800, .len = 0xd4,
+ .features = PINGPONG_SDM845_TE2_MASK,
+ .sblk = &sdm845_pp_sblk_te,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
+ .intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13),
+ }, {
+ .name = "pingpong_2", .id = PINGPONG_2,
+ .base = 0x71000, .len = 0xd4,
+ .features = PINGPONG_SDM845_MASK,
+ .sblk = &sdm845_pp_sblk,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
+ .intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 14),
+ }, {
+ .name = "pingpong_3", .id = PINGPONG_3,
+ .base = 0x71800, .len = 0xd4,
+ .features = PINGPONG_SDM845_MASK,
+ .sblk = &sdm845_pp_sblk,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
+ .intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 15),
+ },
+};
+
+static const struct dpu_dsc_cfg sdm660_dsc[] = {
+ {
+ .name = "dsc_0", .id = DSC_0,
+ .base = 0x80000, .len = 0x140,
+ }, {
+ .name = "dsc_1", .id = DSC_1,
+ .base = 0x80400, .len = 0x140,
+ },
+};
+
+static const struct dpu_dspp_cfg sdm660_dspp[] = {
+ {
+ .name = "dspp_0", .id = DSPP_0,
+ .base = 0x54000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &msm8998_dspp_sblk,
+ }, {
+ .name = "dspp_1", .id = DSPP_1,
+ .base = 0x56000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &msm8998_dspp_sblk,
+ },
+};
+
+static const struct dpu_intf_cfg sdm660_intf[] = {
+ {
+ .name = "intf_0", .id = INTF_0,
+ .base = 0x6a000, .len = 0x280,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 21,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25),
+ .intr_tear_rd_ptr = -1,
+ }, {
+ .name = "intf_1", .id = INTF_1,
+ .base = 0x6a800, .len = 0x280,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 21,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
+ .intr_tear_rd_ptr = -1,
+ }, {
+ .name = "intf_2", .id = INTF_2,
+ .base = 0x6b000, .len = 0x280,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_1,
+ .prog_fetch_lines_worst_case = 21,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29),
+ .intr_tear_rd_ptr = -1,
+ },
+};
+
+static const struct dpu_perf_cfg sdm660_perf_data = {
+ .max_bw_low = 6600000,
+ .max_bw_high = 6600000,
+ .min_core_ib = 3100000,
+ .min_llcc_ib = 800000,
+ .min_dram_ib = 800000,
+ .undersized_prefill_lines = 2,
+ .xtra_prefill_lines = 2,
+ .dest_scale_prefill_lines = 3,
+ .macrotile_prefill_lines = 4,
+ .yuv_nv12_prefill_lines = 8,
+ .linear_prefill_lines = 1,
+ .downscaling_prefill_lines = 1,
+ .amortizable_threshold = 25,
+ .min_prefill_lines = 25,
+ .danger_lut_tbl = {0xf, 0xffff, 0x0},
+ .safe_lut_tbl = {0xfffc, 0xff00, 0xffff},
+ .qos_lut_tbl = {
+ {.nentry = ARRAY_SIZE(msm8998_qos_linear),
+ .entries = msm8998_qos_linear
+ },
+ {.nentry = ARRAY_SIZE(msm8998_qos_macrotile),
+ .entries = msm8998_qos_macrotile
+ },
+ {.nentry = ARRAY_SIZE(msm8998_qos_nrt),
+ .entries = msm8998_qos_nrt
+ },
+ },
+ .cdp_cfg = {
+ {.rd_enable = 1, .wr_enable = 1},
+ {.rd_enable = 1, .wr_enable = 0}
+ },
+ .clk_inefficiency_factor = 200,
+ .bw_inefficiency_factor = 120,
+};
+
+static const struct dpu_mdss_version sdm660_mdss_ver = {
+ .core_major_ver = 3,
+ .core_minor_ver = 2,
+};
+
+const struct dpu_mdss_cfg dpu_sdm660_cfg = {
+ .mdss_ver = &sdm660_mdss_ver,
+ .caps = &sdm660_dpu_caps,
+ .mdp = &sdm660_mdp,
+ .ctl_count = ARRAY_SIZE(sdm660_ctl),
+ .ctl = sdm660_ctl,
+ .sspp_count = ARRAY_SIZE(sdm660_sspp),
+ .sspp = sdm660_sspp,
+ .mixer_count = ARRAY_SIZE(sdm660_lm),
+ .mixer = sdm660_lm,
+ .dspp_count = ARRAY_SIZE(sdm660_dspp),
+ .dspp = sdm660_dspp,
+ .pingpong_count = ARRAY_SIZE(sdm660_pp),
+ .pingpong = sdm660_pp,
+ .dsc_count = ARRAY_SIZE(sdm660_dsc),
+ .dsc = sdm660_dsc,
+ .intf_count = ARRAY_SIZE(sdm660_intf),
+ .intf = sdm660_intf,
+ .vbif_count = ARRAY_SIZE(msm8998_vbif),
+ .vbif = msm8998_vbif,
+ .perf = &sdm660_perf_data,
+};
+
+#endif
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_3_sdm630.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_3_sdm630.h
new file mode 100644
index 000000000000..df01227fc364
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_3_sdm630.h
@@ -0,0 +1,225 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2023. Linaro Inc. All rights reserved.
+ */
+
+#ifndef _DPU_3_3_SDM630_H
+#define _DPU_3_3_SDM630_H
+
+static const struct dpu_caps sdm630_dpu_caps = {
+ .max_mixer_width = DEFAULT_DPU_LINE_WIDTH,
+ .max_mixer_blendstages = 0x7,
+ .has_src_split = true,
+ .has_dim_layer = true,
+ .has_idle_pc = true,
+ .has_3d_merge = true,
+ .max_linewidth = DEFAULT_DPU_LINE_WIDTH,
+ .pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
+ .max_hdeci_exp = MAX_HORZ_DECIMATION,
+ .max_vdeci_exp = MAX_VERT_DECIMATION,
+};
+
+static const struct dpu_mdp_cfg sdm630_mdp = {
+ .name = "top_0",
+ .base = 0x0, .len = 0x458,
+ .features = BIT(DPU_MDP_VSYNC_SEL),
+ .clk_ctrls = {
+ [DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
+ [DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2c4, .bit_off = 8 },
+ [DPU_CLK_CTRL_CURSOR0] = { .reg_off = 0x3a8, .bit_off = 16 },
+ },
+};
+
+static const struct dpu_ctl_cfg sdm630_ctl[] = {
+ {
+ .name = "ctl_0", .id = CTL_0,
+ .base = 0x1000, .len = 0x94,
+ .features = BIT(DPU_CTL_SPLIT_DISPLAY),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
+ }, {
+ .name = "ctl_1", .id = CTL_1,
+ .base = 0x1200, .len = 0x94,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
+ }, {
+ .name = "ctl_2", .id = CTL_2,
+ .base = 0x1400, .len = 0x94,
+ .features = BIT(DPU_CTL_SPLIT_DISPLAY),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
+ }, {
+ .name = "ctl_3", .id = CTL_3,
+ .base = 0x1600, .len = 0x94,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
+ }, {
+ .name = "ctl_4", .id = CTL_4,
+ .base = 0x1800, .len = 0x94,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
+ },
+};
+
+static const struct dpu_sspp_cfg sdm630_sspp[] = {
+ {
+ .name = "sspp_0", .id = SSPP_VIG0,
+ .base = 0x4000, .len = 0x1ac,
+ .features = VIG_MSM8998_MASK,
+ .sblk = &dpu_vig_sblk_qseed3_1_2,
+ .xin_id = 0,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG0,
+ }, {
+ .name = "sspp_8", .id = SSPP_DMA0,
+ .base = 0x24000, .len = 0x1ac,
+ .features = DMA_MSM8998_MASK,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 1,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA0,
+ }, {
+ .name = "sspp_9", .id = SSPP_DMA1,
+ .base = 0x26000, .len = 0x1ac,
+ .features = DMA_MSM8998_MASK,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 5,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA1,
+ }, {
+ .name = "sspp_10", .id = SSPP_DMA2,
+ .base = 0x28000, .len = 0x1ac,
+ .features = DMA_CURSOR_MSM8998_MASK,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 9,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA2,
+ },
+};
+
+static const struct dpu_lm_cfg sdm630_lm[] = {
+ {
+ .name = "lm_0", .id = LM_0,
+ .base = 0x44000, .len = 0x320,
+ .features = MIXER_MSM8998_MASK,
+ .sblk = &msm8998_lm_sblk,
+ .pingpong = PINGPONG_0,
+ .dspp = DSPP_0,
+ }, {
+ .name = "lm_2", .id = LM_2,
+ .base = 0x46000, .len = 0x320,
+ .features = MIXER_MSM8998_MASK,
+ .sblk = &msm8998_lm_sblk,
+ .pingpong = PINGPONG_2,
+ },
+};
+
+static const struct dpu_pingpong_cfg sdm630_pp[] = {
+ {
+ .name = "pingpong_0", .id = PINGPONG_0,
+ .base = 0x70000, .len = 0xd4,
+ .features = PINGPONG_SDM845_TE2_MASK,
+ .sblk = &sdm845_pp_sblk_te,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
+ .intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12),
+ }, {
+ .name = "pingpong_2", .id = PINGPONG_2,
+ .base = 0x71000, .len = 0xd4,
+ .features = PINGPONG_SDM845_MASK,
+ .sblk = &sdm845_pp_sblk,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
+ .intr_rdptr = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 14),
+ },
+};
+
+static const struct dpu_dspp_cfg sdm630_dspp[] = {
+ {
+ .name = "dspp_0", .id = DSPP_0,
+ .base = 0x54000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &msm8998_dspp_sblk,
+ },
+};
+
+static const struct dpu_intf_cfg sdm630_intf[] = {
+ {
+ .name = "intf_0", .id = INTF_0,
+ .base = 0x6a000, .len = 0x280,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 21,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25),
+ .intr_tear_rd_ptr = -1,
+ }, {
+ .name = "intf_1", .id = INTF_1,
+ .base = 0x6a800, .len = 0x280,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 21,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
+ .intr_tear_rd_ptr = -1,
+ },
+};
+
+static const struct dpu_perf_cfg sdm630_perf_data = {
+ .max_bw_low = 4100000,
+ .max_bw_high = 4100000,
+ .min_core_ib = 3200000,
+ .min_llcc_ib = 800000,
+ .min_dram_ib = 800000,
+ .undersized_prefill_lines = 2,
+ .xtra_prefill_lines = 2,
+ .dest_scale_prefill_lines = 3,
+ .macrotile_prefill_lines = 4,
+ .yuv_nv12_prefill_lines = 8,
+ .linear_prefill_lines = 1,
+ .downscaling_prefill_lines = 1,
+ .amortizable_threshold = 25,
+ .min_prefill_lines = 25,
+ .danger_lut_tbl = {0xf, 0xffff, 0x0},
+ .safe_lut_tbl = {0xfffc, 0xff00, 0xffff},
+ .qos_lut_tbl = {
+ {.nentry = ARRAY_SIZE(msm8998_qos_linear),
+ .entries = msm8998_qos_linear
+ },
+ {.nentry = ARRAY_SIZE(msm8998_qos_macrotile),
+ .entries = msm8998_qos_macrotile
+ },
+ {.nentry = ARRAY_SIZE(msm8998_qos_nrt),
+ .entries = msm8998_qos_nrt
+ },
+ },
+ .cdp_cfg = {
+ {.rd_enable = 1, .wr_enable = 1},
+ {.rd_enable = 1, .wr_enable = 0}
+ },
+ .clk_inefficiency_factor = 200,
+ .bw_inefficiency_factor = 120,
+};
+
+static const struct dpu_mdss_version sdm630_mdss_ver = {
+ .core_major_ver = 3,
+ .core_minor_ver = 3,
+};
+
+const struct dpu_mdss_cfg dpu_sdm630_cfg = {
+ .mdss_ver = &sdm630_mdss_ver,
+ .caps = &sdm630_dpu_caps,
+ .mdp = &sdm630_mdp,
+ .ctl_count = ARRAY_SIZE(sdm630_ctl),
+ .ctl = sdm630_ctl,
+ .sspp_count = ARRAY_SIZE(sdm630_sspp),
+ .sspp = sdm630_sspp,
+ .mixer_count = ARRAY_SIZE(sdm630_lm),
+ .mixer = sdm630_lm,
+ .dspp_count = ARRAY_SIZE(sdm630_dspp),
+ .dspp = sdm630_dspp,
+ .pingpong_count = ARRAY_SIZE(sdm630_pp),
+ .pingpong = sdm630_pp,
+ .intf_count = ARRAY_SIZE(sdm630_intf),
+ .intf = sdm630_intf,
+ .vbif_count = ARRAY_SIZE(msm8998_vbif),
+ .vbif = msm8998_vbif,
+ .perf = &sdm630_perf_data,
+};
+
+#endif
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h
new file mode 100644
index 000000000000..9a9f7092c526
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h
@@ -0,0 +1,449 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef _DPU_9_2_X1E80100_H
+#define _DPU_9_2_X1E80100_H
+
+static const struct dpu_caps x1e80100_dpu_caps = {
+ .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .max_mixer_blendstages = 0xb,
+ .has_src_split = true,
+ .has_dim_layer = true,
+ .has_idle_pc = true,
+ .has_3d_merge = true,
+ .max_linewidth = 5120,
+ .pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
+};
+
+static const struct dpu_mdp_cfg x1e80100_mdp = {
+ .name = "top_0",
+ .base = 0, .len = 0x494,
+ .features = BIT(DPU_MDP_PERIPH_0_REMOVED),
+ .clk_ctrls = {
+ [DPU_CLK_CTRL_REG_DMA] = { .reg_off = 0x2bc, .bit_off = 20 },
+ },
+};
+
+/* FIXME: get rid of DPU_CTL_SPLIT_DISPLAY in favour of proper ACTIVE_CTL support */
+static const struct dpu_ctl_cfg x1e80100_ctl[] = {
+ {
+ .name = "ctl_0", .id = CTL_0,
+ .base = 0x15000, .len = 0x290,
+ .features = CTL_SM8550_MASK | BIT(DPU_CTL_SPLIT_DISPLAY),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
+ }, {
+ .name = "ctl_1", .id = CTL_1,
+ .base = 0x16000, .len = 0x290,
+ .features = CTL_SM8550_MASK | BIT(DPU_CTL_SPLIT_DISPLAY),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
+ }, {
+ .name = "ctl_2", .id = CTL_2,
+ .base = 0x17000, .len = 0x290,
+ .features = CTL_SM8550_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
+ }, {
+ .name = "ctl_3", .id = CTL_3,
+ .base = 0x18000, .len = 0x290,
+ .features = CTL_SM8550_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
+ }, {
+ .name = "ctl_4", .id = CTL_4,
+ .base = 0x19000, .len = 0x290,
+ .features = CTL_SM8550_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
+ }, {
+ .name = "ctl_5", .id = CTL_5,
+ .base = 0x1a000, .len = 0x290,
+ .features = CTL_SM8550_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23),
+ },
+};
+
+static const struct dpu_sspp_cfg x1e80100_sspp[] = {
+ {
+ .name = "sspp_0", .id = SSPP_VIG0,
+ .base = 0x4000, .len = 0x344,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_3,
+ .xin_id = 0,
+ .type = SSPP_TYPE_VIG,
+ }, {
+ .name = "sspp_1", .id = SSPP_VIG1,
+ .base = 0x6000, .len = 0x344,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_3,
+ .xin_id = 4,
+ .type = SSPP_TYPE_VIG,
+ }, {
+ .name = "sspp_2", .id = SSPP_VIG2,
+ .base = 0x8000, .len = 0x344,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_3,
+ .xin_id = 8,
+ .type = SSPP_TYPE_VIG,
+ }, {
+ .name = "sspp_3", .id = SSPP_VIG3,
+ .base = 0xa000, .len = 0x344,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_3,
+ .xin_id = 12,
+ .type = SSPP_TYPE_VIG,
+ }, {
+ .name = "sspp_8", .id = SSPP_DMA0,
+ .base = 0x24000, .len = 0x344,
+ .features = DMA_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 1,
+ .type = SSPP_TYPE_DMA,
+ }, {
+ .name = "sspp_9", .id = SSPP_DMA1,
+ .base = 0x26000, .len = 0x344,
+ .features = DMA_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 5,
+ .type = SSPP_TYPE_DMA,
+ }, {
+ .name = "sspp_10", .id = SSPP_DMA2,
+ .base = 0x28000, .len = 0x344,
+ .features = DMA_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 9,
+ .type = SSPP_TYPE_DMA,
+ }, {
+ .name = "sspp_11", .id = SSPP_DMA3,
+ .base = 0x2a000, .len = 0x344,
+ .features = DMA_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 13,
+ .type = SSPP_TYPE_DMA,
+ }, {
+ .name = "sspp_12", .id = SSPP_DMA4,
+ .base = 0x2c000, .len = 0x344,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 14,
+ .type = SSPP_TYPE_DMA,
+ }, {
+ .name = "sspp_13", .id = SSPP_DMA5,
+ .base = 0x2e000, .len = 0x344,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 15,
+ .type = SSPP_TYPE_DMA,
+ },
+};
+
+static const struct dpu_lm_cfg x1e80100_lm[] = {
+ {
+ .name = "lm_0", .id = LM_0,
+ .base = 0x44000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_1,
+ .pingpong = PINGPONG_0,
+ .dspp = DSPP_0,
+ }, {
+ .name = "lm_1", .id = LM_1,
+ .base = 0x45000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_0,
+ .pingpong = PINGPONG_1,
+ .dspp = DSPP_1,
+ }, {
+ .name = "lm_2", .id = LM_2,
+ .base = 0x46000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_3,
+ .pingpong = PINGPONG_2,
+ }, {
+ .name = "lm_3", .id = LM_3,
+ .base = 0x47000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_2,
+ .pingpong = PINGPONG_3,
+ }, {
+ .name = "lm_4", .id = LM_4,
+ .base = 0x48000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_5,
+ .pingpong = PINGPONG_4,
+ }, {
+ .name = "lm_5", .id = LM_5,
+ .base = 0x49000, .len = 0x320,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_4,
+ .pingpong = PINGPONG_5,
+ },
+};
+
+static const struct dpu_dspp_cfg x1e80100_dspp[] = {
+ {
+ .name = "dspp_0", .id = DSPP_0,
+ .base = 0x54000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ }, {
+ .name = "dspp_1", .id = DSPP_1,
+ .base = 0x56000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ }, {
+ .name = "dspp_2", .id = DSPP_2,
+ .base = 0x58000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ }, {
+ .name = "dspp_3", .id = DSPP_3,
+ .base = 0x5a000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ },
+};
+
+static const struct dpu_pingpong_cfg x1e80100_pp[] = {
+ {
+ .name = "pingpong_0", .id = PINGPONG_0,
+ .base = 0x69000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_0,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
+ }, {
+ .name = "pingpong_1", .id = PINGPONG_1,
+ .base = 0x6a000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_0,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
+ }, {
+ .name = "pingpong_2", .id = PINGPONG_2,
+ .base = 0x6b000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_1,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
+ }, {
+ .name = "pingpong_3", .id = PINGPONG_3,
+ .base = 0x6c000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_1,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
+ }, {
+ .name = "pingpong_4", .id = PINGPONG_4,
+ .base = 0x6d000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_2,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
+ }, {
+ .name = "pingpong_5", .id = PINGPONG_5,
+ .base = 0x6e000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_2,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
+ }, {
+ .name = "pingpong_6", .id = PINGPONG_6,
+ .base = 0x66000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_3,
+ }, {
+ .name = "pingpong_7", .id = PINGPONG_7,
+ .base = 0x66400, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_3,
+ },
+};
+
+static const struct dpu_merge_3d_cfg x1e80100_merge_3d[] = {
+ {
+ .name = "merge_3d_0", .id = MERGE_3D_0,
+ .base = 0x4e000, .len = 0x8,
+ }, {
+ .name = "merge_3d_1", .id = MERGE_3D_1,
+ .base = 0x4f000, .len = 0x8,
+ }, {
+ .name = "merge_3d_2", .id = MERGE_3D_2,
+ .base = 0x50000, .len = 0x8,
+ }, {
+ .name = "merge_3d_3", .id = MERGE_3D_3,
+ .base = 0x66700, .len = 0x8,
+ },
+};
+
+/*
+ * NOTE: Each display compression engine (DCE) contains dual hard
+ * slice DSC encoders, so both share the same base address but each
+ * has its own sub-block address.
+ */
+static const struct dpu_dsc_cfg x1e80100_dsc[] = {
+ {
+ .name = "dce_0_0", .id = DSC_0,
+ .base = 0x80000, .len = 0x4,
+ .features = BIT(DPU_DSC_HW_REV_1_2),
+ .sblk = &dsc_sblk_0,
+ }, {
+ .name = "dce_0_1", .id = DSC_1,
+ .base = 0x80000, .len = 0x4,
+ .features = BIT(DPU_DSC_HW_REV_1_2),
+ .sblk = &dsc_sblk_1,
+ }, {
+ .name = "dce_1_0", .id = DSC_2,
+ .base = 0x81000, .len = 0x4,
+ .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .sblk = &dsc_sblk_0,
+ }, {
+ .name = "dce_1_1", .id = DSC_3,
+ .base = 0x81000, .len = 0x4,
+ .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .sblk = &dsc_sblk_1,
+ },
+};
+
+static const struct dpu_wb_cfg x1e80100_wb[] = {
+ {
+ .name = "wb_2", .id = WB_2,
+ .base = 0x65000, .len = 0x2c8,
+ .features = WB_SM8250_MASK,
+ .format_list = wb2_formats_rgb,
+ .num_formats = ARRAY_SIZE(wb2_formats_rgb),
+ .xin_id = 6,
+ .vbif_idx = VBIF_RT,
+ .maxlinewidth = 4096,
+ .intr_wb_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 4),
+ },
+};
+
+static const struct dpu_intf_cfg x1e80100_intf[] = {
+ {
+ .name = "intf_0", .id = INTF_0,
+ .base = 0x34000, .len = 0x280,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25),
+ }, {
+ .name = "intf_1", .id = INTF_1,
+ .base = 0x35000, .len = 0x300,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
+ .intr_tear_rd_ptr = DPU_IRQ_IDX(MDP_INTF1_TEAR_INTR, 2),
+ }, {
+ .name = "intf_2", .id = INTF_2,
+ .base = 0x36000, .len = 0x300,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_1,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29),
+ .intr_tear_rd_ptr = DPU_IRQ_IDX(MDP_INTF2_TEAR_INTR, 2),
+ }, {
+ .name = "intf_3", .id = INTF_3,
+ .base = 0x37000, .len = 0x280,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_1,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31),
+ }, {
+ .name = "intf_4", .id = INTF_4,
+ .base = 0x38000, .len = 0x280,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_2,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 20),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 21),
+ }, {
+ .name = "intf_5", .id = INTF_5,
+ .base = 0x39000, .len = 0x280,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_3,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 22),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 23),
+ },
+};
+
+static const struct dpu_perf_cfg x1e80100_perf_data = {
+ .max_bw_low = 13600000,
+ .max_bw_high = 18200000,
+ .min_core_ib = 2500000,
+ .min_llcc_ib = 0,
+ .min_dram_ib = 800000,
+ .min_prefill_lines = 35,
+ /* FIXME: lut tables */
+ .danger_lut_tbl = {0x3ffff, 0x3ffff, 0x0},
+ .safe_lut_tbl = {0xfe00, 0xfe00, 0xffff},
+ .qos_lut_tbl = {
+ {.nentry = ARRAY_SIZE(sc7180_qos_linear),
+ .entries = sc7180_qos_linear
+ },
+ {.nentry = ARRAY_SIZE(sc7180_qos_macrotile),
+ .entries = sc7180_qos_macrotile
+ },
+ {.nentry = ARRAY_SIZE(sc7180_qos_nrt),
+ .entries = sc7180_qos_nrt
+ },
+ /* TODO: macrotile-qseed is different from macrotile */
+ },
+ .cdp_cfg = {
+ {.rd_enable = 1, .wr_enable = 1},
+ {.rd_enable = 1, .wr_enable = 0}
+ },
+ .clk_inefficiency_factor = 105,
+ .bw_inefficiency_factor = 120,
+};
+
+static const struct dpu_mdss_version x1e80100_mdss_ver = {
+ .core_major_ver = 9,
+ .core_minor_ver = 2,
+};
+
+const struct dpu_mdss_cfg dpu_x1e80100_cfg = {
+ .mdss_ver = &x1e80100_mdss_ver,
+ .caps = &x1e80100_dpu_caps,
+ .mdp = &x1e80100_mdp,
+ .ctl_count = ARRAY_SIZE(x1e80100_ctl),
+ .ctl = x1e80100_ctl,
+ .sspp_count = ARRAY_SIZE(x1e80100_sspp),
+ .sspp = x1e80100_sspp,
+ .mixer_count = ARRAY_SIZE(x1e80100_lm),
+ .mixer = x1e80100_lm,
+ .dspp_count = ARRAY_SIZE(x1e80100_dspp),
+ .dspp = x1e80100_dspp,
+ .pingpong_count = ARRAY_SIZE(x1e80100_pp),
+ .pingpong = x1e80100_pp,
+ .dsc_count = ARRAY_SIZE(x1e80100_dsc),
+ .dsc = x1e80100_dsc,
+ .merge_3d_count = ARRAY_SIZE(x1e80100_merge_3d),
+ .merge_3d = x1e80100_merge_3d,
+ .wb_count = ARRAY_SIZE(x1e80100_wb),
+ .wb = x1e80100_wb,
+ .intf_count = ARRAY_SIZE(x1e80100_intf),
+ .intf = x1e80100_intf,
+ .vbif_count = ARRAY_SIZE(sm8550_vbif),
+ .vbif = sm8550_vbif,
+ .perf = &x1e80100_perf_data,
+};
+
+#endif
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index 6a4b489d44e5..194dbb08331d 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -226,6 +226,13 @@ bool dpu_encoder_is_widebus_enabled(const struct drm_encoder *drm_enc)
return dpu_enc->wide_bus_en;
}
+bool dpu_encoder_is_dsc_enabled(const struct drm_encoder *drm_enc)
+{
+ const struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+ return dpu_enc->dsc ? true : false;
+}
+
int dpu_encoder_get_crc_values_cnt(const struct drm_encoder *drm_enc)
{
struct dpu_encoder_virt *dpu_enc;
@@ -584,7 +591,6 @@ static int dpu_encoder_virt_atomic_check(
struct dpu_global_state *global_state;
struct drm_framebuffer *fb;
struct drm_dsc_config *dsc;
- int i = 0;
int ret = 0;
if (!drm_enc || !crtc_state || !conn_state) {
@@ -605,20 +611,6 @@ static int dpu_encoder_virt_atomic_check(
trace_dpu_enc_atomic_check(DRMID(drm_enc));
- /* perform atomic check on the first physical encoder (master) */
- for (i = 0; i < dpu_enc->num_phys_encs; i++) {
- struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
-
- if (phys->ops.atomic_check)
- ret = phys->ops.atomic_check(phys, crtc_state,
- conn_state);
- if (ret) {
- DPU_ERROR_ENC(dpu_enc,
- "mode unsupported, phys idx %d\n", i);
- return ret;
- }
- }
-
dsc = dpu_encoder_get_dsc_config(drm_enc);
topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode, crtc_state, dsc);
@@ -714,7 +706,7 @@ static void _dpu_encoder_update_vsync_source(struct dpu_encoder_virt *dpu_enc,
}
}
-static void _dpu_encoder_irq_control(struct drm_encoder *drm_enc, bool enable)
+static void _dpu_encoder_irq_enable(struct drm_encoder *drm_enc)
{
struct dpu_encoder_virt *dpu_enc;
int i;
@@ -726,18 +718,35 @@ static void _dpu_encoder_irq_control(struct drm_encoder *drm_enc, bool enable)
dpu_enc = to_dpu_encoder_virt(drm_enc);
- DPU_DEBUG_ENC(dpu_enc, "enable:%d\n", enable);
+ DPU_DEBUG_ENC(dpu_enc, "\n");
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
- if (phys->ops.irq_control)
- phys->ops.irq_control(phys, enable);
+ phys->ops.irq_enable(phys);
}
+}
+
+static void _dpu_encoder_irq_disable(struct drm_encoder *drm_enc)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ int i;
+ if (!drm_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+ DPU_DEBUG_ENC(dpu_enc, "\n");
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ phys->ops.irq_disable(phys);
+ }
}
-static void _dpu_encoder_resource_control_helper(struct drm_encoder *drm_enc,
- bool enable)
+static void _dpu_encoder_resource_enable(struct drm_encoder *drm_enc)
{
struct msm_drm_private *priv;
struct dpu_kms *dpu_kms;
@@ -747,28 +756,42 @@ static void _dpu_encoder_resource_control_helper(struct drm_encoder *drm_enc,
priv = drm_enc->dev->dev_private;
dpu_kms = to_dpu_kms(priv->kms);
- trace_dpu_enc_rc_helper(DRMID(drm_enc), enable);
+ trace_dpu_enc_rc_enable(DRMID(drm_enc));
if (!dpu_enc->cur_master) {
DPU_ERROR("encoder master not set\n");
return;
}
- if (enable) {
- /* enable DPU core clks */
- pm_runtime_get_sync(&dpu_kms->pdev->dev);
+ /* enable DPU core clks */
+ pm_runtime_get_sync(&dpu_kms->pdev->dev);
- /* enable all the irq */
- _dpu_encoder_irq_control(drm_enc, true);
+ /* enable all the irq */
+ _dpu_encoder_irq_enable(drm_enc);
+}
- } else {
- /* disable all the irq */
- _dpu_encoder_irq_control(drm_enc, false);
+static void _dpu_encoder_resource_disable(struct drm_encoder *drm_enc)
+{
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+ struct dpu_encoder_virt *dpu_enc;
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ priv = drm_enc->dev->dev_private;
+ dpu_kms = to_dpu_kms(priv->kms);
- /* disable DPU core clks */
- pm_runtime_put_sync(&dpu_kms->pdev->dev);
+ trace_dpu_enc_rc_disable(DRMID(drm_enc));
+
+ if (!dpu_enc->cur_master) {
+ DPU_ERROR("encoder master not set\n");
+ return;
}
+ /* disable all the irq */
+ _dpu_encoder_irq_disable(drm_enc);
+
+ /* disable DPU core clks */
+ pm_runtime_put_sync(&dpu_kms->pdev->dev);
}
static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
@@ -824,9 +847,9 @@ static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
}
if (is_vid_mode && dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE)
- _dpu_encoder_irq_control(drm_enc, true);
+ _dpu_encoder_irq_enable(drm_enc);
else
- _dpu_encoder_resource_control_helper(drm_enc, true);
+ _dpu_encoder_resource_enable(drm_enc);
dpu_enc->rc_state = DPU_ENC_RC_STATE_ON;
@@ -879,7 +902,7 @@ static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
if (is_vid_mode &&
dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE) {
- _dpu_encoder_irq_control(drm_enc, true);
+ _dpu_encoder_irq_enable(drm_enc);
}
/* skip if is already OFF or IDLE, resources are off already */
else if (dpu_enc->rc_state == DPU_ENC_RC_STATE_OFF ||
@@ -921,7 +944,7 @@ static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
* and in IDLE state the resources are already disabled
*/
if (dpu_enc->rc_state == DPU_ENC_RC_STATE_PRE_OFF)
- _dpu_encoder_resource_control_helper(drm_enc, false);
+ _dpu_encoder_resource_disable(drm_enc);
dpu_enc->rc_state = DPU_ENC_RC_STATE_OFF;
@@ -954,9 +977,9 @@ static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
}
if (is_vid_mode)
- _dpu_encoder_irq_control(drm_enc, false);
+ _dpu_encoder_irq_disable(drm_enc);
else
- _dpu_encoder_resource_control_helper(drm_enc, false);
+ _dpu_encoder_resource_disable(drm_enc);
dpu_enc->rc_state = DPU_ENC_RC_STATE_IDLE;
@@ -1121,8 +1144,6 @@ static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc,
phys->hw_ctl = to_dpu_hw_ctl(hw_ctl[i]);
phys->cached_mode = crtc_state->adjusted_mode;
- if (phys->ops.atomic_mode_set)
- phys->ops.atomic_mode_set(phys, crtc_state, conn_state);
}
}
@@ -1853,7 +1874,9 @@ static void dpu_encoder_prep_dsc(struct dpu_encoder_virt *dpu_enc,
dsc_common_mode = 0;
pic_width = dsc->pic_width;
- dsc_common_mode = DSC_MODE_MULTIPLEX | DSC_MODE_SPLIT_PANEL;
+ dsc_common_mode = DSC_MODE_SPLIT_PANEL;
+ if (dpu_encoder_use_dsc_merge(enc_master->parent))
+ dsc_common_mode |= DSC_MODE_MULTIPLEX;
if (enc_master->intf_mode == INTF_MODE_VIDEO)
dsc_common_mode |= DSC_MODE_VIDEO;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
index 4c05fd5e9ed1..fe6b1d312a74 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
@@ -159,6 +159,13 @@ int dpu_encoder_get_vsync_count(struct drm_encoder *drm_enc);
bool dpu_encoder_is_widebus_enabled(const struct drm_encoder *drm_enc);
/**
+ * dpu_encoder_is_dsc_enabled - indicate whether dsc is enabled
+ * for the encoder.
+ * @drm_enc: Pointer to previously created drm encoder structure
+ */
+bool dpu_encoder_is_dsc_enabled(const struct drm_encoder *drm_enc);
+
+/**
* dpu_encoder_get_crc_values_cnt - get number of physical encoders contained
* in virtual encoder that can collect CRC values
* @drm_enc: Pointer to previously created drm encoder structure
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
index 993f26343331..dd9e3603d120 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
@@ -69,11 +69,8 @@ struct dpu_encoder_phys;
* @is_master: Whether this phys_enc is the current master
* encoder. Can be switched at enable time. Based
* on split_role and current mode (CMD/VID).
- * @atomic_mode_set: DRM Call. Set a DRM mode.
- * This likely caches the mode, for use at enable.
* @enable: DRM Call. Enable a DRM mode.
* @disable: DRM Call. Disable mode.
- * @atomic_check: DRM Call. Atomic check new DRM state.
* @control_vblank_irq Register/Deregister for VBLANK IRQ
* @wait_for_commit_done: Wait for hardware to have flushed the
* current pending frames to hardware
@@ -85,7 +82,8 @@ struct dpu_encoder_phys;
* @handle_post_kickoff: Do any work necessary post-kickoff work
* @trigger_start: Process start event on physical encoder
* @needs_single_flush: Whether encoder slaves need to be flushed
- * @irq_control: Handler to enable/disable all the encoder IRQs
+ * @irq_enable: Handler to enable all the encoder IRQs
+ * @irq_disable: Handler to disable all the encoder IRQs
* @prepare_idle_pc: phys encoder can update the vsync_enable status
* on idle power collapse prepare
* @restore: Restore all the encoder configs.
@@ -95,14 +93,8 @@ struct dpu_encoder_phys;
struct dpu_encoder_phys_ops {
void (*prepare_commit)(struct dpu_encoder_phys *encoder);
bool (*is_master)(struct dpu_encoder_phys *encoder);
- void (*atomic_mode_set)(struct dpu_encoder_phys *encoder,
- struct drm_crtc_state *crtc_state,
- struct drm_connector_state *conn_state);
void (*enable)(struct dpu_encoder_phys *encoder);
void (*disable)(struct dpu_encoder_phys *encoder);
- int (*atomic_check)(struct dpu_encoder_phys *encoder,
- struct drm_crtc_state *crtc_state,
- struct drm_connector_state *conn_state);
int (*control_vblank_irq)(struct dpu_encoder_phys *enc, bool enable);
int (*wait_for_commit_done)(struct dpu_encoder_phys *phys_enc);
int (*wait_for_tx_complete)(struct dpu_encoder_phys *phys_enc);
@@ -110,7 +102,8 @@ struct dpu_encoder_phys_ops {
void (*handle_post_kickoff)(struct dpu_encoder_phys *phys_enc);
void (*trigger_start)(struct dpu_encoder_phys *phys_enc);
bool (*needs_single_flush)(struct dpu_encoder_phys *phys_enc);
- void (*irq_control)(struct dpu_encoder_phys *phys, bool enable);
+ void (*irq_enable)(struct dpu_encoder_phys *phys);
+ void (*irq_disable)(struct dpu_encoder_phys *phys);
void (*prepare_idle_pc)(struct dpu_encoder_phys *phys_enc);
void (*restore)(struct dpu_encoder_phys *phys);
int (*get_line_count)(struct dpu_encoder_phys *phys);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
index a301e2833177..fc1d5736d7fc 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
@@ -142,23 +142,6 @@ static void dpu_encoder_phys_cmd_underrun_irq(void *arg)
dpu_encoder_underrun_callback(phys_enc->parent, phys_enc);
}
-static void dpu_encoder_phys_cmd_atomic_mode_set(
- struct dpu_encoder_phys *phys_enc,
- struct drm_crtc_state *crtc_state,
- struct drm_connector_state *conn_state)
-{
- phys_enc->irq[INTR_IDX_CTL_START] = phys_enc->hw_ctl->caps->intr_start;
-
- phys_enc->irq[INTR_IDX_PINGPONG] = phys_enc->hw_pp->caps->intr_done;
-
- if (phys_enc->has_intf_te)
- phys_enc->irq[INTR_IDX_RDPTR] = phys_enc->hw_intf->cap->intr_tear_rd_ptr;
- else
- phys_enc->irq[INTR_IDX_RDPTR] = phys_enc->hw_pp->caps->intr_rdptr;
-
- phys_enc->irq[INTR_IDX_UNDERRUN] = phys_enc->hw_intf->cap->intr_underrun;
-}
-
static int _dpu_encoder_phys_cmd_handle_ppdone_timeout(
struct dpu_encoder_phys *phys_enc)
{
@@ -291,40 +274,54 @@ end:
return ret;
}
-static void dpu_encoder_phys_cmd_irq_control(struct dpu_encoder_phys *phys_enc,
- bool enable)
+static void dpu_encoder_phys_cmd_irq_enable(struct dpu_encoder_phys *phys_enc)
{
- trace_dpu_enc_phys_cmd_irq_ctrl(DRMID(phys_enc->parent),
- phys_enc->hw_pp->idx - PINGPONG_0,
- enable, phys_enc->vblank_refcount);
+ trace_dpu_enc_phys_cmd_irq_enable(DRMID(phys_enc->parent),
+ phys_enc->hw_pp->idx - PINGPONG_0,
+ phys_enc->vblank_refcount);
- if (enable) {
- dpu_core_irq_register_callback(phys_enc->dpu_kms,
- phys_enc->irq[INTR_IDX_PINGPONG],
- dpu_encoder_phys_cmd_pp_tx_done_irq,
- phys_enc);
+ phys_enc->irq[INTR_IDX_CTL_START] = phys_enc->hw_ctl->caps->intr_start;
+ phys_enc->irq[INTR_IDX_PINGPONG] = phys_enc->hw_pp->caps->intr_done;
+
+ if (phys_enc->has_intf_te)
+ phys_enc->irq[INTR_IDX_RDPTR] = phys_enc->hw_intf->cap->intr_tear_rd_ptr;
+ else
+ phys_enc->irq[INTR_IDX_RDPTR] = phys_enc->hw_pp->caps->intr_rdptr;
+
+ dpu_core_irq_register_callback(phys_enc->dpu_kms,
+ phys_enc->irq[INTR_IDX_PINGPONG],
+ dpu_encoder_phys_cmd_pp_tx_done_irq,
+ phys_enc);
+ dpu_core_irq_register_callback(phys_enc->dpu_kms,
+ phys_enc->irq[INTR_IDX_UNDERRUN],
+ dpu_encoder_phys_cmd_underrun_irq,
+ phys_enc);
+ dpu_encoder_phys_cmd_control_vblank_irq(phys_enc, true);
+
+ if (dpu_encoder_phys_cmd_is_master(phys_enc))
dpu_core_irq_register_callback(phys_enc->dpu_kms,
- phys_enc->irq[INTR_IDX_UNDERRUN],
- dpu_encoder_phys_cmd_underrun_irq,
- phys_enc);
- dpu_encoder_phys_cmd_control_vblank_irq(phys_enc, true);
-
- if (dpu_encoder_phys_cmd_is_master(phys_enc))
- dpu_core_irq_register_callback(phys_enc->dpu_kms,
- phys_enc->irq[INTR_IDX_CTL_START],
- dpu_encoder_phys_cmd_ctl_start_irq,
- phys_enc);
- } else {
- if (dpu_encoder_phys_cmd_is_master(phys_enc))
- dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
- phys_enc->irq[INTR_IDX_CTL_START]);
+ phys_enc->irq[INTR_IDX_CTL_START],
+ dpu_encoder_phys_cmd_ctl_start_irq,
+ phys_enc);
+}
+
+static void dpu_encoder_phys_cmd_irq_disable(struct dpu_encoder_phys *phys_enc)
+{
+ trace_dpu_enc_phys_cmd_irq_disable(DRMID(phys_enc->parent),
+ phys_enc->hw_pp->idx - PINGPONG_0,
+ phys_enc->vblank_refcount);
+
+ if (dpu_encoder_phys_cmd_is_master(phys_enc))
dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
- phys_enc->irq[INTR_IDX_UNDERRUN]);
- dpu_encoder_phys_cmd_control_vblank_irq(phys_enc, false);
- dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
- phys_enc->irq[INTR_IDX_PINGPONG]);
- }
+ phys_enc->irq[INTR_IDX_CTL_START]);
+
+ dpu_core_irq_unregister_callback(phys_enc->dpu_kms, phys_enc->irq[INTR_IDX_UNDERRUN]);
+ dpu_encoder_phys_cmd_control_vblank_irq(phys_enc, false);
+ dpu_core_irq_unregister_callback(phys_enc->dpu_kms, phys_enc->irq[INTR_IDX_PINGPONG]);
+
+ phys_enc->irq[INTR_IDX_CTL_START] = 0;
+ phys_enc->irq[INTR_IDX_PINGPONG] = 0;
+ phys_enc->irq[INTR_IDX_RDPTR] = 0;
}
static void dpu_encoder_phys_cmd_tearcheck_config(
@@ -704,7 +701,6 @@ static void dpu_encoder_phys_cmd_init_ops(
struct dpu_encoder_phys_ops *ops)
{
ops->is_master = dpu_encoder_phys_cmd_is_master;
- ops->atomic_mode_set = dpu_encoder_phys_cmd_atomic_mode_set;
ops->enable = dpu_encoder_phys_cmd_enable;
ops->disable = dpu_encoder_phys_cmd_disable;
ops->control_vblank_irq = dpu_encoder_phys_cmd_control_vblank_irq;
@@ -713,7 +709,8 @@ static void dpu_encoder_phys_cmd_init_ops(
ops->wait_for_tx_complete = dpu_encoder_phys_cmd_wait_for_tx_complete;
ops->trigger_start = dpu_encoder_phys_cmd_trigger_start;
ops->needs_single_flush = dpu_encoder_phys_cmd_needs_single_flush;
- ops->irq_control = dpu_encoder_phys_cmd_irq_control;
+ ops->irq_enable = dpu_encoder_phys_cmd_irq_enable;
+ ops->irq_disable = dpu_encoder_phys_cmd_irq_disable;
ops->restore = dpu_encoder_phys_cmd_enable_helper;
ops->prepare_idle_pc = dpu_encoder_phys_cmd_prepare_idle_pc;
ops->handle_post_kickoff = dpu_encoder_phys_cmd_handle_post_kickoff;
@@ -742,6 +739,8 @@ struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(struct drm_device *dev,
dpu_encoder_phys_cmd_init_ops(&phys_enc->ops);
phys_enc->intf_mode = INTF_MODE_CMD;
+ phys_enc->irq[INTR_IDX_UNDERRUN] = phys_enc->hw_intf->cap->intr_underrun;
+
cmd_enc->stream_sel = 0;
if (!phys_enc->hw_intf) {
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
index d0f56c5c4cce..2aa72b578764 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
@@ -102,6 +102,7 @@ static void drm_mode_to_intf_timing_params(
}
timing->wide_bus_en = dpu_encoder_is_widebus_enabled(phys_enc->parent);
+ timing->compression_en = dpu_encoder_is_dsc_enabled(phys_enc->parent);
/*
* for DP, divide the horizonal parameters by 2 when
@@ -349,16 +350,6 @@ static bool dpu_encoder_phys_vid_needs_single_flush(
return phys_enc->split_role != ENC_ROLE_SOLO;
}
-static void dpu_encoder_phys_vid_atomic_mode_set(
- struct dpu_encoder_phys *phys_enc,
- struct drm_crtc_state *crtc_state,
- struct drm_connector_state *conn_state)
-{
- phys_enc->irq[INTR_IDX_VSYNC] = phys_enc->hw_intf->cap->intr_vsync;
-
- phys_enc->irq[INTR_IDX_UNDERRUN] = phys_enc->hw_intf->cap->intr_underrun;
-}
-
static int dpu_encoder_phys_vid_control_vblank_irq(
struct dpu_encoder_phys *phys_enc,
bool enable)
@@ -615,30 +606,33 @@ static void dpu_encoder_phys_vid_handle_post_kickoff(
}
}
-static void dpu_encoder_phys_vid_irq_control(struct dpu_encoder_phys *phys_enc,
- bool enable)
+static void dpu_encoder_phys_vid_irq_enable(struct dpu_encoder_phys *phys_enc)
{
int ret;
- trace_dpu_enc_phys_vid_irq_ctrl(DRMID(phys_enc->parent),
- phys_enc->hw_intf->idx - INTF_0,
- enable,
- phys_enc->vblank_refcount);
+ trace_dpu_enc_phys_vid_irq_enable(DRMID(phys_enc->parent),
+ phys_enc->hw_intf->idx - INTF_0,
+ phys_enc->vblank_refcount);
- if (enable) {
- ret = dpu_encoder_phys_vid_control_vblank_irq(phys_enc, true);
- if (WARN_ON(ret))
- return;
-
- dpu_core_irq_register_callback(phys_enc->dpu_kms,
- phys_enc->irq[INTR_IDX_UNDERRUN],
- dpu_encoder_phys_vid_underrun_irq,
- phys_enc);
- } else {
- dpu_encoder_phys_vid_control_vblank_irq(phys_enc, false);
- dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
- phys_enc->irq[INTR_IDX_UNDERRUN]);
- }
+ ret = dpu_encoder_phys_vid_control_vblank_irq(phys_enc, true);
+ if (WARN_ON(ret))
+ return;
+
+ dpu_core_irq_register_callback(phys_enc->dpu_kms,
+ phys_enc->irq[INTR_IDX_UNDERRUN],
+ dpu_encoder_phys_vid_underrun_irq,
+ phys_enc);
+}
+
+static void dpu_encoder_phys_vid_irq_disable(struct dpu_encoder_phys *phys_enc)
+{
+ trace_dpu_enc_phys_vid_irq_disable(DRMID(phys_enc->parent),
+ phys_enc->hw_intf->idx - INTF_0,
+ phys_enc->vblank_refcount);
+
+ dpu_encoder_phys_vid_control_vblank_irq(phys_enc, false);
+ dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
+ phys_enc->irq[INTR_IDX_UNDERRUN]);
}
static int dpu_encoder_phys_vid_get_line_count(
@@ -683,13 +677,13 @@ static int dpu_encoder_phys_vid_get_frame_count(
static void dpu_encoder_phys_vid_init_ops(struct dpu_encoder_phys_ops *ops)
{
ops->is_master = dpu_encoder_phys_vid_is_master;
- ops->atomic_mode_set = dpu_encoder_phys_vid_atomic_mode_set;
ops->enable = dpu_encoder_phys_vid_enable;
ops->disable = dpu_encoder_phys_vid_disable;
ops->control_vblank_irq = dpu_encoder_phys_vid_control_vblank_irq;
ops->wait_for_commit_done = dpu_encoder_phys_vid_wait_for_commit_done;
ops->wait_for_tx_complete = dpu_encoder_phys_vid_wait_for_tx_complete;
- ops->irq_control = dpu_encoder_phys_vid_irq_control;
+ ops->irq_enable = dpu_encoder_phys_vid_irq_enable;
+ ops->irq_disable = dpu_encoder_phys_vid_irq_disable;
ops->prepare_for_kickoff = dpu_encoder_phys_vid_prepare_for_kickoff;
ops->handle_post_kickoff = dpu_encoder_phys_vid_handle_post_kickoff;
ops->needs_single_flush = dpu_encoder_phys_vid_needs_single_flush;
@@ -721,6 +715,8 @@ struct dpu_encoder_phys *dpu_encoder_phys_vid_init(struct drm_device *dev,
dpu_encoder_phys_vid_init_ops(&phys_enc->ops);
phys_enc->intf_mode = INTF_MODE_VIDEO;
+ phys_enc->irq[INTR_IDX_VSYNC] = phys_enc->hw_intf->cap->intr_vsync;
+ phys_enc->irq[INTR_IDX_UNDERRUN] = phys_enc->hw_intf->cap->intr_underrun;
DPU_DEBUG_VIDENC(phys_enc, "created intf idx:%d\n", p->hw_intf->idx);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
index 4cd2d9e3131a..8220cd920e6f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
@@ -355,59 +355,6 @@ static void dpu_encoder_helper_phys_setup_cdm(struct dpu_encoder_phys *phys_enc)
}
/**
- * dpu_encoder_phys_wb_atomic_check - verify and fixup given atomic states
- * @phys_enc: Pointer to physical encoder
- * @crtc_state: Pointer to CRTC atomic state
- * @conn_state: Pointer to connector atomic state
- */
-static int dpu_encoder_phys_wb_atomic_check(
- struct dpu_encoder_phys *phys_enc,
- struct drm_crtc_state *crtc_state,
- struct drm_connector_state *conn_state)
-{
- struct drm_framebuffer *fb;
- const struct drm_display_mode *mode = &crtc_state->mode;
-
- DPU_DEBUG("[atomic_check:%d, \"%s\",%d,%d]\n",
- phys_enc->hw_wb->idx, mode->name, mode->hdisplay, mode->vdisplay);
-
- if (!conn_state || !conn_state->connector) {
- DPU_ERROR("invalid connector state\n");
- return -EINVAL;
- } else if (conn_state->connector->status !=
- connector_status_connected) {
- DPU_ERROR("connector not connected %d\n",
- conn_state->connector->status);
- return -EINVAL;
- }
-
- if (!conn_state->writeback_job || !conn_state->writeback_job->fb)
- return 0;
-
- fb = conn_state->writeback_job->fb;
-
- DPU_DEBUG("[fb_id:%u][fb:%u,%u]\n", fb->base.id,
- fb->width, fb->height);
-
- if (fb->width != mode->hdisplay) {
- DPU_ERROR("invalid fb w=%d, mode w=%d\n", fb->width,
- mode->hdisplay);
- return -EINVAL;
- } else if (fb->height != mode->vdisplay) {
- DPU_ERROR("invalid fb h=%d, mode h=%d\n", fb->height,
- mode->vdisplay);
- return -EINVAL;
- } else if (fb->width > phys_enc->hw_wb->caps->maxlinewidth) {
- DPU_ERROR("invalid fb w=%d, maxlinewidth=%u\n",
- fb->width, phys_enc->hw_wb->caps->maxlinewidth);
- return -EINVAL;
- }
-
- return drm_atomic_helper_check_wb_connector_state(conn_state->connector, conn_state->state);
-}
-
-
-/**
* _dpu_encoder_phys_wb_update_flush - flush hardware update
* @phys_enc: Pointer to physical encoder
*/
@@ -511,31 +458,32 @@ static void dpu_encoder_phys_wb_done_irq(void *arg)
}
/**
- * dpu_encoder_phys_wb_irq_ctrl - irq control of WB
+ * dpu_encoder_phys_wb_irq_enable - irq control of WB
* @phys: Pointer to physical encoder
- * @enable: indicates enable or disable interrupts
*/
-static void dpu_encoder_phys_wb_irq_ctrl(
- struct dpu_encoder_phys *phys, bool enable)
+static void dpu_encoder_phys_wb_irq_enable(struct dpu_encoder_phys *phys)
{
struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys);
- if (enable && atomic_inc_return(&wb_enc->wbirq_refcount) == 1)
+ if (atomic_inc_return(&wb_enc->wbirq_refcount) == 1)
dpu_core_irq_register_callback(phys->dpu_kms,
- phys->irq[INTR_IDX_WB_DONE], dpu_encoder_phys_wb_done_irq, phys);
- else if (!enable &&
- atomic_dec_return(&wb_enc->wbirq_refcount) == 0)
- dpu_core_irq_unregister_callback(phys->dpu_kms, phys->irq[INTR_IDX_WB_DONE]);
+ phys->irq[INTR_IDX_WB_DONE],
+ dpu_encoder_phys_wb_done_irq,
+ phys);
}
-static void dpu_encoder_phys_wb_atomic_mode_set(
- struct dpu_encoder_phys *phys_enc,
- struct drm_crtc_state *crtc_state,
- struct drm_connector_state *conn_state)
+/**
+ * dpu_encoder_phys_wb_irq_disable - irq control of WB
+ * @phys: Pointer to physical encoder
+ */
+static void dpu_encoder_phys_wb_irq_disable(struct dpu_encoder_phys *phys)
{
- phys_enc->irq[INTR_IDX_WB_DONE] = phys_enc->hw_wb->caps->intr_wb_done;
+ struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys);
+
+ if (atomic_dec_return(&wb_enc->wbirq_refcount) == 0)
+ dpu_core_irq_unregister_callback(phys->dpu_kms, phys->irq[INTR_IDX_WB_DONE]);
}
static void _dpu_encoder_phys_wb_handle_wbdone_timeout(
@@ -774,10 +722,8 @@ static bool dpu_encoder_phys_wb_is_valid_for_commit(struct dpu_encoder_phys *phy
static void dpu_encoder_phys_wb_init_ops(struct dpu_encoder_phys_ops *ops)
{
ops->is_master = dpu_encoder_phys_wb_is_master;
- ops->atomic_mode_set = dpu_encoder_phys_wb_atomic_mode_set;
ops->enable = dpu_encoder_phys_wb_enable;
ops->disable = dpu_encoder_phys_wb_disable;
- ops->atomic_check = dpu_encoder_phys_wb_atomic_check;
ops->wait_for_commit_done = dpu_encoder_phys_wb_wait_for_commit_done;
ops->prepare_for_kickoff = dpu_encoder_phys_wb_prepare_for_kickoff;
ops->handle_post_kickoff = dpu_encoder_phys_wb_handle_post_kickoff;
@@ -785,7 +731,8 @@ static void dpu_encoder_phys_wb_init_ops(struct dpu_encoder_phys_ops *ops)
ops->trigger_start = dpu_encoder_helper_trigger_start;
ops->prepare_wb_job = dpu_encoder_phys_wb_prepare_wb_job;
ops->cleanup_wb_job = dpu_encoder_phys_wb_cleanup_wb_job;
- ops->irq_control = dpu_encoder_phys_wb_irq_ctrl;
+ ops->irq_enable = dpu_encoder_phys_wb_irq_enable;
+ ops->irq_disable = dpu_encoder_phys_wb_irq_disable;
ops->is_valid_for_commit = dpu_encoder_phys_wb_is_valid_for_commit;
}
@@ -820,6 +767,7 @@ struct dpu_encoder_phys *dpu_encoder_phys_wb_init(struct drm_device *dev,
dpu_encoder_phys_wb_init_ops(&phys_enc->ops);
phys_enc->intf_mode = INTF_MODE_WB_LINE;
+ phys_enc->irq[INTR_IDX_WB_DONE] = phys_enc->hw_wb->caps->intr_wb_done;
atomic_set(&wb_enc->wbirq_refcount, 0);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
index 54e8717403a0..f2b6eac7601d 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
@@ -680,6 +680,8 @@ static const struct dpu_qos_lut_entry sc7180_qos_nrt[] = {
*************************************************************/
#include "catalog/dpu_3_0_msm8998.h"
+#include "catalog/dpu_3_2_sdm660.h"
+#include "catalog/dpu_3_3_sdm630.h"
#include "catalog/dpu_4_0_sdm845.h"
#include "catalog/dpu_4_1_sdm670.h"
@@ -703,4 +705,6 @@ static const struct dpu_qos_lut_entry sc7180_qos_nrt[] = {
#include "catalog/dpu_9_0_sm8550.h"
+#include "catalog/dpu_9_2_x1e80100.h"
+
#include "catalog/dpu_10_0_sm8650.h"
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
index ba82ef4560a6..d1aef778340b 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
@@ -832,6 +832,8 @@ struct dpu_mdss_cfg {
};
extern const struct dpu_mdss_cfg dpu_msm8998_cfg;
+extern const struct dpu_mdss_cfg dpu_sdm630_cfg;
+extern const struct dpu_mdss_cfg dpu_sdm660_cfg;
extern const struct dpu_mdss_cfg dpu_sdm845_cfg;
extern const struct dpu_mdss_cfg dpu_sdm670_cfg;
extern const struct dpu_mdss_cfg dpu_sm8150_cfg;
@@ -849,5 +851,6 @@ extern const struct dpu_mdss_cfg dpu_sc8280xp_cfg;
extern const struct dpu_mdss_cfg dpu_sm8450_cfg;
extern const struct dpu_mdss_cfg dpu_sm8550_cfg;
extern const struct dpu_mdss_cfg dpu_sm8650_cfg;
+extern const struct dpu_mdss_cfg dpu_x1e80100_cfg;
#endif /* _DPU_HW_CATALOG_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
index 6bba531d6dc4..965692ef7892 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
@@ -163,13 +163,8 @@ static void dpu_hw_intf_setup_timing_engine(struct dpu_hw_intf *ctx,
hsync_ctl = (hsync_period << 16) | p->hsync_pulse_width;
display_hctl = (hsync_end_x << 16) | hsync_start_x;
- /*
- * DATA_HCTL_EN controls data timing which can be different from
- * video timing. It is recommended to enable it for all cases, except
- * if compression is enabled in 1 pixel per clock mode
- */
if (p->wide_bus_en)
- intf_cfg2 |= INTF_CFG2_DATABUS_WIDEN | INTF_CFG2_DATA_HCTL_EN;
+ intf_cfg2 |= INTF_CFG2_DATABUS_WIDEN;
data_width = p->width;
@@ -229,6 +224,14 @@ static void dpu_hw_intf_setup_timing_engine(struct dpu_hw_intf *ctx,
DPU_REG_WRITE(c, INTF_CONFIG, intf_cfg);
DPU_REG_WRITE(c, INTF_PANEL_FORMAT, panel_format);
if (ctx->cap->features & BIT(DPU_DATA_HCTL_EN)) {
+ /*
+ * DATA_HCTL_EN controls data timing which can be different from
+ * video timing. It is recommended to enable it for all cases, except
+ * if compression is enabled in 1 pixel per clock mode
+ */
+ if (!(p->compression_en && !p->wide_bus_en))
+ intf_cfg2 |= INTF_CFG2_DATA_HCTL_EN;
+
DPU_REG_WRITE(c, INTF_CONFIG2, intf_cfg2);
DPU_REG_WRITE(c, INTF_DISPLAY_DATA_HCTL, display_data_hctl);
DPU_REG_WRITE(c, INTF_ACTIVE_DATA_HCTL, active_data_hctl);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
index 0bd57a32144a..6f4c87244f94 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
@@ -33,6 +33,7 @@ struct dpu_hw_intf_timing_params {
u32 hsync_skew;
bool wide_bus_en;
+ bool compression_en;
};
struct dpu_hw_intf_prog_fetch {
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
index 723cc1d82143..d6412395bacc 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -317,11 +317,6 @@ struct dpu_global_state *dpu_kms_get_global_state(struct drm_atomic_state *s)
struct msm_drm_private *priv = s->dev->dev_private;
struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
struct drm_private_state *priv_state;
- int ret;
-
- ret = drm_modeset_lock(&dpu_kms->global_state_lock, s->acquire_ctx);
- if (ret)
- return ERR_PTR(ret);
priv_state = drm_atomic_get_private_obj_state(s,
&dpu_kms->global_state);
@@ -362,8 +357,6 @@ static int dpu_kms_global_obj_init(struct dpu_kms *dpu_kms)
{
struct dpu_global_state *state;
- drm_modeset_lock_init(&dpu_kms->global_state_lock);
-
state = kzalloc(sizeof(*state), GFP_KERNEL);
if (!state)
return -ENOMEM;
@@ -374,6 +367,11 @@ static int dpu_kms_global_obj_init(struct dpu_kms *dpu_kms)
return 0;
}
+static void dpu_kms_global_obj_fini(struct dpu_kms *dpu_kms)
+{
+ drm_atomic_private_obj_fini(&dpu_kms->global_state);
+}
+
static int dpu_kms_parse_data_bus_icc_path(struct dpu_kms *dpu_kms)
{
struct icc_path *path0;
@@ -630,23 +628,26 @@ static int _dpu_kms_initialize_writeback(struct drm_device *dev,
{
struct drm_encoder *encoder = NULL;
struct msm_display_info info;
+ const enum dpu_wb wb_idx = WB_2;
+ u32 maxlinewidth;
int rc;
memset(&info, 0, sizeof(info));
info.num_of_h_tiles = 1;
/* use only WB idx 2 instance for DPU */
- info.h_tile_instance[0] = WB_2;
+ info.h_tile_instance[0] = wb_idx;
info.intf_type = INTF_WB;
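+ /*
+ * Fetch the WB block's maximum line width so the writeback connector
+ * can reject framebuffers wider than the hardware supports.
+ */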
+ maxlinewidth = dpu_rm_get_wb(&dpu_kms->rm, info.h_tile_instance[0])->caps->maxlinewidth;
+
encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_VIRTUAL, &info);
if (IS_ERR(encoder)) {
DPU_ERROR("encoder init failed for dsi display\n");
return PTR_ERR(encoder);
}
- rc = dpu_writeback_init(dev, encoder, wb_formats,
- n_formats);
+ rc = dpu_writeback_init(dev, encoder, wb_formats, n_formats, maxlinewidth);
if (rc) {
DPU_ERROR("dpu_writeback_init, rc = %d\n", rc);
return rc;
@@ -801,6 +802,8 @@ static void _dpu_kms_hw_destroy(struct dpu_kms *dpu_kms)
dpu_kms->hw_vbif[i] = NULL;
}
+ dpu_kms_global_obj_fini(dpu_kms);
+
dpu_kms->catalog = NULL;
dpu_kms->hw_mdp = NULL;
@@ -1197,6 +1200,78 @@ static int dpu_kms_init(struct drm_device *ddev)
return 0;
}
+static int dpu_kms_mmap_mdp5(struct dpu_kms *dpu_kms)
+{
+ struct platform_device *pdev = dpu_kms->pdev;
+ struct platform_device *mdss_dev;
+ int ret;
+
+ if (!dev_is_platform(dpu_kms->pdev->dev.parent))
+ return -EINVAL;
+
+ mdss_dev = to_platform_device(dpu_kms->pdev->dev.parent);
+
+ dpu_kms->mmio = msm_ioremap(pdev, "mdp_phys");
+ if (IS_ERR(dpu_kms->mmio)) {
+ ret = PTR_ERR(dpu_kms->mmio);
+ DPU_ERROR("mdp register memory map failed: %d\n", ret);
+ dpu_kms->mmio = NULL;
+ return ret;
+ }
+ DRM_DEBUG("mapped dpu address space @%pK\n", dpu_kms->mmio);
+
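+ /*
+ * The "vbif_phys" and "vbif_nrt_phys" ranges are provided by the parent
+ * MDSS device on these platforms, so map them through the MDSS platform
+ * device rather than the DPU one.
+ */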
+ dpu_kms->vbif[VBIF_RT] = msm_ioremap_mdss(mdss_dev,
+ dpu_kms->pdev,
+ "vbif_phys");
+ if (IS_ERR(dpu_kms->vbif[VBIF_RT])) {
+ ret = PTR_ERR(dpu_kms->vbif[VBIF_RT]);
+ DPU_ERROR("vbif register memory map failed: %d\n", ret);
+ dpu_kms->vbif[VBIF_RT] = NULL;
+ return ret;
+ }
+
+ dpu_kms->vbif[VBIF_NRT] = msm_ioremap_mdss(mdss_dev,
+ dpu_kms->pdev,
+ "vbif_nrt_phys");
+ if (IS_ERR(dpu_kms->vbif[VBIF_NRT])) {
+ dpu_kms->vbif[VBIF_NRT] = NULL;
+ DPU_DEBUG("VBIF NRT is not defined");
+ }
+
+ return 0;
+}
+
+static int dpu_kms_mmap_dpu(struct dpu_kms *dpu_kms)
+{
+ struct platform_device *pdev = dpu_kms->pdev;
+ int ret;
+
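+ /* the "mdp", "vbif" and "vbif_nrt" ranges live on the DPU device itself */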
+ dpu_kms->mmio = msm_ioremap(pdev, "mdp");
+ if (IS_ERR(dpu_kms->mmio)) {
+ ret = PTR_ERR(dpu_kms->mmio);
+ DPU_ERROR("mdp register memory map failed: %d\n", ret);
+ dpu_kms->mmio = NULL;
+ return ret;
+ }
+ DRM_DEBUG("mapped dpu address space @%pK\n", dpu_kms->mmio);
+
+ dpu_kms->vbif[VBIF_RT] = msm_ioremap(pdev, "vbif");
+ if (IS_ERR(dpu_kms->vbif[VBIF_RT])) {
+ ret = PTR_ERR(dpu_kms->vbif[VBIF_RT]);
+ DPU_ERROR("vbif register memory map failed: %d\n", ret);
+ dpu_kms->vbif[VBIF_RT] = NULL;
+ return ret;
+ }
+
+ dpu_kms->vbif[VBIF_NRT] = msm_ioremap_quiet(pdev, "vbif_nrt");
+ if (IS_ERR(dpu_kms->vbif[VBIF_NRT])) {
+ dpu_kms->vbif[VBIF_NRT] = NULL;
+ DPU_DEBUG("VBIF NRT is not defined");
+ }
+
+ return 0;
+}
+
static int dpu_dev_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -1204,6 +1279,9 @@ static int dpu_dev_probe(struct platform_device *pdev)
int irq;
int ret = 0;
+ if (!msm_disp_drv_should_bind(&pdev->dev, true))
+ return -ENODEV;
+
dpu_kms = devm_kzalloc(dev, sizeof(*dpu_kms), GFP_KERNEL);
if (!dpu_kms)
return -ENOMEM;
@@ -1230,28 +1308,12 @@ static int dpu_dev_probe(struct platform_device *pdev)
dpu_kms->base.irq = irq;
- dpu_kms->mmio = msm_ioremap(pdev, "mdp");
- if (IS_ERR(dpu_kms->mmio)) {
- ret = PTR_ERR(dpu_kms->mmio);
- DPU_ERROR("mdp register memory map failed: %d\n", ret);
- dpu_kms->mmio = NULL;
- return ret;
- }
- DRM_DEBUG("mapped dpu address space @%pK\n", dpu_kms->mmio);
-
- dpu_kms->vbif[VBIF_RT] = msm_ioremap(pdev, "vbif");
- if (IS_ERR(dpu_kms->vbif[VBIF_RT])) {
- ret = PTR_ERR(dpu_kms->vbif[VBIF_RT]);
- DPU_ERROR("vbif register memory map failed: %d\n", ret);
- dpu_kms->vbif[VBIF_RT] = NULL;
+ if (of_device_is_compatible(dpu_kms->pdev->dev.of_node, "qcom,mdp5"))
+ ret = dpu_kms_mmap_mdp5(dpu_kms);
+ else
+ ret = dpu_kms_mmap_dpu(dpu_kms);
+ if (ret)
return ret;
- }
-
- dpu_kms->vbif[VBIF_NRT] = msm_ioremap_quiet(pdev, "vbif_nrt");
- if (IS_ERR(dpu_kms->vbif[VBIF_NRT])) {
- dpu_kms->vbif[VBIF_NRT] = NULL;
- DPU_DEBUG("VBIF NRT is not defined");
- }
ret = dpu_kms_parse_data_bus_icc_path(dpu_kms);
if (ret)
@@ -1318,6 +1380,8 @@ static const struct dev_pm_ops dpu_pm_ops = {
static const struct of_device_id dpu_dt_match[] = {
{ .compatible = "qcom,msm8998-dpu", .data = &dpu_msm8998_cfg, },
{ .compatible = "qcom,qcm2290-dpu", .data = &dpu_qcm2290_cfg, },
+ { .compatible = "qcom,sdm630-mdp5", .data = &dpu_sdm630_cfg, },
+ { .compatible = "qcom,sdm660-mdp5", .data = &dpu_sdm660_cfg, },
{ .compatible = "qcom,sdm670-dpu", .data = &dpu_sdm670_cfg, },
{ .compatible = "qcom,sdm845-dpu", .data = &dpu_sdm845_cfg, },
{ .compatible = "qcom,sc7180-dpu", .data = &dpu_sc7180_cfg, },
@@ -1334,6 +1398,7 @@ static const struct of_device_id dpu_dt_match[] = {
{ .compatible = "qcom,sm8450-dpu", .data = &dpu_sm8450_cfg, },
{ .compatible = "qcom,sm8550-dpu", .data = &dpu_sm8550_cfg, },
{ .compatible = "qcom,sm8650-dpu", .data = &dpu_sm8650_cfg, },
+ { .compatible = "qcom,x1e80100-dpu", .data = &dpu_x1e80100_cfg, },
{}
};
MODULE_DEVICE_TABLE(of, dpu_dt_match);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
index d1207f4ec3ae..b5db3fc76ca6 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
@@ -85,7 +85,6 @@ struct dpu_kms {
* Global private object state, Do not access directly, use
* dpu_kms_global_get_state()
*/
- struct drm_modeset_lock global_state_lock;
struct drm_private_obj global_state;
struct dpu_rm rm;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
index 724537ab776d..cb5ce3c62a22 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
@@ -409,29 +409,153 @@ static int _dpu_rm_reserve_ctls(
return 0;
}
-static int _dpu_rm_reserve_dsc(struct dpu_rm *rm,
- struct dpu_global_state *global_state,
- struct drm_encoder *enc,
- const struct msm_display_topology *top)
+static int _dpu_rm_pingpong_next_index(struct dpu_global_state *global_state,
+ int start,
+ uint32_t enc_id)
{
- int num_dsc = top->num_dsc;
int i;
- /* check if DSC required are allocated or not */
- for (i = 0; i < num_dsc; i++) {
- if (!rm->dsc_blks[i]) {
- DPU_ERROR("DSC %d does not exist\n", i);
- return -EIO;
+ for (i = start; i < (PINGPONG_MAX - PINGPONG_0); i++) {
+ if (global_state->pingpong_to_enc_id[i] == enc_id)
+ return i;
+ }
+
+ return -ENAVAIL;
+}
+
+static int _dpu_rm_pingpong_dsc_check(int dsc_idx, int pp_idx)
+{
+ /*
+ * DSC with even index must be used with the PINGPONG with even index
+ * DSC with odd index must be used with the PINGPONG with odd index
+ */
+ if ((dsc_idx & 0x01) != (pp_idx & 0x01))
+ return -ENAVAIL;
+
+ return 0;
+}
+
+static int _dpu_rm_dsc_alloc(struct dpu_rm *rm,
+ struct dpu_global_state *global_state,
+ uint32_t enc_id,
+ const struct msm_display_topology *top)
+{
+ int num_dsc = 0;
+ int pp_idx = 0;
+ int dsc_idx;
+ int ret;
+
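+ /*
+ * Walk the DSC blocks and bind each free one to the next PINGPONG
+ * already reserved for this encoder, honouring the even/odd
+ * DSC/PINGPONG index pairing.
+ */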
+ for (dsc_idx = 0; dsc_idx < ARRAY_SIZE(rm->dsc_blks) &&
+ num_dsc < top->num_dsc; dsc_idx++) {
+ if (!rm->dsc_blks[dsc_idx])
+ continue;
+
+ if (reserved_by_other(global_state->dsc_to_enc_id, dsc_idx, enc_id))
+ continue;
+
+ pp_idx = _dpu_rm_pingpong_next_index(global_state, pp_idx, enc_id);
+ if (pp_idx < 0)
+ return -ENAVAIL;
+
+ ret = _dpu_rm_pingpong_dsc_check(dsc_idx, pp_idx);
+ if (ret)
+ return -ENAVAIL;
+
+ global_state->dsc_to_enc_id[dsc_idx] = enc_id;
+ num_dsc++;
+ pp_idx++;
+ }
+
+ if (num_dsc < top->num_dsc) {
+ DPU_ERROR("DSC allocation failed num_dsc=%d required=%d\n",
+ num_dsc, top->num_dsc);
+ return -ENAVAIL;
+ }
+
+ return 0;
+}
+
+static int _dpu_rm_dsc_alloc_pair(struct dpu_rm *rm,
+ struct dpu_global_state *global_state,
+ uint32_t enc_id,
+ const struct msm_display_topology *top)
+{
+ int num_dsc = 0;
+ int dsc_idx, pp_idx = 0;
+ int ret;
+
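+ /*
+ * Merge mode needs a consecutive even/odd pair of DSC blocks feeding
+ * two PINGPONGs of the same encoder; restart the PINGPONG search
+ * whenever the index parity does not line up.
+ */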
+ /* only start from even dsc index */
+ for (dsc_idx = 0; dsc_idx < ARRAY_SIZE(rm->dsc_blks) &&
+ num_dsc < top->num_dsc; dsc_idx += 2) {
+ if (!rm->dsc_blks[dsc_idx] ||
+ !rm->dsc_blks[dsc_idx + 1])
+ continue;
+
+ /* consecutive DSC indices form the pair */
+ if (reserved_by_other(global_state->dsc_to_enc_id, dsc_idx, enc_id) ||
+ reserved_by_other(global_state->dsc_to_enc_id, dsc_idx + 1, enc_id))
+ continue;
+
+ pp_idx = _dpu_rm_pingpong_next_index(global_state, pp_idx, enc_id);
+ if (pp_idx < 0)
+ return -ENAVAIL;
+
+ ret = _dpu_rm_pingpong_dsc_check(dsc_idx, pp_idx);
+ if (ret) {
+ pp_idx = 0;
+ continue;
}
- if (global_state->dsc_to_enc_id[i]) {
- DPU_ERROR("DSC %d is already allocated\n", i);
- return -EIO;
+ pp_idx = _dpu_rm_pingpong_next_index(global_state, pp_idx + 1, enc_id);
+ if (pp_idx < 0)
+ return -ENAVAIL;
+
+ ret = _dpu_rm_pingpong_dsc_check(dsc_idx + 1, pp_idx);
+ if (ret) {
+ pp_idx = 0;
+ continue;
}
+
+ global_state->dsc_to_enc_id[dsc_idx] = enc_id;
+ global_state->dsc_to_enc_id[dsc_idx + 1] = enc_id;
+ num_dsc += 2;
+ pp_idx++; /* start the search for the next pair from here */
}
- for (i = 0; i < num_dsc; i++)
- global_state->dsc_to_enc_id[i] = enc->base.id;
+ if (num_dsc < top->num_dsc) {
+ DPU_ERROR("DSC allocation failed num_dsc=%d required=%d\n",
+ num_dsc, top->num_dsc);
+ return -ENAVAIL;
+ }
+
+ return 0;
+}
+
+static int _dpu_rm_reserve_dsc(struct dpu_rm *rm,
+ struct dpu_global_state *global_state,
+ struct drm_encoder *enc,
+ const struct msm_display_topology *top)
+{
+ uint32_t enc_id = enc->base.id;
+
+ if (!top->num_dsc || !top->num_intf)
+ return 0;
+
+ /*
+ * Facts:
+ * 1) no pingpong split (two layer mixers share one pingpong)
+ * 2) DSC pair starts from even index, such as index(0,1), (2,3), etc
+ * 3) even PINGPONG connects to even DSC
+ * 4) odd PINGPONG connects to odd DSC
+ * 5) pair: encoder +--> pp_idx_0 --> dsc_idx_0
+ * +--> pp_idx_1 --> dsc_idx_1
+ */
+
+ /* num_dsc should be either 1, 2 or 4 */
+ if (top->num_dsc > top->num_intf) /* merge mode */
+ return _dpu_rm_dsc_alloc_pair(rm, global_state, enc_id, top);
+ else
+ return _dpu_rm_dsc_alloc(rm, global_state, enc_id, top);
return 0;
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
index 35d03b121a0b..bd92fb2979aa 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
@@ -273,6 +273,14 @@ DEFINE_EVENT(dpu_drm_obj_template, dpu_crtc_runtime_resume,
TP_PROTO(uint32_t drm_id),
TP_ARGS(drm_id)
);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_enc_rc_enable,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_enc_rc_disable,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
TRACE_EVENT(dpu_enc_enable,
TP_PROTO(uint32_t drm_id, int hdisplay, int vdisplay),
@@ -342,10 +350,6 @@ DECLARE_EVENT_CLASS(dpu_enc_id_enable_template,
TP_printk("id=%u, enable=%s",
__entry->drm_id, __entry->enable ? "true" : "false")
);
-DEFINE_EVENT(dpu_enc_id_enable_template, dpu_enc_rc_helper,
- TP_PROTO(uint32_t drm_id, bool enable),
- TP_ARGS(drm_id, enable)
-);
DEFINE_EVENT(dpu_enc_id_enable_template, dpu_enc_vblank_cb,
TP_PROTO(uint32_t drm_id, bool enable),
TP_ARGS(drm_id, enable)
@@ -514,24 +518,41 @@ TRACE_EVENT(dpu_enc_wait_event_timeout,
__entry->expected_time, __entry->atomic_cnt)
);
-TRACE_EVENT(dpu_enc_phys_cmd_irq_ctrl,
- TP_PROTO(uint32_t drm_id, enum dpu_pingpong pp, bool enable,
+TRACE_EVENT(dpu_enc_phys_cmd_irq_enable,
+ TP_PROTO(uint32_t drm_id, enum dpu_pingpong pp,
int refcnt),
- TP_ARGS(drm_id, pp, enable, refcnt),
+ TP_ARGS(drm_id, pp, refcnt),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( enum dpu_pingpong, pp )
+ __field( int, refcnt )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->pp = pp;
+ __entry->refcnt = refcnt;
+ ),
+ TP_printk("id=%u, pp=%d, refcnt=%d", __entry->drm_id,
+ __entry->pp,
+ __entry->refcnt)
+);
+
+TRACE_EVENT(dpu_enc_phys_cmd_irq_disable,
+ TP_PROTO(uint32_t drm_id, enum dpu_pingpong pp,
+ int refcnt),
+ TP_ARGS(drm_id, pp, refcnt),
TP_STRUCT__entry(
__field( uint32_t, drm_id )
__field( enum dpu_pingpong, pp )
- __field( bool, enable )
__field( int, refcnt )
),
TP_fast_assign(
__entry->drm_id = drm_id;
__entry->pp = pp;
- __entry->enable = enable;
__entry->refcnt = refcnt;
),
- TP_printk("id=%u, pp=%d, enable=%s, refcnt=%d", __entry->drm_id,
- __entry->pp, __entry->enable ? "true" : "false",
+ TP_printk("id=%u, pp=%d, refcnt=%d", __entry->drm_id,
+ __entry->pp,
__entry->refcnt)
);
@@ -592,24 +613,41 @@ TRACE_EVENT(dpu_enc_phys_vid_post_kickoff,
TP_printk("id=%u, intf_idx=%d", __entry->drm_id, __entry->intf_idx)
);
-TRACE_EVENT(dpu_enc_phys_vid_irq_ctrl,
- TP_PROTO(uint32_t drm_id, enum dpu_intf intf_idx, bool enable,
+TRACE_EVENT(dpu_enc_phys_vid_irq_enable,
+ TP_PROTO(uint32_t drm_id, enum dpu_intf intf_idx,
int refcnt),
- TP_ARGS(drm_id, intf_idx, enable, refcnt),
+ TP_ARGS(drm_id, intf_idx, refcnt),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( enum dpu_intf, intf_idx )
+ __field( int, refcnt )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->intf_idx = intf_idx;
+ __entry->refcnt = refcnt;
+ ),
+ TP_printk("id=%u, intf_idx=%d refcnt=%d", __entry->drm_id,
+ __entry->intf_idx,
+ __entry->refcnt)
+);
+
+TRACE_EVENT(dpu_enc_phys_vid_irq_disable,
+ TP_PROTO(uint32_t drm_id, enum dpu_intf intf_idx,
+ int refcnt),
+ TP_ARGS(drm_id, intf_idx, refcnt),
TP_STRUCT__entry(
__field( uint32_t, drm_id )
__field( enum dpu_intf, intf_idx )
- __field( bool, enable )
__field( int, refcnt )
),
TP_fast_assign(
__entry->drm_id = drm_id;
__entry->intf_idx = intf_idx;
- __entry->enable = enable;
__entry->refcnt = refcnt;
),
- TP_printk("id=%u, intf_idx=%d enable=%s refcnt=%d", __entry->drm_id,
- __entry->intf_idx, __entry->enable ? "true" : "false",
+ TP_printk("id=%u, intf_idx=%d refcnt=%d", __entry->drm_id,
+ __entry->intf_idx,
- __entry->drm_id)
+ __entry->refcnt)
);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c
index 2a5a68366582..16f144cbc0c9 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c
@@ -4,6 +4,7 @@
*/
#include <drm/drm_edid.h>
+#include <drm/drm_framebuffer.h>
#include "dpu_writeback.h"
@@ -24,6 +25,61 @@ static int dpu_wb_conn_get_modes(struct drm_connector *connector)
dev->mode_config.max_height);
}
+static int dpu_wb_conn_atomic_check(struct drm_connector *connector,
+ struct drm_atomic_state *state)
+{
+ struct drm_writeback_connector *wb_conn = drm_connector_to_writeback(connector);
+ struct dpu_wb_connector *dpu_wb_conn = to_dpu_wb_conn(wb_conn);
+ struct drm_connector_state *conn_state =
+ drm_atomic_get_new_connector_state(state, connector);
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ const struct drm_display_mode *mode;
+ struct drm_framebuffer *fb;
+
+ DPU_DEBUG("[atomic_check:%d]\n", connector->base.id);
+
+ if (!conn_state || !conn_state->connector) {
+ DPU_ERROR("invalid connector state\n");
+ return -EINVAL;
+ } else if (conn_state->connector->status != connector_status_connected) {
+ DPU_ERROR("connector not connected %d\n", conn_state->connector->status);
+ return -EINVAL;
+ }
+
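+ /* nothing to check until the connector has a CRTC and a queued writeback job */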
+ crtc = conn_state->crtc;
+ if (!crtc)
+ return 0;
+
+ if (!conn_state->writeback_job || !conn_state->writeback_job->fb)
+ return 0;
+
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ mode = &crtc_state->mode;
+
+ fb = conn_state->writeback_job->fb;
+
+ DPU_DEBUG("[fb_id:%u][fb:%u,%u][mode:\"%s\":%ux%u]\n", fb->base.id, fb->width, fb->height,
+ mode->name, mode->hdisplay, mode->vdisplay);
+
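+ /*
+ * The writeback framebuffer must match the CRTC mode exactly and must
+ * not exceed the WB block's maximum line width.
+ */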
+ if (fb->width != mode->hdisplay) {
+ DPU_ERROR("invalid fb w=%d, mode w=%d\n", fb->width, mode->hdisplay);
+ return -EINVAL;
+ } else if (fb->height != mode->vdisplay) {
+ DPU_ERROR("invalid fb h=%d, mode h=%d\n", fb->height, mode->vdisplay);
+ return -EINVAL;
+ } else if (fb->width > dpu_wb_conn->maxlinewidth) {
+ DPU_ERROR("invalid fb w=%d, maxlinewidth=%u\n",
+ fb->width, dpu_wb_conn->maxlinewidth);
+ return -EINVAL;
+ }
+
+ return drm_atomic_helper_check_wb_connector_state(conn_state->connector, conn_state->state);
+}
+
static const struct drm_connector_funcs dpu_wb_conn_funcs = {
.reset = drm_atomic_helper_connector_reset,
.fill_modes = drm_helper_probe_single_connector_modes,
@@ -59,12 +115,13 @@ static void dpu_wb_conn_cleanup_job(struct drm_writeback_connector *connector,
static const struct drm_connector_helper_funcs dpu_wb_conn_helper_funcs = {
.get_modes = dpu_wb_conn_get_modes,
+ .atomic_check = dpu_wb_conn_atomic_check,
.prepare_writeback_job = dpu_wb_conn_prepare_job,
.cleanup_writeback_job = dpu_wb_conn_cleanup_job,
};
int dpu_writeback_init(struct drm_device *dev, struct drm_encoder *enc,
- const u32 *format_list, u32 num_formats)
+ const u32 *format_list, u32 num_formats, u32 maxlinewidth)
{
struct dpu_wb_connector *dpu_wb_conn;
int rc = 0;
@@ -73,6 +130,8 @@ int dpu_writeback_init(struct drm_device *dev, struct drm_encoder *enc,
if (!dpu_wb_conn)
return -ENOMEM;
+ dpu_wb_conn->maxlinewidth = maxlinewidth;
+
drm_connector_helper_add(&dpu_wb_conn->base.base, &dpu_wb_conn_helper_funcs);
/* DPU initializes the encoder and sets it up completely for writeback
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.h
index 5a75ea916101..4b11cca8014c 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.h
@@ -18,6 +18,7 @@
struct dpu_wb_connector {
struct drm_writeback_connector base;
struct drm_encoder *wb_enc;
+ u32 maxlinewidth;
};
static inline struct dpu_wb_connector *to_dpu_wb_conn(struct drm_writeback_connector *conn)
@@ -26,6 +27,6 @@ static inline struct dpu_wb_connector *to_dpu_wb_conn(struct drm_writeback_conne
}
int dpu_writeback_init(struct drm_device *dev, struct drm_encoder *enc,
- const u32 *format_list, u32 num_formats);
+ const u32 *format_list, u32 num_formats, u32 maxlinewidth);
#endif /*_DPU_WRITEBACK_H */