-rw-r--r--  Documentation/gpu/i915.rst | 3
-rw-r--r--  drivers/acpi/acpi_lpss.c | 11
-rw-r--r--  drivers/gpu/drm/Kconfig | 4
-rw-r--r--  drivers/gpu/drm/drm_atomic_state_helper.c | 78
-rw-r--r--  drivers/gpu/drm/drm_file.c | 42
-rw-r--r--  drivers/gpu/drm/i915/Kconfig.debug | 2
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 4
-rw-r--r--  drivers/gpu/drm/i915/display/icl_dsi.c | 232
-rw-r--r--  drivers/gpu/drm/i915/display/intel_atomic.c | 53
-rw-r--r--  drivers/gpu/drm/i915/display/intel_atomic.h | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_atomic_plane.c | 138
-rw-r--r--  drivers/gpu/drm/i915/display/intel_atomic_plane.h | 5
-rw-r--r--  drivers/gpu/drm/i915/display/intel_audio.c | 12
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bios.c | 563
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bios.h | 5
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bw.c | 31
-rw-r--r--  drivers/gpu/drm/i915/display/intel_cdclk.c | 20
-rw-r--r--  drivers/gpu/drm/i915/display/intel_color.c | 198
-rw-r--r--  drivers/gpu/drm/i915/display/intel_crt.c | 34
-rw-r--r--  drivers/gpu/drm/i915/display/intel_ddi.c | 412
-rw-r--r--  drivers/gpu/drm/i915/display/intel_ddi.h | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display.c | 3060
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display.h | 46
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_power.c | 228
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_power.h | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_types.h | 75
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp.c | 149
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_mst.c | 81
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpio_phy.c | 14
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpll_mgr.c | 20
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dsb.c | 37
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dsb.h | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dvo.c | 14
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fbc.c | 309
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fbc.h | 11
-rw-r--r--  drivers/gpu/drm/i915/display/intel_frontbuffer.c | 16
-rw-r--r--  drivers/gpu/drm/i915/display/intel_frontbuffer.h | 34
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdcp.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdmi.c | 87
-rw-r--r--  drivers/gpu/drm/i915/display/intel_lspcon.c | 4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_lvds.c | 12
-rw-r--r--  drivers/gpu/drm/i915/display/intel_opregion.c | 7
-rw-r--r--  drivers/gpu/drm/i915/display/intel_overlay.c | 29
-rw-r--r--  drivers/gpu/drm/i915/display/intel_panel.c | 30
-rw-r--r--  drivers/gpu/drm/i915/display/intel_pipe_crc.c | 6
-rw-r--r--  drivers/gpu/drm/i915/display/intel_psr.c | 153
-rw-r--r--  drivers/gpu/drm/i915/display/intel_sdvo.c | 29
-rw-r--r--  drivers/gpu/drm/i915/display/intel_sprite.c | 411
-rw-r--r--  drivers/gpu/drm/i915/display/intel_tv.c | 8
-rw-r--r--  drivers/gpu/drm/i915/display/intel_vbt_defs.h | 62
-rw-r--r--  drivers/gpu/drm/i915/display/intel_vdsc.c | 303
-rw-r--r--  drivers/gpu/drm/i915/display/intel_vdsc.h | 11
-rw-r--r--  drivers/gpu/drm/i915/display/vlv_dsi.c | 30
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_clflush.c | 14
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_context.c | 321
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_context.h | 48
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_context_types.h | 28
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_domain.c | 188
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 322
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_ioctls.h | 4
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_lmem.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_mman.c | 471
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_mman.h | 31
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object.c | 71
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object.h | 57
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object_types.h | 31
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_pages.c | 12
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_phys.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_pm.c | 24
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_region.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_shrinker.c | 5
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_stolen.c | 219
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_tiling.c | 1
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_userptr.c | 24
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c | 11
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/huge_pages.c | 23
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c | 5
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c | 171
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c | 319
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c | 125
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/mock_context.c | 20
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/mock_context.h | 5
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.h | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/debugfs_engines.c | 36
-rw-r--r--  drivers/gpu/drm/i915/gt/debugfs_engines.h | 14
-rw-r--r--  drivers/gpu/drm/i915/gt/debugfs_gt.c | 42
-rw-r--r--  drivers/gpu/drm/i915/gt/debugfs_gt.h | 39
-rw-r--r--  drivers/gpu/drm/i915/gt/debugfs_gt_pm.c | 601
-rw-r--r--  drivers/gpu/drm/i915/gt/debugfs_gt_pm.h | 14
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_breadcrumbs.c | 66
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_context.c | 69
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_context.h | 73
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_context_types.h | 10
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine.h | 25
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_cs.c | 211
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c | 20
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_pm.c | 33
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_pm.h | 21
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_types.h | 26
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gpu_commands.h | 29
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt.c | 275
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt.h | 6
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_irq.c | 12
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_pm.c | 46
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_pm.h | 5
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_requests.c | 41
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_requests.h | 1
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_types.h | 7
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_lrc.c | 382
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_lrc.h | 7
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_lrc_reg.h | 4
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_mocs.c | 161
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_rc6.c | 149
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_rc6.h | 6
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_rc6_types.h | 4
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_renderstate.c | 97
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_renderstate.h | 17
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_reset.c | 105
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_ring_submission.c | 228
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_rps.c | 121
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_rps.h | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_timeline.c | 90
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_timeline.h | 4
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_timeline_types.h | 14
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_workarounds.c | 47
-rw-r--r--  drivers/gpu/drm/i915/gt/mock_engine.c | 50
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_context.c | 120
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_engine_cs.c | 360
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c | 36
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_gt_pm.c | 19
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_hangcheck.c | 178
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_lrc.c | 560
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_mocs.c | 419
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_rc6.c | 203
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_rc6.h | 13
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_timeline.c | 6
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_workarounds.c | 72
-rw-r--r--  drivers/gpu/drm/i915/gt/selftests/mock_timeline.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/selftests/mock_timeline.h | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc.c | 69
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc.h | 46
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c | 24
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c | 309
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h | 52
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h | 1
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c | 733
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h | 54
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_uc.c | 68
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c | 58
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h | 5
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/selftest_guc.c | 299
-rw-r--r--  drivers/gpu/drm/i915/gvt/cmd_parser.h | 4
-rw-r--r--  drivers/gpu/drm/i915/gvt/display.h | 5
-rw-r--r--  drivers/gpu/drm/i915/gvt/edid.h | 4
-rw-r--r--  drivers/gpu/drm/i915/gvt/execlist.h | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/fb_decoder.h | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/gtt.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/hypercall.h | 4
-rw-r--r--  drivers/gpu/drm/i915/gvt/interrupt.h | 3
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio.h | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/page_track.h | 3
-rw-r--r--  drivers/gpu/drm/i915/gvt/sched_policy.h | 3
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.c | 43
-rw-r--r--  drivers/gpu/drm/i915/i915_active.c | 123
-rw-r--r--  drivers/gpu/drm/i915/i915_active.h | 28
-rw-r--r--  drivers/gpu/drm/i915/i915_active_types.h | 15
-rw-r--r--  drivers/gpu/drm/i915/i915_cmd_parser.c | 318
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 343
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 28
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 91
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 366
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.h | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_evict.c | 39
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 86
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.h | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_getparam.c | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_globals.c | 53
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c | 47
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 66
-rw-r--r--  drivers/gpu/drm/i915/i915_memcpy.c | 75
-rw-r--r--  drivers/gpu/drm/i915/i915_memcpy.h | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_pci.c | 21
-rw-r--r--  drivers/gpu/drm/i915/i915_perf.c | 47
-rw-r--r--  drivers/gpu/drm/i915/i915_perf.h | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_perf_types.h | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_pmu.c | 136
-rw-r--r--  drivers/gpu/drm/i915/i915_pmu.h | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 149
-rw-r--r--  drivers/gpu/drm/i915/i915_request.c | 271
-rw-r--r--  drivers/gpu/drm/i915/i915_request.h | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_scheduler.c | 15
-rw-r--r--  drivers/gpu/drm/i915/i915_scheduler.h | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_selftest.h | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_sw_fence.c | 40
-rw-r--r--  drivers/gpu/drm/i915/i915_sw_fence.h | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_sw_fence_work.c | 18
-rw-r--r--  drivers/gpu/drm/i915/i915_sysfs.c | 31
-rw-r--r--  drivers/gpu/drm/i915/i915_trace.h | 6
-rw-r--r--  drivers/gpu/drm/i915/i915_utils.h | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_vma.c | 84
-rw-r--r--  drivers/gpu/drm/i915/i915_vma.h | 29
-rw-r--r--  drivers/gpu/drm/i915/intel_device_info.c | 35
-rw-r--r--  drivers/gpu/drm/i915/intel_device_info.h | 9
-rw-r--r--  drivers/gpu/drm/i915/intel_memory_region.c | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_pch.c | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_pch.h | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 323
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.h | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_region_lmem.c | 6
-rw-r--r--  drivers/gpu/drm/i915/intel_wakeref.h | 19
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_active.c | 43
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem.c | 9
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_evict.c | 8
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_gtt.c | 31
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_live_selftests.h | 4
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_perf.c | 2
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_perf_selftests.h | 13
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_request.c | 129
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_selftest.c | 43
-rw-r--r--  drivers/gpu/drm/i915/selftests/igt_mmap.c | 39
-rw-r--r--  drivers/gpu/drm/i915/selftests/igt_mmap.h | 19
-rw-r--r--  drivers/gpu/drm/i915/selftests/igt_spinner.c | 40
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_memory_region.c | 10
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_drm.c | 73
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_drm.h | 18
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_gem_device.c | 25
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_gtt.h | 3
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_region.h | 5
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_uncore.h | 3
-rw-r--r--  drivers/mfd/intel_soc_pmic_core.c | 2
-rw-r--r--  include/drm/drm_atomic_state_helper.h | 6
-rw-r--r--  include/drm/drm_file.h | 3
-rw-r--r--  include/drm/drm_util.h | 2
-rw-r--r--  include/drm/i915_pciids.h | 31
-rw-r--r--  include/linux/lockdep.h | 8
-rw-r--r--  include/uapi/drm/drm_fourcc.h | 11
-rw-r--r--  include/uapi/drm/i915_drm.h | 32
240 files changed, 12435 insertions, 7851 deletions
diff --git a/Documentation/gpu/i915.rst b/Documentation/gpu/i915.rst
index d0947c5c4ab8..e539c42a3e78 100644
--- a/Documentation/gpu/i915.rst
+++ b/Documentation/gpu/i915.rst
@@ -466,9 +466,6 @@ GuC-based command submission
.. kernel-doc:: drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
:doc: GuC-based command submission
-.. kernel-doc:: drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
- :internal:
-
HuC
---
.. kernel-doc:: drivers/gpu/drm/i915/gt/uc/intel_huc.c
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index 70f740b09684..db18df6cb330 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -69,10 +69,6 @@ ACPI_MODULE_NAME("acpi_lpss");
#define LPSS_SAVE_CTX BIT(4)
#define LPSS_NO_D3_DELAY BIT(5)
-/* Crystal Cove PMIC shares same ACPI ID between different platforms */
-#define BYT_CRC_HRV 2
-#define CHT_CRC_HRV 3
-
struct lpss_private_data;
struct lpss_device_desc {
@@ -158,7 +154,7 @@ static void lpss_deassert_reset(struct lpss_private_data *pdata)
*/
static struct pwm_lookup byt_pwm_lookup[] = {
PWM_LOOKUP_WITH_MODULE("80860F09:00", 0, "0000:00:02.0",
- "pwm_backlight", 0, PWM_POLARITY_NORMAL,
+ "pwm_soc_backlight", 0, PWM_POLARITY_NORMAL,
"pwm-lpss-platform"),
};
@@ -170,8 +166,7 @@ static void byt_pwm_setup(struct lpss_private_data *pdata)
if (!adev->pnp.unique_id || strcmp(adev->pnp.unique_id, "1"))
return;
- if (!acpi_dev_present("INT33FD", NULL, BYT_CRC_HRV))
- pwm_add_table(byt_pwm_lookup, ARRAY_SIZE(byt_pwm_lookup));
+ pwm_add_table(byt_pwm_lookup, ARRAY_SIZE(byt_pwm_lookup));
}
#define LPSS_I2C_ENABLE 0x6c
@@ -204,7 +199,7 @@ static void byt_i2c_setup(struct lpss_private_data *pdata)
/* BSW PWM used for backlight control by the i915 driver */
static struct pwm_lookup bsw_pwm_lookup[] = {
PWM_LOOKUP_WITH_MODULE("80862288:00", 0, "0000:00:02.0",
- "pwm_backlight", 0, PWM_POLARITY_NORMAL,
+ "pwm_soc_backlight", 0, PWM_POLARITY_NORMAL,
"pwm-lpss-platform"),
};
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index d3e3ea3b5843..7041323a7bff 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -54,6 +54,9 @@ config DRM_DEBUG_MM
If in doubt, say "N".
+config DRM_EXPORT_FOR_TESTS
+ bool
+
config DRM_DEBUG_SELFTEST
tristate "kselftests for DRM"
depends on DRM
@@ -61,6 +64,7 @@ config DRM_DEBUG_SELFTEST
select PRIME_NUMBERS
select DRM_LIB_RANDOM
select DRM_KMS_HELPER
+ select DRM_EXPORT_FOR_TESTS if m
default n
help
This option provides kernel modules that can be used to run
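[Annotation] The new DRM_EXPORT_FOR_TESTS option is a hidden bool that test configs select when built as modules; it gates symbols that exist purely for selftests. The include/drm/drm_util.h hunk (2 lines in the diffstat) is not shown in this excerpt, but based on the option name and the EXPORT_SYMBOL_FOR_TESTS_ONLY() use further down, the gating macro plausibly looks like this (a sketch, not verified against the header hunk):

	#ifdef CONFIG_DRM_EXPORT_FOR_TESTS
	#define EXPORT_SYMBOL_FOR_TESTS_ONLY(x) EXPORT_SYMBOL(x)
	#else
	#define EXPORT_SYMBOL_FOR_TESTS_ONLY(x)
	#endif

With that shape, a production kernel without test configs exports nothing extra, while modular selftests can still link against helpers such as mock_drm_getfile() below.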
diff --git a/drivers/gpu/drm/drm_atomic_state_helper.c b/drivers/gpu/drm/drm_atomic_state_helper.c
index d0a937fb0c56..7cf3cf936547 100644
--- a/drivers/gpu/drm/drm_atomic_state_helper.c
+++ b/drivers/gpu/drm/drm_atomic_state_helper.c
@@ -58,6 +58,22 @@
*/
/**
+ * __drm_atomic_helper_crtc_state_reset - reset the CRTC state
+ * @crtc_state: atomic CRTC state, must not be NULL
+ * @crtc: CRTC object, must not be NULL
+ *
+ * Initializes the newly allocated @crtc_state with default
+ * values. This is useful for drivers that subclass the CRTC state.
+ */
+void
+__drm_atomic_helper_crtc_state_reset(struct drm_crtc_state *crtc_state,
+ struct drm_crtc *crtc)
+{
+ crtc_state->crtc = crtc;
+}
+EXPORT_SYMBOL(__drm_atomic_helper_crtc_state_reset);
+
+/**
* __drm_atomic_helper_crtc_reset - reset state on CRTC
* @crtc: drm CRTC
* @crtc_state: CRTC state to assign
@@ -74,7 +90,7 @@ __drm_atomic_helper_crtc_reset(struct drm_crtc *crtc,
struct drm_crtc_state *crtc_state)
{
if (crtc_state)
- crtc_state->crtc = crtc;
+ __drm_atomic_helper_crtc_state_reset(crtc_state, crtc);
crtc->state = crtc_state;
}
@@ -212,23 +228,43 @@ void drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
EXPORT_SYMBOL(drm_atomic_helper_crtc_destroy_state);
/**
- * __drm_atomic_helper_plane_reset - resets planes state to default values
+ * __drm_atomic_helper_plane_state_reset - resets plane state to default values
+ * @plane_state: atomic plane state, must not be NULL
* @plane: plane object, must not be NULL
- * @state: atomic plane state, must not be NULL
*
- * Initializes plane state to default. This is useful for drivers that subclass
- * the plane state.
+ * Initializes the newly allocated @plane_state with default
+ * values. This is useful for drivers that subclass the plane state.
*/
-void __drm_atomic_helper_plane_reset(struct drm_plane *plane,
- struct drm_plane_state *state)
+void __drm_atomic_helper_plane_state_reset(struct drm_plane_state *plane_state,
+ struct drm_plane *plane)
{
- state->plane = plane;
- state->rotation = DRM_MODE_ROTATE_0;
+ plane_state->plane = plane;
+ plane_state->rotation = DRM_MODE_ROTATE_0;
- state->alpha = DRM_BLEND_ALPHA_OPAQUE;
- state->pixel_blend_mode = DRM_MODE_BLEND_PREMULTI;
+ plane_state->alpha = DRM_BLEND_ALPHA_OPAQUE;
+ plane_state->pixel_blend_mode = DRM_MODE_BLEND_PREMULTI;
+}
+EXPORT_SYMBOL(__drm_atomic_helper_plane_state_reset);
- plane->state = state;
+/**
+ * __drm_atomic_helper_plane_reset - reset state on plane
+ * @plane: drm plane
+ * @plane_state: plane state to assign
+ *
+ * Initializes the newly allocated @plane_state and assigns it to
+ * the &drm_plane->state pointer of @plane, usually required when
+ * initializing the drivers or when called from the &drm_plane_funcs.reset
+ * hook.
+ *
+ * This is useful for drivers that subclass the plane state.
+ */
+void __drm_atomic_helper_plane_reset(struct drm_plane *plane,
+ struct drm_plane_state *plane_state)
+{
+ if (plane_state)
+ __drm_atomic_helper_plane_state_reset(plane_state, plane);
+
+ plane->state = plane_state;
}
EXPORT_SYMBOL(__drm_atomic_helper_plane_reset);
@@ -336,6 +372,22 @@ void drm_atomic_helper_plane_destroy_state(struct drm_plane *plane,
EXPORT_SYMBOL(drm_atomic_helper_plane_destroy_state);
/**
+ * __drm_atomic_helper_connector_state_reset - reset the connector state
+ * @conn_state: atomic connector state, must not be NULL
+ * @connector: connector object, must not be NULL
+ *
+ * Initializes the newly allocated @conn_state with default
+ * values. This is useful for drivers that subclass the connector state.
+ */
+void
+__drm_atomic_helper_connector_state_reset(struct drm_connector_state *conn_state,
+ struct drm_connector *connector)
+{
+ conn_state->connector = connector;
+}
+EXPORT_SYMBOL(__drm_atomic_helper_connector_state_reset);
+
+/**
* __drm_atomic_helper_connector_reset - reset state on connector
* @connector: drm connector
* @conn_state: connector state to assign
@@ -352,7 +404,7 @@ __drm_atomic_helper_connector_reset(struct drm_connector *connector,
struct drm_connector_state *conn_state)
{
if (conn_state)
- conn_state->connector = connector;
+ __drm_atomic_helper_connector_state_reset(conn_state, connector);
connector->state = conn_state;
}
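
[Annotation] The three new __drm_atomic_helper_*_state_reset() helpers split "initialize the state" from "assign it to the object", so drivers that embed the base state in a larger struct can reset the base portion and their own fields separately. A minimal sketch of a driver-side user (the foo_* names are illustrative, not from this patch):

	struct foo_crtc_state {
		struct drm_crtc_state base;
		int custom_field;
	};

	static void foo_crtc_reset(struct drm_crtc *crtc)
	{
		struct foo_crtc_state *state = kzalloc(sizeof(*state), GFP_KERNEL);

		if (crtc->state)
			crtc->funcs->atomic_destroy_state(crtc, crtc->state);

		if (!state) {
			crtc->state = NULL;
			return;
		}

		/* Initialize only the base state... */
		__drm_atomic_helper_crtc_state_reset(&state->base, crtc);
		/* ...then the driver's own fields, before publishing. */
		state->custom_field = -1;

		crtc->state = &state->base;
	}

The i915 hunks further down use the plane variant in exactly this way; see intel_plane_state_reset() in intel_atomic_plane.c.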
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index 8b5a06013ccc..92d16724f949 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -31,7 +31,9 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
+#include <linux/anon_inodes.h>
#include <linux/dma-fence.h>
+#include <linux/file.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/poll.h>
@@ -754,3 +756,43 @@ void drm_send_event(struct drm_device *dev, struct drm_pending_event *e)
spin_unlock_irqrestore(&dev->event_lock, irqflags);
}
EXPORT_SYMBOL(drm_send_event);
+
+/**
+ * mock_drm_getfile - Create a new struct file for the drm device
+ * @minor: drm minor to wrap (e.g. #drm_device.primary)
+ * @flags: file creation mode (O_RDWR etc)
+ *
+ * This creates a new struct file that wraps a DRM file context around a
+ * DRM minor. This mimics userspace opening e.g. /dev/dri/card0, but without
+ * invoking userspace. The struct file may be operated on using its f_op
+ * (the drm_device.driver.fops) to mimic userspace operations, or be supplied
+ * to userspace-facing functions as an internal/anonymous client.
+ *
+ * RETURNS:
+ * Pointer to newly created struct file, ERR_PTR on failure.
+ */
+struct file *mock_drm_getfile(struct drm_minor *minor, unsigned int flags)
+{
+ struct drm_device *dev = minor->dev;
+ struct drm_file *priv;
+ struct file *file;
+
+ priv = drm_file_alloc(minor);
+ if (IS_ERR(priv))
+ return ERR_CAST(priv);
+
+ file = anon_inode_getfile("drm", dev->driver->fops, priv, flags);
+ if (IS_ERR(file)) {
+ drm_file_free(priv);
+ return file;
+ }
+
+ /* Everyone shares a single global address space */
+ file->f_mapping = dev->anon_inode->i_mapping;
+
+ drm_dev_get(dev);
+ priv->filp = file;
+
+ return file;
+}
+EXPORT_SYMBOL_FOR_TESTS_ONLY(mock_drm_getfile);
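
[Annotation] mock_drm_getfile() gives kernel selftests a drm_file without a file-descriptor table entry. A sketch of a caller, assuming the usual selftest error conventions (this exact function is not part of the patch):

	static int igt_open_anonymous_client(struct drm_device *dev)
	{
		struct file *file;

		file = mock_drm_getfile(dev->primary, O_RDWR);
		if (IS_ERR(file))
			return PTR_ERR(file);

		/*
		 * file->private_data is the drm_file for this anonymous
		 * client; drive it via file->f_op, or pass it to
		 * userspace-facing helpers that expect a struct file.
		 */

		fput(file); /* releases the drm_file and the device reference */
		return 0;
	}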
diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug
index 438040ff0179..1cb28c20807c 100644
--- a/drivers/gpu/drm/i915/Kconfig.debug
+++ b/drivers/gpu/drm/i915/Kconfig.debug
@@ -27,6 +27,7 @@ config DRM_I915_DEBUG
select X86_MSR # used by igt/pm_rpm
select DRM_VGEM # used by igt/prime_vgem (dmabuf interop checks)
select DRM_DEBUG_MM if DRM=y
+ select DRM_EXPORT_FOR_TESTS if m
select DRM_DEBUG_SELFTEST
select DMABUF_SELFTESTS
select SW_SYNC # signaling validation framework (igt/syncobj*)
@@ -149,6 +150,7 @@ config DRM_I915_SELFTEST
bool "Enable selftests upon driver load"
depends on DRM_I915
default n
+ select DRM_EXPORT_FOR_TESTS if m
select FAULT_INJECTION
select PRIME_NUMBERS
help
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 90dcf09f52cc..b0c53661f62b 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -75,6 +75,9 @@ i915-$(CONFIG_PERF_EVENTS) += i915_pmu.o
# "Graphics Technology" (aka we talk to the gpu)
obj-y += gt/
gt-y += \
+ gt/debugfs_engines.o \
+ gt/debugfs_gt.o \
+ gt/debugfs_gt_pm.o \
gt/intel_breadcrumbs.o \
gt/intel_context.o \
gt/intel_engine_cs.o \
@@ -259,6 +262,7 @@ i915-$(CONFIG_DRM_I915_SELFTEST) += \
selftests/i915_selftest.o \
selftests/igt_flush_test.o \
selftests/igt_live_test.o \
+ selftests/igt_mmap.o \
selftests/igt_reset.o \
selftests/igt_spinner.o
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index 325df29b0447..006b1a297e6f 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -34,6 +34,7 @@
#include "intel_ddi.h"
#include "intel_dsi.h"
#include "intel_panel.h"
+#include "intel_vdsc.h"
static inline int header_credits_available(struct drm_i915_private *dev_priv,
enum transcoder dsi_trans)
@@ -276,7 +277,7 @@ static void configure_dual_link_mode(struct intel_encoder *encoder,
if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) {
const struct drm_display_mode *adjusted_mode =
- &pipe_config->base.adjusted_mode;
+ &pipe_config->hw.adjusted_mode;
u32 dss_ctl2;
u16 hactive = adjusted_mode->crtc_hdisplay;
u16 dl_buffer_depth;
@@ -301,18 +302,31 @@ static void configure_dual_link_mode(struct intel_encoder *encoder,
I915_WRITE(DSS_CTL1, dss_ctl1);
}
-static void gen11_dsi_program_esc_clk_div(struct intel_encoder *encoder)
+/* aka DSI 8X clock */
+static int afe_clk(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ int bpp;
+
+ if (crtc_state->dsc.compression_enable)
+ bpp = crtc_state->dsc.compressed_bpp;
+ else
+ bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
+
+ return DIV_ROUND_CLOSEST(intel_dsi->pclk * bpp, intel_dsi->lane_count);
+}
+
+static void gen11_dsi_program_esc_clk_div(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
enum port port;
- u32 bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
- u32 afe_clk_khz; /* 8X Clock */
+ int afe_clk_khz;
u32 esc_clk_div_m;
- afe_clk_khz = DIV_ROUND_CLOSEST(intel_dsi->pclk * bpp,
- intel_dsi->lane_count);
-
+ afe_clk_khz = afe_clk(encoder, crtc_state);
esc_clk_div_m = DIV_ROUND_UP(afe_clk_khz, DSI_MAX_ESC_CLK);
for_each_dsi_port(port, intel_dsi->ports) {
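
[Annotation] For a concrete feel of the numbers: with a 148500 kHz pixel clock (1080p), RGB888 (24 bpp) and 4 lanes, afe_clk() returns 148500 * 24 / 4 = 891000 kHz; if DSC compresses the stream to 12 bpp, it returns 445500 kHz instead. Assuming DSI_MAX_ESC_CLK is the 20000 kHz escape-clock ceiling from the MIPI D-PHY spec (its definition is not shown in this excerpt), the uncompressed case gives esc_clk_div_m = DIV_ROUND_UP(891000, 20000) = 45.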
@@ -490,7 +504,9 @@ static void gen11_dsi_enable_ddi_buffer(struct intel_encoder *encoder)
}
}
-static void gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder)
+static void
+gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
@@ -531,7 +547,7 @@ static void gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder)
* leave all fields at HW default values.
*/
if (IS_GEN(dev_priv, 11)) {
- if (intel_dsi_bitrate(intel_dsi) <= 800000) {
+ if (afe_clk(encoder, crtc_state) <= 800000) {
for_each_dsi_port(port, intel_dsi->ports) {
tmp = I915_READ(DPHY_TA_TIMING_PARAM(port));
tmp &= ~TA_SURE_MASK;
@@ -625,7 +641,7 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
- struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc);
enum pipe pipe = intel_crtc->pipe;
u32 tmp;
enum port port;
@@ -641,7 +657,7 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
tmp |= EOTP_DISABLED;
/* enable link calibration if freq > 1.5Gbps */
- if (intel_dsi_bitrate(intel_dsi) >= 1500 * 1000) {
+ if (afe_clk(encoder, pipe_config) >= 1500 * 1000) {
tmp &= ~LINK_CALIBRATION_MASK;
tmp |= CALIBRATION_ENABLED_INITIAL_ONLY;
}
@@ -667,22 +683,26 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
/* select pixel format */
tmp &= ~PIX_FMT_MASK;
- switch (intel_dsi->pixel_format) {
- default:
- MISSING_CASE(intel_dsi->pixel_format);
- /* fallthrough */
- case MIPI_DSI_FMT_RGB565:
- tmp |= PIX_FMT_RGB565;
- break;
- case MIPI_DSI_FMT_RGB666_PACKED:
- tmp |= PIX_FMT_RGB666_PACKED;
- break;
- case MIPI_DSI_FMT_RGB666:
- tmp |= PIX_FMT_RGB666_LOOSE;
- break;
- case MIPI_DSI_FMT_RGB888:
- tmp |= PIX_FMT_RGB888;
- break;
+ if (pipe_config->dsc.compression_enable) {
+ tmp |= PIX_FMT_COMPRESSED;
+ } else {
+ switch (intel_dsi->pixel_format) {
+ default:
+ MISSING_CASE(intel_dsi->pixel_format);
+ /* fallthrough */
+ case MIPI_DSI_FMT_RGB565:
+ tmp |= PIX_FMT_RGB565;
+ break;
+ case MIPI_DSI_FMT_RGB666_PACKED:
+ tmp |= PIX_FMT_RGB666_PACKED;
+ break;
+ case MIPI_DSI_FMT_RGB666:
+ tmp |= PIX_FMT_RGB666_LOOSE;
+ break;
+ case MIPI_DSI_FMT_RGB888:
+ tmp |= PIX_FMT_RGB888;
+ break;
+ }
}
if (INTEL_GEN(dev_priv) >= 12) {
@@ -745,6 +765,9 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
case PIPE_C:
tmp |= TRANS_DDI_EDP_INPUT_C_ONOFF;
break;
+ case PIPE_D:
+ tmp |= TRANS_DDI_EDP_INPUT_D_ONOFF;
+ break;
}
/* enable DDI buffer */
@@ -763,12 +786,12 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
static void
gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
- const struct intel_crtc_state *pipe_config)
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
const struct drm_display_mode *adjusted_mode =
- &pipe_config->base.adjusted_mode;
+ &crtc_state->hw.adjusted_mode;
enum port port;
enum transcoder dsi_trans;
/* horizontal timings */
@@ -776,11 +799,25 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
u16 hback_porch;
/* vertical timings */
u16 vtotal, vactive, vsync_start, vsync_end, vsync_shift;
+ int mul = 1, div = 1;
+
+ /*
+ * Adjust horizontal timings (htotal, hsync_start, hsync_end) to account
+ * for slower link speed if DSC is enabled.
+ *
+ * The compression frequency ratio is the ratio between compressed and
+ * non-compressed link speeds, and simplifies down to the ratio between
+ * compressed and non-compressed bpp.
+ */
+ if (crtc_state->dsc.compression_enable) {
+ mul = crtc_state->dsc.compressed_bpp;
+ div = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
+ }
hactive = adjusted_mode->crtc_hdisplay;
- htotal = adjusted_mode->crtc_htotal;
- hsync_start = adjusted_mode->crtc_hsync_start;
- hsync_end = adjusted_mode->crtc_hsync_end;
+ htotal = DIV_ROUND_UP(adjusted_mode->crtc_htotal * mul, div);
+ hsync_start = DIV_ROUND_UP(adjusted_mode->crtc_hsync_start * mul, div);
+ hsync_end = DIV_ROUND_UP(adjusted_mode->crtc_hsync_end * mul, div);
hsync_size = hsync_end - hsync_start;
hback_porch = (adjusted_mode->crtc_htotal -
adjusted_mode->crtc_hsync_end);
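
[Annotation] A worked example of the scaling above: for RGB888 compressed to 12 bpp, mul/div = 12/24, so a mode with crtc_htotal = 2200 is programmed as DIV_ROUND_UP(2200 * 12, 24) = 1100. The compressed link carries half the bits per pixel, so the horizontal timings measured in link clocks shrink by the same ratio; gen11_dsi_get_timings() below applies the inverse (mul and div swapped) when reading the state back out.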
@@ -904,7 +941,8 @@ static void gen11_dsi_enable_transcoder(struct intel_encoder *encoder)
}
}
-static void gen11_dsi_setup_timeouts(struct intel_encoder *encoder)
+static void gen11_dsi_setup_timeouts(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
@@ -919,7 +957,7 @@ static void gen11_dsi_setup_timeouts(struct intel_encoder *encoder)
* TIME_NS = (BYTE_CLK_COUNT * 8 * 10^6)/ Bitrate
* ESCAPE_CLK_COUNT = TIME_NS/ESC_CLK_NS
*/
- divisor = intel_dsi_tlpx_ns(intel_dsi) * intel_dsi_bitrate(intel_dsi) * 1000;
+ divisor = intel_dsi_tlpx_ns(intel_dsi) * afe_clk(encoder, crtc_state) * 1000;
mul = 8 * 1000000;
hs_tx_timeout = DIV_ROUND_UP(intel_dsi->hs_tx_timeout * mul,
divisor);
@@ -955,7 +993,7 @@ static void gen11_dsi_setup_timeouts(struct intel_encoder *encoder)
static void
gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder,
- const struct intel_crtc_state *pipe_config)
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -972,13 +1010,13 @@ gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder,
gen11_dsi_enable_ddi_buffer(encoder);
/* setup D-PHY timings */
- gen11_dsi_setup_dphy_timings(encoder);
+ gen11_dsi_setup_dphy_timings(encoder, crtc_state);
/* step 4h: setup DSI protocol timeouts */
- gen11_dsi_setup_timeouts(encoder);
+ gen11_dsi_setup_timeouts(encoder, crtc_state);
/* Step (4h, 4i, 4j, 4k): Configure transcoder */
- gen11_dsi_configure_transcoder(encoder, pipe_config);
+ gen11_dsi_configure_transcoder(encoder, crtc_state);
/* Step 4l: Gate DDI clocks */
if (IS_GEN(dev_priv, 11))
@@ -1025,14 +1063,14 @@ static void gen11_dsi_powerup_panel(struct intel_encoder *encoder)
}
static void gen11_dsi_pre_pll_enable(struct intel_encoder *encoder,
- const struct intel_crtc_state *pipe_config,
+ const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
/* step2: enable IO power */
gen11_dsi_enable_io_power(encoder);
/* step3: enable DSI PLL */
- gen11_dsi_program_esc_clk_div(encoder);
+ gen11_dsi_program_esc_clk_div(encoder, crtc_state);
}
static void gen11_dsi_pre_enable(struct intel_encoder *encoder,
@@ -1050,6 +1088,8 @@ static void gen11_dsi_pre_enable(struct intel_encoder *encoder,
/* step5: program and powerup panel */
gen11_dsi_powerup_panel(encoder);
+ intel_dsc_enable(encoder, pipe_config);
+
/* step6c: configure transcoder timings */
gen11_dsi_set_transcoder_timings(encoder, pipe_config);
@@ -1211,12 +1251,42 @@ static void gen11_dsi_disable(struct intel_encoder *encoder,
gen11_dsi_disable_io_power(encoder);
}
+static void gen11_dsi_post_disable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ intel_crtc_vblank_off(old_crtc_state);
+
+ intel_dsc_disable(old_crtc_state);
+
+ skylake_scaler_disable(old_crtc_state);
+}
+
+static enum drm_mode_status gen11_dsi_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ /* FIXME: DSC? */
+ return intel_dsi_mode_valid(connector, mode);
+}
+
static void gen11_dsi_get_timings(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
struct drm_display_mode *adjusted_mode =
- &pipe_config->base.adjusted_mode;
+ &pipe_config->hw.adjusted_mode;
+
+ if (pipe_config->dsc.compressed_bpp) {
+ int div = pipe_config->dsc.compressed_bpp;
+ int mul = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
+
+ adjusted_mode->crtc_htotal =
+ DIV_ROUND_UP(adjusted_mode->crtc_htotal * mul, div);
+ adjusted_mode->crtc_hsync_start =
+ DIV_ROUND_UP(adjusted_mode->crtc_hsync_start * mul, div);
+ adjusted_mode->crtc_hsync_end =
+ DIV_ROUND_UP(adjusted_mode->crtc_hsync_end * mul, div);
+ }
if (intel_dsi->dual_link) {
adjusted_mode->crtc_hdisplay *= 2;
@@ -1242,22 +1312,66 @@ static void gen11_dsi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ intel_dsc_get_config(encoder, pipe_config);
+
/* FIXME: adapt icl_ddi_clock_get() for DSI and use that? */
pipe_config->port_clock =
cnl_calc_wrpll_link(dev_priv, &pipe_config->dpll_hw_state);
- pipe_config->base.adjusted_mode.crtc_clock = intel_dsi->pclk;
+ pipe_config->hw.adjusted_mode.crtc_clock = intel_dsi->pclk;
if (intel_dsi->dual_link)
- pipe_config->base.adjusted_mode.crtc_clock *= 2;
+ pipe_config->hw.adjusted_mode.crtc_clock *= 2;
gen11_dsi_get_timings(encoder, pipe_config);
pipe_config->output_types |= BIT(INTEL_OUTPUT_DSI);
pipe_config->pipe_bpp = bdw_get_pipemisc_bpp(crtc);
}
+static int gen11_dsi_dsc_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
+ int dsc_max_bpc = INTEL_GEN(dev_priv) >= 12 ? 12 : 10;
+ bool use_dsc;
+ int ret;
+
+ use_dsc = intel_bios_get_dsc_params(encoder, crtc_state, dsc_max_bpc);
+ if (!use_dsc)
+ return 0;
+
+ if (crtc_state->pipe_bpp < 8 * 3)
+ return -EINVAL;
+
+ /* FIXME: split only when necessary */
+ if (crtc_state->dsc.slice_count > 1)
+ crtc_state->dsc.dsc_split = true;
+
+ vdsc_cfg->convert_rgb = true;
+
+ ret = intel_dsc_compute_params(encoder, crtc_state);
+ if (ret)
+ return ret;
+
+ /* DSI specific sanity checks on the common code */
+ WARN_ON(vdsc_cfg->vbr_enable);
+ WARN_ON(vdsc_cfg->simple_422);
+ WARN_ON(vdsc_cfg->pic_width % vdsc_cfg->slice_width);
+ WARN_ON(vdsc_cfg->slice_height < 8);
+ WARN_ON(vdsc_cfg->pic_height % vdsc_cfg->slice_height);
+
+ ret = drm_dsc_compute_rc_parameters(vdsc_cfg);
+ if (ret)
+ return ret;
+
+ crtc_state->dsc.compression_enable = true;
+
+ return 0;
+}
+
static int gen11_dsi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
@@ -1265,11 +1379,11 @@ static int gen11_dsi_compute_config(struct intel_encoder *encoder,
struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi,
base);
struct intel_connector *intel_connector = intel_dsi->attached_connector;
- struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
const struct drm_display_mode *fixed_mode =
intel_connector->panel.fixed_mode;
struct drm_display_mode *adjusted_mode =
- &pipe_config->base.adjusted_mode;
+ &pipe_config->hw.adjusted_mode;
pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
intel_fixed_panel_mode(fixed_mode, adjusted_mode);
@@ -1283,8 +1397,17 @@ static int gen11_dsi_compute_config(struct intel_encoder *encoder,
else
pipe_config->cpu_transcoder = TRANSCODER_DSI_0;
+ if (intel_dsi->pixel_format == MIPI_DSI_FMT_RGB888)
+ pipe_config->pipe_bpp = 24;
+ else
+ pipe_config->pipe_bpp = 18;
+
pipe_config->clock_set = true;
- pipe_config->port_clock = intel_dsi_bitrate(intel_dsi) / 5;
+
+ if (gen11_dsi_dsc_compute_config(encoder, pipe_config))
+ DRM_DEBUG_KMS("Attempting to use DSC failed\n");
+
+ pipe_config->port_clock = afe_clk(encoder, pipe_config) / 5;
return 0;
}
@@ -1292,8 +1415,13 @@ static int gen11_dsi_compute_config(struct intel_encoder *encoder,
static void gen11_dsi_get_power_domains(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state)
{
- get_dsi_io_power_domains(to_i915(encoder->base.dev),
- enc_to_intel_dsi(&encoder->base));
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+
+ get_dsi_io_power_domains(i915, enc_to_intel_dsi(&encoder->base));
+
+ if (crtc_state->dsc.compression_enable)
+ intel_display_power_get(i915,
+ intel_dsc_power_domain(crtc_state));
}
static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder,
@@ -1325,6 +1453,9 @@ static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder,
case TRANS_DDI_EDP_INPUT_C_ONOFF:
*pipe = PIPE_C;
break;
+ case TRANS_DDI_EDP_INPUT_D_ONOFF:
+ *pipe = PIPE_D;
+ break;
default:
DRM_ERROR("Invalid PIPE input\n");
goto out;
@@ -1360,7 +1491,7 @@ static const struct drm_connector_funcs gen11_dsi_connector_funcs = {
static const struct drm_connector_helper_funcs gen11_dsi_connector_helper_funcs = {
.get_modes = intel_dsi_get_modes,
- .mode_valid = intel_dsi_mode_valid,
+ .mode_valid = gen11_dsi_mode_valid,
.atomic_check = intel_digital_connector_atomic_check,
};
@@ -1577,6 +1708,7 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
encoder->pre_pll_enable = gen11_dsi_pre_pll_enable;
encoder->pre_enable = gen11_dsi_pre_enable;
encoder->disable = gen11_dsi_disable;
+ encoder->post_disable = gen11_dsi_post_disable;
encoder->port = port;
encoder->get_config = gen11_dsi_get_config;
encoder->update_pipe = intel_panel_update_backlight;
diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c
index c2875b10adf9..fd0026fc3618 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic.c
@@ -186,13 +186,22 @@ intel_digital_connector_duplicate_state(struct drm_connector *connector)
struct drm_crtc_state *
intel_crtc_duplicate_state(struct drm_crtc *crtc)
{
+ const struct intel_crtc_state *old_crtc_state = to_intel_crtc_state(crtc->state);
struct intel_crtc_state *crtc_state;
- crtc_state = kmemdup(crtc->state, sizeof(*crtc_state), GFP_KERNEL);
+ crtc_state = kmemdup(old_crtc_state, sizeof(*crtc_state), GFP_KERNEL);
if (!crtc_state)
return NULL;
- __drm_atomic_helper_crtc_duplicate_state(crtc, &crtc_state->base);
+ __drm_atomic_helper_crtc_duplicate_state(crtc, &crtc_state->uapi);
+
+ /* copy color blobs */
+ if (crtc_state->hw.degamma_lut)
+ drm_property_blob_get(crtc_state->hw.degamma_lut);
+ if (crtc_state->hw.ctm)
+ drm_property_blob_get(crtc_state->hw.ctm);
+ if (crtc_state->hw.gamma_lut)
+ drm_property_blob_get(crtc_state->hw.gamma_lut);
crtc_state->update_pipe = false;
crtc_state->disable_lp_wm = false;
@@ -205,7 +214,29 @@ intel_crtc_duplicate_state(struct drm_crtc *crtc)
crtc_state->fb_bits = 0;
crtc_state->update_planes = 0;
- return &crtc_state->base;
+ return &crtc_state->uapi;
+}
+
+static void intel_crtc_put_color_blobs(struct intel_crtc_state *crtc_state)
+{
+ drm_property_blob_put(crtc_state->hw.degamma_lut);
+ drm_property_blob_put(crtc_state->hw.gamma_lut);
+ drm_property_blob_put(crtc_state->hw.ctm);
+}
+
+void intel_crtc_free_hw_state(struct intel_crtc_state *crtc_state)
+{
+ intel_crtc_put_color_blobs(crtc_state);
+}
+
+void intel_crtc_copy_color_blobs(struct intel_crtc_state *crtc_state)
+{
+ drm_property_replace_blob(&crtc_state->hw.degamma_lut,
+ crtc_state->uapi.degamma_lut);
+ drm_property_replace_blob(&crtc_state->hw.gamma_lut,
+ crtc_state->uapi.gamma_lut);
+ drm_property_replace_blob(&crtc_state->hw.ctm,
+ crtc_state->uapi.ctm);
}
/**
@@ -220,7 +251,11 @@ void
intel_crtc_destroy_state(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
- drm_atomic_helper_crtc_destroy_state(crtc, state);
+ struct intel_crtc_state *crtc_state = to_intel_crtc_state(state);
+
+ __drm_atomic_helper_crtc_destroy_state(&crtc_state->uapi);
+ intel_crtc_free_hw_state(crtc_state);
+ kfree(crtc_state);
}
static void intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_state,
@@ -249,10 +284,10 @@ static void intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_sta
return;
/* set scaler mode */
- if (plane_state && plane_state->base.fb &&
- plane_state->base.fb->format->is_yuv &&
- plane_state->base.fb->format->num_planes > 1) {
- struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ if (plane_state && plane_state->hw.fb &&
+ plane_state->hw.fb->format->is_yuv &&
+ plane_state->hw.fb->format->num_planes > 1) {
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
if (IS_GEN(dev_priv, 9) &&
!IS_GEMINILAKE(dev_priv)) {
mode = SKL_PS_SCALER_MODE_NV12;
@@ -319,7 +354,7 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
struct intel_plane_state *plane_state = NULL;
struct intel_crtc_scaler_state *scaler_state =
&crtc_state->scaler_state;
- struct drm_atomic_state *drm_state = crtc_state->base.state;
+ struct drm_atomic_state *drm_state = crtc_state->uapi.state;
struct intel_atomic_state *intel_state = to_intel_atomic_state(drm_state);
int num_scalers_need;
int i;
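
[Annotation] With the uapi/hw state split, duplicate and destroy must now manage the hw color-blob references by hand, which is what the extra drm_property_blob_get()/put() calls above do. The intel_display.c side of this patch (not shown in this excerpt) presumably syncs the uapi state into the hw state around atomic check; a plausible sketch of that sync helper, with the exact field list an assumption:

	static void
	intel_crtc_copy_uapi_to_hw_state(struct intel_crtc_state *crtc_state)
	{
		/* Assumed fields; only the blob copy is confirmed by this hunk. */
		crtc_state->hw.enable = crtc_state->uapi.enable;
		crtc_state->hw.active = crtc_state->uapi.active;
		crtc_state->hw.mode = crtc_state->uapi.mode;
		crtc_state->hw.adjusted_mode = crtc_state->uapi.adjusted_mode;

		intel_crtc_copy_color_blobs(crtc_state);
	}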
diff --git a/drivers/gpu/drm/i915/display/intel_atomic.h b/drivers/gpu/drm/i915/display/intel_atomic.h
index 49d5cb1b9e0a..7b49623419ba 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic.h
+++ b/drivers/gpu/drm/i915/display/intel_atomic.h
@@ -36,6 +36,8 @@ intel_digital_connector_duplicate_state(struct drm_connector *connector);
struct drm_crtc_state *intel_crtc_duplicate_state(struct drm_crtc *crtc);
void intel_crtc_destroy_state(struct drm_crtc *crtc,
struct drm_crtc_state *state);
+void intel_crtc_free_hw_state(struct intel_crtc_state *crtc_state);
+void intel_crtc_copy_color_blobs(struct intel_crtc_state *crtc_state);
struct drm_atomic_state *intel_atomic_state_alloc(struct drm_device *dev);
void intel_atomic_state_clear(struct drm_atomic_state *state);
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
index 98f557a9f8ee..3e97af682b1b 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
@@ -41,6 +41,16 @@
#include "intel_pm.h"
#include "intel_sprite.h"
+static void intel_plane_state_reset(struct intel_plane_state *plane_state,
+ struct intel_plane *plane)
+{
+ memset(plane_state, 0, sizeof(*plane_state));
+
+ __drm_atomic_helper_plane_state_reset(&plane_state->uapi, &plane->base);
+
+ plane_state->scaler_id = -1;
+}
+
struct intel_plane *intel_plane_alloc(void)
{
struct intel_plane_state *plane_state;
@@ -56,8 +66,9 @@ struct intel_plane *intel_plane_alloc(void)
return ERR_PTR(-ENOMEM);
}
- __drm_atomic_helper_plane_reset(&plane->base, &plane_state->base);
- plane_state->scaler_id = -1;
+ intel_plane_state_reset(plane_state, plane);
+
+ plane->base.state = &plane_state->uapi;
return plane;
}
@@ -80,22 +91,24 @@ void intel_plane_free(struct intel_plane *plane)
struct drm_plane_state *
intel_plane_duplicate_state(struct drm_plane *plane)
{
- struct drm_plane_state *state;
struct intel_plane_state *intel_state;
- intel_state = kmemdup(plane->state, sizeof(*intel_state), GFP_KERNEL);
+ intel_state = to_intel_plane_state(plane->state);
+ intel_state = kmemdup(intel_state, sizeof(*intel_state), GFP_KERNEL);
if (!intel_state)
return NULL;
- state = &intel_state->base;
-
- __drm_atomic_helper_plane_duplicate_state(plane, state);
+ __drm_atomic_helper_plane_duplicate_state(plane, &intel_state->uapi);
intel_state->vma = NULL;
intel_state->flags = 0;
- return state;
+ /* add reference to fb */
+ if (intel_state->hw.fb)
+ drm_framebuffer_get(intel_state->hw.fb);
+
+ return &intel_state->uapi;
}
/**
@@ -110,18 +123,22 @@ void
intel_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state)
{
- WARN_ON(to_intel_plane_state(state)->vma);
+ struct intel_plane_state *plane_state = to_intel_plane_state(state);
+ WARN_ON(plane_state->vma);
- drm_atomic_helper_plane_destroy_state(plane, state);
+ __drm_atomic_helper_plane_destroy_state(&plane_state->uapi);
+ if (plane_state->hw.fb)
+ drm_framebuffer_put(plane_state->hw.fb);
+ kfree(plane_state);
}
unsigned int intel_plane_data_rate(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
- const struct drm_framebuffer *fb = plane_state->base.fb;
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
unsigned int cpp;
- if (!plane_state->base.visible)
+ if (!plane_state->uapi.visible)
return 0;
cpp = fb->format->cpp[0];
@@ -144,10 +161,10 @@ bool intel_plane_calc_min_cdclk(struct intel_atomic_state *state,
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
const struct intel_plane_state *plane_state =
intel_atomic_get_new_plane_state(state, plane);
- struct intel_crtc *crtc = to_intel_crtc(plane_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(plane_state->hw.crtc);
struct intel_crtc_state *crtc_state;
- if (!plane_state->base.visible || !plane->min_cdclk)
+ if (!plane_state->uapi.visible || !plane->min_cdclk)
return false;
crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
@@ -176,23 +193,52 @@ bool intel_plane_calc_min_cdclk(struct intel_atomic_state *state,
return false;
}
+static void intel_plane_clear_hw_state(struct intel_plane_state *plane_state)
+{
+ if (plane_state->hw.fb)
+ drm_framebuffer_put(plane_state->hw.fb);
+
+ memset(&plane_state->hw, 0, sizeof(plane_state->hw));
+}
+
+void intel_plane_copy_uapi_to_hw_state(struct intel_plane_state *plane_state,
+ const struct intel_plane_state *from_plane_state)
+{
+ intel_plane_clear_hw_state(plane_state);
+
+ plane_state->hw.crtc = from_plane_state->uapi.crtc;
+ plane_state->hw.fb = from_plane_state->uapi.fb;
+ if (plane_state->hw.fb)
+ drm_framebuffer_get(plane_state->hw.fb);
+
+ plane_state->hw.alpha = from_plane_state->uapi.alpha;
+ plane_state->hw.pixel_blend_mode =
+ from_plane_state->uapi.pixel_blend_mode;
+ plane_state->hw.rotation = from_plane_state->uapi.rotation;
+ plane_state->hw.color_encoding = from_plane_state->uapi.color_encoding;
+ plane_state->hw.color_range = from_plane_state->uapi.color_range;
+}
+
int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_state,
struct intel_crtc_state *new_crtc_state,
const struct intel_plane_state *old_plane_state,
struct intel_plane_state *new_plane_state)
{
- struct intel_plane *plane = to_intel_plane(new_plane_state->base.plane);
- const struct drm_framebuffer *fb = new_plane_state->base.fb;
+ struct intel_plane *plane = to_intel_plane(new_plane_state->uapi.plane);
+ const struct drm_framebuffer *fb;
int ret;
+ intel_plane_copy_uapi_to_hw_state(new_plane_state, new_plane_state);
+ fb = new_plane_state->hw.fb;
+
new_crtc_state->active_planes &= ~BIT(plane->id);
new_crtc_state->nv12_planes &= ~BIT(plane->id);
new_crtc_state->c8_planes &= ~BIT(plane->id);
new_crtc_state->data_rate[plane->id] = 0;
new_crtc_state->min_cdclk[plane->id] = 0;
- new_plane_state->base.visible = false;
+ new_plane_state->uapi.visible = false;
- if (!new_plane_state->base.crtc && !old_plane_state->base.crtc)
+ if (!new_plane_state->hw.crtc && !old_plane_state->hw.crtc)
return 0;
ret = plane->check_plane(new_crtc_state, new_plane_state);
@@ -200,18 +246,18 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
return ret;
/* FIXME pre-g4x don't work like this */
- if (new_plane_state->base.visible)
+ if (new_plane_state->uapi.visible)
new_crtc_state->active_planes |= BIT(plane->id);
- if (new_plane_state->base.visible &&
- drm_format_info_is_yuv_semiplanar(fb->format))
+ if (new_plane_state->uapi.visible &&
+ intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier))
new_crtc_state->nv12_planes |= BIT(plane->id);
- if (new_plane_state->base.visible &&
+ if (new_plane_state->uapi.visible &&
fb->format->format == DRM_FORMAT_C8)
new_crtc_state->c8_planes |= BIT(plane->id);
- if (new_plane_state->base.visible || old_plane_state->base.visible)
+ if (new_plane_state->uapi.visible || old_plane_state->uapi.visible)
new_crtc_state->update_planes |= BIT(plane->id);
new_crtc_state->data_rate[plane->id] =
@@ -225,11 +271,11 @@ static struct intel_crtc *
get_crtc_from_states(const struct intel_plane_state *old_plane_state,
const struct intel_plane_state *new_plane_state)
{
- if (new_plane_state->base.crtc)
- return to_intel_crtc(new_plane_state->base.crtc);
+ if (new_plane_state->uapi.crtc)
+ return to_intel_crtc(new_plane_state->uapi.crtc);
- if (old_plane_state->base.crtc)
- return to_intel_crtc(old_plane_state->base.crtc);
+ if (old_plane_state->uapi.crtc)
+ return to_intel_crtc(old_plane_state->uapi.crtc);
return NULL;
}
@@ -246,7 +292,7 @@ int intel_plane_atomic_check(struct intel_atomic_state *state,
const struct intel_crtc_state *old_crtc_state;
struct intel_crtc_state *new_crtc_state;
- new_plane_state->base.visible = false;
+ new_plane_state->uapi.visible = false;
if (!crtc)
return 0;
@@ -307,26 +353,16 @@ void intel_update_plane(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
trace_intel_update_plane(&plane->base, crtc);
plane->update_plane(plane, crtc_state, plane_state);
}
-void intel_update_slave(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-
- trace_intel_update_plane(&plane->base, crtc);
- plane->update_slave(plane, crtc_state, plane_state);
-}
-
void intel_disable_plane(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
trace_intel_disable_plane(&plane->base, crtc);
plane->disable_plane(plane, crtc_state);
@@ -355,25 +391,9 @@ void skl_update_planes_on_crtc(struct intel_atomic_state *state,
struct intel_plane_state *new_plane_state =
intel_atomic_get_new_plane_state(state, plane);
- if (new_plane_state->base.visible) {
+ if (new_plane_state->uapi.visible ||
+ new_plane_state->planar_slave) {
intel_update_plane(plane, new_crtc_state, new_plane_state);
- } else if (new_plane_state->planar_slave) {
- struct intel_plane *master =
- new_plane_state->planar_linked_plane;
-
- /*
- * We update the slave plane from this function because
- * programming it from the master plane's update_plane
- * callback runs into issues when the Y plane is
- * reassigned, disabled or used by a different plane.
- *
- * The slave plane is updated with the master plane's
- * plane_state.
- */
- new_plane_state =
- intel_atomic_get_new_plane_state(state, master);
-
- intel_update_slave(plane, new_crtc_state, new_plane_state);
} else {
intel_disable_plane(plane, new_crtc_state);
}
@@ -395,7 +415,7 @@ void i9xx_update_planes_on_crtc(struct intel_atomic_state *state,
!(update_mask & BIT(plane->id)))
continue;
- if (new_plane_state->base.visible)
+ if (new_plane_state->uapi.visible)
intel_update_plane(plane, new_crtc_state, new_plane_state);
else
intel_disable_plane(plane, new_crtc_state);
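
[Annotation] Note how removing intel_update_slave() simplifies the commit path: the slave plane is now programmed through the common intel_update_plane() using its own plane_state, whose hw.* fields are expected to be copied from the master's uapi state during atomic check. A hypothetical sketch of that check-time hookup (not literal patch code; the copy site lives in the intel_display.c hunks not shown here):

	if (plane_state->planar_slave) {
		const struct intel_plane_state *master_state =
			intel_atomic_get_new_plane_state(state,
							 plane_state->planar_linked_plane);

		/* Slave inherits the master's uapi state as its hw state. */
		intel_plane_copy_uapi_to_hw_state(plane_state, master_state);
	}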
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.h b/drivers/gpu/drm/i915/display/intel_atomic_plane.h
index e61e9a82aadf..5cedafdddb55 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.h
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.h
@@ -20,12 +20,11 @@ extern const struct drm_plane_helper_funcs intel_plane_helper_funcs;
unsigned int intel_plane_data_rate(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state);
+void intel_plane_copy_uapi_to_hw_state(struct intel_plane_state *plane_state,
+ const struct intel_plane_state *from_plane_state);
void intel_update_plane(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state);
-void intel_update_slave(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state);
void intel_disable_plane(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state);
struct intel_plane *intel_plane_alloc(void);
diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c
index 85e6b2bbb34f..27710098d056 100644
--- a/drivers/gpu/drm/i915/display/intel_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_audio.c
@@ -234,7 +234,7 @@ static const struct hdmi_aud_ncts hdmi_aud_ncts_36bpp[] = {
static u32 audio_config_hdmi_pixel_clock(const struct intel_crtc_state *crtc_state)
{
const struct drm_display_mode *adjusted_mode =
- &crtc_state->base.adjusted_mode;
+ &crtc_state->hw.adjusted_mode;
int i;
for (i = 0; i < ARRAY_SIZE(hdmi_audio_clock); i++) {
@@ -555,7 +555,7 @@ static void ilk_audio_codec_disable(struct intel_encoder *encoder,
const struct drm_connector_state *old_conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
enum pipe pipe = crtc->pipe;
enum port port = encoder->port;
u32 tmp, eldv;
@@ -602,7 +602,7 @@ static void ilk_audio_codec_enable(struct intel_encoder *encoder,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_connector *connector = conn_state->connector;
enum pipe pipe = crtc->pipe;
enum port port = encoder->port;
@@ -692,10 +692,10 @@ void intel_audio_codec_enable(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct i915_audio_component *acomp = dev_priv->audio_component;
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_connector *connector = conn_state->connector;
const struct drm_display_mode *adjusted_mode =
- &crtc_state->base.adjusted_mode;
+ &crtc_state->hw.adjusted_mode;
enum port port = encoder->port;
enum pipe pipe = crtc->pipe;
@@ -753,7 +753,7 @@ void intel_audio_codec_disable(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct i915_audio_component *acomp = dev_priv->audio_component;
- struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
enum port port = encoder->port;
enum pipe pipe = crtc->pipe;
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index 63c1bd4c2954..8beac06e3f10 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -29,6 +29,7 @@
#include <drm/i915_drm.h>
#include "display/intel_display.h"
+#include "display/intel_display_types.h"
#include "display/intel_gmbus.h"
#include "i915_drv.h"
@@ -58,6 +59,13 @@
* that.
*/
+/* Wrapper for VBT child device config */
+struct display_device_data {
+ struct child_device_config child;
+ struct dsc_compression_parameters_entry *dsc;
+ struct list_head node;
+};
+
#define SLAVE_ADDR1 0x70
#define SLAVE_ADDR2 0x72
@@ -202,17 +210,12 @@ get_lvds_fp_timing(const struct bdb_header *bdb,
return (const struct lvds_fp_timing *)((const u8 *)bdb + ofs);
}
-/* Try to find integrated panel data */
+/* Parse general panel options */
static void
-parse_lfp_panel_data(struct drm_i915_private *dev_priv,
- const struct bdb_header *bdb)
+parse_panel_options(struct drm_i915_private *dev_priv,
+ const struct bdb_header *bdb)
{
const struct bdb_lvds_options *lvds_options;
- const struct bdb_lvds_lfp_data *lvds_lfp_data;
- const struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs;
- const struct lvds_dvo_timing *panel_dvo_timing;
- const struct lvds_fp_timing *fp_timing;
- struct drm_display_mode *panel_fixed_mode;
int panel_type;
int drrs_mode;
int ret;
@@ -261,6 +264,19 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
DRM_DEBUG_KMS("DRRS not supported (VBT input)\n");
break;
}
+}
+
+/* Try to find integrated panel timing data */
+static void
+parse_lfp_panel_dtd(struct drm_i915_private *dev_priv,
+ const struct bdb_header *bdb)
+{
+ const struct bdb_lvds_lfp_data *lvds_lfp_data;
+ const struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs;
+ const struct lvds_dvo_timing *panel_dvo_timing;
+ const struct lvds_fp_timing *fp_timing;
+ struct drm_display_mode *panel_fixed_mode;
+ int panel_type = dev_priv->vbt.panel_type;
lvds_lfp_data = find_section(bdb, BDB_LVDS_LFP_DATA);
if (!lvds_lfp_data)
@@ -282,7 +298,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
dev_priv->vbt.lfp_lvds_vbt_mode = panel_fixed_mode;
- DRM_DEBUG_KMS("Found panel mode in BIOS VBT tables:\n");
+ DRM_DEBUG_KMS("Found panel mode in BIOS VBT legacy lfp table:\n");
drm_mode_debug_printmodeline(panel_fixed_mode);
fp_timing = get_lvds_fp_timing(bdb, lvds_lfp_data,
@@ -300,6 +316,98 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
}
static void
+parse_generic_dtd(struct drm_i915_private *dev_priv,
+ const struct bdb_header *bdb)
+{
+ const struct bdb_generic_dtd *generic_dtd;
+ const struct generic_dtd_entry *dtd;
+ struct drm_display_mode *panel_fixed_mode;
+ int num_dtd;
+
+ generic_dtd = find_section(bdb, BDB_GENERIC_DTD);
+ if (!generic_dtd)
+ return;
+
+ if (generic_dtd->gdtd_size < sizeof(struct generic_dtd_entry)) {
+ DRM_ERROR("GDTD size %u is too small.\n",
+ generic_dtd->gdtd_size);
+ return;
+ } else if (generic_dtd->gdtd_size !=
+ sizeof(struct generic_dtd_entry)) {
+ DRM_ERROR("Unexpected GDTD size %u\n", generic_dtd->gdtd_size);
+ /* DTD has unknown fields, but keep going */
+ }
+
+ num_dtd = (get_blocksize(generic_dtd) -
+ sizeof(struct bdb_generic_dtd)) / generic_dtd->gdtd_size;
+ if (dev_priv->vbt.panel_type >= num_dtd) {
+ DRM_ERROR("Panel type %d not found in table of %d DTD's\n",
+ dev_priv->vbt.panel_type, num_dtd);
+ return;
+ }
+
+ dtd = &generic_dtd->dtd[dev_priv->vbt.panel_type];
+
+ panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL);
+ if (!panel_fixed_mode)
+ return;
+
+ panel_fixed_mode->hdisplay = dtd->hactive;
+ panel_fixed_mode->hsync_start =
+ panel_fixed_mode->hdisplay + dtd->hfront_porch;
+ panel_fixed_mode->hsync_end =
+ panel_fixed_mode->hsync_start + dtd->hsync;
+ panel_fixed_mode->htotal = panel_fixed_mode->hsync_end;
+
+ panel_fixed_mode->vdisplay = dtd->vactive;
+ panel_fixed_mode->vsync_start =
+ panel_fixed_mode->vdisplay + dtd->vfront_porch;
+ panel_fixed_mode->vsync_end =
+ panel_fixed_mode->vsync_start + dtd->vsync;
+ panel_fixed_mode->vtotal = panel_fixed_mode->vsync_end;
+
+ panel_fixed_mode->clock = dtd->pixel_clock;
+ panel_fixed_mode->width_mm = dtd->width_mm;
+ panel_fixed_mode->height_mm = dtd->height_mm;
+
+ panel_fixed_mode->type = DRM_MODE_TYPE_PREFERRED;
+ drm_mode_set_name(panel_fixed_mode);
+
+ if (dtd->hsync_positive_polarity)
+ panel_fixed_mode->flags |= DRM_MODE_FLAG_PHSYNC;
+ else
+ panel_fixed_mode->flags |= DRM_MODE_FLAG_NHSYNC;
+
+ if (dtd->vsync_positive_polarity)
+ panel_fixed_mode->flags |= DRM_MODE_FLAG_PVSYNC;
+ else
+ panel_fixed_mode->flags |= DRM_MODE_FLAG_NVSYNC;
+
+ DRM_DEBUG_KMS("Found panel mode in BIOS VBT generic dtd table:\n");
+ drm_mode_debug_printmodeline(panel_fixed_mode);
+
+ dev_priv->vbt.lfp_lvds_vbt_mode = panel_fixed_mode;
+}
+
+static void
+parse_panel_dtd(struct drm_i915_private *dev_priv,
+ const struct bdb_header *bdb)
+{
+ /*
+ * Older VBTs provided DTD information for internal displays
+ * through the "LFP panel DTD" block (42). As of VBT revision 229,
+ * that block is now deprecated and DTD information should be provided
+ * via a newer "generic DTD" block (58). Just to be safe, we'll
+ * try the new generic DTD block first on VBT >= 229, but still fall
+ * back to trying the old LFP block if that fails.
+ */
+ if (bdb->version >= 229)
+ parse_generic_dtd(dev_priv, bdb);
+ if (!dev_priv->vbt.lfp_lvds_vbt_mode)
+ parse_lfp_panel_dtd(dev_priv, bdb);
+}
+
+static void
parse_lfp_backlight(struct drm_i915_private *dev_priv,
const struct bdb_header *bdb)
{
@@ -449,8 +557,9 @@ static void
parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, u8 bdb_version)
{
struct sdvo_device_mapping *mapping;
+ const struct display_device_data *devdata;
const struct child_device_config *child;
- int i, count = 0;
+ int count = 0;
/*
* Only parse SDVO mappings on gens that could have SDVO. This isn't
@@ -461,8 +570,8 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, u8 bdb_version)
return;
}
- for (i = 0, count = 0; i < dev_priv->vbt.child_dev_num; i++) {
- child = dev_priv->vbt.child_dev + i;
+ list_for_each_entry(devdata, &dev_priv->vbt.display_devices, node) {
+ child = &devdata->child;
if (child->slave_addr != SLAVE_ADDR1 &&
child->slave_addr != SLAVE_ADDR2) {
@@ -552,16 +661,45 @@ parse_driver_features(struct drm_i915_private *dev_priv,
dev_priv->vbt.int_lvds_support = 0;
}
- DRM_DEBUG_KMS("DRRS State Enabled:%d\n", driver->drrs_enabled);
+ if (bdb->version < 228) {
+ DRM_DEBUG_KMS("DRRS State Enabled:%d\n", driver->drrs_enabled);
+ /*
+ * If DRRS is not supported, drrs_type has to be set to 0.
+ * This is because the VBT is configured in such a way that
+ * static DRRS is 0 and DRRS not supported is represented by
+ * driver->drrs_enabled=false
+ */
+ if (!driver->drrs_enabled)
+ dev_priv->vbt.drrs_type = DRRS_NOT_SUPPORTED;
+
+ dev_priv->vbt.psr.enable = driver->psr_enabled;
+ }
+}
+
+static void
+parse_power_conservation_features(struct drm_i915_private *dev_priv,
+ const struct bdb_header *bdb)
+{
+ const struct bdb_lfp_power *power;
+ u8 panel_type = dev_priv->vbt.panel_type;
+
+ if (bdb->version < 228)
+ return;
+
+ power = find_section(bdb, BDB_LVDS_POWER);
+ if (!power)
+ return;
+
+ dev_priv->vbt.psr.enable = power->psr & BIT(panel_type);
+
/*
* If DRRS is not supported, drrs_type has to be set to 0.
* This is because the VBT is configured in such a way that
* static DRRS is 0 and DRRS not supported is represented by
- * driver->drrs_enabled=false
+ * power->drrs & BIT(panel_type)=false
*/
- if (!driver->drrs_enabled)
+ if (!(power->drrs & BIT(panel_type)))
dev_priv->vbt.drrs_type = DRRS_NOT_SUPPORTED;
- dev_priv->vbt.psr.enable = driver->psr_enabled;
}
static void
@@ -1230,6 +1368,57 @@ err:
memset(dev_priv->vbt.dsi.sequence, 0, sizeof(dev_priv->vbt.dsi.sequence));
}
+static void
+parse_compression_parameters(struct drm_i915_private *i915,
+ const struct bdb_header *bdb)
+{
+ const struct bdb_compression_parameters *params;
+ struct display_device_data *devdata;
+ const struct child_device_config *child;
+ u16 block_size;
+ int index;
+
+ if (bdb->version < 198)
+ return;
+
+ params = find_section(bdb, BDB_COMPRESSION_PARAMETERS);
+ if (params) {
+ /* Sanity checks */
+ if (params->entry_size != sizeof(params->data[0])) {
+ DRM_DEBUG_KMS("VBT: unsupported compression param entry size\n");
+ return;
+ }
+
+ block_size = get_blocksize(params);
+ if (block_size < sizeof(*params)) {
+ DRM_DEBUG_KMS("VBT: expected 16 compression param entries\n");
+ return;
+ }
+ }
+
+ list_for_each_entry(devdata, &i915->vbt.display_devices, node) {
+ child = &devdata->child;
+
+ if (!child->compression_enable)
+ continue;
+
+ if (!params) {
+ DRM_DEBUG_KMS("VBT: compression params not available\n");
+ continue;
+ }
+
+ if (child->compression_method_cps) {
+ DRM_DEBUG_KMS("VBT: CPS compression not supported\n");
+ continue;
+ }
+
+ index = child->compression_structure_index;
+
+ devdata->dsc = kmemdup(&params->data[index],
+ sizeof(*devdata->dsc), GFP_KERNEL);
+ }
+}
+
static u8 translate_iboost(u8 val)
{
static const u8 mapping[] = { 1, 3, 7 }; /* See VBT spec */
@@ -1246,7 +1435,7 @@ static enum port get_port_by_ddc_pin(struct drm_i915_private *i915, u8 ddc_pin)
const struct ddi_vbt_port_info *info;
enum port port;
- for (port = PORT_A; port < I915_MAX_PORTS; port++) {
+ for_each_port(port) {
info = &i915->vbt.ddi_port_info[port];
if (info->child && ddc_pin == info->alternate_ddc_pin)
@@ -1297,7 +1486,7 @@ static enum port get_port_by_aux_ch(struct drm_i915_private *i915, u8 aux_ch)
const struct ddi_vbt_port_info *info;
enum port port;
- for (port = PORT_A; port < I915_MAX_PORTS; port++) {
+ for_each_port(port) {
info = &i915->vbt.ddi_port_info[port];
if (info->child && aux_ch == info->alternate_aux_channel)
@@ -1418,9 +1607,10 @@ static enum port dvo_port_to_port(u8 dvo_port)
}
static void parse_ddi_port(struct drm_i915_private *dev_priv,
- const struct child_device_config *child,
+ struct display_device_data *devdata,
u8 bdb_version)
{
+ const struct child_device_config *child = &devdata->child;
struct ddi_vbt_port_info *info;
bool is_dvi, is_hdmi, is_dp, is_edp, is_crt;
enum port port;
@@ -1443,7 +1633,7 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv,
is_hdmi = is_dvi && (child->device_type & DEVICE_TYPE_NOT_HDMI_OUTPUT) == 0;
is_edp = is_dp && (child->device_type & DEVICE_TYPE_INTERNAL_CONNECTOR);
- if (port == PORT_A && is_dvi) {
+ if (port == PORT_A && is_dvi && INTEL_GEN(dev_priv) < 12) {
DRM_DEBUG_KMS("VBT claims port A supports DVI%s, ignoring\n",
is_hdmi ? "/HDMI" : "");
is_dvi = false;
@@ -1461,26 +1651,11 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv,
if (bdb_version >= 209)
info->supports_tbt = child->tbt;
- DRM_DEBUG_KMS("Port %c VBT info: CRT:%d DVI:%d HDMI:%d DP:%d eDP:%d LSPCON:%d USB-Type-C:%d TBT:%d\n",
+ DRM_DEBUG_KMS("Port %c VBT info: CRT:%d DVI:%d HDMI:%d DP:%d eDP:%d LSPCON:%d USB-Type-C:%d TBT:%d DSC:%d\n",
port_name(port), is_crt, is_dvi, is_hdmi, is_dp, is_edp,
HAS_LSPCON(dev_priv) && child->lspcon,
- info->supports_typec_usb, info->supports_tbt);
-
- if (is_edp && is_dvi)
- DRM_DEBUG_KMS("Internal DP port %c is TMDS compatible\n",
- port_name(port));
- if (is_crt && port != PORT_E)
- DRM_DEBUG_KMS("Port %c is analog\n", port_name(port));
- if (is_crt && (is_dvi || is_dp))
- DRM_DEBUG_KMS("Analog port %c is also DP or TMDS compatible\n",
- port_name(port));
- if (is_dvi && (port == PORT_A || port == PORT_E))
- DRM_DEBUG_KMS("Port %c is TMDS compatible\n", port_name(port));
- if (!is_dvi && !is_dp && !is_crt)
- DRM_DEBUG_KMS("Port %c is not DP/TMDS/CRT compatible\n",
- port_name(port));
- if (is_edp && (port == PORT_B || port == PORT_C || port == PORT_E))
- DRM_DEBUG_KMS("Port %c is internal DP\n", port_name(port));
+ info->supports_typec_usb, info->supports_tbt,
+ devdata->dsc != NULL);
if (is_dvi) {
u8 ddc_pin;
@@ -1509,6 +1684,7 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv,
port_name(port),
hdmi_level_shift);
info->hdmi_level_shift = hdmi_level_shift;
+ info->hdmi_level_shift_set = true;
}
if (bdb_version >= 204) {
@@ -1571,8 +1747,7 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv,
static void parse_ddi_ports(struct drm_i915_private *dev_priv, u8 bdb_version)
{
- const struct child_device_config *child;
- int i;
+ struct display_device_data *devdata;
if (!HAS_DDI(dev_priv) && !IS_CHERRYVIEW(dev_priv))
return;
@@ -1580,11 +1755,8 @@ static void parse_ddi_ports(struct drm_i915_private *dev_priv, u8 bdb_version)
if (bdb_version < 155)
return;
- for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
- child = dev_priv->vbt.child_dev + i;
-
- parse_ddi_port(dev_priv, child, bdb_version);
- }
+ list_for_each_entry(devdata, &dev_priv->vbt.display_devices, node)
+ parse_ddi_port(dev_priv, devdata, bdb_version);
}
static void
@@ -1592,8 +1764,9 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
const struct bdb_header *bdb)
{
const struct bdb_general_definitions *defs;
+ struct display_device_data *devdata;
const struct child_device_config *child;
- int i, child_device_num, count;
+ int i, child_device_num;
u8 expected_size;
u16 block_size;
int bus_pin;
@@ -1649,26 +1822,7 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
/* get the number of child device */
child_device_num = (block_size - sizeof(*defs)) / defs->child_dev_size;
- count = 0;
- /* get the number of child device that is present */
- for (i = 0; i < child_device_num; i++) {
- child = child_device_ptr(defs, i);
- if (!child->device_type)
- continue;
- count++;
- }
- if (!count) {
- DRM_DEBUG_KMS("no child dev is parsed from VBT\n");
- return;
- }
- dev_priv->vbt.child_dev = kcalloc(count, sizeof(*child), GFP_KERNEL);
- if (!dev_priv->vbt.child_dev) {
- DRM_DEBUG_KMS("No memory space for child device\n");
- return;
- }
- dev_priv->vbt.child_dev_num = count;
- count = 0;
for (i = 0; i < child_device_num; i++) {
child = child_device_ptr(defs, i);
if (!child->device_type)
@@ -1677,23 +1831,29 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
DRM_DEBUG_KMS("Found VBT child device with type 0x%x\n",
child->device_type);
+ devdata = kzalloc(sizeof(*devdata), GFP_KERNEL);
+ if (!devdata)
+ break;
+
/*
* Copy as much as we know (sizeof) and is available
- * (child_dev_size) of the child device. Accessing the data must
- * depend on VBT version.
+ * (child_dev_size) of the child device config. Accessing the
+ * data must depend on VBT version.
*/
- memcpy(dev_priv->vbt.child_dev + count, child,
+ memcpy(&devdata->child, child,
min_t(size_t, defs->child_dev_size, sizeof(*child)));
- count++;
+
+ list_add_tail(&devdata->node, &dev_priv->vbt.display_devices);
}
+
+ if (list_empty(&dev_priv->vbt.display_devices))
+ DRM_DEBUG_KMS("no child dev is parsed from VBT\n");
}
/* Common defaults which may be overridden by VBT. */
static void
init_vbt_defaults(struct drm_i915_private *dev_priv)
{
- enum port port;
-
dev_priv->vbt.crt_ddc_pin = GMBUS_PIN_VGADDC;
/* Default to having backlight */
@@ -1721,13 +1881,6 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
dev_priv->vbt.lvds_ssc_freq = intel_bios_ssc_frequency(dev_priv,
!HAS_PCH_SPLIT(dev_priv));
DRM_DEBUG_KMS("Set default to SSC at %d kHz\n", dev_priv->vbt.lvds_ssc_freq);
-
- for (port = PORT_A; port < I915_MAX_PORTS; port++) {
- struct ddi_vbt_port_info *info =
- &dev_priv->vbt.ddi_port_info[port];
-
- info->hdmi_level_shift = HDMI_LEVEL_SHIFT_UNKNOWN;
- }
}
/* Defaults to initialize only if there is no VBT. */
@@ -1736,7 +1889,7 @@ init_vbt_missing_defaults(struct drm_i915_private *dev_priv)
{
enum port port;
- for (port = PORT_A; port < I915_MAX_PORTS; port++) {
+ for_each_port(port) {
struct ddi_vbt_port_info *info =
&dev_priv->vbt.ddi_port_info[port];
enum phy phy = intel_port_to_phy(dev_priv, port);
@@ -1787,6 +1940,13 @@ bool intel_bios_is_valid_vbt(const void *buf, size_t size)
return false;
}
+ if (vbt->vbt_size > size) {
+ DRM_DEBUG_DRIVER("VBT incomplete (vbt_size overflows)\n");
+ return false;
+ }
+
+ size = vbt->vbt_size;
+
if (range_overflows_t(size_t,
vbt->bdb_offset,
sizeof(struct bdb_header),
@@ -1804,28 +1964,61 @@ bool intel_bios_is_valid_vbt(const void *buf, size_t size)
return vbt;
}
-static const struct vbt_header *find_vbt(void __iomem *bios, size_t size)
+static struct vbt_header *oprom_get_vbt(struct drm_i915_private *dev_priv)
{
- size_t i;
+ struct pci_dev *pdev = dev_priv->drm.pdev;
+ void __iomem *p = NULL, *oprom;
+ struct vbt_header *vbt;
+ u16 vbt_size;
+ size_t i, size;
- /* Scour memory looking for the VBT signature. */
- for (i = 0; i + 4 < size; i++) {
- void *vbt;
+ oprom = pci_map_rom(pdev, &size);
+ if (!oprom)
+ return NULL;
- if (ioread32(bios + i) != *((const u32 *) "$VBT"))
+ /* Scour memory looking for the VBT signature. */
+ for (i = 0; i + 4 < size; i += 4) {
+ if (ioread32(oprom + i) != *((const u32 *)"$VBT"))
continue;
- /*
- * This is the one place where we explicitly discard the address
- * space (__iomem) of the BIOS/VBT.
- */
- vbt = (void __force *) bios + i;
- if (intel_bios_is_valid_vbt(vbt, size - i))
- return vbt;
-
+ p = oprom + i;
+ size -= i;
break;
}
+ if (!p)
+ goto err_unmap_oprom;
+
+ if (sizeof(struct vbt_header) > size) {
+ DRM_DEBUG_DRIVER("VBT header incomplete\n");
+ goto err_unmap_oprom;
+ }
+
+ vbt_size = ioread16(p + offsetof(struct vbt_header, vbt_size));
+ if (vbt_size > size) {
+ DRM_DEBUG_DRIVER("VBT incomplete (vbt_size overflows)\n");
+ goto err_unmap_oprom;
+ }
+
+ /* The rest will be validated by intel_bios_is_valid_vbt() */
+ vbt = kmalloc(vbt_size, GFP_KERNEL);
+ if (!vbt)
+ goto err_unmap_oprom;
+
+ memcpy_fromio(vbt, p, vbt_size);
+
+ if (!intel_bios_is_valid_vbt(vbt, vbt_size))
+ goto err_free_vbt;
+
+ pci_unmap_rom(pdev, oprom);
+
+ return vbt;
+
+err_free_vbt:
+ kfree(vbt);
+err_unmap_oprom:
+ pci_unmap_rom(pdev, oprom);
+
return NULL;
}
@@ -1839,10 +2032,11 @@ static const struct vbt_header *find_vbt(void __iomem *bios, size_t size)
*/
void intel_bios_init(struct drm_i915_private *dev_priv)
{
- struct pci_dev *pdev = dev_priv->drm.pdev;
const struct vbt_header *vbt = dev_priv->opregion.vbt;
+ struct vbt_header *oprom_vbt = NULL;
const struct bdb_header *bdb;
- u8 __iomem *bios = NULL;
+
+ INIT_LIST_HEAD(&dev_priv->vbt.display_devices);
if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv)) {
DRM_DEBUG_KMS("Skipping VBT init due to disabled display.\n");
@@ -1853,15 +2047,11 @@ void intel_bios_init(struct drm_i915_private *dev_priv)
/* If the OpRegion does not have VBT, look in PCI ROM. */
if (!vbt) {
- size_t size;
-
- bios = pci_map_rom(pdev, &size);
- if (!bios)
+ oprom_vbt = oprom_get_vbt(dev_priv);
+ if (!oprom_vbt)
goto out;
- vbt = find_vbt(bios, size);
- if (!vbt)
- goto out;
+ vbt = oprom_vbt;
DRM_DEBUG_KMS("Found valid VBT in PCI ROM\n");
}
@@ -1874,15 +2064,20 @@ void intel_bios_init(struct drm_i915_private *dev_priv)
/* Grab useful general definitions */
parse_general_features(dev_priv, bdb);
parse_general_definitions(dev_priv, bdb);
- parse_lfp_panel_data(dev_priv, bdb);
+ parse_panel_options(dev_priv, bdb);
+ parse_panel_dtd(dev_priv, bdb);
parse_lfp_backlight(dev_priv, bdb);
parse_sdvo_panel_data(dev_priv, bdb);
parse_driver_features(dev_priv, bdb);
+ parse_power_conservation_features(dev_priv, bdb);
parse_edp(dev_priv, bdb);
parse_psr(dev_priv, bdb);
parse_mipi_config(dev_priv, bdb);
parse_mipi_sequence(dev_priv, bdb);
+ /* Depends on child device list */
+ parse_compression_parameters(dev_priv, bdb);
+
/* Further processing on pre-parsed data */
parse_sdvo_device_mapping(dev_priv, bdb->version);
parse_ddi_ports(dev_priv, bdb->version);
@@ -1893,8 +2088,7 @@ out:
init_vbt_missing_defaults(dev_priv);
}
- if (bios)
- pci_unmap_rom(pdev, bios);
+ kfree(oprom_vbt);
}
/**
@@ -1903,9 +2097,14 @@ out:
*/
void intel_bios_driver_remove(struct drm_i915_private *dev_priv)
{
- kfree(dev_priv->vbt.child_dev);
- dev_priv->vbt.child_dev = NULL;
- dev_priv->vbt.child_dev_num = 0;
+ struct display_device_data *devdata, *n;
+
+ list_for_each_entry_safe(devdata, n, &dev_priv->vbt.display_devices, node) {
+ list_del(&devdata->node);
+ kfree(devdata->dsc);
+ kfree(devdata);
+ }
+
kfree(dev_priv->vbt.sdvo_lvds_vbt_mode);
dev_priv->vbt.sdvo_lvds_vbt_mode = NULL;
kfree(dev_priv->vbt.lfp_lvds_vbt_mode);
@@ -1929,17 +2128,18 @@ void intel_bios_driver_remove(struct drm_i915_private *dev_priv)
*/
bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv)
{
+ const struct display_device_data *devdata;
const struct child_device_config *child;
- int i;
if (!dev_priv->vbt.int_tv_support)
return false;
- if (!dev_priv->vbt.child_dev_num)
+ if (list_empty(&dev_priv->vbt.display_devices))
return true;
- for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
- child = dev_priv->vbt.child_dev + i;
+ list_for_each_entry(devdata, &dev_priv->vbt.display_devices, node) {
+ child = &devdata->child;
+
/*
* If the device type is not TV, continue.
*/
@@ -1971,14 +2171,14 @@ bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv)
*/
bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin)
{
+ const struct display_device_data *devdata;
const struct child_device_config *child;
- int i;
- if (!dev_priv->vbt.child_dev_num)
+ if (list_empty(&dev_priv->vbt.display_devices))
return true;
- for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
- child = dev_priv->vbt.child_dev + i;
+ list_for_each_entry(devdata, &dev_priv->vbt.display_devices, node) {
+ child = &devdata->child;
/* If the device type is not LFP, continue.
* We have to check both the new identifiers as well as the
@@ -2020,6 +2220,7 @@ bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin)
*/
bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port)
{
+ const struct display_device_data *devdata;
const struct child_device_config *child;
static const struct {
u16 dp, hdmi;
@@ -2030,7 +2231,6 @@ bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port por
[PORT_E] = { DVO_PORT_DPE, DVO_PORT_HDMIE, },
[PORT_F] = { DVO_PORT_DPF, DVO_PORT_HDMIF, },
};
- int i;
if (HAS_DDI(dev_priv)) {
const struct ddi_vbt_port_info *port_info =
@@ -2045,11 +2245,8 @@ bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port por
if (WARN_ON(port == PORT_A) || port >= ARRAY_SIZE(port_mapping))
return false;
- if (!dev_priv->vbt.child_dev_num)
- return false;
-
- for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
- child = dev_priv->vbt.child_dev + i;
+ list_for_each_entry(devdata, &dev_priv->vbt.display_devices, node) {
+ child = &devdata->child;
if ((child->dvo_port == port_mapping[port].dp ||
child->dvo_port == port_mapping[port].hdmi) &&
@@ -2070,6 +2267,7 @@ bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port por
*/
bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
{
+ const struct display_device_data *devdata;
const struct child_device_config *child;
static const short port_mapping[] = {
[PORT_B] = DVO_PORT_DPB,
@@ -2078,16 +2276,12 @@ bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
[PORT_E] = DVO_PORT_DPE,
[PORT_F] = DVO_PORT_DPF,
};
- int i;
if (HAS_DDI(dev_priv))
return dev_priv->vbt.ddi_port_info[port].supports_edp;
- if (!dev_priv->vbt.child_dev_num)
- return false;
-
- for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
- child = dev_priv->vbt.child_dev + i;
+ list_for_each_entry(devdata, &dev_priv->vbt.display_devices, node) {
+ child = &devdata->child;
if (child->dvo_port == port_mapping[port] &&
(child->device_type & DEVICE_TYPE_eDP_BITS) ==
@@ -2136,13 +2330,10 @@ static bool child_dev_is_dp_dual_mode(const struct child_device_config *child,
bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv,
enum port port)
{
- const struct child_device_config *child;
- int i;
-
- for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
- child = dev_priv->vbt.child_dev + i;
+ const struct display_device_data *devdata;
- if (child_dev_is_dp_dual_mode(child, port))
+ list_for_each_entry(devdata, &dev_priv->vbt.display_devices, node) {
+ if (child_dev_is_dp_dual_mode(&devdata->child, port))
return true;
}
@@ -2159,12 +2350,12 @@ bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv,
bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv,
enum port *port)
{
+ const struct display_device_data *devdata;
const struct child_device_config *child;
u8 dvo_port;
- int i;
- for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
- child = dev_priv->vbt.child_dev + i;
+ list_for_each_entry(devdata, &dev_priv->vbt.display_devices, node) {
+ child = &devdata->child;
if (!(child->device_type & DEVICE_TYPE_MIPI_OUTPUT))
continue;
@@ -2188,6 +2379,104 @@ bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv,
return false;
}
+static void fill_dsc(struct intel_crtc_state *crtc_state,
+ struct dsc_compression_parameters_entry *dsc,
+ int dsc_max_bpc)
+{
+ struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
+ int bpc = 8;
+
+ vdsc_cfg->dsc_version_major = dsc->version_major;
+ vdsc_cfg->dsc_version_minor = dsc->version_minor;
+
+ if (dsc->support_12bpc && dsc_max_bpc >= 12)
+ bpc = 12;
+ else if (dsc->support_10bpc && dsc_max_bpc >= 10)
+ bpc = 10;
+ else if (dsc->support_8bpc && dsc_max_bpc >= 8)
+ bpc = 8;
+ else
+ DRM_DEBUG_KMS("VBT: Unsupported BPC %d for DCS\n",
+ dsc_max_bpc);
+
+ crtc_state->pipe_bpp = bpc * 3;
+
+ crtc_state->dsc.compressed_bpp = min(crtc_state->pipe_bpp,
+ VBT_DSC_MAX_BPP(dsc->max_bpp));
+
+ /*
+ * FIXME: This is ugly, and slice count should take DSC engine
+ * throughput etc. into account.
+ *
+ * Also, per spec DSI supports 1, 2, 3 or 4 horizontal slices.
+ */
+ if (dsc->slices_per_line & BIT(2)) {
+ crtc_state->dsc.slice_count = 4;
+ } else if (dsc->slices_per_line & BIT(1)) {
+ crtc_state->dsc.slice_count = 2;
+ } else {
+ /* FIXME */
+ if (!(dsc->slices_per_line & BIT(0)))
+ DRM_DEBUG_KMS("VBT: Unsupported DSC slice count for DSI\n");
+
+ crtc_state->dsc.slice_count = 1;
+ }
+
+ if (crtc_state->hw.adjusted_mode.crtc_hdisplay %
+ crtc_state->dsc.slice_count != 0)
+ DRM_DEBUG_KMS("VBT: DSC hdisplay %d not divisible by slice count %d\n",
+ crtc_state->hw.adjusted_mode.crtc_hdisplay,
+ crtc_state->dsc.slice_count);
+
+ /*
+ * FIXME: Use VBT rc_buffer_block_size and rc_buffer_size for the
+ * implementation specific physical rate buffer size. Currently we use
+ * the required rate buffer model size calculated in
+ * drm_dsc_compute_rc_parameters() according to VESA DSC Annex E.
+ *
+ * The VBT rc_buffer_block_size and rc_buffer_size definitions
+ * correspond to DP 1.4 DPCD offsets 0x62 and 0x63. The DP DSC
+ * implementation should also use the DPCD (or perhaps VBT for eDP)
+ * provided value for the buffer size.
+ */
+
+ /* FIXME: DSI spec says bpc + 1 for this one */
+ vdsc_cfg->line_buf_depth = VBT_DSC_LINE_BUFFER_DEPTH(dsc->line_buffer_depth);
+
+ vdsc_cfg->block_pred_enable = dsc->block_prediction_enable;
+
+ vdsc_cfg->slice_height = dsc->slice_height;
+}
+
+/* FIXME: initially DSI specific */
+bool intel_bios_get_dsc_params(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ int dsc_max_bpc)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ const struct display_device_data *devdata;
+ const struct child_device_config *child;
+
+ list_for_each_entry(devdata, &i915->vbt.display_devices, node) {
+ child = &devdata->child;
+
+ if (!(child->device_type & DEVICE_TYPE_MIPI_OUTPUT))
+ continue;
+
+ if (child->dvo_port - DVO_PORT_MIPIA == encoder->port) {
+ if (!devdata->dsc)
+ return false;
+
+ if (crtc_state)
+ fill_dsc(crtc_state, devdata->dsc, dsc_max_bpc);
+
+ return true;
+ }
+ }
+
+ return false;
+}
+
/**
* intel_bios_is_port_hpd_inverted - is HPD inverted for %port
* @i915: i915 device instance
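For reference, a worked example of how parse_generic_dtd() above folds the per-field DTD entry into drm_display_mode timings. The numbers are hypothetical; note that htotal is taken directly from hsync_end, exactly as the code does:

#include <stdio.h>

int main(void)
{
	/* Hypothetical generic-DTD fields for a 1920-wide panel. */
	int hactive = 1920, hfront_porch = 48, hsync = 32;

	int hsync_start = hactive + hfront_porch;	/* 1968 */
	int hsync_end = hsync_start + hsync;		/* 2000 */
	int htotal = hsync_end;		/* 2000, as in parse_generic_dtd() */

	printf("hsync_start=%d hsync_end=%d htotal=%d\n",
	       hsync_start, hsync_end, htotal);
	return 0;
}

The vertical timings follow the same pattern with vactive, vfront_porch and vsync.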
diff --git a/drivers/gpu/drm/i915/display/intel_bios.h b/drivers/gpu/drm/i915/display/intel_bios.h
index 98f064828a57..d6a0c29d37ac 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.h
+++ b/drivers/gpu/drm/i915/display/intel_bios.h
@@ -35,6 +35,8 @@
#include <drm/i915_drm.h>
struct drm_i915_private;
+struct intel_crtc_state;
+struct intel_encoder;
enum port;
enum intel_backlight_type {
@@ -242,5 +244,8 @@ bool intel_bios_is_port_hpd_inverted(const struct drm_i915_private *i915,
bool intel_bios_is_lspcon_present(const struct drm_i915_private *i915,
enum port port);
enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *dev_priv, enum port port);
+bool intel_bios_get_dsc_params(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ int dsc_max_bpc);
#endif /* _INTEL_BIOS_H_ */
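A hedged usage sketch for the new intel_bios_get_dsc_params() export — a hypothetical caller, not part of this series, assuming the usual i915 display headers. It relies only on behaviour visible in the function body above: a NULL crtc_state probes for VBT DSC parameters without filling anything, while a real state also gets fill_dsc() applied:

/* Hypothetical caller; names here are illustrative. */
static int sketch_dsi_dsc_compute_config(struct intel_encoder *encoder,
					 struct intel_crtc_state *crtc_state)
{
	int dsc_max_bpc = 12;	/* assumed sink/platform limit */

	/* Probe only: a NULL crtc_state skips fill_dsc(). */
	if (!intel_bios_get_dsc_params(encoder, NULL, dsc_max_bpc))
		return -EINVAL;	/* no VBT DSC params for this DSI port */

	/* Fill pipe_bpp, dsc.compressed_bpp, dsc.slice_count, etc. */
	if (!intel_bios_get_dsc_params(encoder, crtc_state, dsc_max_bpc))
		return -EINVAL;

	return 0;
}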
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
index 22e83f857de8..dcb66a33be9b 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -15,7 +15,7 @@ struct intel_qgv_point {
};
struct intel_qgv_info {
- struct intel_qgv_point points[3];
+ struct intel_qgv_point points[I915_NUM_QGV_POINTS];
u8 num_points;
u8 num_channels;
u8 t_bl;
@@ -264,6 +264,9 @@ static unsigned int icl_max_bw(struct drm_i915_private *dev_priv,
void intel_bw_init_hw(struct drm_i915_private *dev_priv)
{
+ if (!HAS_DISPLAY(dev_priv))
+ return;
+
if (IS_GEN(dev_priv, 12))
icl_get_bw_info(dev_priv, &tgl_sa_info);
else if (IS_GEN(dev_priv, 11))
@@ -273,17 +276,29 @@ void intel_bw_init_hw(struct drm_i915_private *dev_priv)
static unsigned int intel_max_data_rate(struct drm_i915_private *dev_priv,
int num_planes)
{
- if (INTEL_GEN(dev_priv) >= 11)
+ if (INTEL_GEN(dev_priv) >= 11) {
+ /*
+ * All bandwidth groups have the same number of QGV points
+ */
+ const struct intel_bw_info *bi =
+ &dev_priv->max_bw[0];
+ unsigned int min_bw = UINT_MAX;
+ int i;
+
/*
* FIXME with SAGV disabled maybe we can assume
* point 1 will always be used? Seems to match
* the behaviour observed in the wild.
*/
- return min3(icl_max_bw(dev_priv, num_planes, 0),
- icl_max_bw(dev_priv, num_planes, 1),
- icl_max_bw(dev_priv, num_planes, 2));
- else
+ for (i = 0; i < bi->num_qgv_points; i++) {
+ unsigned int bw = icl_max_bw(dev_priv, num_planes, i);
+
+ min_bw = min(bw, min_bw);
+ }
+ return min_bw;
+ } else {
return UINT_MAX;
+ }
}
static unsigned int intel_bw_crtc_num_active_planes(const struct intel_crtc_state *crtc_state)
@@ -297,7 +312,7 @@ static unsigned int intel_bw_crtc_num_active_planes(const struct intel_crtc_stat
static unsigned int intel_bw_crtc_data_rate(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
unsigned int data_rate = 0;
enum plane_id plane_id;
@@ -318,7 +333,7 @@ static unsigned int intel_bw_crtc_data_rate(const struct intel_crtc_state *crtc_
void intel_bw_crtc_update(struct intel_bw_state *bw_state,
const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
bw_state->data_rate[crtc->pipe] =
intel_bw_crtc_data_rate(crtc_state);
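The intel_max_data_rate() change above generalizes a hard-coded min3() over three QGV points into a minimum over bi->num_qgv_points. A standalone toy illustration of the reduction, with made-up per-point bandwidths:

#include <limits.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical per-QGV-point max bandwidths, in MB/s. */
	unsigned int bw[] = { 38400, 30400, 25600, 20800 };
	unsigned int min_bw = UINT_MAX;
	unsigned int i;

	for (i = 0; i < sizeof(bw) / sizeof(bw[0]); i++)
		if (bw[i] < min_bw)
			min_bw = bw[i];

	/* The display pipeline may never exceed the slowest point. */
	printf("max data rate = %u MB/s\n", min_bw);	/* 20800 */
	return 0;
}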
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index ed8c7ce62119..7d1ab1e5b7c3 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -1904,7 +1904,7 @@ intel_set_cdclk_post_plane_update(struct drm_i915_private *dev_priv,
static int intel_pixel_rate_to_cdclk(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
int pixel_rate = crtc_state->pixel_rate;
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
@@ -1922,7 +1922,7 @@ static int intel_pixel_rate_to_cdclk(const struct intel_crtc_state *crtc_state)
static int intel_planes_min_cdclk(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_plane *plane;
int min_cdclk = 0;
@@ -1936,10 +1936,10 @@ static int intel_planes_min_cdclk(const struct intel_crtc_state *crtc_state)
int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv =
- to_i915(crtc_state->base.crtc->dev);
+ to_i915(crtc_state->uapi.crtc->dev);
int min_cdclk;
- if (!crtc_state->base.enable)
+ if (!crtc_state->hw.enable)
return 0;
min_cdclk = intel_pixel_rate_to_cdclk(crtc_state);
@@ -2076,7 +2076,7 @@ static int bxt_compute_min_voltage_level(struct intel_atomic_state *state)
for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
int ret;
- if (crtc_state->base.enable)
+ if (crtc_state->hw.enable)
min_voltage_level = crtc_state->min_voltage_level;
else
min_voltage_level = 0;
@@ -2170,7 +2170,7 @@ static int skl_dpll0_vco(struct intel_atomic_state *state)
vco = dev_priv->skl_preferred_vco_freq;
for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
- if (!crtc_state->base.enable)
+ if (!crtc_state->hw.enable)
continue;
if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
@@ -2283,11 +2283,11 @@ static int intel_modeset_all_pipes(struct intel_atomic_state *state)
if (IS_ERR(crtc_state))
return PTR_ERR(crtc_state);
- if (!crtc_state->base.active ||
- drm_atomic_crtc_needs_modeset(&crtc_state->base))
+ if (!crtc_state->hw.active ||
+ drm_atomic_crtc_needs_modeset(&crtc_state->uapi))
continue;
- crtc_state->base.mode_changed = true;
+ crtc_state->uapi.mode_changed = true;
ret = drm_atomic_add_affected_connectors(&state->base,
&crtc->base);
@@ -2368,7 +2368,7 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state)
if (IS_ERR(crtc_state))
return PTR_ERR(crtc_state);
- if (drm_atomic_crtc_needs_modeset(&crtc_state->base))
+ if (drm_atomic_crtc_needs_modeset(&crtc_state->uapi))
pipe = INVALID_PIPE;
} else {
pipe = INVALID_PIPE;
diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c
index aa3a063549c3..3980e8b50c28 100644
--- a/drivers/gpu/drm/i915/display/intel_color.c
+++ b/drivers/gpu/drm/i915/display/intel_color.c
@@ -117,10 +117,10 @@ static bool lut_is_legacy(const struct drm_property_blob *lut)
static bool crtc_state_is_legacy_gamma(const struct intel_crtc_state *crtc_state)
{
- return !crtc_state->base.degamma_lut &&
- !crtc_state->base.ctm &&
- crtc_state->base.gamma_lut &&
- lut_is_legacy(crtc_state->base.gamma_lut);
+ return !crtc_state->hw.degamma_lut &&
+ !crtc_state->hw.ctm &&
+ crtc_state->hw.gamma_lut &&
+ lut_is_legacy(crtc_state->hw.gamma_lut);
}
/*
@@ -205,7 +205,7 @@ static void icl_update_output_csc(struct intel_crtc *crtc,
static bool ilk_csc_limited_range(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
/*
* FIXME if there's a gamma LUT after the CSC, we should
@@ -219,7 +219,7 @@ static bool ilk_csc_limited_range(const struct intel_crtc_state *crtc_state)
static void ilk_csc_convert_ctm(const struct intel_crtc_state *crtc_state,
u16 coeffs[9])
{
- const struct drm_color_ctm *ctm = crtc_state->base.ctm->data;
+ const struct drm_color_ctm *ctm = crtc_state->hw.ctm->data;
const u64 *input;
u64 temp[9];
int i;
@@ -270,11 +270,11 @@ static void ilk_csc_convert_ctm(const struct intel_crtc_state *crtc_state,
static void ilk_load_csc_matrix(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
bool limited_color_range = ilk_csc_limited_range(crtc_state);
- if (crtc_state->base.ctm) {
+ if (crtc_state->hw.ctm) {
u16 coeff[9];
ilk_csc_convert_ctm(crtc_state, coeff);
@@ -309,10 +309,10 @@ static void ilk_load_csc_matrix(const struct intel_crtc_state *crtc_state)
static void icl_load_csc_matrix(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- if (crtc_state->base.ctm) {
+ if (crtc_state->hw.ctm) {
u16 coeff[9];
ilk_csc_convert_ctm(crtc_state, coeff);
@@ -338,12 +338,12 @@ static void icl_load_csc_matrix(const struct intel_crtc_state *crtc_state)
*/
static void cherryview_load_csc_matrix(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- if (crtc_state->base.ctm) {
- const struct drm_color_ctm *ctm = crtc_state->base.ctm->data;
+ if (crtc_state->hw.ctm) {
+ const struct drm_color_ctm *ctm = crtc_state->hw.ctm->data;
u16 coeffs[9] = {};
int i;
@@ -404,7 +404,7 @@ static u32 ilk_lut_10(const struct drm_color_lut *color)
static void i9xx_load_luts_internal(const struct intel_crtc_state *crtc_state,
const struct drm_property_blob *blob)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
int i;
@@ -435,12 +435,12 @@ static void i9xx_load_luts_internal(const struct intel_crtc_state *crtc_state,
static void i9xx_load_luts(const struct intel_crtc_state *crtc_state)
{
- i9xx_load_luts_internal(crtc_state, crtc_state->base.gamma_lut);
+ i9xx_load_luts_internal(crtc_state, crtc_state->hw.gamma_lut);
}
static void i9xx_color_commit(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
u32 val;
@@ -453,7 +453,7 @@ static void i9xx_color_commit(const struct intel_crtc_state *crtc_state)
static void ilk_color_commit(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
u32 val;
@@ -468,7 +468,7 @@ static void ilk_color_commit(const struct intel_crtc_state *crtc_state)
static void hsw_color_commit(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
I915_WRITE(GAMMA_MODE(crtc->pipe), crtc_state->gamma_mode);
@@ -478,7 +478,7 @@ static void hsw_color_commit(const struct intel_crtc_state *crtc_state)
static void skl_color_commit(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
u32 val = 0;
@@ -524,8 +524,8 @@ static void i965_load_lut_10p6(struct intel_crtc *crtc,
static void i965_load_luts(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
- const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT)
i9xx_load_luts(crtc_state);
@@ -547,8 +547,8 @@ static void ilk_load_lut_10(struct intel_crtc *crtc,
static void ilk_load_luts(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
- const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT)
i9xx_load_luts(crtc_state);
@@ -654,9 +654,9 @@ static void ivb_load_lut_ext_max(struct intel_crtc *crtc)
static void ivb_load_luts(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
- const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;
- const struct drm_property_blob *degamma_lut = crtc_state->base.degamma_lut;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
+ const struct drm_property_blob *degamma_lut = crtc_state->hw.degamma_lut;
if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT) {
i9xx_load_luts(crtc_state);
@@ -677,9 +677,9 @@ static void ivb_load_luts(const struct intel_crtc_state *crtc_state)
static void bdw_load_luts(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
- const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;
- const struct drm_property_blob *degamma_lut = crtc_state->base.degamma_lut;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
+ const struct drm_property_blob *degamma_lut = crtc_state->hw.degamma_lut;
if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT) {
i9xx_load_luts(crtc_state);
@@ -700,11 +700,11 @@ static void bdw_load_luts(const struct intel_crtc_state *crtc_state)
static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
const u32 lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size;
- const struct drm_color_lut *lut = crtc_state->base.degamma_lut->data;
+ const struct drm_color_lut *lut = crtc_state->hw.degamma_lut->data;
u32 i;
/*
@@ -739,7 +739,7 @@ static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state)
static void glk_load_degamma_lut_linear(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
const u32 lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size;
@@ -766,8 +766,8 @@ static void glk_load_degamma_lut_linear(const struct intel_crtc_state *crtc_stat
static void glk_load_luts(const struct intel_crtc_state *crtc_state)
{
- const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
/*
* On GLK+ both pipe CSC and degamma LUT are controlled
@@ -777,7 +777,7 @@ static void glk_load_luts(const struct intel_crtc_state *crtc_state)
* the degamma LUT so that we don't have to reload
* it every time the pipe CSC is being enabled.
*/
- if (crtc_state->base.degamma_lut)
+ if (crtc_state->hw.degamma_lut)
glk_load_degamma_lut(crtc_state);
else
glk_load_degamma_lut_linear(crtc_state);
@@ -808,7 +808,7 @@ static void
icl_load_gcmax(const struct intel_crtc_state *crtc_state,
const struct drm_color_lut *color)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct intel_dsb *dsb = intel_dsb_get(crtc);
enum pipe pipe = crtc->pipe;
@@ -822,8 +822,8 @@ icl_load_gcmax(const struct intel_crtc_state *crtc_state,
static void
icl_program_gamma_superfine_segment(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
- const struct drm_property_blob *blob = crtc_state->base.gamma_lut;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ const struct drm_property_blob *blob = crtc_state->hw.gamma_lut;
const struct drm_color_lut *lut = blob->data;
struct intel_dsb *dsb = intel_dsb_get(crtc);
enum pipe pipe = crtc->pipe;
@@ -854,8 +854,8 @@ icl_program_gamma_superfine_segment(const struct intel_crtc_state *crtc_state)
static void
icl_program_gamma_multi_segment(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
- const struct drm_property_blob *blob = crtc_state->base.gamma_lut;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ const struct drm_property_blob *blob = crtc_state->hw.gamma_lut;
const struct drm_color_lut *lut = blob->data;
const struct drm_color_lut *entry;
struct intel_dsb *dsb = intel_dsb_get(crtc);
@@ -910,11 +910,11 @@ icl_program_gamma_multi_segment(const struct intel_crtc_state *crtc_state)
static void icl_load_luts(const struct intel_crtc_state *crtc_state)
{
- const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct intel_dsb *dsb = intel_dsb_get(crtc);
- if (crtc_state->base.degamma_lut)
+ if (crtc_state->hw.degamma_lut)
glk_load_degamma_lut(crtc_state);
switch (crtc_state->gamma_mode & GAMMA_MODE_MODE_MASK) {
@@ -990,9 +990,9 @@ static void chv_load_cgm_gamma(struct intel_crtc *crtc,
static void chv_load_luts(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
- const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;
- const struct drm_property_blob *degamma_lut = crtc_state->base.degamma_lut;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
+ const struct drm_property_blob *degamma_lut = crtc_state->hw.degamma_lut;
cherryview_load_csc_matrix(crtc_state);
@@ -1010,35 +1010,35 @@ static void chv_load_luts(const struct intel_crtc_state *crtc_state)
void intel_color_load_luts(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
dev_priv->display.load_luts(crtc_state);
}
void intel_color_commit(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
dev_priv->display.color_commit(crtc_state);
}
static bool intel_can_preload_luts(const struct intel_crtc_state *new_crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
struct intel_atomic_state *state =
- to_intel_atomic_state(new_crtc_state->base.state);
+ to_intel_atomic_state(new_crtc_state->uapi.state);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
- return !old_crtc_state->base.gamma_lut &&
- !old_crtc_state->base.degamma_lut;
+ return !old_crtc_state->hw.gamma_lut &&
+ !old_crtc_state->hw.degamma_lut;
}
static bool chv_can_preload_luts(const struct intel_crtc_state *new_crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
struct intel_atomic_state *state =
- to_intel_atomic_state(new_crtc_state->base.state);
+ to_intel_atomic_state(new_crtc_state->uapi.state);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
@@ -1050,14 +1050,14 @@ static bool chv_can_preload_luts(const struct intel_crtc_state *new_crtc_state)
if (old_crtc_state->cgm_mode || new_crtc_state->cgm_mode)
return false;
- return !old_crtc_state->base.gamma_lut;
+ return !old_crtc_state->hw.gamma_lut;
}
static bool glk_can_preload_luts(const struct intel_crtc_state *new_crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
struct intel_atomic_state *state =
- to_intel_atomic_state(new_crtc_state->base.state);
+ to_intel_atomic_state(new_crtc_state->uapi.state);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
@@ -1068,19 +1068,19 @@ static bool glk_can_preload_luts(const struct intel_crtc_state *new_crtc_state)
* linear hardware degamma mid scanout.
*/
return !old_crtc_state->csc_enable &&
- !old_crtc_state->base.gamma_lut;
+ !old_crtc_state->hw.gamma_lut;
}
int intel_color_check(struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
return dev_priv->display.color_check(crtc_state);
}
void intel_color_get_config(struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
if (dev_priv->display.read_luts)
dev_priv->display.read_luts(crtc_state);
@@ -1104,16 +1104,16 @@ static bool need_plane_update(struct intel_plane *plane,
static int
intel_color_add_affected_planes(struct intel_crtc_state *new_crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_atomic_state *state =
- to_intel_atomic_state(new_crtc_state->base.state);
+ to_intel_atomic_state(new_crtc_state->uapi.state);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
struct intel_plane *plane;
- if (!new_crtc_state->base.active ||
- drm_atomic_crtc_needs_modeset(&new_crtc_state->base))
+ if (!new_crtc_state->hw.active ||
+ drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi))
return 0;
if (new_crtc_state->gamma_enable == old_crtc_state->gamma_enable &&
@@ -1155,9 +1155,9 @@ static int check_lut_size(const struct drm_property_blob *lut, int expected)
static int check_luts(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
- const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;
- const struct drm_property_blob *degamma_lut = crtc_state->base.degamma_lut;
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
+ const struct drm_property_blob *degamma_lut = crtc_state->hw.degamma_lut;
int gamma_length, degamma_length;
u32 gamma_tests, degamma_tests;
@@ -1205,7 +1205,7 @@ static int i9xx_color_check(struct intel_crtc_state *crtc_state)
return ret;
crtc_state->gamma_enable =
- crtc_state->base.gamma_lut &&
+ crtc_state->hw.gamma_lut &&
!crtc_state->c8_planes;
crtc_state->gamma_mode = i9xx_gamma_mode(crtc_state);
@@ -1226,11 +1226,11 @@ static u32 chv_cgm_mode(const struct intel_crtc_state *crtc_state)
if (crtc_state_is_legacy_gamma(crtc_state))
return 0;
- if (crtc_state->base.degamma_lut)
+ if (crtc_state->hw.degamma_lut)
cgm_mode |= CGM_PIPE_MODE_DEGAMMA;
- if (crtc_state->base.ctm)
+ if (crtc_state->hw.ctm)
cgm_mode |= CGM_PIPE_MODE_CSC;
- if (crtc_state->base.gamma_lut)
+ if (crtc_state->hw.gamma_lut)
cgm_mode |= CGM_PIPE_MODE_GAMMA;
return cgm_mode;
@@ -1306,7 +1306,7 @@ static int ilk_color_check(struct intel_crtc_state *crtc_state)
return ret;
crtc_state->gamma_enable =
- crtc_state->base.gamma_lut &&
+ crtc_state->hw.gamma_lut &&
!crtc_state->c8_planes;
/*
@@ -1334,8 +1334,8 @@ static u32 ivb_gamma_mode(const struct intel_crtc_state *crtc_state)
if (!crtc_state->gamma_enable ||
crtc_state_is_legacy_gamma(crtc_state))
return GAMMA_MODE_MODE_8BIT;
- else if (crtc_state->base.gamma_lut &&
- crtc_state->base.degamma_lut)
+ else if (crtc_state->hw.gamma_lut &&
+ crtc_state->hw.degamma_lut)
return GAMMA_MODE_MODE_SPLIT;
else
return GAMMA_MODE_MODE_10BIT;
@@ -1349,7 +1349,7 @@ static u32 ivb_csc_mode(const struct intel_crtc_state *crtc_state)
* CSC comes after the LUT in degamma, RGB->YCbCr,
* and RGB full->limited range mode.
*/
- if (crtc_state->base.degamma_lut ||
+ if (crtc_state->hw.degamma_lut ||
crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB ||
limited_color_range)
return 0;
@@ -1367,13 +1367,13 @@ static int ivb_color_check(struct intel_crtc_state *crtc_state)
return ret;
crtc_state->gamma_enable =
- (crtc_state->base.gamma_lut ||
- crtc_state->base.degamma_lut) &&
+ (crtc_state->hw.gamma_lut ||
+ crtc_state->hw.degamma_lut) &&
!crtc_state->c8_planes;
crtc_state->csc_enable =
crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB ||
- crtc_state->base.ctm || limited_color_range;
+ crtc_state->hw.ctm || limited_color_range;
crtc_state->gamma_mode = ivb_gamma_mode(crtc_state);
@@ -1406,14 +1406,14 @@ static int glk_color_check(struct intel_crtc_state *crtc_state)
return ret;
crtc_state->gamma_enable =
- crtc_state->base.gamma_lut &&
+ crtc_state->hw.gamma_lut &&
!crtc_state->c8_planes;
/* On GLK+ degamma LUT is controlled by csc_enable */
crtc_state->csc_enable =
- crtc_state->base.degamma_lut ||
+ crtc_state->hw.degamma_lut ||
crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB ||
- crtc_state->base.ctm || crtc_state->limited_color_range;
+ crtc_state->hw.ctm || crtc_state->limited_color_range;
crtc_state->gamma_mode = glk_gamma_mode(crtc_state);
@@ -1432,14 +1432,14 @@ static u32 icl_gamma_mode(const struct intel_crtc_state *crtc_state)
{
u32 gamma_mode = 0;
- if (crtc_state->base.degamma_lut)
+ if (crtc_state->hw.degamma_lut)
gamma_mode |= PRE_CSC_GAMMA_ENABLE;
- if (crtc_state->base.gamma_lut &&
+ if (crtc_state->hw.gamma_lut &&
!crtc_state->c8_planes)
gamma_mode |= POST_CSC_GAMMA_ENABLE;
- if (!crtc_state->base.gamma_lut ||
+ if (!crtc_state->hw.gamma_lut ||
crtc_state_is_legacy_gamma(crtc_state))
gamma_mode |= GAMMA_MODE_MODE_8BIT;
else
@@ -1452,7 +1452,7 @@ static u32 icl_csc_mode(const struct intel_crtc_state *crtc_state)
{
u32 csc_mode = 0;
- if (crtc_state->base.ctm)
+ if (crtc_state->hw.ctm)
csc_mode |= ICL_CSC_ENABLE;
if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB ||
@@ -1540,7 +1540,7 @@ static int glk_gamma_precision(const struct intel_crtc_state *crtc_state)
int intel_color_get_gamma_bit_precision(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
if (HAS_GMCH(dev_priv)) {
@@ -1646,7 +1646,7 @@ static u32 intel_color_lut_pack(u32 val, u32 bit_precision)
static struct drm_property_blob *
i9xx_read_lut_8(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
struct drm_property_blob *blob;
@@ -1683,13 +1683,13 @@ static void i9xx_read_luts(struct intel_crtc_state *crtc_state)
if (!crtc_state->gamma_enable)
return;
- crtc_state->base.gamma_lut = i9xx_read_lut_8(crtc_state);
+ crtc_state->hw.gamma_lut = i9xx_read_lut_8(crtc_state);
}
static struct drm_property_blob *
i965_read_lut_10p6(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
enum pipe pipe = crtc->pipe;
@@ -1733,15 +1733,15 @@ static void i965_read_luts(struct intel_crtc_state *crtc_state)
return;
if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT)
- crtc_state->base.gamma_lut = i9xx_read_lut_8(crtc_state);
+ crtc_state->hw.gamma_lut = i9xx_read_lut_8(crtc_state);
else
- crtc_state->base.gamma_lut = i965_read_lut_10p6(crtc_state);
+ crtc_state->hw.gamma_lut = i965_read_lut_10p6(crtc_state);
}
static struct drm_property_blob *
chv_read_cgm_lut(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
enum pipe pipe = crtc->pipe;
@@ -1775,7 +1775,7 @@ chv_read_cgm_lut(const struct intel_crtc_state *crtc_state)
static void chv_read_luts(struct intel_crtc_state *crtc_state)
{
if (crtc_state->cgm_mode & CGM_PIPE_MODE_GAMMA)
- crtc_state->base.gamma_lut = chv_read_cgm_lut(crtc_state);
+ crtc_state->hw.gamma_lut = chv_read_cgm_lut(crtc_state);
else
i965_read_luts(crtc_state);
}
@@ -1783,7 +1783,7 @@ static void chv_read_luts(struct intel_crtc_state *crtc_state)
static struct drm_property_blob *
ilk_read_lut_10(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
enum pipe pipe = crtc->pipe;
@@ -1822,15 +1822,15 @@ static void ilk_read_luts(struct intel_crtc_state *crtc_state)
return;
if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT)
- crtc_state->base.gamma_lut = i9xx_read_lut_8(crtc_state);
+ crtc_state->hw.gamma_lut = i9xx_read_lut_8(crtc_state);
else
- crtc_state->base.gamma_lut = ilk_read_lut_10(crtc_state);
+ crtc_state->hw.gamma_lut = ilk_read_lut_10(crtc_state);
}
static struct drm_property_blob *
glk_read_lut_10(const struct intel_crtc_state *crtc_state, u32 prec_index)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
int hw_lut_size = ivb_lut_10_size(prec_index);
enum pipe pipe = crtc->pipe;
@@ -1871,9 +1871,9 @@ static void glk_read_luts(struct intel_crtc_state *crtc_state)
return;
if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT)
- crtc_state->base.gamma_lut = i9xx_read_lut_8(crtc_state);
+ crtc_state->hw.gamma_lut = i9xx_read_lut_8(crtc_state);
else
- crtc_state->base.gamma_lut = glk_read_lut_10(crtc_state, PAL_PREC_INDEX_VALUE(0));
+ crtc_state->hw.gamma_lut = glk_read_lut_10(crtc_state, PAL_PREC_INDEX_VALUE(0));
}
void intel_color_init(struct intel_crtc *crtc)
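
Each read_luts() variant above converts hardware LUT words back into the 16-bit entries behind the hw.gamma_lut blob. As a rough illustration of the precision scaling involved (a standalone sketch, not the driver's intel_color_lut_pack()), expanding an 8-bit legacy-palette sample to the 16-bit uapi range with round-to-nearest looks like:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative only: scale an 8-bit hardware LUT sample up to the
     * 16-bit range used by struct drm_color_lut, rounding to nearest. */
    static uint16_t lut_8bit_to_16bit(uint8_t hw)
    {
            return (uint16_t)((hw * 0xffffu + 0xffu / 2) / 0xffu);
    }

    int main(void)
    {
            printf("0x00 -> 0x%04x\n", lut_8bit_to_16bit(0x00)); /* 0x0000 */
            printf("0x80 -> 0x%04x\n", lut_8bit_to_16bit(0x80)); /* 0x8080 */
            printf("0xff -> 0x%04x\n", lut_8bit_to_16bit(0xff)); /* 0xffff */
            return 0;
    }

The rounding keeps 0x00 and 0xff mapping exactly to 0x0000 and 0xffff, so the endpoints survive a readout/commit round trip.
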
diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c
index 39cc6d79dc85..b2b1336ecdb6 100644
--- a/drivers/gpu/drm/i915/display/intel_crt.c
+++ b/drivers/gpu/drm/i915/display/intel_crt.c
@@ -132,9 +132,9 @@ static void intel_crt_get_config(struct intel_encoder *encoder,
{
pipe_config->output_types |= BIT(INTEL_OUTPUT_ANALOG);
- pipe_config->base.adjusted_mode.flags |= intel_crt_get_flags(encoder);
+ pipe_config->hw.adjusted_mode.flags |= intel_crt_get_flags(encoder);
- pipe_config->base.adjusted_mode.crtc_clock = pipe_config->port_clock;
+ pipe_config->hw.adjusted_mode.crtc_clock = pipe_config->port_clock;
}
static void hsw_crt_get_config(struct intel_encoder *encoder,
@@ -144,13 +144,13 @@ static void hsw_crt_get_config(struct intel_encoder *encoder,
intel_ddi_get_config(encoder, pipe_config);
- pipe_config->base.adjusted_mode.flags &= ~(DRM_MODE_FLAG_PHSYNC |
- DRM_MODE_FLAG_NHSYNC |
- DRM_MODE_FLAG_PVSYNC |
- DRM_MODE_FLAG_NVSYNC);
+ pipe_config->hw.adjusted_mode.flags &= ~(DRM_MODE_FLAG_PHSYNC |
+ DRM_MODE_FLAG_NHSYNC |
+ DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_NVSYNC);
- pipe_config->base.adjusted_mode.flags |= intel_crt_get_flags(encoder);
+ pipe_config->hw.adjusted_mode.flags |= intel_crt_get_flags(encoder);
- pipe_config->base.adjusted_mode.crtc_clock = lpt_get_iclkip(dev_priv);
+ pipe_config->hw.adjusted_mode.crtc_clock = lpt_get_iclkip(dev_priv);
}
/* Note: The caller is required to filter out dpms modes not supported by the
@@ -161,8 +161,8 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crt *crt = intel_encoder_to_crt(encoder);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
- const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
u32 adpa;
if (INTEL_GEN(dev_priv) >= 5)
@@ -241,6 +241,14 @@ static void hsw_post_disable_crt(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ intel_crtc_vblank_off(old_crtc_state);
+
+ intel_disable_pipe(old_crtc_state);
+
+ intel_ddi_disable_transcoder_func(old_crtc_state);
+
+ ironlake_pfit_disable(old_crtc_state);
+
intel_ddi_disable_pipe_clock(old_crtc_state);
pch_post_disable_crt(encoder, old_crtc_state, old_conn_state);
@@ -271,14 +279,14 @@ static void hsw_pre_enable_crt(struct intel_encoder *encoder,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
enum pipe pipe = crtc->pipe;
WARN_ON(!crtc_state->has_pch_encoder);
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
- dev_priv->display.fdi_link_train(crtc, crtc_state);
+ hsw_fdi_link_train(encoder, crtc_state);
intel_ddi_enable_pipe_clock(crtc_state);
}
@@ -288,7 +296,7 @@ static void hsw_enable_crt(struct intel_encoder *encoder,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
enum pipe pipe = crtc->pipe;
WARN_ON(!crtc_state->has_pch_encoder);
@@ -358,7 +366,7 @@ static int intel_crt_compute_config(struct intel_encoder *encoder,
struct drm_connector_state *conn_state)
{
struct drm_display_mode *adjusted_mode =
- &pipe_config->base.adjusted_mode;
+ &pipe_config->hw.adjusted_mode;
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return -EINVAL;
@@ -373,7 +381,7 @@ static int pch_crt_compute_config(struct intel_encoder *encoder,
struct drm_connector_state *conn_state)
{
struct drm_display_mode *adjusted_mode =
- &pipe_config->base.adjusted_mode;
+ &pipe_config->hw.adjusted_mode;
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return -EINVAL;
@@ -390,7 +398,7 @@ static int hsw_crt_compute_config(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct drm_display_mode *adjusted_mode =
- &pipe_config->base.adjusted_mode;
+ &pipe_config->hw.adjusted_mode;
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return -EINVAL;
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index 2a27fb5d7dc6..c9ba7d7f3787 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -902,11 +902,10 @@ icl_get_combo_buf_trans(struct drm_i915_private *dev_priv, int type, int rate,
static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port port)
{
+ struct ddi_vbt_port_info *port_info = &dev_priv->vbt.ddi_port_info[port];
int n_entries, level, default_entry;
enum phy phy = intel_port_to_phy(dev_priv, port);
- level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
-
if (INTEL_GEN(dev_priv) >= 12) {
if (intel_phy_is_combo(dev_priv, phy))
icl_get_combo_buf_trans(dev_priv, INTEL_OUTPUT_HDMI,
@@ -941,12 +940,14 @@ static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port por
return 0;
}
- /* Choose a good default if VBT is badly populated */
- if (level == HDMI_LEVEL_SHIFT_UNKNOWN || level >= n_entries)
- level = default_entry;
-
if (WARN_ON_ONCE(n_entries == 0))
return 0;
+
+ if (port_info->hdmi_level_shift_set)
+ level = port_info->hdmi_level_shift;
+ else
+ level = default_entry;
+
if (WARN_ON_ONCE(level >= n_entries))
level = n_entries - 1;
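
The rewritten block boils down to: use the VBT-provided level shift only when the VBT actually set one (the new hdmi_level_shift_set flag, replacing the old HDMI_LEVEL_SHIFT_UNKNOWN sentinel), fall back to the platform default otherwise, and clamp against the translation table. A minimal standalone sketch of that selection (function and parameter names here are illustrative, not the driver's):

    #include <stdbool.h>

    /* Illustrative: pick an HDMI buffer translation level; vbt_set
     * mirrors port_info->hdmi_level_shift_set. */
    static int pick_hdmi_level(bool vbt_set, int vbt_level,
                               int default_entry, int n_entries)
    {
            int level = vbt_set ? vbt_level : default_entry;

            if (n_entries == 0)          /* no translation table at all */
                    return 0;
            if (level >= n_entries)      /* clamp bogus values */
                    level = n_entries - 1;

            return level;
    }
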
@@ -1106,18 +1107,14 @@ static u32 icl_pll_to_ddi_clk_sel(struct intel_encoder *encoder,
* DDI A (which is used for eDP)
*/
-void hsw_fdi_link_train(struct intel_crtc *crtc,
+void hsw_fdi_link_train(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_encoder *encoder;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 temp, i, rx_ctl_val, ddi_pll_sel;
- for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
- WARN_ON(encoder->type != INTEL_OUTPUT_ANALOG);
- intel_prepare_dp_ddi_buffers(encoder, crtc_state);
- }
+ intel_prepare_dp_ddi_buffers(encoder, crtc_state);
/* Set the FDI_RX_MISC pwrdn lanes and the 2 workarounds listed in the
* mode set "sequence for CRT port" document:
@@ -1542,7 +1539,7 @@ static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
if (pipe_config->pixel_multiplier)
dotclock /= pipe_config->pixel_multiplier;
- pipe_config->base.adjusted_mode.crtc_clock = dotclock;
+ pipe_config->hw.adjusted_mode.crtc_clock = dotclock;
}
static void icl_ddi_clock_get(struct intel_encoder *encoder,
@@ -1758,7 +1755,7 @@ static void intel_ddi_clock_get(struct intel_encoder *encoder,
void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 temp;
@@ -1815,22 +1812,6 @@ void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state,
I915_WRITE(TRANS_MSA_MISC(cpu_transcoder), temp);
}
-void intel_ddi_set_vc_payload_alloc(const struct intel_crtc_state *crtc_state,
- bool state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- u32 temp;
-
- temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
- if (state == true)
- temp |= TRANS_DDI_DP_VC_PAYLOAD_ALLOC;
- else
- temp &= ~TRANS_DDI_DP_VC_PAYLOAD_ALLOC;
- I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
-}
-
/*
* Returns the TRANS_DDI_FUNC_CTL value based on CRTC state.
*
@@ -1840,7 +1821,7 @@ void intel_ddi_set_vc_payload_alloc(const struct intel_crtc_state *crtc_state,
static u32
intel_ddi_transcoder_func_reg_val_get(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct intel_encoder *encoder = intel_ddi_get_crtc_encoder(crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
@@ -1872,9 +1853,9 @@ intel_ddi_transcoder_func_reg_val_get(const struct intel_crtc_state *crtc_state)
BUG();
}
- if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_PVSYNC)
+ if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_PVSYNC)
temp |= TRANS_DDI_PVSYNC;
- if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_PHSYNC)
+ if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_PHSYNC)
temp |= TRANS_DDI_PHSYNC;
if (cpu_transcoder == TRANSCODER_EDP) {
@@ -1930,12 +1911,14 @@ intel_ddi_transcoder_func_reg_val_get(const struct intel_crtc_state *crtc_state)
void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 temp;
temp = intel_ddi_transcoder_func_reg_val_get(crtc_state);
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST))
+ temp |= TRANS_DDI_DP_VC_PAYLOAD_ALLOC;
I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
}
@@ -1946,7 +1929,7 @@ void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
static void
intel_ddi_config_transcoder_func(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 temp;
@@ -1958,7 +1941,7 @@ intel_ddi_config_transcoder_func(const struct intel_crtc_state *crtc_state)
void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
i915_reg_t reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
@@ -2256,7 +2239,7 @@ static void intel_ddi_get_power_domains(struct intel_encoder *encoder,
void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_encoder *encoder = intel_ddi_get_crtc_encoder(crtc);
enum port port = encoder->port;
@@ -2274,7 +2257,7 @@ void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state)
void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
if (cpu_transcoder != TRANSCODER_EDP) {
@@ -3016,11 +2999,38 @@ static void icl_unmap_plls_to_ports(struct intel_encoder *encoder)
mutex_unlock(&dev_priv->dpll_lock);
}
+static void icl_sanitize_port_clk_off(struct drm_i915_private *dev_priv,
+ u32 port_mask, bool ddi_clk_needed)
+{
+ enum port port;
+ u32 val;
+
+ val = I915_READ(ICL_DPCLKA_CFGCR0);
+ for_each_port_masked(port, port_mask) {
+ enum phy phy = intel_port_to_phy(dev_priv, port);
+ bool ddi_clk_off = val & icl_dpclka_cfgcr0_clk_off(dev_priv,
+ phy);
+
+ if (ddi_clk_needed == !ddi_clk_off)
+ continue;
+
+ /*
+ * Punt for now on the case where the clock is gated but the port
+ * would need it. Something else is really broken then.
+ */
+ if (WARN_ON(ddi_clk_needed))
+ continue;
+
+ DRM_NOTE("PHY %c is disabled/in DSI mode with an ungated DDI clock, gate it\n",
+ phy_name(phy));
+ val |= icl_dpclka_cfgcr0_clk_off(dev_priv, phy);
+ I915_WRITE(ICL_DPCLKA_CFGCR0, val);
+ }
+}
+
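
The skip condition in the new helper compares desired and current state: ddi_clk_needed == !ddi_clk_off means the clock is already where the port needs it. Restated as a standalone predicate (illustrative):

    #include <stdbool.h>

    /*
     * Illustrative: true when the gating state must change.
     * needed=true,  off=false -> ok (clock running, as required)
     * needed=false, off=true  -> ok (already gated)
     * needed=false, off=false -> gate it (the DRM_NOTE path)
     * needed=true,  off=true  -> broken elsewhere (the WARN_ON path)
     */
    static bool clk_state_mismatch(bool ddi_clk_needed, bool ddi_clk_off)
    {
            return ddi_clk_needed == ddi_clk_off;
    }
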
void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 val;
- enum port port;
u32 port_mask;
bool ddi_clk_needed;
@@ -3069,29 +3079,7 @@ void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder)
ddi_clk_needed = false;
}
- val = I915_READ(ICL_DPCLKA_CFGCR0);
- for_each_port_masked(port, port_mask) {
- enum phy phy = intel_port_to_phy(dev_priv, port);
-
- bool ddi_clk_ungated = !(val &
- icl_dpclka_cfgcr0_clk_off(dev_priv,
- phy));
-
- if (ddi_clk_needed == ddi_clk_ungated)
- continue;
-
- /*
- * Punt on the case now where clock is gated, but it would
- * be needed by the port. Something else is really broken then.
- */
- if (WARN_ON(ddi_clk_needed))
- continue;
-
- DRM_NOTE("PHY %c is disabled/in DSI mode with an ungated DDI clock, gate it\n",
- phy_name(port));
- val |= icl_dpclka_cfgcr0_clk_off(dev_priv, phy);
- I915_WRITE(ICL_DPCLKA_CFGCR0, val);
- }
+ icl_sanitize_port_clk_off(dev_priv, port_mask, ddi_clk_needed);
}
static void intel_ddi_clk_select(struct intel_encoder *encoder,
@@ -3359,7 +3347,7 @@ static void intel_ddi_disable_fec_state(struct intel_encoder *encoder,
static void
tgl_clear_psr2_transcoder_exitline(const struct intel_crtc_state *cstate)
{
- struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(cstate->uapi.crtc->dev);
u32 val;
if (!cstate->dc3co_exitline)
@@ -3374,7 +3362,7 @@ static void
tgl_set_psr2_transcoder_exitline(const struct intel_crtc_state *cstate)
{
u32 val, exit_scanlines;
- struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(cstate->uapi.crtc->dev);
if (!cstate->dc3co_exitline)
return;
@@ -3392,8 +3380,8 @@ static void tgl_dc3co_exitline_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *cstate)
{
u32 exit_scanlines;
- struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
- u32 crtc_vdisplay = cstate->base.adjusted_mode.crtc_vdisplay;
+ struct drm_i915_private *dev_priv = to_i915(cstate->uapi.crtc->dev);
+ u32 crtc_vdisplay = cstate->hw.adjusted_mode.crtc_vdisplay;
cstate->dc3co_exitline = 0;
@@ -3401,11 +3389,11 @@ static void tgl_dc3co_exitline_compute_config(struct intel_encoder *encoder,
return;
/* Bspec 49196: DC3CO only works with pipe A and DDI A. */
- if (to_intel_crtc(cstate->base.crtc)->pipe != PIPE_A ||
+ if (to_intel_crtc(cstate->uapi.crtc)->pipe != PIPE_A ||
encoder->port != PORT_A)
return;
- if (!cstate->has_psr2 || !cstate->base.active)
+ if (!cstate->has_psr2 || !cstate->hw.active)
return;
/*
@@ -3413,7 +3401,7 @@ static void tgl_dc3co_exitline_compute_config(struct intel_encoder *encoder,
* PSR2 transcoder Early Exit scanlines = ROUNDUP(200 / line time) + 1
*/
exit_scanlines =
- intel_usecs_to_scanlines(&cstate->base.adjusted_mode, 200) + 1;
+ intel_usecs_to_scanlines(&cstate->hw.adjusted_mode, 200) + 1;
if (WARN_ON(exit_scanlines > crtc_vdisplay))
return;
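
To make the 200 us budget concrete: for a hypothetical 1920x1080@60 mode (148500 kHz pixel clock, htotal 2200) one scanline takes about 14.8 us, so the exit line count comes out to 14 scanlines, 15 after the +1. A runnable restatement of the conversion, assuming it rounds up (crtc_clock in kHz, htotal in pixels):

    #include <stdio.h>

    static unsigned int usecs_to_scanlines(unsigned int usecs,
                                           unsigned int crtc_clock,
                                           unsigned int htotal)
    {
            /* round-up division: a partially used scanline counts */
            return (usecs * crtc_clock + 1000 * htotal - 1) /
                   (1000 * htotal);
    }

    int main(void)
    {
            printf("%u\n", usecs_to_scanlines(200, 148500, 2200) + 1); /* 15 */
            return 0;
    }
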
@@ -3425,7 +3413,7 @@ static void tgl_dc3co_exitline_compute_config(struct intel_encoder *encoder,
static void tgl_dc3co_exitline_get_config(struct intel_crtc_state *crtc_state)
{
u32 val;
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
if (INTEL_GEN(dev_priv) < 12)
return;
@@ -3455,47 +3443,86 @@ static void tgl_ddi_pre_enable_dp(struct intel_encoder *encoder,
intel_dp->regs.dp_tp_ctl = TGL_DP_TP_CTL(transcoder);
intel_dp->regs.dp_tp_status = TGL_DP_TP_STATUS(transcoder);
- /* 1.a got on intel_atomic_commit_tail() */
+ /*
+ * 1. Enable Power Wells
+ *
+ * This was handled at the beginning of intel_atomic_commit_tail(),
+ * before we called down into this function.
+ */
- /* 2. */
+ /* 2. Enable Panel Power if PPS is required */
intel_edp_panel_on(intel_dp);
/*
- * 1.b, 3. and 4.a is done before tgl_ddi_pre_enable_dp() by:
- * haswell_crtc_enable()->intel_encoders_pre_pll_enable() and
- * haswell_crtc_enable()->intel_enable_shared_dpll()
+ * 3. For non-TBT Type-C ports, set FIA lane count
+ * (DFLEXDPSP.DPX4TXLATC)
+ *
+ * This was done before tgl_ddi_pre_enable_dp by
+ * haswell_crtc_enable()->intel_encoders_pre_pll_enable().
*/
- /* 4.b */
+ /*
+ * 4. Enable the port PLL.
+ *
+ * The PLL enabling itself was already done before this function by
+ * haswell_crtc_enable()->intel_enable_shared_dpll(). We need only
+ * configure the PLL to port mapping here.
+ */
intel_ddi_clk_select(encoder, crtc_state);
- /* 5. */
+ /* 5. If IO power is controlled through PWR_WELL_CTL, Enable IO Power */
if (!intel_phy_is_tc(dev_priv, phy) ||
dig_port->tc_mode != TC_PORT_TBT_ALT)
intel_display_power_get(dev_priv,
dig_port->ddi_io_power_domain);
- /* 6. */
+ /* 6. Program DP_MODE */
icl_program_mg_dp_mode(dig_port, crtc_state);
/*
- * 7.a - Steps in this function should only be executed over MST
- * master, what will be taken in care by MST hook
- * intel_mst_pre_enable_dp()
+ * 7. The steps below are all substeps of the bspec's "Enable and
+ * Train Display Port" step. Note that steps specific to MST are
+ * handled by intel_mst_pre_enable_dp() before/after it calls into
+ * this function. Also, intel_mst_pre_enable_dp() only calls us when
+ * active_mst_links==0, so any steps designated for "single stream
+ * or multi-stream master transcoder" can just be performed
+ * unconditionally here.
+ */
+
+ /*
+ * 7.a Configure Transcoder Clock Select to direct the Port clock to the
+ * Transcoder.
*/
intel_ddi_enable_pipe_clock(crtc_state);
- /* 7.b */
+ /*
+ * 7.b Configure TRANS_DDI_FUNC_CTL DDI Select, DDI Mode Select & MST
+ * Transport Select
+ */
intel_ddi_config_transcoder_func(crtc_state);
- /* 7.d */
+ /*
+ * 7.c Configure & enable DP_TP_CTL with link training pattern 1
+ * selected
+ *
+ * This will be handled by the intel_dp_start_link_train() farther
+ * down this function.
+ */
+
+ /*
+ * 7.d Type C with DP alternate or fixed/legacy/static connection -
+ * Disable PHY clock gating per Type-C DDI Buffer page
+ */
icl_phy_set_clock_gating(dig_port, false);
- /* 7.e */
+ /* 7.e Configure voltage swing and related IO settings */
tgl_ddi_vswing_sequence(encoder, crtc_state->port_clock, level,
encoder->type);
- /* 7.f */
+ /*
+ * 7.f Combo PHY: Configure PORT_CL_DW10 Static Power Down to power up
+ * the used lanes of the DDI.
+ */
if (intel_phy_is_combo(dev_priv, phy)) {
bool lane_reversal =
dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
@@ -3505,7 +3532,14 @@ static void tgl_ddi_pre_enable_dp(struct intel_encoder *encoder,
lane_reversal);
}
- /* 7.g */
+ /*
+ * 7.g Configure and enable DDI_BUF_CTL
+ * 7.h Wait for DDI_BUF_CTL DDI Idle Status = 0b (Not Idle), timeout
+ * after 500 us.
+ *
+ * We only configure what the register value will be here. Actual
+ * enabling happens during link training farther down.
+ */
intel_ddi_init_dp_buf_reg(encoder);
if (!is_mst)
@@ -3518,10 +3552,17 @@ static void tgl_ddi_pre_enable_dp(struct intel_encoder *encoder,
* training
*/
intel_dp_sink_set_fec_ready(intel_dp, crtc_state);
- /* 7.c, 7.h, 7.i, 7.j */
+
+ /*
+ * 7.i Follow DisplayPort specification training sequence (see notes for
+ * failure handling)
+ * 7.j If DisplayPort multi-stream - Set DP_TP_CTL link training to Idle
+ * Pattern, wait for 5 idle patterns (DP_TP_STATUS Min_Idles_Sent)
+ * (timeout after 800 us)
+ */
intel_dp_start_link_train(intel_dp);
- /* 7.k */
+ /* 7.k Set DP_TP_CTL link training to Normal */
if (!is_trans_port_sync_mode(crtc_state))
intel_dp_stop_link_train(intel_dp);
@@ -3534,7 +3575,7 @@ static void tgl_ddi_pre_enable_dp(struct intel_encoder *encoder,
* so not enabling it for now.
*/
- /* 7.l */
+ /* 7.l Configure and enable FEC if needed */
intel_ddi_enable_fec(encoder, crtc_state);
intel_dsc_enable(encoder, crtc_state);
}
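
Steps 7.h and 7.j above are both wait-for-status-with-timeout operations (on DDI_BUF_CTL idle and DP_TP_STATUS respectively). Stripped of the driver's helpers, such a poll reduces to a loop of this shape (a userspace sketch, not the i915 implementation):

    #include <stdbool.h>
    #include <unistd.h>

    /* Illustrative: poll a completion predicate every 10 us until it
     * holds or the timeout (in microseconds) expires. */
    static bool wait_for_status(bool (*done)(void *ctx), void *ctx,
                                unsigned int timeout_us)
    {
            for (;;) {
                    if (done(ctx))
                            return true;
                    if (timeout_us < 10)
                            return false; /* timed out */
                    usleep(10);
                    timeout_us -= 10;
            }
    }
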
@@ -3677,7 +3718,7 @@ static void intel_ddi_pre_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
@@ -3761,17 +3802,25 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
INTEL_OUTPUT_DP_MST);
enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
- if (!is_mst) {
+ /*
+ * Power down sink before disabling the port, otherwise we end
+ * up getting interrupts from the sink on detecting link loss.
+ */
+ intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
+
+ if (INTEL_GEN(dev_priv) < 12 && !is_mst)
intel_ddi_disable_pipe_clock(old_crtc_state);
- /*
- * Power down sink before disabling the port, otherwise we end
- * up getting interrupts from the sink on detecting link loss.
- */
- intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
- }
intel_disable_ddi_buf(encoder, old_crtc_state);
+ /*
+ * From TGL spec: "If single stream or multi-stream master transcoder:
+ * Configure Transcoder Clock select to direct no clock to the
+ * transcoder"
+ */
+ if (INTEL_GEN(dev_priv) >= 12)
+ intel_ddi_disable_pipe_clock(old_crtc_state);
+
intel_edp_panel_vdd_on(intel_dp);
intel_edp_panel_off(intel_dp);
@@ -3807,11 +3856,49 @@ static void intel_ddi_post_disable_hdmi(struct intel_encoder *encoder,
intel_dp_dual_mode_set_tmds_output(intel_hdmi, false);
}
+static void icl_disable_transcoder_port_sync(const struct intel_crtc_state *old_crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ i915_reg_t reg;
+ u32 trans_ddi_func_ctl2_val;
+
+ if (old_crtc_state->master_transcoder == INVALID_TRANSCODER)
+ return;
+
+ DRM_DEBUG_KMS("Disabling Transcoder Port Sync on Slave Transcoder %s\n",
+ transcoder_name(old_crtc_state->cpu_transcoder));
+
+ reg = TRANS_DDI_FUNC_CTL2(old_crtc_state->cpu_transcoder);
+ /* Read-modify-write: clear only the port sync fields */
+ trans_ddi_func_ctl2_val = I915_READ(reg);
+ trans_ddi_func_ctl2_val &= ~(PORT_SYNC_MODE_ENABLE |
+ PORT_SYNC_MODE_MASTER_SELECT_MASK);
+ I915_WRITE(reg, trans_ddi_func_ctl2_val);
+}
+
static void intel_ddi_post_disable(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+ enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
+ bool is_tc_port = intel_phy_is_tc(dev_priv, phy);
+
+ intel_crtc_vblank_off(old_crtc_state);
+
+ intel_disable_pipe(old_crtc_state);
+
+ if (INTEL_GEN(dev_priv) >= 11)
+ icl_disable_transcoder_port_sync(old_crtc_state);
+
+ intel_ddi_disable_transcoder_func(old_crtc_state);
+
+ intel_dsc_disable(old_crtc_state);
+
+ if (INTEL_GEN(dev_priv) >= 9)
+ skylake_scaler_disable(old_crtc_state);
+ else
+ ironlake_pfit_disable(old_crtc_state);
/*
* When called from DP MST code:
@@ -3835,6 +3922,13 @@ static void intel_ddi_post_disable(struct intel_encoder *encoder,
if (INTEL_GEN(dev_priv) >= 11)
icl_unmap_plls_to_ports(encoder);
+
+ if (intel_crtc_has_dp_encoder(old_crtc_state) || is_tc_port)
+ intel_display_power_put_unchecked(dev_priv,
+ intel_ddi_main_link_aux_domain(dig_port));
+
+ if (is_tc_port)
+ intel_tc_port_put_link(dig_port);
}
void intel_ddi_fdi_post_disable(struct intel_encoder *encoder,
@@ -4107,7 +4201,7 @@ intel_ddi_update_prepare(struct intel_atomic_state *state,
WARN_ON(crtc && crtc->active);
intel_tc_port_get_link(enc_to_dig_port(&encoder->base), required_lanes);
- if (crtc_state && crtc_state->base.active)
+ if (crtc_state && crtc_state->hw.active)
intel_update_active_dpll(state, crtc, encoder);
}
@@ -4147,61 +4241,44 @@ intel_ddi_pre_pll_enable(struct intel_encoder *encoder,
crtc_state->lane_lat_optim_mask);
}
-static void
-intel_ddi_post_pll_disable(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
- enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
- bool is_tc_port = intel_phy_is_tc(dev_priv, phy);
-
- if (intel_crtc_has_dp_encoder(crtc_state) || is_tc_port)
- intel_display_power_put_unchecked(dev_priv,
- intel_ddi_main_link_aux_domain(dig_port));
-
- if (is_tc_port)
- intel_tc_port_put_link(dig_port);
-}
-
static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv =
to_i915(intel_dig_port->base.base.dev);
enum port port = intel_dig_port->base.port;
- u32 val;
+ u32 dp_tp_ctl, ddi_buf_ctl;
bool wait = false;
- if (I915_READ(intel_dp->regs.dp_tp_ctl) & DP_TP_CTL_ENABLE) {
- val = I915_READ(DDI_BUF_CTL(port));
- if (val & DDI_BUF_CTL_ENABLE) {
- val &= ~DDI_BUF_CTL_ENABLE;
- I915_WRITE(DDI_BUF_CTL(port), val);
+ dp_tp_ctl = I915_READ(intel_dp->regs.dp_tp_ctl);
+
+ if (dp_tp_ctl & DP_TP_CTL_ENABLE) {
+ ddi_buf_ctl = I915_READ(DDI_BUF_CTL(port));
+ if (ddi_buf_ctl & DDI_BUF_CTL_ENABLE) {
+ I915_WRITE(DDI_BUF_CTL(port),
+ ddi_buf_ctl & ~DDI_BUF_CTL_ENABLE);
wait = true;
}
- val = I915_READ(intel_dp->regs.dp_tp_ctl);
- val &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
- val |= DP_TP_CTL_LINK_TRAIN_PAT1;
- I915_WRITE(intel_dp->regs.dp_tp_ctl, val);
+ dp_tp_ctl &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
+ dp_tp_ctl |= DP_TP_CTL_LINK_TRAIN_PAT1;
+ I915_WRITE(intel_dp->regs.dp_tp_ctl, dp_tp_ctl);
POSTING_READ(intel_dp->regs.dp_tp_ctl);
if (wait)
intel_wait_ddi_buf_idle(dev_priv, port);
}
- val = DP_TP_CTL_ENABLE |
- DP_TP_CTL_LINK_TRAIN_PAT1 | DP_TP_CTL_SCRAMBLE_DISABLE;
+ dp_tp_ctl = DP_TP_CTL_ENABLE |
+ DP_TP_CTL_LINK_TRAIN_PAT1 | DP_TP_CTL_SCRAMBLE_DISABLE;
if (intel_dp->link_mst)
- val |= DP_TP_CTL_MODE_MST;
+ dp_tp_ctl |= DP_TP_CTL_MODE_MST;
else {
- val |= DP_TP_CTL_MODE_SST;
+ dp_tp_ctl |= DP_TP_CTL_MODE_SST;
if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
- val |= DP_TP_CTL_ENHANCED_FRAME_ENABLE;
+ dp_tp_ctl |= DP_TP_CTL_ENHANCED_FRAME_ENABLE;
}
- I915_WRITE(intel_dp->regs.dp_tp_ctl, val);
+ I915_WRITE(intel_dp->regs.dp_tp_ctl, dp_tp_ctl);
POSTING_READ(intel_dp->regs.dp_tp_ctl);
intel_dp->DP |= DDI_BUF_CTL_ENABLE;
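
Note that the retrain helper composes DP_TP_CTL from scratch rather than read-modify-write, since it owns every field it cares about. The final value composition in isolation (bit positions below are illustrative placeholders; the real definitions live in i915_reg.h):

    #include <stdbool.h>
    #include <stdint.h>

    #define DP_TP_CTL_ENABLE                  (1u << 31) /* illustrative */
    #define DP_TP_CTL_MODE_MST                (1u << 27)
    #define DP_TP_CTL_MODE_SST                0u
    #define DP_TP_CTL_ENHANCED_FRAME_ENABLE   (1u << 18)
    #define DP_TP_CTL_LINK_TRAIN_PAT1         0u
    #define DP_TP_CTL_SCRAMBLE_DISABLE        (1u << 7)

    static uint32_t compose_dp_tp_ctl(bool mst, bool enhanced_framing)
    {
            uint32_t val = DP_TP_CTL_ENABLE |
                           DP_TP_CTL_LINK_TRAIN_PAT1 |
                           DP_TP_CTL_SCRAMBLE_DISABLE;

            if (mst) {
                    val |= DP_TP_CTL_MODE_MST;
            } else {
                    val |= DP_TP_CTL_MODE_SST;
                    if (enhanced_framing)
                            val |= DP_TP_CTL_ENHANCED_FRAME_ENABLE;
            }

            return val;
    }
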
@@ -4237,7 +4314,7 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc);
enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
u32 temp, flags = 0;
@@ -4245,6 +4322,8 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
if (WARN_ON(transcoder_is_dsi(cpu_transcoder)))
return;
+ intel_dsc_get_config(encoder, pipe_config);
+
temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
if (temp & TRANS_DDI_PHSYNC)
flags |= DRM_MODE_FLAG_PHSYNC;
@@ -4255,7 +4334,7 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
else
flags |= DRM_MODE_FLAG_NVSYNC;
- pipe_config->base.adjusted_mode.flags |= flags;
+ pipe_config->hw.adjusted_mode.flags |= flags;
switch (temp & TRANS_DDI_BPC_MASK) {
case TRANS_DDI_BPC_6:
@@ -4404,7 +4483,7 @@ static int intel_ddi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
- struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
int ret;
@@ -4538,7 +4617,7 @@ static int intel_hdmi_reset_link(struct intel_encoder *encoder,
WARN_ON(!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI));
- if (!crtc_state->base.active)
+ if (!crtc_state->hw.active)
return 0;
if (!crtc_state->hdmi_high_tmds_clock_ratio &&
@@ -4709,8 +4788,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
struct ddi_vbt_port_info *port_info =
&dev_priv->vbt.ddi_port_info[port];
struct intel_digital_port *intel_dig_port;
- struct intel_encoder *intel_encoder;
- struct drm_encoder *encoder;
+ struct intel_encoder *encoder;
bool init_hdmi, init_dp, init_lspcon = false;
enum phy phy = intel_port_to_phy(dev_priv, port);
@@ -4739,31 +4817,30 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
if (!intel_dig_port)
return;
- intel_encoder = &intel_dig_port->base;
- encoder = &intel_encoder->base;
+ encoder = &intel_dig_port->base;
- drm_encoder_init(&dev_priv->drm, encoder, &intel_ddi_funcs,
+ drm_encoder_init(&dev_priv->drm, &encoder->base, &intel_ddi_funcs,
DRM_MODE_ENCODER_TMDS, "DDI %c", port_name(port));
- intel_encoder->hotplug = intel_ddi_hotplug;
- intel_encoder->compute_output_type = intel_ddi_compute_output_type;
- intel_encoder->compute_config = intel_ddi_compute_config;
- intel_encoder->enable = intel_enable_ddi;
- intel_encoder->pre_pll_enable = intel_ddi_pre_pll_enable;
- intel_encoder->post_pll_disable = intel_ddi_post_pll_disable;
- intel_encoder->pre_enable = intel_ddi_pre_enable;
- intel_encoder->disable = intel_disable_ddi;
- intel_encoder->post_disable = intel_ddi_post_disable;
- intel_encoder->update_pipe = intel_ddi_update_pipe;
- intel_encoder->get_hw_state = intel_ddi_get_hw_state;
- intel_encoder->get_config = intel_ddi_get_config;
- intel_encoder->suspend = intel_dp_encoder_suspend;
- intel_encoder->get_power_domains = intel_ddi_get_power_domains;
- intel_encoder->type = INTEL_OUTPUT_DDI;
- intel_encoder->power_domain = intel_port_to_power_domain(port);
- intel_encoder->port = port;
- intel_encoder->cloneable = 0;
- intel_encoder->pipe_mask = ~0;
+ encoder->hotplug = intel_ddi_hotplug;
+ encoder->compute_output_type = intel_ddi_compute_output_type;
+ encoder->compute_config = intel_ddi_compute_config;
+ encoder->enable = intel_enable_ddi;
+ encoder->pre_pll_enable = intel_ddi_pre_pll_enable;
+ encoder->pre_enable = intel_ddi_pre_enable;
+ encoder->disable = intel_disable_ddi;
+ encoder->post_disable = intel_ddi_post_disable;
+ encoder->update_pipe = intel_ddi_update_pipe;
+ encoder->get_hw_state = intel_ddi_get_hw_state;
+ encoder->get_config = intel_ddi_get_config;
+ encoder->suspend = intel_dp_encoder_suspend;
+ encoder->get_power_domains = intel_ddi_get_power_domains;
+
+ encoder->type = INTEL_OUTPUT_DDI;
+ encoder->power_domain = intel_port_to_power_domain(port);
+ encoder->port = port;
+ encoder->cloneable = 0;
+ encoder->pipe_mask = ~0;
if (INTEL_GEN(dev_priv) >= 11)
intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
@@ -4771,6 +4848,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
else
intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
(DDI_BUF_PORT_REVERSAL | DDI_A_4_LANES);
+
intel_dig_port->dp.output_reg = INVALID_MMIO_REG;
intel_dig_port->max_lanes = intel_ddi_max_lanes(intel_dig_port);
intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
@@ -4781,8 +4859,8 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
intel_tc_port_init(intel_dig_port, is_legacy);
- intel_encoder->update_prepare = intel_ddi_update_prepare;
- intel_encoder->update_complete = intel_ddi_update_complete;
+ encoder->update_prepare = intel_ddi_update_prepare;
+ encoder->update_complete = intel_ddi_update_complete;
}
WARN_ON(port > PORT_I);
@@ -4798,7 +4876,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
/* In theory we don't need the encoder->type check, but leave it just in
* case we have some really bad VBTs... */
- if (intel_encoder->type != INTEL_OUTPUT_EDP && init_hdmi) {
+ if (encoder->type != INTEL_OUTPUT_EDP && init_hdmi) {
if (!intel_ddi_init_hdmi_connector(intel_dig_port))
goto err;
}
@@ -4822,6 +4900,6 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
return;
err:
- drm_encoder_cleanup(encoder);
+ drm_encoder_cleanup(&encoder->base);
kfree(intel_dig_port);
}
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.h b/drivers/gpu/drm/i915/display/intel_ddi.h
index 19aeab1246ee..167c6579d972 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.h
+++ b/drivers/gpu/drm/i915/display/intel_ddi.h
@@ -22,7 +22,7 @@ struct intel_encoder;
void intel_ddi_fdi_post_disable(struct intel_encoder *intel_encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state);
-void hsw_fdi_link_train(struct intel_crtc *crtc,
+void hsw_fdi_link_train(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port);
bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe);
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 434f563603a6..1860da0a493e 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -86,8 +86,8 @@
/* Primary plane formats for gen <= 3 */
static const u32 i8xx_primary_formats[] = {
DRM_FORMAT_C8,
- DRM_FORMAT_RGB565,
DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_RGB565,
DRM_FORMAT_XRGB8888,
};
@@ -112,6 +112,21 @@ static const u32 i965_primary_formats[] = {
DRM_FORMAT_XBGR16161616F,
};
+/* Primary plane formats for vlv/chv */
+static const u32 vlv_primary_formats[] = {
+ DRM_FORMAT_C8,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_ARGB2101010,
+ DRM_FORMAT_ABGR2101010,
+ DRM_FORMAT_XBGR16161616F,
+};
+
static const u64 i9xx_format_modifiers[] = {
I915_FORMAT_MOD_X_TILED,
DRM_FORMAT_MOD_LINEAR,
@@ -149,14 +164,11 @@ static void vlv_prepare_pll(struct intel_crtc *crtc,
const struct intel_crtc_state *pipe_config);
static void chv_prepare_pll(struct intel_crtc *crtc,
const struct intel_crtc_state *pipe_config);
-static void intel_crtc_init_scalers(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state);
static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state);
-static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state);
static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state);
static void intel_modeset_setup_hw_state(struct drm_device *dev,
struct drm_modeset_acquire_ctx *ctx);
-static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);
+static struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc);
struct intel_limit {
struct {
@@ -530,7 +542,7 @@ icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
static bool
needs_modeset(const struct intel_crtc_state *state)
{
- return drm_atomic_crtc_needs_modeset(&state->base);
+ return drm_atomic_crtc_needs_modeset(&state->uapi);
}
bool
@@ -547,6 +559,12 @@ is_trans_port_sync_master(const struct intel_crtc_state *crtc_state)
crtc_state->sync_mode_slaves_mask);
}
+static bool
+is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state)
+{
+ return crtc_state->master_transcoder != INVALID_TRANSCODER;
+}
+
/*
* Platform specific helpers to calculate the port PLL loopback- (clock.m),
* and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
@@ -658,7 +676,7 @@ i9xx_select_p2_div(const struct intel_limit *limit,
const struct intel_crtc_state *crtc_state,
int target)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
/*
@@ -694,7 +712,7 @@ i9xx_find_best_dpll(const struct intel_limit *limit,
int target, int refclk, struct dpll *match_clock,
struct dpll *best_clock)
{
- struct drm_device *dev = crtc_state->base.crtc->dev;
+ struct drm_device *dev = crtc_state->uapi.crtc->dev;
struct dpll clock;
int err = target;
@@ -752,7 +770,7 @@ pnv_find_best_dpll(const struct intel_limit *limit,
int target, int refclk, struct dpll *match_clock,
struct dpll *best_clock)
{
- struct drm_device *dev = crtc_state->base.crtc->dev;
+ struct drm_device *dev = crtc_state->uapi.crtc->dev;
struct dpll clock;
int err = target;
@@ -808,7 +826,7 @@ g4x_find_best_dpll(const struct intel_limit *limit,
int target, int refclk, struct dpll *match_clock,
struct dpll *best_clock)
{
- struct drm_device *dev = crtc_state->base.crtc->dev;
+ struct drm_device *dev = crtc_state->uapi.crtc->dev;
struct dpll clock;
int max_n;
bool found = false;
@@ -902,7 +920,7 @@ vlv_find_best_dpll(const struct intel_limit *limit,
int target, int refclk, struct dpll *match_clock,
struct dpll *best_clock)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_device *dev = crtc->base.dev;
struct dpll clock;
unsigned int bestppm = 1000000;
@@ -962,7 +980,7 @@ chv_find_best_dpll(const struct intel_limit *limit,
int target, int refclk, struct dpll *match_clock,
struct dpll *best_clock)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_device *dev = crtc->base.dev;
unsigned int best_error_ppm;
struct dpll clock;
@@ -1025,33 +1043,6 @@ bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
NULL, best_clock);
}
-bool intel_crtc_active(struct intel_crtc *crtc)
-{
- /* Be paranoid as we can arrive here with only partial
- * state retrieved from the hardware during setup.
- *
- * We can ditch the adjusted_mode.crtc_clock check as soon
- * as Haswell has gained clock readout/fastboot support.
- *
- * We can ditch the crtc->primary->state->fb check as soon as we can
- * properly reconstruct framebuffers.
- *
- * FIXME: The intel_crtc->active here should be switched to
- * crtc->state->active once we have proper CRTC states wired up
- * for atomic.
- */
- return crtc->active && crtc->base.primary->state->fb &&
- crtc->config->base.adjusted_mode.crtc_clock;
-}
-
-enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
- enum pipe pipe)
-{
- struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
-
- return crtc->config->cpu_transcoder;
-}
-
static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
@@ -1095,7 +1086,7 @@ static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
static void
intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
if (INTEL_GEN(dev_priv) >= 4) {
@@ -1145,11 +1136,15 @@ static void assert_fdi_tx(struct drm_i915_private *dev_priv,
enum pipe pipe, bool state)
{
bool cur_state;
- enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
- pipe);
if (HAS_DDI(dev_priv)) {
- /* DDI does not have a specific FDI_TX register */
+ /*
+ * DDI does not have a specific FDI_TX register.
+ *
+ * FDI is never fed from the EDP transcoder,
+ * so the pipe->transcoder cast is fine here.
+ */
+ enum transcoder cpu_transcoder = (enum transcoder)pipe;
u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
} else {
@@ -1266,11 +1261,9 @@ void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
}
void assert_pipe(struct drm_i915_private *dev_priv,
- enum pipe pipe, bool state)
+ enum transcoder cpu_transcoder, bool state)
{
bool cur_state;
- enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
- pipe);
enum intel_display_power_domain power_domain;
intel_wakeref_t wakeref;
@@ -1290,8 +1283,9 @@ void assert_pipe(struct drm_i915_private *dev_priv,
}
I915_STATE_WARN(cur_state != state,
- "pipe %c assertion failure (expected %s, current %s)\n",
- pipe_name(pipe), onoff(state), onoff(cur_state));
+ "transcoder %s assertion failure (expected %s, current %s)\n",
+ transcoder_name(cpu_transcoder),
+ onoff(state), onoff(cur_state));
}
static void assert_plane(struct intel_plane *plane, bool state)
@@ -1418,7 +1412,7 @@ static void vlv_enable_pll(struct intel_crtc *crtc,
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- assert_pipe_disabled(dev_priv, pipe);
+ assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder);
/* PLL is protected by panel, make sure we can write it */
assert_panel_unlocked(dev_priv, pipe);
@@ -1467,7 +1461,7 @@ static void chv_enable_pll(struct intel_crtc *crtc,
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- assert_pipe_disabled(dev_priv, pipe);
+ assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder);
/* PLL is protected by panel, make sure we can write it */
assert_panel_unlocked(dev_priv, pipe);
@@ -1514,7 +1508,7 @@ static void i9xx_enable_pll(struct intel_crtc *crtc,
u32 dpll = crtc_state->dpll_hw_state.dpll;
int i;
- assert_pipe_disabled(dev_priv, crtc->pipe);
+ assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);
/* PLL is protected by panel, make sure we can write it */
if (i9xx_has_pps(dev_priv))
@@ -1554,7 +1548,7 @@ static void i9xx_enable_pll(struct intel_crtc *crtc,
static void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
@@ -1563,7 +1557,7 @@ static void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
return;
/* Make sure the pipe isn't still relying on us */
- assert_pipe_disabled(dev_priv, pipe);
+ assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);
I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
POSTING_READ(DPLL(pipe));
@@ -1574,7 +1568,7 @@ static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
u32 val;
/* Make sure the pipe isn't still relying on us */
- assert_pipe_disabled(dev_priv, pipe);
+ assert_pipe_disabled(dev_priv, (enum transcoder)pipe);
val = DPLL_INTEGRATED_REF_CLK_VLV |
DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
@@ -1591,7 +1585,7 @@ static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
u32 val;
/* Make sure the pipe isn't still relying on us */
- assert_pipe_disabled(dev_priv, pipe);
+ assert_pipe_disabled(dev_priv, (enum transcoder)pipe);
val = DPLL_SSC_REF_CLK_CHV |
DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
@@ -1645,7 +1639,7 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
static void ironlake_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
i915_reg_t reg;
@@ -1659,11 +1653,16 @@ static void ironlake_enable_pch_transcoder(const struct intel_crtc_state *crtc_s
assert_fdi_rx_enabled(dev_priv, pipe);
if (HAS_PCH_CPT(dev_priv)) {
- /* Workaround: Set the timing override bit before enabling the
- * pch transcoder. */
reg = TRANS_CHICKEN2(pipe);
val = I915_READ(reg);
+ /*
+ * Workaround: Set the timing override bit
+ * before enabling the pch transcoder.
+ */
val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
+ /* Configure frame start delay to match the CPU */
+ val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
+ val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
I915_WRITE(reg, val);
}
@@ -1672,6 +1671,10 @@ static void ironlake_enable_pch_transcoder(const struct intel_crtc_state *crtc_s
pipeconf_val = I915_READ(PIPECONF(pipe));
if (HAS_PCH_IBX(dev_priv)) {
+ /* Configure frame start delay to match the CPU */
+ val &= ~TRANS_FRAME_START_DELAY_MASK;
+ val |= TRANS_FRAME_START_DELAY(0);
+
/*
* Make the BPC in transcoder be consistent with
* that in pipeconf reg. For HDMI we must use 8bpc
@@ -1709,9 +1712,12 @@ static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
assert_fdi_rx_enabled(dev_priv, PIPE_A);
- /* Workaround: set timing override bit. */
val = I915_READ(TRANS_CHICKEN2(PIPE_A));
+ /* Workaround: set timing override bit. */
val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
+ /* Configure frame start delay to match the CPU */
+ val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
+ val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
val = TRANS_ENABLE;
@@ -1789,7 +1795,7 @@ enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
static u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
/*
* On i965gm the hardware frame counter reads
@@ -1809,16 +1815,25 @@ static u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state
static void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ assert_vblank_disabled(&crtc->base);
drm_crtc_set_max_vblank_count(&crtc->base,
intel_crtc_max_vblank_count(crtc_state));
drm_crtc_vblank_on(&crtc->base);
}
+void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+ drm_crtc_vblank_off(&crtc->base);
+ assert_vblank_disabled(&crtc->base);
+}
+
static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
enum pipe pipe = crtc->pipe;
@@ -1874,9 +1889,9 @@ static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
intel_wait_for_pipe_scanline_moving(crtc);
}
-static void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
+void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
enum pipe pipe = crtc->pipe;
@@ -1919,6 +1934,66 @@ static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
return IS_GEN(dev_priv, 2) ? 2048 : 4096;
}
+static bool is_ccs_plane(const struct drm_framebuffer *fb, int plane)
+{
+ if (!is_ccs_modifier(fb->modifier))
+ return false;
+
+ return plane >= fb->format->num_planes / 2;
+}
+
+static bool is_gen12_ccs_modifier(u64 modifier)
+{
+ return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS;
+}
+
+static bool is_gen12_ccs_plane(const struct drm_framebuffer *fb, int plane)
+{
+ return is_gen12_ccs_modifier(fb->modifier) && is_ccs_plane(fb, plane);
+}
+
+static bool is_aux_plane(const struct drm_framebuffer *fb, int plane)
+{
+ if (is_ccs_modifier(fb->modifier))
+ return is_ccs_plane(fb, plane);
+
+ return plane == 1;
+}
+
+static int main_to_ccs_plane(const struct drm_framebuffer *fb, int main_plane)
+{
+ WARN_ON(!is_ccs_modifier(fb->modifier) ||
+ (main_plane && main_plane >= fb->format->num_planes / 2));
+
+ return fb->format->num_planes / 2 + main_plane;
+}
+
+static int ccs_to_main_plane(const struct drm_framebuffer *fb, int ccs_plane)
+{
+ WARN_ON(!is_ccs_modifier(fb->modifier) ||
+ ccs_plane < fb->format->num_planes / 2);
+
+ return ccs_plane - fb->format->num_planes / 2;
+}
+
+/* Return the given main plane's CCS plane, or the UV plane for non-CCS FBs */
+static int
+intel_main_to_aux_plane(const struct drm_framebuffer *fb, int main_plane)
+{
+ if (is_ccs_modifier(fb->modifier))
+ return main_to_ccs_plane(fb, main_plane);
+
+ return 1;
+}
+
+bool
+intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info,
+ uint64_t modifier)
+{
+ return info->is_yuv &&
+ info->num_planes == (is_ccs_modifier(modifier) ? 4 : 2);
+}
+
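
These helpers encode one convention: of a framebuffer's num_planes planes, the first half are main planes and the second half are their CCS planes in the same order. A 4-plane YUV semiplanar CCS fb therefore maps Y(0) to CCS(2) and UV(1) to CCS(3). The mapping detached from struct drm_framebuffer, as a runnable check:

    #include <assert.h>

    static int main_to_ccs(int main_plane, int num_planes)
    {
            return num_planes / 2 + main_plane;
    }

    static int ccs_to_main(int ccs_plane, int num_planes)
    {
            return ccs_plane - num_planes / 2;
    }

    int main(void)
    {
            assert(main_to_ccs(1, 4) == 3); /* UV plane -> its CCS plane */
            assert(ccs_to_main(3, 4) == 1);
            return 0;
    }
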
static unsigned int
intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
{
@@ -1934,16 +2009,20 @@ intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
else
return 512;
case I915_FORMAT_MOD_Y_TILED_CCS:
- if (color_plane == 1)
+ if (is_ccs_plane(fb, color_plane))
return 128;
/* fall through */
+ case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
+ if (is_ccs_plane(fb, color_plane))
+ return 64;
+ /* fall through */
case I915_FORMAT_MOD_Y_TILED:
if (IS_GEN(dev_priv, 2) || HAS_128_BYTE_Y_TILING(dev_priv))
return 128;
else
return 512;
case I915_FORMAT_MOD_Yf_TILED_CCS:
- if (color_plane == 1)
+ if (is_ccs_plane(fb, color_plane))
return 128;
/* fall through */
case I915_FORMAT_MOD_Yf_TILED:
@@ -1970,6 +2049,9 @@ intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
static unsigned int
intel_tile_height(const struct drm_framebuffer *fb, int color_plane)
{
+ if (is_gen12_ccs_plane(fb, color_plane))
+ return 1;
+
return intel_tile_size(to_i915(fb->dev)) /
intel_tile_width_bytes(fb, color_plane);
}
@@ -1983,7 +2065,7 @@ static void intel_tile_dims(const struct drm_framebuffer *fb, int color_plane,
unsigned int cpp = fb->format->cpp[color_plane];
*tile_width = tile_width_bytes / cpp;
- *tile_height = intel_tile_size(to_i915(fb->dev)) / tile_width_bytes;
+ *tile_height = intel_tile_height(fb, color_plane);
}
unsigned int
@@ -2060,7 +2142,7 @@ static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
struct drm_i915_private *dev_priv = to_i915(fb->dev);
/* AUX_DIST needs only 4K alignment */
- if (color_plane == 1)
+ if (is_aux_plane(fb, color_plane))
return 4096;
switch (fb->modifier) {
@@ -2070,6 +2152,8 @@ static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
if (INTEL_GEN(dev_priv) >= 9)
return 256 * 1024;
return 0;
+ case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
+ return 16 * 1024;
case I915_FORMAT_MOD_Y_TILED_CCS:
case I915_FORMAT_MOD_Yf_TILED_CCS:
case I915_FORMAT_MOD_Y_TILED:
@@ -2083,7 +2167,7 @@ static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
{
- struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
return INTEL_GEN(dev_priv) < 4 ||
@@ -2126,19 +2210,18 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
* pin/unpin/fence and not more.
*/
wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
- i915_gem_object_lock(obj);
atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
- pinctl = 0;
-
- /* Valleyview is definitely limited to scanning out the first
+ /*
+ * Valleyview is definitely limited to scanning out the first
* 512MiB. Lets presume this behaviour was inherited from the
* g4x display engine and that all earlier gen are similarly
* limited. Testing suggests that it is a little more
* complicated than this. For example, Cherryview appears quite
* happy to scanout from anywhere within its global aperture.
*/
+ pinctl = 0;
if (HAS_GMCH(dev_priv))
pinctl |= PIN_MAPPABLE;
@@ -2150,7 +2233,8 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
int ret;
- /* Install a fence for tiled scan-out. Pre-i965 always needs a
+ /*
+ * Install a fence for tiled scan-out. Pre-i965 always needs a
* fence, whereas 965+ only requires a fence if using
* framebuffer compression. For simplicity, we always, when
* possible, install a fence as the cost is not that onerous.
@@ -2180,8 +2264,6 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
i915_vma_get(vma);
err:
atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
-
- i915_gem_object_unlock(obj);
intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
return vma;
}
@@ -2216,7 +2298,7 @@ u32 intel_fb_xy_to_linear(int x, int y,
const struct intel_plane_state *state,
int color_plane)
{
- const struct drm_framebuffer *fb = state->base.fb;
+ const struct drm_framebuffer *fb = state->hw.fb;
unsigned int cpp = fb->format->cpp[color_plane];
unsigned int pitch = state->color_plane[color_plane].stride;
@@ -2264,9 +2346,10 @@ static u32 intel_adjust_tile_offset(int *x, int *y,
return new_offset;
}
-static bool is_surface_linear(u64 modifier, int color_plane)
+static bool is_surface_linear(const struct drm_framebuffer *fb, int color_plane)
{
- return modifier == DRM_FORMAT_MOD_LINEAR;
+ return fb->modifier == DRM_FORMAT_MOD_LINEAR ||
+ is_gen12_ccs_plane(fb, color_plane);
}
static u32 intel_adjust_aligned_offset(int *x, int *y,
@@ -2281,7 +2364,7 @@ static u32 intel_adjust_aligned_offset(int *x, int *y,
WARN_ON(new_offset > old_offset);
- if (!is_surface_linear(fb->modifier, color_plane)) {
+ if (!is_surface_linear(fb, color_plane)) {
unsigned int tile_size, tile_width, tile_height;
unsigned int pitch_tiles;
@@ -2317,8 +2400,8 @@ static u32 intel_plane_adjust_aligned_offset(int *x, int *y,
int color_plane,
u32 old_offset, u32 new_offset)
{
- return intel_adjust_aligned_offset(x, y, state->base.fb, color_plane,
- state->base.rotation,
+ return intel_adjust_aligned_offset(x, y, state->hw.fb, color_plane,
+ state->hw.rotation,
state->color_plane[color_plane].stride,
old_offset, new_offset);
}
@@ -2351,7 +2434,7 @@ static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv,
if (alignment)
alignment--;
- if (!is_surface_linear(fb->modifier, color_plane)) {
+ if (!is_surface_linear(fb, color_plane)) {
unsigned int tile_size, tile_width, tile_height;
unsigned int tile_rows, tiles, pitch_tiles;
@@ -2392,10 +2475,10 @@ static u32 intel_plane_compute_aligned_offset(int *x, int *y,
const struct intel_plane_state *state,
int color_plane)
{
- struct intel_plane *intel_plane = to_intel_plane(state->base.plane);
+ struct intel_plane *intel_plane = to_intel_plane(state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
- const struct drm_framebuffer *fb = state->base.fb;
- unsigned int rotation = state->base.rotation;
+ const struct drm_framebuffer *fb = state->hw.fb;
+ unsigned int rotation = state->hw.rotation;
int pitch = state->color_plane[color_plane].stride;
u32 alignment;
@@ -2453,6 +2536,7 @@ static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
return I915_TILING_X;
case I915_FORMAT_MOD_Y_TILED:
case I915_FORMAT_MOD_Y_TILED_CCS:
+ case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
return I915_TILING_Y;
default:
return I915_TILING_NONE;
@@ -2473,7 +2557,7 @@ static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
* us a ratio of one byte in the CCS for each 8x16 pixels in the
* main surface.
*/
-static const struct drm_format_info ccs_formats[] = {
+static const struct drm_format_info skl_ccs_formats[] = {
{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
.cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
@@ -2484,6 +2568,28 @@ static const struct drm_format_info ccs_formats[] = {
.cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
};
+/*
+ * Gen-12 compression uses 4 bits of CCS data for each cache line pair in the
+ * main surface. And each 64B CCS cache line represents an area of 4x1 Y-tiles
+ * in the main surface. With 4 byte pixels and each Y-tile having dimensions of
+ * 32x32 pixels, the ratio turns out to be 1B in the CCS for every 2x32 pixels in
+ * the main surface.
+ */
+static const struct drm_format_info gen12_ccs_formats[] = {
+ { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
+ .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
+ .hsub = 1, .vsub = 1, },
+ { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
+ .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
+ .hsub = 1, .vsub = 1, },
+ { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
+ .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
+ .hsub = 1, .vsub = 1, .has_alpha = true },
+ { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
+ .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
+ .hsub = 1, .vsub = 1, .has_alpha = true },
+};
+
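The ratio quoted in the comment above can be re-derived from the tile geometry. A minimal standalone sketch (plain userspace C, not part of the patch) that does the arithmetic:

#include <stdio.h>

/* Recompute the gen12 CCS ratio: one 64B CCS cache line covers 4x1
 * Y-tiles, and each Y-tile is 32x32 pixels at 4 bytes per pixel. */
int main(void)
{
	int tile_w = 32, tile_h = 32;	/* Y-tile, in 4-byte pixels */
	int tiles_per_ccs_line = 4;	/* 4x1 Y-tiles per CCS cache line */
	int ccs_line_bytes = 64;
	int pixels = tiles_per_ccs_line * tile_w * tile_h;	/* 4096 */
	int pixels_per_ccs_byte = pixels / ccs_line_bytes;	/* 64 = 2x32 */

	printf("%d main-surface pixels per CCS byte (2x%d)\n",
	       pixels_per_ccs_byte, tile_h);
	return 0;
}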
static const struct drm_format_info *
lookup_format_info(const struct drm_format_info formats[],
int num_formats, u32 format)
@@ -2504,8 +2610,12 @@ intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
switch (cmd->modifier[0]) {
case I915_FORMAT_MOD_Y_TILED_CCS:
case I915_FORMAT_MOD_Yf_TILED_CCS:
- return lookup_format_info(ccs_formats,
- ARRAY_SIZE(ccs_formats),
+ return lookup_format_info(skl_ccs_formats,
+ ARRAY_SIZE(skl_ccs_formats),
+ cmd->pixel_format);
+ case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
+ return lookup_format_info(gen12_ccs_formats,
+ ARRAY_SIZE(gen12_ccs_formats),
cmd->pixel_format);
default:
return NULL;
@@ -2514,10 +2624,17 @@ intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
bool is_ccs_modifier(u64 modifier)
{
- return modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
+ return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
+ modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
}
+static int gen12_ccs_aux_stride(struct drm_framebuffer *fb, int ccs_plane)
+{
+ return DIV_ROUND_UP(fb->pitches[ccs_to_main_plane(fb, ccs_plane)],
+ 512) * 64;
+}
+
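To make gen12_ccs_aux_stride() concrete: 512 bytes of main surface are 128 four-byte pixels, which at the 2x32 ratio map to 64 CCS bytes. A standalone sketch with a hypothetical pitch:

#include <stdio.h>

/* Mirror of the AUX stride rule above: round the main-surface pitch up
 * to 512-byte units, then scale by 64 CCS bytes per unit. */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int main_pitch = 16384; /* hypothetical 4096px XRGB8888 row */
	unsigned int aux_pitch = DIV_ROUND_UP(main_pitch, 512) * 64;

	printf("main pitch %u -> CCS aux pitch %u\n", main_pitch, aux_pitch);
	return 0;
}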
u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
u32 pixel_format, u64 modifier)
{
@@ -2562,8 +2679,9 @@ static u32
intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
{
struct drm_i915_private *dev_priv = to_i915(fb->dev);
+ u32 tile_width;
- if (fb->modifier == DRM_FORMAT_MOD_LINEAR) {
+ if (is_surface_linear(fb, color_plane)) {
u32 max_stride = intel_plane_fb_max_stride(dev_priv,
fb->format->format,
fb->modifier);
@@ -2572,20 +2690,41 @@ intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
* To make remapping with linear generally feasible
* we need the stride to be page aligned.
*/
- if (fb->pitches[color_plane] > max_stride)
+ if (fb->pitches[color_plane] > max_stride &&
+ !is_ccs_modifier(fb->modifier))
return intel_tile_size(dev_priv);
else
return 64;
- } else {
- return intel_tile_width_bytes(fb, color_plane);
}
+
+ tile_width = intel_tile_width_bytes(fb, color_plane);
+ if (is_ccs_modifier(fb->modifier) && color_plane == 0) {
+ /*
+ * Display WA #0531: skl,bxt,kbl,glk
+ *
+ * Render decompression and plane width > 3840
+ * combined with horizontal panning requires the
+ * plane stride to be a multiple of 4. We'll just
+ * require the entire fb to accommodate that to avoid
+ * potential runtime errors at plane configuration time.
+ */
+ if (IS_GEN(dev_priv, 9) && fb->width > 3840)
+ tile_width *= 4;
+ /*
+ * The main surface pitch must be padded to a multiple of four
+ * tile widths.
+ */
+ else if (INTEL_GEN(dev_priv) >= 12)
+ tile_width *= 4;
+ }
+ return tile_width;
}
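Both branches above pad the CCS main-surface stride to four tile widths. A throwaway sketch of the resulting values, assuming the usual 128-byte Y-tile row:

#include <stdio.h>

/* The gen9 wide-plane WA and the gen12+ rule converge on the same
 * padding: align the CCS main-surface pitch to four tile widths. */
int main(void)
{
	unsigned int tile_width = 128;	/* Y-tile row, in bytes */
	unsigned int ccs_main_align = tile_width * 4;

	printf("plain tiled alignment: %u bytes\n", tile_width);
	printf("CCS main surface alignment: %u bytes\n", ccs_main_align);
	return 0;
}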
bool intel_plane_can_remap(const struct intel_plane_state *plane_state)
{
- struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- const struct drm_framebuffer *fb = plane_state->base.fb;
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
int i;
/* We don't want to deal with remapping with cursors */
@@ -2623,16 +2762,16 @@ bool intel_plane_can_remap(const struct intel_plane_state *plane_state)
static bool intel_plane_needs_remap(const struct intel_plane_state *plane_state)
{
- struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
- const struct drm_framebuffer *fb = plane_state->base.fb;
- unsigned int rotation = plane_state->base.rotation;
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ unsigned int rotation = plane_state->hw.rotation;
u32 stride, max_stride;
/*
* No remapping for invisible planes since we don't have
* an actual source viewport to remap.
*/
- if (!plane_state->base.visible)
+ if (!plane_state->uapi.visible)
return false;
if (!intel_plane_can_remap(plane_state))
@@ -2649,12 +2788,167 @@ static bool intel_plane_needs_remap(const struct intel_plane_state *plane_state)
return stride > max_stride;
}
+static void
+intel_fb_plane_get_subsampling(int *hsub, int *vsub,
+ const struct drm_framebuffer *fb,
+ int color_plane)
+{
+ int main_plane;
+
+ if (color_plane == 0) {
+ *hsub = 1;
+ *vsub = 1;
+
+ return;
+ }
+
+ /*
+ * TODO: Deduce the subsampling from the char block for all CCS
+ * formats and planes.
+ */
+ if (!is_gen12_ccs_plane(fb, color_plane)) {
+ *hsub = fb->format->hsub;
+ *vsub = fb->format->vsub;
+
+ return;
+ }
+
+ main_plane = ccs_to_main_plane(fb, color_plane);
+ *hsub = drm_format_info_block_width(fb->format, color_plane) /
+ drm_format_info_block_width(fb->format, main_plane);
+
+ /*
+ * The min stride check in the core framebuffer_check() function
+ * assumes that format->hsub applies to every plane except for the
+ * first plane. That's incorrect for the CCS AUX plane of the first
+ * plane, but for the above check to pass we must define the block
+ * width with that subsampling applied to it. Adjust the width here
+ * accordingly, so we can calculate the actual subsampling factor.
+ */
+ if (main_plane == 0)
+ *hsub *= fb->format->hsub;
+
+ *vsub = 32;
+}
+
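Plugging the gen12_ccs_formats block sizes into this helper gives the 2x32 factors promised earlier. A minimal sketch (userspace C; values copied from the format table above):

#include <stdio.h>

/* Derivation used by intel_fb_plane_get_subsampling() for a gen12 CCS
 * plane: hsub is the ratio of the CCS and main plane block widths (times
 * format->hsub when the main plane is plane 0), vsub is fixed at 32. */
int main(void)
{
	int ccs_block_w = 2, main_block_w = 1;	/* from gen12_ccs_formats */
	int format_hsub = 1;			/* XRGB8888 */
	int hsub = (ccs_block_w / main_block_w) * format_hsub;
	int vsub = 32;

	printf("gen12 CCS subsampling: %dx%d\n", hsub, vsub);	/* 2x32 */
	return 0;
}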
+static int
+intel_fb_check_ccs_xy(struct drm_framebuffer *fb, int ccs_plane, int x, int y)
+{
+ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+ int main_plane;
+ int hsub, vsub;
+ int tile_width, tile_height;
+ int ccs_x, ccs_y;
+ int main_x, main_y;
+
+ if (!is_ccs_plane(fb, ccs_plane))
+ return 0;
+
+ intel_tile_dims(fb, ccs_plane, &tile_width, &tile_height);
+ intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane);
+
+ tile_width *= hsub;
+ tile_height *= vsub;
+
+ ccs_x = (x * hsub) % tile_width;
+ ccs_y = (y * vsub) % tile_height;
+
+ main_plane = ccs_to_main_plane(fb, ccs_plane);
+ main_x = intel_fb->normal[main_plane].x % tile_width;
+ main_y = intel_fb->normal[main_plane].y % tile_height;
+
+ /*
+ * CCS doesn't have its own x/y offset register, so the intra CCS tile
+ * x/y offsets must match between CCS and the main surface.
+ */
+ if (main_x != ccs_x || main_y != ccs_y) {
+ DRM_DEBUG_KMS("Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n",
+ main_x, main_y,
+ ccs_x, ccs_y,
+ intel_fb->normal[main_plane].x,
+ intel_fb->normal[main_plane].y,
+ x, y);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
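A worked instance of the check: the offsets pass when CCS and main land on the same intra-tile position after scaling by the subsampling factors. The tile dimensions below are hypothetical, chosen only to illustrate the modulo arithmetic:

#include <stdio.h>

/* Sketch of the alignment rule enforced by intel_fb_check_ccs_xy():
 * CCS and main x/y must match within a subsampling-scaled tile. */
int main(void)
{
	int tile_w = 128, tile_h = 32;	/* hypothetical CCS tile dims */
	int hsub = 2, vsub = 32;	/* gen12 CCS factors from above */
	int x = 10, y = 3;		/* CCS plane intra-fb offsets */
	int main_x = 20, main_y = 96;	/* main plane intra-fb offsets */
	int ccs_x, ccs_y;

	tile_w *= hsub;
	tile_h *= vsub;
	ccs_x = (x * hsub) % tile_w;
	ccs_y = (y * vsub) % tile_h;

	printf("%s\n", (main_x % tile_w == ccs_x &&
			main_y % tile_h == ccs_y) ? "aligned" : "-EINVAL");
	return 0;
}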
+static void
+intel_fb_plane_dims(int *w, int *h, struct drm_framebuffer *fb, int color_plane)
+{
+ int hsub, vsub;
+
+ intel_fb_plane_get_subsampling(&hsub, &vsub, fb, color_plane);
+ *w = fb->width / hsub;
+ *h = fb->height / vsub;
+}
+
+/*
+ * Set up the rotated view for an FB plane and return the size the GTT mapping
+ * requires for this view.
+ */
+static u32
+setup_fb_rotation(int plane, const struct intel_remapped_plane_info *plane_info,
+ u32 gtt_offset_rotated, int x, int y,
+ unsigned int width, unsigned int height,
+ unsigned int tile_size,
+ unsigned int tile_width, unsigned int tile_height,
+ struct drm_framebuffer *fb)
+{
+ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+ struct intel_rotation_info *rot_info = &intel_fb->rot_info;
+ unsigned int pitch_tiles;
+ struct drm_rect r;
+
+ /* Y or Yf modifiers required for 90/270 rotation */
+ if (fb->modifier != I915_FORMAT_MOD_Y_TILED &&
+ fb->modifier != I915_FORMAT_MOD_Yf_TILED)
+ return 0;
+
+ if (WARN_ON(plane >= ARRAY_SIZE(rot_info->plane)))
+ return 0;
+
+ rot_info->plane[plane] = *plane_info;
+
+ intel_fb->rotated[plane].pitch = plane_info->height * tile_height;
+
+ /* rotate the x/y offsets to match the GTT view */
+ drm_rect_init(&r, x, y, width, height);
+ drm_rect_rotate(&r,
+ plane_info->width * tile_width,
+ plane_info->height * tile_height,
+ DRM_MODE_ROTATE_270);
+ x = r.x1;
+ y = r.y1;
+
+ /* rotate the tile dimensions to match the GTT view */
+ pitch_tiles = intel_fb->rotated[plane].pitch / tile_height;
+ swap(tile_width, tile_height);
+
+ /*
+ * We only keep the x/y offsets, so push all of the
+ * gtt offset into the x/y offsets.
+ */
+ intel_adjust_tile_offset(&x, &y,
+ tile_width, tile_height,
+ tile_size, pitch_tiles,
+ gtt_offset_rotated * tile_size, 0);
+
+ /*
+ * First pixel of the framebuffer from
+ * the start of the rotated gtt mapping.
+ */
+ intel_fb->rotated[plane].x = x;
+ intel_fb->rotated[plane].y = y;
+
+ return plane_info->width * plane_info->height;
+}
+
static int
intel_fill_fb_info(struct drm_i915_private *dev_priv,
struct drm_framebuffer *fb)
{
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
- struct intel_rotation_info *rot_info = &intel_fb->rot_info;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
u32 gtt_offset_rotated = 0;
unsigned int max_size = 0;
@@ -2669,8 +2963,7 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
int ret;
cpp = fb->format->cpp[i];
- width = drm_framebuffer_plane_width(fb->width, fb, i);
- height = drm_framebuffer_plane_height(fb->height, fb, i);
+ intel_fb_plane_dims(&width, &height, fb, i);
ret = intel_fb_offset_to_xy(&x, &y, fb, i);
if (ret) {
@@ -2679,36 +2972,9 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
return ret;
}
- if (is_ccs_modifier(fb->modifier) && i == 1) {
- int hsub = fb->format->hsub;
- int vsub = fb->format->vsub;
- int tile_width, tile_height;
- int main_x, main_y;
- int ccs_x, ccs_y;
-
- intel_tile_dims(fb, i, &tile_width, &tile_height);
- tile_width *= hsub;
- tile_height *= vsub;
-
- ccs_x = (x * hsub) % tile_width;
- ccs_y = (y * vsub) % tile_height;
- main_x = intel_fb->normal[0].x % tile_width;
- main_y = intel_fb->normal[0].y % tile_height;
-
- /*
- * CCS doesn't have its own x/y offset register, so the intra CCS tile
- * x/y offsets must match between CCS and the main surface.
- */
- if (main_x != ccs_x || main_y != ccs_y) {
- DRM_DEBUG_KMS("Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n",
- main_x, main_y,
- ccs_x, ccs_y,
- intel_fb->normal[0].x,
- intel_fb->normal[0].y,
- x, y);
- return -EINVAL;
- }
- }
+ ret = intel_fb_check_ccs_xy(fb, i, x, y);
+ if (ret)
+ return ret;
/*
* The fence (if used) is aligned to the start of the object
@@ -2739,23 +3005,21 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
tile_size);
offset /= tile_size;
- if (!is_surface_linear(fb->modifier, i)) {
+ if (!is_surface_linear(fb, i)) {
+ struct intel_remapped_plane_info plane_info;
unsigned int tile_width, tile_height;
- unsigned int pitch_tiles;
- struct drm_rect r;
intel_tile_dims(fb, i, &tile_width, &tile_height);
- rot_info->plane[i].offset = offset;
- rot_info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i], tile_width * cpp);
- rot_info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
- rot_info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);
-
- intel_fb->rotated[i].pitch =
- rot_info->plane[i].height * tile_height;
+ plane_info.offset = offset;
+ plane_info.stride = DIV_ROUND_UP(fb->pitches[i],
+ tile_width * cpp);
+ plane_info.width = DIV_ROUND_UP(x + width, tile_width);
+ plane_info.height = DIV_ROUND_UP(y + height,
+ tile_height);
/* how many tiles does this plane need */
- size = rot_info->plane[i].stride * rot_info->plane[i].height;
+ size = plane_info.stride * plane_info.height;
/*
* If the plane isn't horizontally tile aligned,
* we need one more tile.
@@ -2763,36 +3027,13 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
if (x != 0)
size++;
- /* rotate the x/y offsets to match the GTT view */
- drm_rect_init(&r, x, y, width, height);
- drm_rect_rotate(&r,
- rot_info->plane[i].width * tile_width,
- rot_info->plane[i].height * tile_height,
- DRM_MODE_ROTATE_270);
- x = r.x1;
- y = r.y1;
-
- /* rotate the tile dimensions to match the GTT view */
- pitch_tiles = intel_fb->rotated[i].pitch / tile_height;
- swap(tile_width, tile_height);
-
- /*
- * We only keep the x/y offsets, so push all of the
- * gtt offset into the x/y offsets.
- */
- intel_adjust_tile_offset(&x, &y,
- tile_width, tile_height,
- tile_size, pitch_tiles,
- gtt_offset_rotated * tile_size, 0);
-
- gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height;
-
- /*
- * First pixel of the framebuffer from
- * the start of the rotated gtt mapping.
- */
- intel_fb->rotated[i].x = x;
- intel_fb->rotated[i].y = y;
+ gtt_offset_rotated +=
+ setup_fb_rotation(i, &plane_info,
+ gtt_offset_rotated,
+ x, y, width, height,
+ tile_size,
+ tile_width, tile_height,
+ fb);
} else {
size = DIV_ROUND_UP((y + height) * fb->pitches[i] +
x * cpp, tile_size);
@@ -2815,11 +3056,11 @@ static void
intel_plane_remap_gtt(struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv =
- to_i915(plane_state->base.plane->dev);
- struct drm_framebuffer *fb = plane_state->base.fb;
+ to_i915(plane_state->uapi.plane->dev);
+ struct drm_framebuffer *fb = plane_state->hw.fb;
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
struct intel_rotation_info *info = &plane_state->view.rotated;
- unsigned int rotation = plane_state->base.rotation;
+ unsigned int rotation = plane_state->hw.rotation;
int i, num_planes = fb->format->num_planes;
unsigned int tile_size = intel_tile_size(dev_priv);
unsigned int src_x, src_y;
@@ -2830,20 +3071,20 @@ intel_plane_remap_gtt(struct intel_plane_state *plane_state)
plane_state->view.type = drm_rotation_90_or_270(rotation) ?
I915_GGTT_VIEW_ROTATED : I915_GGTT_VIEW_REMAPPED;
- src_x = plane_state->base.src.x1 >> 16;
- src_y = plane_state->base.src.y1 >> 16;
- src_w = drm_rect_width(&plane_state->base.src) >> 16;
- src_h = drm_rect_height(&plane_state->base.src) >> 16;
+ src_x = plane_state->uapi.src.x1 >> 16;
+ src_y = plane_state->uapi.src.y1 >> 16;
+ src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
+ src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
WARN_ON(is_ccs_modifier(fb->modifier));
/* Make src coordinates relative to the viewport */
- drm_rect_translate(&plane_state->base.src,
+ drm_rect_translate(&plane_state->uapi.src,
-(src_x << 16), -(src_y << 16));
/* Rotate src coordinates to match rotated GTT view */
if (drm_rotation_90_or_270(rotation))
- drm_rect_rotate(&plane_state->base.src,
+ drm_rect_rotate(&plane_state->uapi.src,
src_w << 16, src_h << 16,
DRM_MODE_ROTATE_270);
@@ -2876,6 +3117,7 @@ intel_plane_remap_gtt(struct intel_plane_state *plane_state)
DRM_MODE_ROTATE_0, tile_size);
offset /= tile_size;
+ WARN_ON(i >= ARRAY_SIZE(info->plane));
info->plane[i].offset = offset;
info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i],
tile_width * cpp);
@@ -2925,8 +3167,8 @@ static int
intel_plane_compute_gtt(struct intel_plane_state *plane_state)
{
const struct intel_framebuffer *fb =
- to_intel_framebuffer(plane_state->base.fb);
- unsigned int rotation = plane_state->base.rotation;
+ to_intel_framebuffer(plane_state->hw.fb);
+ unsigned int rotation = plane_state->hw.rotation;
int i, num_planes;
if (!fb)
@@ -2963,7 +3205,7 @@ intel_plane_compute_gtt(struct intel_plane_state *plane_state)
/* Rotate src coordinates to match rotated GTT view */
if (drm_rotation_90_or_270(rotation))
- drm_rect_rotate(&plane_state->base.src,
+ drm_rect_rotate(&plane_state->uapi.src,
fb->base.width << 16, fb->base.height << 16,
DRM_MODE_ROTATE_270);
@@ -2975,6 +3217,8 @@ static int i9xx_format_to_fourcc(int format)
switch (format) {
case DISPPLANE_8BPP:
return DRM_FORMAT_C8;
+ case DISPPLANE_BGRA555:
+ return DRM_FORMAT_ARGB1555;
case DISPPLANE_BGRX555:
return DRM_FORMAT_XRGB1555;
case DISPPLANE_BGRX565:
@@ -2984,10 +3228,18 @@ static int i9xx_format_to_fourcc(int format)
return DRM_FORMAT_XRGB8888;
case DISPPLANE_RGBX888:
return DRM_FORMAT_XBGR8888;
+ case DISPPLANE_BGRA888:
+ return DRM_FORMAT_ARGB8888;
+ case DISPPLANE_RGBA888:
+ return DRM_FORMAT_ABGR8888;
case DISPPLANE_BGRX101010:
return DRM_FORMAT_XRGB2101010;
case DISPPLANE_RGBX101010:
return DRM_FORMAT_XBGR2101010;
+ case DISPPLANE_BGRA101010:
+ return DRM_FORMAT_ARGB2101010;
+ case DISPPLANE_RGBA101010:
+ return DRM_FORMAT_ABGR2101010;
case DISPPLANE_RGBX161616:
return DRM_FORMAT_XBGR16161616F;
}
@@ -3032,10 +3284,17 @@ int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
return DRM_FORMAT_XRGB8888;
}
case PLANE_CTL_FORMAT_XRGB_2101010:
- if (rgb_order)
- return DRM_FORMAT_XBGR2101010;
- else
- return DRM_FORMAT_XRGB2101010;
+ if (rgb_order) {
+ if (alpha)
+ return DRM_FORMAT_ABGR2101010;
+ else
+ return DRM_FORMAT_XBGR2101010;
+ } else {
+ if (alpha)
+ return DRM_FORMAT_ARGB2101010;
+ else
+ return DRM_FORMAT_XRGB2101010;
+ }
case PLANE_CTL_FORMAT_XRGB_16161616F:
if (rgb_order) {
if (alpha)
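The new nesting decodes two independent bits into four fourccs. A tiny standalone sketch of the 2x2 mapping for the 2101010 case (string names stand in for the DRM_FORMAT_* constants):

#include <stdio.h>

/* rgb_order selects BGR vs RGB channel order, alpha selects A vs X. */
static const char *fourcc_2101010(int rgb_order, int alpha)
{
	if (rgb_order)
		return alpha ? "ABGR2101010" : "XBGR2101010";
	return alpha ? "ARGB2101010" : "XRGB2101010";
}

int main(void)
{
	for (int o = 0; o < 2; o++)
		for (int a = 0; a < 2; a++)
			printf("rgb_order=%d alpha=%d -> %s\n", o, a,
			       fourcc_2101010(o, a));
	return 0;
}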
@@ -3131,19 +3390,19 @@ intel_set_plane_visible(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state,
bool visible)
{
- struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- plane_state->base.visible = visible;
+ plane_state->uapi.visible = visible;
if (visible)
- crtc_state->base.plane_mask |= drm_plane_mask(&plane->base);
+ crtc_state->uapi.plane_mask |= drm_plane_mask(&plane->base);
else
- crtc_state->base.plane_mask &= ~drm_plane_mask(&plane->base);
+ crtc_state->uapi.plane_mask &= ~drm_plane_mask(&plane->base);
}
static void fixup_active_planes(struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
struct drm_plane *plane;
/*
@@ -3154,13 +3413,14 @@ static void fixup_active_planes(struct intel_crtc_state *crtc_state)
crtc_state->active_planes = 0;
drm_for_each_plane_mask(plane, &dev_priv->drm,
- crtc_state->base.plane_mask)
+ crtc_state->uapi.plane_mask)
crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
}
static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
struct intel_plane *plane)
{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
struct intel_plane_state *plane_state =
@@ -3176,7 +3436,27 @@ static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
crtc_state->min_cdclk[plane->id] = 0;
if (plane->id == PLANE_PRIMARY)
- intel_pre_disable_primary_noatomic(&crtc->base);
+ hsw_disable_ips(crtc_state);
+
+ /*
+ * Vblank time updates from the shadow to live plane control register
+ * are blocked if the memory self-refresh mode is active at that
+ * moment. So to make sure the plane gets truly disabled, disable
+ * first the self-refresh mode. The self-refresh enable bit in turn
+ * will be checked/applied by the HW only at the next frame start
+ * event which is after the vblank start event, so we need to have a
+ * wait-for-vblank between disabling the plane and the pipe.
+ */
+ if (HAS_GMCH(dev_priv) &&
+ intel_set_memory_cxsr(dev_priv, false))
+ intel_wait_for_vblank(dev_priv, crtc->pipe);
+
+ /*
+ * Gen2 reports pipe underruns whenever all planes are disabled.
+ * So disable underrun reporting before all the planes get disabled.
+ */
+ if (IS_GEN(dev_priv, 2) && !crtc_state->active_planes)
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
intel_disable_plane(plane, crtc_state);
}
@@ -3229,7 +3509,7 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
continue;
if (intel_plane_ggtt_offset(state) == plane_config->base) {
- fb = state->base.fb;
+ fb = state->hw.fb;
drm_framebuffer_get(fb);
goto valid_fb;
}
@@ -3247,11 +3527,11 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
return;
valid_fb:
- intel_state->base.rotation = plane_config->rotation;
+ intel_state->hw.rotation = plane_config->rotation;
intel_fill_fb_ggtt_view(&intel_state->view, fb,
- intel_state->base.rotation);
+ intel_state->hw.rotation);
intel_state->color_plane[0].stride =
- intel_fb_pitch(fb, 0, intel_state->base.rotation);
+ intel_fb_pitch(fb, 0, intel_state->hw.rotation);
intel_state->vma =
intel_pin_and_fence_fb_obj(fb,
@@ -3279,14 +3559,15 @@ valid_fb:
plane_state->crtc_w = fb->width;
plane_state->crtc_h = fb->height;
- intel_state->base.src = drm_plane_state_src(plane_state);
- intel_state->base.dst = drm_plane_state_dest(plane_state);
+ intel_state->uapi.src = drm_plane_state_src(plane_state);
+ intel_state->uapi.dst = drm_plane_state_dest(plane_state);
if (plane_config->tiling)
dev_priv->preserve_bios_swizzle = true;
plane_state->fb = fb;
plane_state->crtc = &intel_crtc->base;
+ intel_plane_copy_uapi_to_hw_state(intel_state, intel_state);
atomic_or(to_intel_plane(primary)->frontbuffer_bit,
&to_intel_frontbuffer(fb)->bits);
@@ -3378,14 +3659,16 @@ static int icl_max_plane_height(void)
static bool skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state,
int main_x, int main_y, u32 main_offset)
{
- const struct drm_framebuffer *fb = plane_state->base.fb;
- int hsub = fb->format->hsub;
- int vsub = fb->format->vsub;
- int aux_x = plane_state->color_plane[1].x;
- int aux_y = plane_state->color_plane[1].y;
- u32 aux_offset = plane_state->color_plane[1].offset;
- u32 alignment = intel_surf_alignment(fb, 1);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ int ccs_plane = main_to_ccs_plane(fb, 0);
+ int aux_x = plane_state->color_plane[ccs_plane].x;
+ int aux_y = plane_state->color_plane[ccs_plane].y;
+ u32 aux_offset = plane_state->color_plane[ccs_plane].offset;
+ u32 alignment = intel_surf_alignment(fb, ccs_plane);
+ int hsub;
+ int vsub;
+ intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane);
while (aux_offset >= main_offset && aux_y <= main_y) {
int x, y;
@@ -3397,8 +3680,12 @@ static bool skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state
x = aux_x / hsub;
y = aux_y / vsub;
- aux_offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 1,
- aux_offset, aux_offset - alignment);
+ aux_offset = intel_plane_adjust_aligned_offset(&x, &y,
+ plane_state,
+ ccs_plane,
+ aux_offset,
+ aux_offset -
+ alignment);
aux_x = x * hsub + aux_x % hsub;
aux_y = y * vsub + aux_y % vsub;
}
@@ -3406,25 +3693,28 @@ static bool skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state
if (aux_x != main_x || aux_y != main_y)
return false;
- plane_state->color_plane[1].offset = aux_offset;
- plane_state->color_plane[1].x = aux_x;
- plane_state->color_plane[1].y = aux_y;
+ plane_state->color_plane[ccs_plane].offset = aux_offset;
+ plane_state->color_plane[ccs_plane].x = aux_x;
+ plane_state->color_plane[ccs_plane].y = aux_y;
return true;
}
static int skl_check_main_surface(struct intel_plane_state *plane_state)
{
- struct drm_i915_private *dev_priv = to_i915(plane_state->base.plane->dev);
- const struct drm_framebuffer *fb = plane_state->base.fb;
- unsigned int rotation = plane_state->base.rotation;
- int x = plane_state->base.src.x1 >> 16;
- int y = plane_state->base.src.y1 >> 16;
- int w = drm_rect_width(&plane_state->base.src) >> 16;
- int h = drm_rect_height(&plane_state->base.src) >> 16;
+ struct drm_i915_private *dev_priv = to_i915(plane_state->uapi.plane->dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ unsigned int rotation = plane_state->hw.rotation;
+ int x = plane_state->uapi.src.x1 >> 16;
+ int y = plane_state->uapi.src.y1 >> 16;
+ int w = drm_rect_width(&plane_state->uapi.src) >> 16;
+ int h = drm_rect_height(&plane_state->uapi.src) >> 16;
int max_width;
int max_height;
- u32 alignment, offset, aux_offset = plane_state->color_plane[1].offset;
+ u32 alignment;
+ u32 offset;
+ int aux_plane = intel_main_to_aux_plane(fb, 0);
+ u32 aux_offset = plane_state->color_plane[aux_plane].offset;
if (INTEL_GEN(dev_priv) >= 11)
max_width = icl_max_plane_width(fb, 0, rotation);
@@ -3490,7 +3780,8 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
offset, offset - alignment);
}
- if (x != plane_state->color_plane[1].x || y != plane_state->color_plane[1].y) {
+ if (x != plane_state->color_plane[aux_plane].x ||
+ y != plane_state->color_plane[aux_plane].y) {
DRM_DEBUG_KMS("Unable to find suitable display surface offset due to CCS\n");
return -EINVAL;
}
@@ -3504,7 +3795,7 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
* Put the final coordinates back so that the src
* coordinate checks will see the right values.
*/
- drm_rect_translate_to(&plane_state->base.src,
+ drm_rect_translate_to(&plane_state->uapi.src,
x << 16, y << 16);
return 0;
@@ -3512,14 +3803,14 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
{
- const struct drm_framebuffer *fb = plane_state->base.fb;
- unsigned int rotation = plane_state->base.rotation;
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ unsigned int rotation = plane_state->hw.rotation;
int max_width = skl_max_plane_width(fb, 1, rotation);
int max_height = 4096;
- int x = plane_state->base.src.x1 >> 17;
- int y = plane_state->base.src.y1 >> 17;
- int w = drm_rect_width(&plane_state->base.src) >> 17;
- int h = drm_rect_height(&plane_state->base.src) >> 17;
+ int x = plane_state->uapi.src.x1 >> 17;
+ int y = plane_state->uapi.src.y1 >> 17;
+ int w = drm_rect_width(&plane_state->uapi.src) >> 17;
+ int h = drm_rect_height(&plane_state->uapi.src) >> 17;
u32 offset;
intel_add_fb_offsets(&x, &y, plane_state, 1);
@@ -3541,15 +3832,18 @@ static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state)
{
- const struct drm_framebuffer *fb = plane_state->base.fb;
- int src_x = plane_state->base.src.x1 >> 16;
- int src_y = plane_state->base.src.y1 >> 16;
- int hsub = fb->format->hsub;
- int vsub = fb->format->vsub;
- int x = src_x / hsub;
- int y = src_y / vsub;
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ int src_x = plane_state->uapi.src.x1 >> 16;
+ int src_y = plane_state->uapi.src.y1 >> 16;
+ int hsub;
+ int vsub;
+ int x;
+ int y;
u32 offset;
+ intel_fb_plane_get_subsampling(&hsub, &vsub, fb, 1);
+ x = src_x / hsub;
+ y = src_y / vsub;
intel_add_fb_offsets(&x, &y, plane_state, 1);
offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 1);
@@ -3562,21 +3856,22 @@ static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state)
int skl_check_plane_surface(struct intel_plane_state *plane_state)
{
- const struct drm_framebuffer *fb = plane_state->base.fb;
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
int ret;
ret = intel_plane_compute_gtt(plane_state);
if (ret)
return ret;
- if (!plane_state->base.visible)
+ if (!plane_state->uapi.visible)
return 0;
/*
* Handle the AUX surface first since
* the main surface setup depends on it.
*/
- if (drm_format_info_is_yuv_semiplanar(fb->format)) {
+ if (intel_format_info_is_yuv_semiplanar(fb->format,
+ fb->modifier)) {
ret = skl_check_nv12_aux_surface(plane_state);
if (ret)
return ret;
@@ -3601,7 +3896,7 @@ static void i9xx_plane_ratio(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state,
unsigned int *num, unsigned int *den)
{
- const struct drm_framebuffer *fb = plane_state->base.fb;
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
unsigned int cpp = fb->format->cpp[0];
/*
@@ -3673,7 +3968,7 @@ i9xx_plane_max_stride(struct intel_plane *plane,
static u32 i9xx_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 dspcntr = 0;
@@ -3693,9 +3988,9 @@ static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv =
- to_i915(plane_state->base.plane->dev);
- const struct drm_framebuffer *fb = plane_state->base.fb;
- unsigned int rotation = plane_state->base.rotation;
+ to_i915(plane_state->uapi.plane->dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ unsigned int rotation = plane_state->hw.rotation;
u32 dspcntr;
dspcntr = DISPLAY_PLANE_ENABLE;
@@ -3711,6 +4006,9 @@ static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
case DRM_FORMAT_XRGB1555:
dspcntr |= DISPPLANE_BGRX555;
break;
+ case DRM_FORMAT_ARGB1555:
+ dspcntr |= DISPPLANE_BGRA555;
+ break;
case DRM_FORMAT_RGB565:
dspcntr |= DISPPLANE_BGRX565;
break;
@@ -3720,12 +4018,24 @@ static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
case DRM_FORMAT_XBGR8888:
dspcntr |= DISPPLANE_RGBX888;
break;
+ case DRM_FORMAT_ARGB8888:
+ dspcntr |= DISPPLANE_BGRA888;
+ break;
+ case DRM_FORMAT_ABGR8888:
+ dspcntr |= DISPPLANE_RGBA888;
+ break;
case DRM_FORMAT_XRGB2101010:
dspcntr |= DISPPLANE_BGRX101010;
break;
case DRM_FORMAT_XBGR2101010:
dspcntr |= DISPPLANE_RGBX101010;
break;
+ case DRM_FORMAT_ARGB2101010:
+ dspcntr |= DISPPLANE_BGRA101010;
+ break;
+ case DRM_FORMAT_ABGR2101010:
+ dspcntr |= DISPPLANE_RGBA101010;
+ break;
case DRM_FORMAT_XBGR16161616F:
dspcntr |= DISPPLANE_RGBX161616;
break;
@@ -3750,8 +4060,8 @@ static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv =
- to_i915(plane_state->base.plane->dev);
- const struct drm_framebuffer *fb = plane_state->base.fb;
+ to_i915(plane_state->uapi.plane->dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
int src_x, src_y, src_w;
u32 offset;
int ret;
@@ -3760,12 +4070,12 @@ int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
if (ret)
return ret;
- if (!plane_state->base.visible)
+ if (!plane_state->uapi.visible)
return 0;
- src_w = drm_rect_width(&plane_state->base.src) >> 16;
- src_x = plane_state->base.src.x1 >> 16;
- src_y = plane_state->base.src.y1 >> 16;
+ src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
+ src_x = plane_state->uapi.src.x1 >> 16;
+ src_y = plane_state->uapi.src.y1 >> 16;
/* Undocumented hardware limit on i965/g4x/vlv/chv */
if (HAS_GMCH(dev_priv) && fb->format->cpp[0] == 8 && src_w > 2048)
@@ -3783,14 +4093,14 @@ int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
* Put the final coordinates back so that the src
* coordinate checks will see the right values.
*/
- drm_rect_translate_to(&plane_state->base.src,
+ drm_rect_translate_to(&plane_state->uapi.src,
src_x << 16, src_y << 16);
/* HSW/BDW do this automagically in hardware */
if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) {
- unsigned int rotation = plane_state->base.rotation;
- int src_w = drm_rect_width(&plane_state->base.src) >> 16;
- int src_h = drm_rect_height(&plane_state->base.src) >> 16;
+ unsigned int rotation = plane_state->hw.rotation;
+ int src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
+ int src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
if (rotation & DRM_MODE_ROTATE_180) {
src_x += src_w - 1;
@@ -3827,15 +4137,15 @@ static int
i9xx_plane_check(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state)
{
- struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
int ret;
ret = chv_plane_check_rotation(plane_state);
if (ret)
return ret;
- ret = drm_atomic_helper_check_plane_state(&plane_state->base,
- &crtc_state->base,
+ ret = drm_atomic_helper_check_plane_state(&plane_state->uapi,
+ &crtc_state->uapi,
DRM_PLANE_HELPER_NO_SCALING,
DRM_PLANE_HELPER_NO_SCALING,
i9xx_plane_has_windowing(plane),
@@ -3847,7 +4157,7 @@ i9xx_plane_check(struct intel_crtc_state *crtc_state,
if (ret)
return ret;
- if (!plane_state->base.visible)
+ if (!plane_state->uapi.visible)
return 0;
ret = intel_plane_check_src_coordinates(plane_state);
@@ -3868,10 +4178,10 @@ static void i9xx_update_plane(struct intel_plane *plane,
u32 linear_offset;
int x = plane_state->color_plane[0].x;
int y = plane_state->color_plane[0].y;
- int crtc_x = plane_state->base.dst.x1;
- int crtc_y = plane_state->base.dst.y1;
- int crtc_w = drm_rect_width(&plane_state->base.dst);
- int crtc_h = drm_rect_height(&plane_state->base.dst);
+ int crtc_x = plane_state->uapi.dst.x1;
+ int crtc_y = plane_state->uapi.dst.y1;
+ int crtc_w = drm_rect_width(&plane_state->uapi.dst);
+ int crtc_h = drm_rect_height(&plane_state->uapi.dst);
unsigned long irqflags;
u32 dspaddr_offset;
u32 dspcntr;
@@ -4011,7 +4321,7 @@ static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
*/
static void skl_detach_scalers(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
const struct intel_crtc_scaler_state *scaler_state =
&crtc_state->scaler_state;
int i;
@@ -4030,7 +4340,7 @@ static unsigned int skl_plane_stride_mult(const struct drm_framebuffer *fb,
* The stride is either expressed as a multiple of 64 byte chunks for
* linear buffers or in number of tiles for tiled buffers.
*/
- if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
+ if (is_surface_linear(fb, color_plane))
return 64;
else if (drm_rotation_90_or_270(rotation))
return intel_tile_height(fb, color_plane);
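In units, that means: linear stride is programmed in 64-byte chunks, tiled stride in whole tiles. A throwaway sketch with a hypothetical 16 KiB pitch and a 128-byte Y-tile row:

#include <stdio.h>

/* Stride units per the rule above: divide the byte pitch by the
 * multiplier skl_plane_stride_mult() returns. */
int main(void)
{
	unsigned int pitch = 16384;	/* bytes, hypothetical */
	unsigned int y_tile_row = 128;	/* bytes per Y-tile row */

	printf("linear: %u units\n", pitch / 64);
	printf("Y-tiled: %u tiles\n", pitch / y_tile_row);
	return 0;
}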
@@ -4041,8 +4351,8 @@ static unsigned int skl_plane_stride_mult(const struct drm_framebuffer *fb,
u32 skl_plane_stride(const struct intel_plane_state *plane_state,
int color_plane)
{
- const struct drm_framebuffer *fb = plane_state->base.fb;
- unsigned int rotation = plane_state->base.rotation;
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ unsigned int rotation = plane_state->hw.rotation;
u32 stride = plane_state->color_plane[color_plane].stride;
if (color_plane >= fb->format->num_planes)
@@ -4065,8 +4375,10 @@ static u32 skl_plane_ctl_format(u32 pixel_format)
case DRM_FORMAT_ARGB8888:
return PLANE_CTL_FORMAT_XRGB_8888;
case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_ABGR2101010:
return PLANE_CTL_FORMAT_XRGB_2101010 | PLANE_CTL_ORDER_RGBX;
case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_ARGB2101010:
return PLANE_CTL_FORMAT_XRGB_2101010;
case DRM_FORMAT_XBGR16161616F:
case DRM_FORMAT_ABGR16161616F:
@@ -4111,10 +4423,10 @@ static u32 skl_plane_ctl_format(u32 pixel_format)
static u32 skl_plane_ctl_alpha(const struct intel_plane_state *plane_state)
{
- if (!plane_state->base.fb->format->has_alpha)
+ if (!plane_state->hw.fb->format->has_alpha)
return PLANE_CTL_ALPHA_DISABLE;
- switch (plane_state->base.pixel_blend_mode) {
+ switch (plane_state->hw.pixel_blend_mode) {
case DRM_MODE_BLEND_PIXEL_NONE:
return PLANE_CTL_ALPHA_DISABLE;
case DRM_MODE_BLEND_PREMULTI:
@@ -4122,17 +4434,17 @@ static u32 skl_plane_ctl_alpha(const struct intel_plane_state *plane_state)
case DRM_MODE_BLEND_COVERAGE:
return PLANE_CTL_ALPHA_HW_PREMULTIPLY;
default:
- MISSING_CASE(plane_state->base.pixel_blend_mode);
+ MISSING_CASE(plane_state->hw.pixel_blend_mode);
return PLANE_CTL_ALPHA_DISABLE;
}
}
static u32 glk_plane_color_ctl_alpha(const struct intel_plane_state *plane_state)
{
- if (!plane_state->base.fb->format->has_alpha)
+ if (!plane_state->hw.fb->format->has_alpha)
return PLANE_COLOR_ALPHA_DISABLE;
- switch (plane_state->base.pixel_blend_mode) {
+ switch (plane_state->hw.pixel_blend_mode) {
case DRM_MODE_BLEND_PIXEL_NONE:
return PLANE_COLOR_ALPHA_DISABLE;
case DRM_MODE_BLEND_PREMULTI:
@@ -4140,7 +4452,7 @@ static u32 glk_plane_color_ctl_alpha(const struct intel_plane_state *plane_state
case DRM_MODE_BLEND_COVERAGE:
return PLANE_COLOR_ALPHA_HW_PREMULTIPLY;
default:
- MISSING_CASE(plane_state->base.pixel_blend_mode);
+ MISSING_CASE(plane_state->hw.pixel_blend_mode);
return PLANE_COLOR_ALPHA_DISABLE;
}
}
@@ -4156,6 +4468,10 @@ static u32 skl_plane_ctl_tiling(u64 fb_modifier)
return PLANE_CTL_TILED_Y;
case I915_FORMAT_MOD_Y_TILED_CCS:
return PLANE_CTL_TILED_Y | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
+ case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
+ return PLANE_CTL_TILED_Y |
+ PLANE_CTL_RENDER_DECOMPRESSION_ENABLE |
+ PLANE_CTL_CLEAR_COLOR_DISABLE;
case I915_FORMAT_MOD_Yf_TILED:
return PLANE_CTL_TILED_YF;
case I915_FORMAT_MOD_Yf_TILED_CCS:
@@ -4206,7 +4522,7 @@ static u32 cnl_plane_ctl_flip(unsigned int reflect)
u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
u32 plane_ctl = 0;
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
@@ -4225,9 +4541,9 @@ u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv =
- to_i915(plane_state->base.plane->dev);
- const struct drm_framebuffer *fb = plane_state->base.fb;
- unsigned int rotation = plane_state->base.rotation;
+ to_i915(plane_state->uapi.plane->dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ unsigned int rotation = plane_state->hw.rotation;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
u32 plane_ctl;
@@ -4237,10 +4553,10 @@ u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
plane_ctl |= skl_plane_ctl_alpha(plane_state);
plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
- if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
+ if (plane_state->hw.color_encoding == DRM_COLOR_YCBCR_BT709)
plane_ctl |= PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709;
- if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
+ if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
plane_ctl |= PLANE_CTL_YUV_RANGE_CORRECTION_DISABLE;
}
@@ -4262,7 +4578,7 @@ u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
u32 plane_color_ctl = 0;
if (INTEL_GEN(dev_priv) >= 11)
@@ -4281,21 +4597,21 @@ u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv =
- to_i915(plane_state->base.plane->dev);
- const struct drm_framebuffer *fb = plane_state->base.fb;
- struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ to_i915(plane_state->uapi.plane->dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
u32 plane_color_ctl = 0;
plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE;
plane_color_ctl |= glk_plane_color_ctl_alpha(plane_state);
if (fb->format->is_yuv && !icl_is_hdr_plane(dev_priv, plane->id)) {
- if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
+ if (plane_state->hw.color_encoding == DRM_COLOR_YCBCR_BT709)
plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709;
else
plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV601_TO_RGB709;
- if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
+ if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
} else if (fb->format->is_yuv) {
plane_color_ctl |= PLANE_COLOR_INPUT_CSC_ENABLE;
@@ -4483,7 +4799,7 @@ static void icl_set_pipe_chicken(struct intel_crtc *crtc)
static void icl_enable_trans_port_sync(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 trans_ddi_func_ctl2_val;
u8 master_select;
@@ -4511,25 +4827,6 @@ static void icl_enable_trans_port_sync(const struct intel_crtc_state *crtc_state
trans_ddi_func_ctl2_val);
}
-static void icl_disable_transcoder_port_sync(const struct intel_crtc_state *old_crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- i915_reg_t reg;
- u32 trans_ddi_func_ctl2_val;
-
- if (old_crtc_state->master_transcoder == INVALID_TRANSCODER)
- return;
-
- DRM_DEBUG_KMS("Disabling Transcoder Port Sync on Slave Transcoder %s\n",
- transcoder_name(old_crtc_state->cpu_transcoder));
-
- reg = TRANS_DDI_FUNC_CTL2(old_crtc_state->cpu_transcoder);
- trans_ddi_func_ctl2_val = ~(PORT_SYNC_MODE_ENABLE |
- PORT_SYNC_MODE_MASTER_SELECT_MASK);
- I915_WRITE(reg, trans_ddi_func_ctl2_val);
-}
-
static void intel_fdi_normal_train(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
@@ -4582,7 +4879,7 @@ static void ironlake_fdi_link_train(struct intel_crtc *crtc,
u32 temp, tries;
/* FDI needs bits from pipe first */
- assert_pipe_enabled(dev_priv, pipe);
+ assert_pipe_enabled(dev_priv, crtc_state->cpu_transcoder);
/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
for train result */
@@ -4927,7 +5224,7 @@ train_done:
static void ironlake_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
enum pipe pipe = intel_crtc->pipe;
i915_reg_t reg;
@@ -4992,12 +5289,10 @@ static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
udelay(100);
}
-static void ironlake_fdi_disable(struct drm_crtc *crtc)
+static void ironlake_fdi_disable(struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum pipe pipe = intel_crtc->pipe;
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
i915_reg_t reg;
u32 temp;
@@ -5088,9 +5383,9 @@ void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
/* Program iCLKIP clock to the desired frequency */
static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- int clock = crtc_state->base.adjusted_mode.crtc_clock;
+ int clock = crtc_state->hw.adjusted_mode.crtc_clock;
u32 divsel, phaseinc, auxdiv, phasedir = 0;
u32 temp;
@@ -5204,7 +5499,7 @@ int lpt_get_iclkip(struct drm_i915_private *dev_priv)
static void ironlake_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state,
enum pipe pch_transcoder)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
@@ -5247,7 +5542,7 @@ static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool e
static void ivybridge_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
switch (crtc->pipe) {
@@ -5277,7 +5572,7 @@ static struct intel_encoder *
intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
const struct drm_connector_state *connector_state;
const struct drm_connector *connector;
struct intel_encoder *encoder = NULL;
@@ -5309,7 +5604,7 @@ intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
static void ironlake_pch_enable(const struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
enum pipe pipe = crtc->pipe;
@@ -5363,7 +5658,7 @@ static void ironlake_pch_enable(const struct intel_atomic_state *state,
if (HAS_PCH_CPT(dev_priv) &&
intel_crtc_has_dp_encoder(crtc_state)) {
const struct drm_display_mode *adjusted_mode =
- &crtc_state->base.adjusted_mode;
+ &crtc_state->hw.adjusted_mode;
u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
i915_reg_t reg = TRANS_DP_CTL(pipe);
enum port port;
@@ -5393,7 +5688,7 @@ static void ironlake_pch_enable(const struct intel_atomic_state *state,
static void lpt_pch_enable(const struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
@@ -5407,9 +5702,9 @@ static void lpt_pch_enable(const struct intel_atomic_state *state,
lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
}
-static void cpt_verify_modeset(struct drm_device *dev, enum pipe pipe)
+static void cpt_verify_modeset(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
i915_reg_t dslreg = PIPEDSL(pipe);
u32 temp;
@@ -5505,15 +5800,16 @@ static int
skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
unsigned int scaler_user, int *scaler_id,
int src_w, int src_h, int dst_w, int dst_h,
- const struct drm_format_info *format, bool need_scaler)
+ const struct drm_format_info *format,
+ u64 modifier, bool need_scaler)
{
struct intel_crtc_scaler_state *scaler_state =
&crtc_state->scaler_state;
struct intel_crtc *intel_crtc =
- to_intel_crtc(crtc_state->base.crtc);
+ to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
const struct drm_display_mode *adjusted_mode =
- &crtc_state->base.adjusted_mode;
+ &crtc_state->hw.adjusted_mode;
/*
* Src coordinates are already rotated by 270 degrees for
@@ -5529,7 +5825,7 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
* Once NV12 is enabled, handle it here while allocating scaler
* for NV12.
*/
- if (INTEL_GEN(dev_priv) >= 9 && crtc_state->base.enable &&
+ if (INTEL_GEN(dev_priv) >= 9 && crtc_state->hw.enable &&
need_scaler && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
DRM_DEBUG_KMS("Pipe/Plane scaling not supported with IF-ID mode\n");
return -EINVAL;
@@ -5559,7 +5855,7 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
return 0;
}
- if (format && drm_format_info_is_yuv_semiplanar(format) &&
+ if (format && intel_format_info_is_yuv_semiplanar(format, modifier) &&
(src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) {
DRM_DEBUG_KMS("Planar YUV: src dimensions not met\n");
return -EINVAL;
@@ -5601,17 +5897,18 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
*/
int skl_update_scaler_crtc(struct intel_crtc_state *state)
{
- const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;
+ const struct drm_display_mode *adjusted_mode = &state->hw.adjusted_mode;
bool need_scaler = false;
if (state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
need_scaler = true;
- return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
+ return skl_update_scaler(state, !state->hw.active, SKL_CRTC_INDEX,
&state->scaler_state.scaler_id,
state->pipe_src_w, state->pipe_src_h,
adjusted_mode->crtc_hdisplay,
- adjusted_mode->crtc_vdisplay, NULL, need_scaler);
+ adjusted_mode->crtc_vdisplay, NULL, 0,
+ need_scaler);
}
/**
@@ -5627,26 +5924,28 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state)
{
struct intel_plane *intel_plane =
- to_intel_plane(plane_state->base.plane);
+ to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
- struct drm_framebuffer *fb = plane_state->base.fb;
+ struct drm_framebuffer *fb = plane_state->hw.fb;
int ret;
- bool force_detach = !fb || !plane_state->base.visible;
+ bool force_detach = !fb || !plane_state->uapi.visible;
bool need_scaler = false;
/* Pre-gen11 and SDR planes always need a scaler for planar formats. */
if (!icl_is_hdr_plane(dev_priv, intel_plane->id) &&
- fb && drm_format_info_is_yuv_semiplanar(fb->format))
+ fb && intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier))
need_scaler = true;
ret = skl_update_scaler(crtc_state, force_detach,
drm_plane_index(&intel_plane->base),
&plane_state->scaler_id,
- drm_rect_width(&plane_state->base.src) >> 16,
- drm_rect_height(&plane_state->base.src) >> 16,
- drm_rect_width(&plane_state->base.dst),
- drm_rect_height(&plane_state->base.dst),
- fb ? fb->format : NULL, need_scaler);
+ drm_rect_width(&plane_state->uapi.src) >> 16,
+ drm_rect_height(&plane_state->uapi.src) >> 16,
+ drm_rect_width(&plane_state->uapi.dst),
+ drm_rect_height(&plane_state->uapi.dst),
+ fb ? fb->format : NULL,
+ fb ? fb->modifier : 0,
+ need_scaler);
if (ret || plane_state->scaler_id < 0)
return ret;
@@ -5668,6 +5967,8 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
case DRM_FORMAT_ARGB8888:
case DRM_FORMAT_XRGB2101010:
case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_ARGB2101010:
+ case DRM_FORMAT_ABGR2101010:
case DRM_FORMAT_YUYV:
case DRM_FORMAT_YVYU:
case DRM_FORMAT_UYVY:
@@ -5700,8 +6001,9 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
return 0;
}
-static void skylake_scaler_disable(struct intel_crtc *crtc)
+void skylake_scaler_disable(const struct intel_crtc_state *old_crtc_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
int i;
for (i = 0; i < crtc->num_scalers; i++)
@@ -5710,7 +6012,7 @@ static void skylake_scaler_disable(struct intel_crtc *crtc)
static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
const struct intel_crtc_scaler_state *scaler_state =
@@ -5747,7 +6049,7 @@ static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state)
static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
@@ -5768,7 +6070,7 @@ static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state)
void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -5804,7 +6106,7 @@ void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -5839,77 +6141,10 @@ static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
*/
}
-/**
- * intel_post_enable_primary - Perform operations after enabling primary plane
- * @crtc: the CRTC whose primary plane was just enabled
- * @new_crtc_state: the enabling state
- *
- * Performs potentially sleeping operations that must be done after the primary
- * plane is enabled, such as updating FBC and IPS. Note that this may be
- * called due to an explicit primary plane update, or due to an implicit
- * re-enable that is caused when a sprite plane is updated to no longer
- * completely hide the primary plane.
- */
-static void
-intel_post_enable_primary(struct drm_crtc *crtc,
- const struct intel_crtc_state *new_crtc_state)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum pipe pipe = intel_crtc->pipe;
-
- /*
- * Gen2 reports pipe underruns whenever all planes are disabled.
- * So don't enable underrun reporting before at least some planes
- * are enabled.
- * FIXME: Need to fix the logic to work when we turn off all planes
- * but leave the pipe running.
- */
- if (IS_GEN(dev_priv, 2))
- intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
-
- /* Underruns don't always raise interrupts, so check manually. */
- intel_check_cpu_fifo_underruns(dev_priv);
- intel_check_pch_fifo_underruns(dev_priv);
-}
-
-/* FIXME get rid of this and use pre_plane_update */
-static void
-intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum pipe pipe = intel_crtc->pipe;
-
- /*
- * Gen2 reports pipe underruns whenever all planes are disabled.
- * So disable underrun reporting before all the planes get disabled.
- */
- if (IS_GEN(dev_priv, 2))
- intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
-
- hsw_disable_ips(to_intel_crtc_state(crtc->state));
-
- /*
- * Vblank time updates from the shadow to live plane control register
- * are blocked if the memory self-refresh mode is active at that
- * moment. So to make sure the plane gets truly disabled, disable
- * first the self-refresh mode. The self-refresh enable bit in turn
- * will be checked/applied by the HW only at the next frame start
- * event which is after the vblank start event, so we need to have a
- * wait-for-vblank between disabling the plane and the pipe.
- */
- if (HAS_GMCH(dev_priv) &&
- intel_set_memory_cxsr(dev_priv, false))
- intel_wait_for_vblank(dev_priv, pipe);
-}
-
static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state,
const struct intel_crtc_state *new_crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
if (!old_crtc_state->ips_enabled)
@@ -5925,7 +6160,7 @@ static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_s
* Disable IPS before we program the LUT.
*/
if (IS_HASWELL(dev_priv) &&
- (new_crtc_state->base.color_mgmt_changed ||
+ (new_crtc_state->uapi.color_mgmt_changed ||
new_crtc_state->update_pipe) &&
new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
return true;
@@ -5936,7 +6171,7 @@ static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_s
static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_state,
const struct intel_crtc_state *new_crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
if (!new_crtc_state->ips_enabled)
@@ -5952,7 +6187,7 @@ static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_s
* Re-enable IPS after the LUT has been programmed.
*/
if (IS_HASWELL(dev_priv) &&
- (new_crtc_state->base.color_mgmt_changed ||
+ (new_crtc_state->uapi.color_mgmt_changed ||
new_crtc_state->update_pipe) &&
new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
return true;
@@ -5962,15 +6197,16 @@ static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_s
* forcibly enable IPS on the first fastset.
*/
if (new_crtc_state->update_pipe &&
- old_crtc_state->base.adjusted_mode.private_flags & I915_MODE_FLAG_INHERITED)
+ old_crtc_state->hw.adjusted_mode.private_flags & I915_MODE_FLAG_INHERITED)
return true;
return !old_crtc_state->ips_enabled;
}
-static bool needs_nv12_wa(struct drm_i915_private *dev_priv,
- const struct intel_crtc_state *crtc_state)
+static bool needs_nv12_wa(const struct intel_crtc_state *crtc_state)
{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+
if (!crtc_state->nv12_planes)
return false;
@@ -5981,9 +6217,10 @@ static bool needs_nv12_wa(struct drm_i915_private *dev_priv,
return false;
}
-static bool needs_scalerclk_wa(struct drm_i915_private *dev_priv,
- const struct intel_crtc_state *crtc_state)
+static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state)
{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+
/* Wa_2006604312:icl */
if (crtc_state->scaler_state.scaler_users > 0 && IS_ICELAKE(dev_priv))
return true;
@@ -5991,89 +6228,82 @@ static bool needs_scalerclk_wa(struct drm_i915_private *dev_priv,
return false;
}
-static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
+static bool planes_enabling(const struct intel_crtc_state *old_crtc_state,
+ const struct intel_crtc_state *new_crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_atomic_state *state = old_crtc_state->base.state;
- struct intel_crtc_state *pipe_config =
- intel_atomic_get_new_crtc_state(to_intel_atomic_state(state),
- crtc);
- struct drm_plane *primary = crtc->base.primary;
- struct drm_plane_state *old_primary_state =
- drm_atomic_get_old_plane_state(state, primary);
+ return (!old_crtc_state->active_planes || needs_modeset(new_crtc_state)) &&
+ new_crtc_state->active_planes;
+}
- intel_frontbuffer_flip(to_i915(crtc->base.dev), pipe_config->fb_bits);
+static bool planes_disabling(const struct intel_crtc_state *old_crtc_state,
+ const struct intel_crtc_state *new_crtc_state)
+{
+ return old_crtc_state->active_planes &&
+ (!new_crtc_state->active_planes || needs_modeset(new_crtc_state));
+}
- if (pipe_config->update_wm_post && pipe_config->base.active)
- intel_update_watermarks(crtc);
+static void intel_post_plane_update(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_plane *primary = to_intel_plane(crtc->base.primary);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ const struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct intel_plane_state *new_primary_state =
+ intel_atomic_get_new_plane_state(state, primary);
+ enum pipe pipe = crtc->pipe;
+
+ intel_frontbuffer_flip(dev_priv, new_crtc_state->fb_bits);
- if (hsw_post_update_enable_ips(old_crtc_state, pipe_config))
- hsw_enable_ips(pipe_config);
+ if (new_crtc_state->update_wm_post && new_crtc_state->hw.active)
+ intel_update_watermarks(crtc);
- if (old_primary_state) {
- struct drm_plane_state *new_primary_state =
- drm_atomic_get_new_plane_state(state, primary);
+ if (hsw_post_update_enable_ips(old_crtc_state, new_crtc_state))
+ hsw_enable_ips(new_crtc_state);
+ if (new_primary_state)
intel_fbc_post_update(crtc);
- if (new_primary_state->visible &&
- (needs_modeset(pipe_config) ||
- !old_primary_state->visible))
- intel_post_enable_primary(&crtc->base, pipe_config);
- }
-
- if (needs_nv12_wa(dev_priv, old_crtc_state) &&
- !needs_nv12_wa(dev_priv, pipe_config))
- skl_wa_827(dev_priv, crtc->pipe, false);
+ if (needs_nv12_wa(old_crtc_state) &&
+ !needs_nv12_wa(new_crtc_state))
+ skl_wa_827(dev_priv, pipe, false);
- if (needs_scalerclk_wa(dev_priv, old_crtc_state) &&
- !needs_scalerclk_wa(dev_priv, pipe_config))
- icl_wa_scalerclkgating(dev_priv, crtc->pipe, false);
+ if (needs_scalerclk_wa(old_crtc_state) &&
+ !needs_scalerclk_wa(new_crtc_state))
+ icl_wa_scalerclkgating(dev_priv, pipe, false);
}
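
The new planes_enabling()/planes_disabling() predicates introduced above replace the old visible-primary bookkeeping with two symmetric conditions on active_planes and needs_modeset(). A small self-contained check of the intended truth table, using a simplified stand-in for intel_crtc_state:

#include <assert.h>
#include <stdbool.h>

/* Simplified stand-in: just the two inputs the predicates consume. */
struct crtc_state {
	unsigned int active_planes;	/* bitmask of enabled planes */
	bool modeset;			/* needs_modeset() result */
};

static bool planes_enabling(const struct crtc_state *old,
			    const struct crtc_state *new)
{
	return (!old->active_planes || new->modeset) && new->active_planes;
}

static bool planes_disabling(const struct crtc_state *old,
			     const struct crtc_state *new)
{
	return old->active_planes && (!new->active_planes || new->modeset);
}

int main(void)
{
	struct crtc_state off = { 0, false };
	struct crtc_state on = { 0x1, false };
	struct crtc_state on_ms = { 0x1, true };

	/* Plain fastset with planes on both sides: neither fires. */
	assert(!planes_enabling(&on, &on) && !planes_disabling(&on, &on));
	/* Planes appearing or vanishing. */
	assert(planes_enabling(&off, &on));
	assert(planes_disabling(&on, &off));
	/* A full modeset turns planes off and back on. */
	assert(planes_disabling(&on, &on_ms) && planes_enabling(&on, &on_ms));
	return 0;
}
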
-static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
- struct intel_crtc_state *pipe_config)
+static void intel_pre_plane_update(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_atomic_state *state = old_crtc_state->base.state;
- struct drm_plane *primary = crtc->base.primary;
- struct drm_plane_state *old_primary_state =
- drm_atomic_get_old_plane_state(state, primary);
- bool modeset = needs_modeset(pipe_config);
- struct intel_atomic_state *intel_state =
- to_intel_atomic_state(state);
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_plane *primary = to_intel_plane(crtc->base.primary);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ const struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct intel_plane_state *new_primary_state =
+ intel_atomic_get_new_plane_state(state, primary);
+ enum pipe pipe = crtc->pipe;
- if (hsw_pre_update_disable_ips(old_crtc_state, pipe_config))
+ if (hsw_pre_update_disable_ips(old_crtc_state, new_crtc_state))
hsw_disable_ips(old_crtc_state);
- if (old_primary_state) {
- struct intel_plane_state *new_primary_state =
- intel_atomic_get_new_plane_state(intel_state,
- to_intel_plane(primary));
-
- intel_fbc_pre_update(crtc, pipe_config, new_primary_state);
- /*
- * Gen2 reports pipe underruns whenever all planes are disabled.
- * So disable underrun reporting before all the planes get disabled.
- */
- if (IS_GEN(dev_priv, 2) && old_primary_state->visible &&
- (modeset || !new_primary_state->base.visible))
- intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
- }
+ if (new_primary_state &&
+ intel_fbc_pre_update(crtc, new_crtc_state, new_primary_state))
+ intel_wait_for_vblank(dev_priv, pipe);
/* Display WA 827 */
- if (!needs_nv12_wa(dev_priv, old_crtc_state) &&
- needs_nv12_wa(dev_priv, pipe_config))
- skl_wa_827(dev_priv, crtc->pipe, true);
+ if (!needs_nv12_wa(old_crtc_state) &&
+ needs_nv12_wa(new_crtc_state))
+ skl_wa_827(dev_priv, pipe, true);
/* Wa_2006604312:icl */
- if (!needs_scalerclk_wa(dev_priv, old_crtc_state) &&
- needs_scalerclk_wa(dev_priv, pipe_config))
- icl_wa_scalerclkgating(dev_priv, crtc->pipe, true);
+ if (!needs_scalerclk_wa(old_crtc_state) &&
+ needs_scalerclk_wa(new_crtc_state))
+ icl_wa_scalerclkgating(dev_priv, pipe, true);
/*
* Vblank time updates from the shadow to live plane control register
@@ -6084,9 +6314,9 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
* event which is after the vblank start event, so we need to have a
* wait-for-vblank between disabling the plane and the pipe.
*/
- if (HAS_GMCH(dev_priv) && old_crtc_state->base.active &&
- pipe_config->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
- intel_wait_for_vblank(dev_priv, crtc->pipe);
+ if (HAS_GMCH(dev_priv) && old_crtc_state->hw.active &&
+ new_crtc_state->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
+ intel_wait_for_vblank(dev_priv, pipe);
/*
* IVB workaround: must disable low power watermarks for at least
@@ -6095,36 +6325,45 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
*
* WaCxSRDisabledForSpriteScaling:ivb
*/
- if (pipe_config->disable_lp_wm && ilk_disable_lp_wm(dev) &&
- old_crtc_state->base.active)
- intel_wait_for_vblank(dev_priv, crtc->pipe);
+ if (old_crtc_state->hw.active &&
+ new_crtc_state->disable_lp_wm && ilk_disable_lp_wm(dev_priv))
+ intel_wait_for_vblank(dev_priv, pipe);
/*
- * If we're doing a modeset, we're done. No need to do any pre-vblank
- * watermark programming here.
+ * If we're doing a modeset we don't need to do any
+ * pre-vblank watermark programming here.
*/
- if (needs_modeset(pipe_config))
- return;
+ if (!needs_modeset(new_crtc_state)) {
+ /*
+ * For platforms that support atomic watermarks, program the
+ * 'intermediate' watermarks immediately. On pre-gen9 platforms, these
+ * will be the intermediate values that are safe for both pre- and
+ * post- vblank; when vblank happens, the 'active' values will be set
+ * to the final 'target' values and we'll do this again to get the
+ * optimal watermarks. For gen9+ platforms, the values we program here
+ * will be the final target values which will get automatically latched
+ * at vblank time; no further programming will be necessary.
+ *
+ * If a platform hasn't been transitioned to atomic watermarks yet,
+ * we'll continue to update watermarks the old way, if flags tell
+ * us to.
+ */
+ if (dev_priv->display.initial_watermarks)
+ dev_priv->display.initial_watermarks(state, crtc);
+ else if (new_crtc_state->update_wm_pre)
+ intel_update_watermarks(crtc);
+ }
/*
- * For platforms that support atomic watermarks, program the
- * 'intermediate' watermarks immediately. On pre-gen9 platforms, these
- * will be the intermediate values that are safe for both pre- and
- * post- vblank; when vblank happens, the 'active' values will be set
- * to the final 'target' values and we'll do this again to get the
- * optimal watermarks. For gen9+ platforms, the values we program here
- * will be the final target values which will get automatically latched
- * at vblank time; no further programming will be necessary.
+ * Gen2 reports pipe underruns whenever all planes are disabled.
+ * So disable underrun reporting before all the planes get disabled.
*
- * If a platform hasn't been transitioned to atomic watermarks yet,
- * we'll continue to update watermarks the old way, if flags tell
- * us to.
+ * We do this after .initial_watermarks() so that we have a
+ * chance of catching underruns with the intermediate watermarks
+ * vs. the old plane configuration.
*/
- if (dev_priv->display.initial_watermarks != NULL)
- dev_priv->display.initial_watermarks(intel_state,
- pipe_config);
- else if (pipe_config->update_wm_pre)
- intel_update_watermarks(crtc);
+ if (IS_GEN(dev_priv, 2) && planes_disabling(old_crtc_state, new_crtc_state))
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
}
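
For the 'intermediate' watermarks mentioned in the comment above: between the commit and the next vblank the hardware may still be scanning out the old configuration, so the values programmed here must be safe for both. Conceptually that is a per-level conservative merge; the sketch below shows the idea only and is not the driver's ilk_compute_intermediate_wm():

#include <stdio.h>

#define WM_LEVELS 5

struct watermarks { unsigned int level[WM_LEVELS]; };

static unsigned int max_u(unsigned int a, unsigned int b)
{
	return a > b ? a : b;
}

/* Conceptual merge: the per-level maximum of the old active and new
 * optimal values satisfies both configurations; the optimal values
 * are then programmed again after vblank. */
static void compute_intermediate(struct watermarks *inter,
				 const struct watermarks *old_active,
				 const struct watermarks *new_optimal)
{
	for (int i = 0; i < WM_LEVELS; i++)
		inter->level[i] = max_u(old_active->level[i],
					new_optimal->level[i]);
}

int main(void)
{
	struct watermarks old_wm = { { 8, 12, 16, 24, 32 } };
	struct watermarks new_wm = { { 6, 14, 16, 20, 40 } };
	struct watermarks inter;

	compute_intermediate(&inter, &old_wm, &new_wm);
	for (int i = 0; i < WM_LEVELS; i++)
		printf("level %d: %u\n", i, inter.level[i]);
	return 0;
}
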
static void intel_crtc_disable_planes(struct intel_atomic_state *state,
@@ -6148,7 +6387,7 @@ static void intel_crtc_disable_planes(struct intel_atomic_state *state,
intel_disable_plane(plane, new_crtc_state);
- if (old_plane_state->base.visible)
+ if (old_plane_state->uapi.visible)
fb_bits |= plane->frontbuffer_bit;
}
@@ -6247,11 +6486,12 @@ static void intel_encoders_update_complete(struct intel_atomic_state *state)
}
}
-static void intel_encoders_pre_pll_enable(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state,
- struct intel_atomic_state *state)
+static void intel_encoders_pre_pll_enable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct drm_connector_state *conn_state;
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct drm_connector_state *conn_state;
struct drm_connector *conn;
int i;
@@ -6267,11 +6507,12 @@ static void intel_encoders_pre_pll_enable(struct intel_crtc *crtc,
}
}
-static void intel_encoders_pre_enable(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state,
- struct intel_atomic_state *state)
+static void intel_encoders_pre_enable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct drm_connector_state *conn_state;
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct drm_connector_state *conn_state;
struct drm_connector *conn;
int i;
@@ -6287,11 +6528,12 @@ static void intel_encoders_pre_enable(struct intel_crtc *crtc,
}
}
-static void intel_encoders_enable(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state,
- struct intel_atomic_state *state)
+static void intel_encoders_enable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct drm_connector_state *conn_state;
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct drm_connector_state *conn_state;
struct drm_connector *conn;
int i;
@@ -6308,11 +6550,12 @@ static void intel_encoders_enable(struct intel_crtc *crtc,
}
}
-static void intel_encoders_disable(struct intel_crtc *crtc,
- struct intel_crtc_state *old_crtc_state,
- struct intel_atomic_state *state)
+static void intel_encoders_disable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct drm_connector_state *old_conn_state;
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ const struct drm_connector_state *old_conn_state;
struct drm_connector *conn;
int i;
@@ -6329,11 +6572,12 @@ static void intel_encoders_disable(struct intel_crtc *crtc,
}
}
-static void intel_encoders_post_disable(struct intel_crtc *crtc,
- struct intel_crtc_state *old_crtc_state,
- struct intel_atomic_state *state)
+static void intel_encoders_post_disable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct drm_connector_state *old_conn_state;
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ const struct drm_connector_state *old_conn_state;
struct drm_connector *conn;
int i;
@@ -6349,11 +6593,12 @@ static void intel_encoders_post_disable(struct intel_crtc *crtc,
}
}
-static void intel_encoders_post_pll_disable(struct intel_crtc *crtc,
- struct intel_crtc_state *old_crtc_state,
- struct intel_atomic_state *state)
+static void intel_encoders_post_pll_disable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct drm_connector_state *old_conn_state;
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ const struct drm_connector_state *old_conn_state;
struct drm_connector *conn;
int i;
@@ -6369,11 +6614,12 @@ static void intel_encoders_post_pll_disable(struct intel_crtc *crtc,
}
}
-static void intel_encoders_update_pipe(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state,
- struct intel_atomic_state *state)
+static void intel_encoders_update_pipe(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct drm_connector_state *conn_state;
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct drm_connector_state *conn_state;
struct drm_connector *conn;
int i;
@@ -6391,22 +6637,21 @@ static void intel_encoders_update_pipe(struct intel_crtc *crtc,
static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct intel_plane *plane = to_intel_plane(crtc->base.primary);
plane->disable_plane(plane, crtc_state);
}
-static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
- struct intel_atomic_state *state)
+static void ironlake_crtc_enable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct drm_crtc *crtc = pipe_config->base.crtc;
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum pipe pipe = intel_crtc->pipe;
+ const struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
- if (WARN_ON(intel_crtc->active))
+ if (WARN_ON(crtc->active))
return;
/*
@@ -6422,61 +6667,59 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
- if (pipe_config->has_pch_encoder)
- intel_prepare_shared_dpll(pipe_config);
+ if (new_crtc_state->has_pch_encoder)
+ intel_prepare_shared_dpll(new_crtc_state);
- if (intel_crtc_has_dp_encoder(pipe_config))
- intel_dp_set_m_n(pipe_config, M1_N1);
+ if (intel_crtc_has_dp_encoder(new_crtc_state))
+ intel_dp_set_m_n(new_crtc_state, M1_N1);
- intel_set_pipe_timings(pipe_config);
- intel_set_pipe_src_size(pipe_config);
+ intel_set_pipe_timings(new_crtc_state);
+ intel_set_pipe_src_size(new_crtc_state);
- if (pipe_config->has_pch_encoder) {
- intel_cpu_transcoder_set_m_n(pipe_config,
- &pipe_config->fdi_m_n, NULL);
- }
+ if (new_crtc_state->has_pch_encoder)
+ intel_cpu_transcoder_set_m_n(new_crtc_state,
+ &new_crtc_state->fdi_m_n, NULL);
- ironlake_set_pipeconf(pipe_config);
+ ironlake_set_pipeconf(new_crtc_state);
- intel_crtc->active = true;
+ crtc->active = true;
- intel_encoders_pre_enable(intel_crtc, pipe_config, state);
+ intel_encoders_pre_enable(state, crtc);
- if (pipe_config->has_pch_encoder) {
+ if (new_crtc_state->has_pch_encoder) {
/* Note: FDI PLL enabling _must_ be done before we enable the
* cpu pipes, hence this is separate from all the other fdi/pch
* enabling. */
- ironlake_fdi_pll_enable(pipe_config);
+ ironlake_fdi_pll_enable(new_crtc_state);
} else {
assert_fdi_tx_disabled(dev_priv, pipe);
assert_fdi_rx_disabled(dev_priv, pipe);
}
- ironlake_pfit_enable(pipe_config);
+ ironlake_pfit_enable(new_crtc_state);
/*
* On ILK+ LUT must be loaded before the pipe is running but with
* clocks enabled
*/
- intel_color_load_luts(pipe_config);
- intel_color_commit(pipe_config);
+ intel_color_load_luts(new_crtc_state);
+ intel_color_commit(new_crtc_state);
/* update DSPCNTR to configure gamma for pipe bottom color */
- intel_disable_primary_plane(pipe_config);
+ intel_disable_primary_plane(new_crtc_state);
- if (dev_priv->display.initial_watermarks != NULL)
- dev_priv->display.initial_watermarks(state, pipe_config);
- intel_enable_pipe(pipe_config);
+ if (dev_priv->display.initial_watermarks)
+ dev_priv->display.initial_watermarks(state, crtc);
+ intel_enable_pipe(new_crtc_state);
- if (pipe_config->has_pch_encoder)
- ironlake_pch_enable(state, pipe_config);
+ if (new_crtc_state->has_pch_encoder)
+ ironlake_pch_enable(state, new_crtc_state);
- assert_vblank_disabled(crtc);
- intel_crtc_vblank_on(pipe_config);
+ intel_crtc_vblank_on(new_crtc_state);
- intel_encoders_enable(intel_crtc, pipe_config, state);
+ intel_encoders_enable(state, crtc);
if (HAS_PCH_CPT(dev_priv))
- cpt_verify_modeset(dev, intel_crtc->pipe);
+ cpt_verify_modeset(dev_priv, pipe);
/*
* Must wait for vblank to avoid spurious PCH FIFO underruns.
@@ -6484,7 +6727,7 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
* some interlaced HDMI modes. Let's do the double wait always
* in case there are more corner cases we don't know about.
*/
- if (pipe_config->has_pch_encoder) {
+ if (new_crtc_state->has_pch_encoder) {
intel_wait_for_vblank(dev_priv, pipe);
intel_wait_for_vblank(dev_priv, pipe);
}
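
intel_wait_for_vblank() is interrupt-driven in the driver, but its observable effect is the shape sketched below: sample the pipe's frame counter and wait until it advances. The enable path above does it twice because a single wait proved insufficient on some interlaced HDMI modes.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical frame counter read; on real hardware this would be an
 * MMIO read of the pipe frame count register. */
static uint32_t read_frame_counter(void)
{
	static uint32_t frame;

	return frame++;	/* pretend a new frame arrives every poll */
}

/* Generic shape of a vblank wait: poll until the counter changes.
 * The driver sleeps on an interrupt instead of spinning. */
static void wait_for_vblank(void)
{
	uint32_t start = read_frame_counter();

	while (read_frame_counter() == start)
		;
}

int main(void)
{
	wait_for_vblank();
	wait_for_vblank();	/* the defensive double wait */
	puts("two vblanks elapsed");
	return 0;
}
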
@@ -6531,103 +6774,112 @@ static void icl_pipe_mbus_enable(struct intel_crtc *crtc)
I915_WRITE(PIPE_MBUS_DBOX_CTL(pipe), val);
}
-static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
- struct intel_atomic_state *state)
+static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
{
- struct drm_crtc *crtc = pipe_config->base.crtc;
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum pipe pipe = intel_crtc->pipe, hsw_workaround_pipe;
- enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ i915_reg_t reg = CHICKEN_TRANS(crtc_state->cpu_transcoder);
+ u32 val;
+
+ val = I915_READ(reg);
+ val &= ~HSW_FRAME_START_DELAY_MASK;
+ val |= HSW_FRAME_START_DELAY(0);
+ I915_WRITE(reg, val);
+}
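
The new hsw_set_frame_start_delay() is the canonical masked read-modify-write: clear the field, OR in the new value, write back. A generic sketch of the same pattern against a fake register file (the field position is chosen arbitrarily for illustration):

#include <assert.h>
#include <stdint.h>

/* Hypothetical register file standing in for MMIO space. */
static uint32_t regs[16];

static uint32_t reg_read(unsigned int reg)		{ return regs[reg]; }
static void reg_write(unsigned int reg, uint32_t val)	{ regs[reg] = val; }

/* The read-modify-write pattern used above: clear the field's mask,
 * then OR in the new field value, leaving all other bits intact. */
static void reg_update_bits(unsigned int reg, uint32_t mask, uint32_t val)
{
	uint32_t tmp = reg_read(reg);

	tmp &= ~mask;
	tmp |= val & mask;
	reg_write(reg, tmp);
}

int main(void)
{
#define FIELD_MASK	(0x3u << 27)	/* e.g. a 2-bit delay field */
#define FIELD(x)	((uint32_t)(x) << 27)

	reg_write(0, 0xffffffffu);
	reg_update_bits(0, FIELD_MASK, FIELD(0));
	/* Only the field is cleared; the other bits are preserved. */
	assert(reg_read(0) == (0xffffffffu & ~FIELD_MASK));
	return 0;
}
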
+
+static void haswell_crtc_enable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ const struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe, hsw_workaround_pipe;
+ enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
bool psl_clkgate_wa;
- if (WARN_ON(intel_crtc->active))
+ if (WARN_ON(crtc->active))
return;
- intel_encoders_pre_pll_enable(intel_crtc, pipe_config, state);
+ intel_encoders_pre_pll_enable(state, crtc);
- if (pipe_config->shared_dpll)
- intel_enable_shared_dpll(pipe_config);
+ if (new_crtc_state->shared_dpll)
+ intel_enable_shared_dpll(new_crtc_state);
- intel_encoders_pre_enable(intel_crtc, pipe_config, state);
+ intel_encoders_pre_enable(state, crtc);
- if (intel_crtc_has_dp_encoder(pipe_config))
- intel_dp_set_m_n(pipe_config, M1_N1);
+ if (intel_crtc_has_dp_encoder(new_crtc_state))
+ intel_dp_set_m_n(new_crtc_state, M1_N1);
if (!transcoder_is_dsi(cpu_transcoder))
- intel_set_pipe_timings(pipe_config);
+ intel_set_pipe_timings(new_crtc_state);
if (INTEL_GEN(dev_priv) >= 11)
- icl_enable_trans_port_sync(pipe_config);
+ icl_enable_trans_port_sync(new_crtc_state);
- intel_set_pipe_src_size(pipe_config);
+ intel_set_pipe_src_size(new_crtc_state);
if (cpu_transcoder != TRANSCODER_EDP &&
- !transcoder_is_dsi(cpu_transcoder)) {
+ !transcoder_is_dsi(cpu_transcoder))
I915_WRITE(PIPE_MULT(cpu_transcoder),
- pipe_config->pixel_multiplier - 1);
- }
+ new_crtc_state->pixel_multiplier - 1);
- if (pipe_config->has_pch_encoder) {
- intel_cpu_transcoder_set_m_n(pipe_config,
- &pipe_config->fdi_m_n, NULL);
- }
+ if (new_crtc_state->has_pch_encoder)
+ intel_cpu_transcoder_set_m_n(new_crtc_state,
+ &new_crtc_state->fdi_m_n, NULL);
- if (!transcoder_is_dsi(cpu_transcoder))
- haswell_set_pipeconf(pipe_config);
+ if (!transcoder_is_dsi(cpu_transcoder)) {
+ hsw_set_frame_start_delay(new_crtc_state);
+ haswell_set_pipeconf(new_crtc_state);
+ }
if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
- bdw_set_pipemisc(pipe_config);
+ bdw_set_pipemisc(new_crtc_state);
- intel_crtc->active = true;
+ crtc->active = true;
/* Display WA #1180: WaDisableScalarClockGating: glk, cnl */
psl_clkgate_wa = (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
- pipe_config->pch_pfit.enabled;
+ new_crtc_state->pch_pfit.enabled;
if (psl_clkgate_wa)
glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);
if (INTEL_GEN(dev_priv) >= 9)
- skylake_pfit_enable(pipe_config);
+ skylake_pfit_enable(new_crtc_state);
else
- ironlake_pfit_enable(pipe_config);
+ ironlake_pfit_enable(new_crtc_state);
/*
* On ILK+ LUT must be loaded before the pipe is running but with
* clocks enabled
*/
- intel_color_load_luts(pipe_config);
- intel_color_commit(pipe_config);
+ intel_color_load_luts(new_crtc_state);
+ intel_color_commit(new_crtc_state);
/* update DSPCNTR to configure gamma/csc for pipe bottom color */
if (INTEL_GEN(dev_priv) < 9)
- intel_disable_primary_plane(pipe_config);
+ intel_disable_primary_plane(new_crtc_state);
if (INTEL_GEN(dev_priv) >= 11)
- icl_set_pipe_chicken(intel_crtc);
+ icl_set_pipe_chicken(crtc);
if (!transcoder_is_dsi(cpu_transcoder))
- intel_ddi_enable_transcoder_func(pipe_config);
+ intel_ddi_enable_transcoder_func(new_crtc_state);
- if (dev_priv->display.initial_watermarks != NULL)
- dev_priv->display.initial_watermarks(state, pipe_config);
+ if (dev_priv->display.initial_watermarks)
+ dev_priv->display.initial_watermarks(state, crtc);
if (INTEL_GEN(dev_priv) >= 11)
- icl_pipe_mbus_enable(intel_crtc);
+ icl_pipe_mbus_enable(crtc);
/* XXX: Do the pipe assertions at the right place for BXT DSI. */
if (!transcoder_is_dsi(cpu_transcoder))
- intel_enable_pipe(pipe_config);
-
- if (pipe_config->has_pch_encoder)
- lpt_pch_enable(state, pipe_config);
+ intel_enable_pipe(new_crtc_state);
- if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST))
- intel_ddi_set_vc_payload_alloc(pipe_config, true);
+ if (new_crtc_state->has_pch_encoder)
+ lpt_pch_enable(state, new_crtc_state);
- assert_vblank_disabled(crtc);
- intel_crtc_vblank_on(pipe_config);
+ intel_crtc_vblank_on(new_crtc_state);
- intel_encoders_enable(intel_crtc, pipe_config, state);
+ intel_encoders_enable(state, crtc);
if (psl_clkgate_wa) {
intel_wait_for_vblank(dev_priv, pipe);
@@ -6636,16 +6888,16 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
/* If we change the relative order between pipe/planes enabling, we need
* to change the workaround. */
- hsw_workaround_pipe = pipe_config->hsw_workaround_pipe;
+ hsw_workaround_pipe = new_crtc_state->hsw_workaround_pipe;
if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
}
}
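
The psl_clkgate_wa handling above brackets the enable sequence: the workaround bit goes on before the scaler is set up and comes off one vblank after the pipe is running. Reduced to its shape (all names hypothetical):

#include <stdbool.h>
#include <stdio.h>

static void scaler_clock_gating_wa(bool enable)
{
	printf("WA %s\n", enable ? "on" : "off");
}

static void wait_for_vblank(void)
{
	printf("vblank\n");
}

/* Temporally scoped workaround: armed for the duration of the enable
 * sequence, dropped one vblank after the pipe is up. */
static void crtc_enable(bool needs_wa)
{
	if (needs_wa)
		scaler_clock_gating_wa(true);

	/* ... pfit setup, pipe enable, etc. ... */

	if (needs_wa) {
		wait_for_vblank();
		scaler_clock_gating_wa(false);
	}
}

int main(void)
{
	crtc_enable(true);
	return 0;
}
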
-static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state)
+void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
@@ -6658,14 +6910,13 @@ static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state)
}
}
-static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
- struct intel_atomic_state *state)
+static void ironlake_crtc_disable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct drm_crtc *crtc = old_crtc_state->base.crtc;
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum pipe pipe = intel_crtc->pipe;
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
/*
* Sometimes spurious CPU pipe underruns happen when the
@@ -6675,10 +6926,9 @@ static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
- intel_encoders_disable(intel_crtc, old_crtc_state, state);
+ intel_encoders_disable(state, crtc);
- drm_crtc_vblank_off(crtc);
- assert_vblank_disabled(crtc);
+ intel_crtc_vblank_off(old_crtc_state);
intel_disable_pipe(old_crtc_state);
@@ -6687,7 +6937,7 @@ static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
if (old_crtc_state->has_pch_encoder)
ironlake_fdi_disable(crtc);
- intel_encoders_post_disable(intel_crtc, old_crtc_state, state);
+ intel_encoders_post_disable(state, crtc);
if (old_crtc_state->has_pch_encoder) {
ironlake_disable_pch_transcoder(dev_priv, pipe);
@@ -6710,54 +6960,27 @@ static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
I915_WRITE(PCH_DPLL_SEL, temp);
}
- ironlake_fdi_pll_disable(intel_crtc);
+ ironlake_fdi_pll_disable(crtc);
}
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
-static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
- struct intel_atomic_state *state)
+static void haswell_crtc_disable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct drm_crtc *crtc = old_crtc_state->base.crtc;
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
-
- intel_encoders_disable(intel_crtc, old_crtc_state, state);
-
- drm_crtc_vblank_off(crtc);
- assert_vblank_disabled(crtc);
-
- /* XXX: Do the pipe assertions at the right place for BXT DSI. */
- if (!transcoder_is_dsi(cpu_transcoder))
- intel_disable_pipe(old_crtc_state);
-
- if (intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST))
- intel_ddi_set_vc_payload_alloc(old_crtc_state, false);
-
- if (INTEL_GEN(dev_priv) >= 11)
- icl_disable_transcoder_port_sync(old_crtc_state);
-
- if (!transcoder_is_dsi(cpu_transcoder))
- intel_ddi_disable_transcoder_func(old_crtc_state);
-
- intel_dsc_disable(old_crtc_state);
-
- if (INTEL_GEN(dev_priv) >= 9)
- skylake_scaler_disable(intel_crtc);
- else
- ironlake_pfit_disable(old_crtc_state);
-
- intel_encoders_post_disable(intel_crtc, old_crtc_state, state);
-
- intel_encoders_post_pll_disable(intel_crtc, old_crtc_state, state);
+ /*
+ * FIXME collapse everything to one hook.
+ * Need care with mst->ddi interactions.
+ */
+ intel_encoders_disable(state, crtc);
+ intel_encoders_post_disable(state, crtc);
}
static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
if (!crtc_state->gmch_pfit.control)
@@ -6768,7 +6991,7 @@ static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
* according to register description and PRM.
*/
WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
- assert_pipe_disabled(dev_priv, crtc->pipe);
+ assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);
I915_WRITE(PFIT_PGM_RATIOS, crtc_state->gmch_pfit.pgm_ratios);
I915_WRITE(PFIT_CONTROL, crtc_state->gmch_pfit.control);
@@ -6893,14 +7116,14 @@ intel_aux_power_domain(struct intel_digital_port *dig_port)
static u64 get_crtc_power_domains(struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct drm_encoder *encoder;
enum pipe pipe = crtc->pipe;
u64 mask;
enum transcoder transcoder = crtc_state->cpu_transcoder;
- if (!crtc_state->base.active)
+ if (!crtc_state->hw.active)
return 0;
mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe));
@@ -6910,7 +7133,7 @@ static u64 get_crtc_power_domains(struct intel_crtc_state *crtc_state)
mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
drm_for_each_encoder_mask(encoder, &dev_priv->drm,
- crtc_state->base.encoder_mask) {
+ crtc_state->uapi.encoder_mask) {
struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
mask |= BIT_ULL(intel_encoder->power_domain);
@@ -6928,7 +7151,7 @@ static u64 get_crtc_power_domains(struct intel_crtc_state *crtc_state)
static u64
modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum intel_display_power_domain domain;
u64 domains, new_domains, old_domains;
@@ -6954,146 +7177,140 @@ static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
intel_display_power_put_unchecked(dev_priv, domain);
}
-static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
- struct intel_atomic_state *state)
+static void valleyview_crtc_enable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct drm_crtc *crtc = pipe_config->base.crtc;
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum pipe pipe = intel_crtc->pipe;
+ const struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
- if (WARN_ON(intel_crtc->active))
+ if (WARN_ON(crtc->active))
return;
- if (intel_crtc_has_dp_encoder(pipe_config))
- intel_dp_set_m_n(pipe_config, M1_N1);
+ if (intel_crtc_has_dp_encoder(new_crtc_state))
+ intel_dp_set_m_n(new_crtc_state, M1_N1);
- intel_set_pipe_timings(pipe_config);
- intel_set_pipe_src_size(pipe_config);
+ intel_set_pipe_timings(new_crtc_state);
+ intel_set_pipe_src_size(new_crtc_state);
if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
I915_WRITE(CHV_CANVAS(pipe), 0);
}
- i9xx_set_pipeconf(pipe_config);
+ i9xx_set_pipeconf(new_crtc_state);
- intel_crtc->active = true;
+ crtc->active = true;
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
- intel_encoders_pre_pll_enable(intel_crtc, pipe_config, state);
+ intel_encoders_pre_pll_enable(state, crtc);
if (IS_CHERRYVIEW(dev_priv)) {
- chv_prepare_pll(intel_crtc, pipe_config);
- chv_enable_pll(intel_crtc, pipe_config);
+ chv_prepare_pll(crtc, new_crtc_state);
+ chv_enable_pll(crtc, new_crtc_state);
} else {
- vlv_prepare_pll(intel_crtc, pipe_config);
- vlv_enable_pll(intel_crtc, pipe_config);
+ vlv_prepare_pll(crtc, new_crtc_state);
+ vlv_enable_pll(crtc, new_crtc_state);
}
- intel_encoders_pre_enable(intel_crtc, pipe_config, state);
+ intel_encoders_pre_enable(state, crtc);
- i9xx_pfit_enable(pipe_config);
+ i9xx_pfit_enable(new_crtc_state);
- intel_color_load_luts(pipe_config);
- intel_color_commit(pipe_config);
+ intel_color_load_luts(new_crtc_state);
+ intel_color_commit(new_crtc_state);
/* update DSPCNTR to configure gamma for pipe bottom color */
- intel_disable_primary_plane(pipe_config);
+ intel_disable_primary_plane(new_crtc_state);
- dev_priv->display.initial_watermarks(state, pipe_config);
- intel_enable_pipe(pipe_config);
+ dev_priv->display.initial_watermarks(state, crtc);
+ intel_enable_pipe(new_crtc_state);
- assert_vblank_disabled(crtc);
- intel_crtc_vblank_on(pipe_config);
+ intel_crtc_vblank_on(new_crtc_state);
- intel_encoders_enable(intel_crtc, pipe_config, state);
+ intel_encoders_enable(state, crtc);
}
static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
I915_WRITE(FP0(crtc->pipe), crtc_state->dpll_hw_state.fp0);
I915_WRITE(FP1(crtc->pipe), crtc_state->dpll_hw_state.fp1);
}
-static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
- struct intel_atomic_state *state)
+static void i9xx_crtc_enable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct drm_crtc *crtc = pipe_config->base.crtc;
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum pipe pipe = intel_crtc->pipe;
+ const struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
- if (WARN_ON(intel_crtc->active))
+ if (WARN_ON(crtc->active))
return;
- i9xx_set_pll_dividers(pipe_config);
+ i9xx_set_pll_dividers(new_crtc_state);
- if (intel_crtc_has_dp_encoder(pipe_config))
- intel_dp_set_m_n(pipe_config, M1_N1);
+ if (intel_crtc_has_dp_encoder(new_crtc_state))
+ intel_dp_set_m_n(new_crtc_state, M1_N1);
- intel_set_pipe_timings(pipe_config);
- intel_set_pipe_src_size(pipe_config);
+ intel_set_pipe_timings(new_crtc_state);
+ intel_set_pipe_src_size(new_crtc_state);
- i9xx_set_pipeconf(pipe_config);
+ i9xx_set_pipeconf(new_crtc_state);
- intel_crtc->active = true;
+ crtc->active = true;
if (!IS_GEN(dev_priv, 2))
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
- intel_encoders_pre_enable(intel_crtc, pipe_config, state);
+ intel_encoders_pre_enable(state, crtc);
- i9xx_enable_pll(intel_crtc, pipe_config);
+ i9xx_enable_pll(crtc, new_crtc_state);
- i9xx_pfit_enable(pipe_config);
+ i9xx_pfit_enable(new_crtc_state);
- intel_color_load_luts(pipe_config);
- intel_color_commit(pipe_config);
+ intel_color_load_luts(new_crtc_state);
+ intel_color_commit(new_crtc_state);
/* update DSPCNTR to configure gamma for pipe bottom color */
- intel_disable_primary_plane(pipe_config);
+ intel_disable_primary_plane(new_crtc_state);
- if (dev_priv->display.initial_watermarks != NULL)
- dev_priv->display.initial_watermarks(state,
- pipe_config);
+ if (dev_priv->display.initial_watermarks)
+ dev_priv->display.initial_watermarks(state, crtc);
else
- intel_update_watermarks(intel_crtc);
- intel_enable_pipe(pipe_config);
+ intel_update_watermarks(crtc);
+ intel_enable_pipe(new_crtc_state);
- assert_vblank_disabled(crtc);
- intel_crtc_vblank_on(pipe_config);
+ intel_crtc_vblank_on(new_crtc_state);
- intel_encoders_enable(intel_crtc, pipe_config, state);
+ intel_encoders_enable(state, crtc);
}
static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
if (!old_crtc_state->gmch_pfit.control)
return;
- assert_pipe_disabled(dev_priv, crtc->pipe);
+ assert_pipe_disabled(dev_priv, old_crtc_state->cpu_transcoder);
DRM_DEBUG_KMS("disabling pfit, current: 0x%08x\n",
I915_READ(PFIT_CONTROL));
I915_WRITE(PFIT_CONTROL, 0);
}
-static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
- struct intel_atomic_state *state)
+static void i9xx_crtc_disable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct drm_crtc *crtc = old_crtc_state->base.crtc;
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum pipe pipe = intel_crtc->pipe;
+ struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
/*
* On gen2 planes are double buffered but the pipe isn't, so we must
@@ -7102,16 +7319,15 @@ static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
if (IS_GEN(dev_priv, 2))
intel_wait_for_vblank(dev_priv, pipe);
- intel_encoders_disable(intel_crtc, old_crtc_state, state);
+ intel_encoders_disable(state, crtc);
- drm_crtc_vblank_off(crtc);
- assert_vblank_disabled(crtc);
+ intel_crtc_vblank_off(old_crtc_state);
intel_disable_pipe(old_crtc_state);
i9xx_pfit_disable(old_crtc_state);
- intel_encoders_post_disable(intel_crtc, old_crtc_state, state);
+ intel_encoders_post_disable(state, crtc);
if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
if (IS_CHERRYVIEW(dev_priv))
@@ -7122,92 +7338,97 @@ static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
i9xx_disable_pll(old_crtc_state);
}
- intel_encoders_post_pll_disable(intel_crtc, old_crtc_state, state);
+ intel_encoders_post_pll_disable(state, crtc);
if (!IS_GEN(dev_priv, 2))
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
if (!dev_priv->display.initial_watermarks)
- intel_update_watermarks(intel_crtc);
+ intel_update_watermarks(crtc);
/* clock the pipe down to 640x480@60 to potentially save power */
if (IS_I830(dev_priv))
i830_enable_pipe(dev_priv, pipe);
}
-static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
+static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
struct drm_modeset_acquire_ctx *ctx)
{
struct intel_encoder *encoder;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_bw_state *bw_state =
to_intel_bw_state(dev_priv->bw_obj.state);
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
enum intel_display_power_domain domain;
struct intel_plane *plane;
- u64 domains;
struct drm_atomic_state *state;
- struct intel_crtc_state *crtc_state;
+ struct intel_crtc_state *temp_crtc_state;
+ enum pipe pipe = crtc->pipe;
+ u64 domains;
int ret;
- if (!intel_crtc->active)
+ if (!crtc_state->hw.active)
return;
- for_each_intel_plane_on_crtc(&dev_priv->drm, intel_crtc, plane) {
+ for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
const struct intel_plane_state *plane_state =
to_intel_plane_state(plane->base.state);
- if (plane_state->base.visible)
- intel_plane_disable_noatomic(intel_crtc, plane);
+ if (plane_state->uapi.visible)
+ intel_plane_disable_noatomic(crtc, plane);
}
- state = drm_atomic_state_alloc(crtc->dev);
+ state = drm_atomic_state_alloc(&dev_priv->drm);
if (!state) {
DRM_DEBUG_KMS("failed to disable [CRTC:%d:%s], out of memory",
- crtc->base.id, crtc->name);
+ crtc->base.base.id, crtc->base.name);
return;
}
state->acquire_ctx = ctx;
/* Everything's already locked, -EDEADLK can't happen. */
- crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
- ret = drm_atomic_add_affected_connectors(state, crtc);
+ temp_crtc_state = intel_atomic_get_crtc_state(state, crtc);
+ ret = drm_atomic_add_affected_connectors(state, &crtc->base);
- WARN_ON(IS_ERR(crtc_state) || ret);
+ WARN_ON(IS_ERR(temp_crtc_state) || ret);
- dev_priv->display.crtc_disable(crtc_state, to_intel_atomic_state(state));
+ dev_priv->display.crtc_disable(to_intel_atomic_state(state), crtc);
drm_atomic_state_put(state);
DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
- crtc->base.id, crtc->name);
+ crtc->base.base.id, crtc->base.name);
+
+ crtc->active = false;
+ crtc->base.enabled = false;
- WARN_ON(drm_atomic_set_mode_for_crtc(crtc->state, NULL) < 0);
- crtc->state->active = false;
- intel_crtc->active = false;
- crtc->enabled = false;
- crtc->state->connector_mask = 0;
- crtc->state->encoder_mask = 0;
+ WARN_ON(drm_atomic_set_mode_for_crtc(&crtc_state->uapi, NULL) < 0);
+ crtc_state->uapi.active = false;
+ crtc_state->uapi.connector_mask = 0;
+ crtc_state->uapi.encoder_mask = 0;
+ intel_crtc_free_hw_state(crtc_state);
+ memset(&crtc_state->hw, 0, sizeof(crtc_state->hw));
- for_each_encoder_on_crtc(crtc->dev, crtc, encoder)
+ for_each_encoder_on_crtc(&dev_priv->drm, &crtc->base, encoder)
encoder->base.crtc = NULL;
- intel_fbc_disable(intel_crtc);
- intel_update_watermarks(intel_crtc);
- intel_disable_shared_dpll(to_intel_crtc_state(crtc->state));
+ intel_fbc_disable(crtc);
+ intel_update_watermarks(crtc);
+ intel_disable_shared_dpll(crtc_state);
- domains = intel_crtc->enabled_power_domains;
+ domains = crtc->enabled_power_domains;
for_each_power_domain(domain, domains)
intel_display_power_put_unchecked(dev_priv, domain);
- intel_crtc->enabled_power_domains = 0;
+ crtc->enabled_power_domains = 0;
- dev_priv->active_pipes &= ~BIT(intel_crtc->pipe);
- dev_priv->min_cdclk[intel_crtc->pipe] = 0;
- dev_priv->min_voltage_level[intel_crtc->pipe] = 0;
+ dev_priv->active_pipes &= ~BIT(pipe);
+ dev_priv->min_cdclk[pipe] = 0;
+ dev_priv->min_voltage_level[pipe] = 0;
- bw_state->data_rate[intel_crtc->pipe] = 0;
- bw_state->num_active_planes[intel_crtc->pipe] = 0;
+ bw_state->data_rate[pipe] = 0;
+ bw_state->num_active_planes[pipe] = 0;
}
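
The power-domain teardown at the end of intel_crtc_disable_noatomic() walks a u64 bitmask, dropping one reference per set bit; that is all for_each_power_domain() expands to. A conceptually equivalent standalone loop (names hypothetical):

#include <stdint.h>
#include <stdio.h>

static void power_put(unsigned int domain)
{
	printf("dropping reference on power domain %u\n", domain);
}

/* 'domains' is a bitmask of references this crtc holds; each set bit
 * gets exactly one put. */
static void put_all_domains(uint64_t domains)
{
	for (unsigned int domain = 0; domains; domain++, domains >>= 1) {
		if (domains & 1)
			power_put(domain);
	}
}

int main(void)
{
	/* e.g. the PIPE, TRANSCODER and PANEL_FITTER bits for one pipe */
	put_all_domains((1ULL << 0) | (1ULL << 3) | (1ULL << 7));
	return 0;
}
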
/*
@@ -7257,8 +7478,8 @@ static void intel_connector_verify_state(struct intel_crtc_state *crtc_state,
if (!crtc_state)
return;
- I915_STATE_WARN(!crtc_state->base.active,
- "connector is active, but attached crtc isn't\n");
+ I915_STATE_WARN(!crtc_state->hw.active,
+ "connector is active, but attached crtc isn't\n");
if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
return;
@@ -7269,8 +7490,8 @@ static void intel_connector_verify_state(struct intel_crtc_state *crtc_state,
I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
"attached encoder crtc differs from connector crtc\n");
} else {
- I915_STATE_WARN(crtc_state && crtc_state->base.active,
- "attached crtc is active, but connector isn't\n");
+ I915_STATE_WARN(crtc_state && crtc_state->hw.active,
+ "attached crtc is active, but connector isn't\n");
I915_STATE_WARN(!crtc_state && conn_state->best_encoder,
"best encoder set without crtc!\n");
}
@@ -7278,7 +7499,7 @@ static void intel_connector_verify_state(struct intel_crtc_state *crtc_state,
static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
{
- if (crtc_state->base.enable && crtc_state->has_pch_encoder)
+ if (crtc_state->hw.enable && crtc_state->has_pch_encoder)
return crtc_state->fdi_lanes;
return 0;
@@ -7288,7 +7509,7 @@ static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_atomic_state *state = pipe_config->base.state;
+ struct drm_atomic_state *state = pipe_config->uapi.state;
struct intel_crtc *other_crtc;
struct intel_crtc_state *other_crtc_state;
@@ -7361,7 +7582,7 @@ static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = intel_crtc->base.dev;
- const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+ const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
int lane, link_bw, fdi_dotclock, ret;
bool needs_recompute = false;
@@ -7407,7 +7628,7 @@ retry:
bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
/* IPS only exists on ULT machines and is tied to pipe A. */
@@ -7437,9 +7658,9 @@ bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
static bool hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv =
- to_i915(crtc_state->base.crtc->dev);
+ to_i915(crtc_state->uapi.crtc->dev);
struct intel_atomic_state *intel_state =
- to_intel_atomic_state(crtc_state->base.state);
+ to_intel_atomic_state(crtc_state->uapi.state);
if (!hsw_crtc_state_ips_capable(crtc_state))
return false;
@@ -7478,7 +7699,7 @@ static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
{
u32 pixel_rate;
- pixel_rate = pipe_config->base.adjusted_mode.crtc_clock;
+ pixel_rate = pipe_config->hw.adjusted_mode.crtc_clock;
/*
* We only use IF-ID interlacing. If we ever use
@@ -7511,12 +7732,12 @@ static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
if (HAS_GMCH(dev_priv))
/* FIXME calculate proper pipe pixel rate for GMCH pfit */
crtc_state->pixel_rate =
- crtc_state->base.adjusted_mode.crtc_clock;
+ crtc_state->hw.adjusted_mode.crtc_clock;
else
crtc_state->pixel_rate =
ilk_pipe_pixel_rate(crtc_state);
@@ -7526,7 +7747,7 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+ const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
int clock_limit = dev_priv->max_dotclk_freq;
if (INTEL_GEN(dev_priv) < 4) {
@@ -7552,7 +7773,7 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
if ((pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) &&
- pipe_config->base.ctm) {
+ pipe_config->hw.ctm) {
/*
* There is only one pipe CSC unit per pipe, and we need that
* for output conversion from RGB->YCBCR. So if CTM is already
@@ -7746,7 +7967,7 @@ static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
const struct intel_link_m_n *m_n)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
@@ -7773,7 +7994,7 @@ static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_sta
const struct intel_link_m_n *m_n,
const struct intel_link_m_n *m2_n2)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
enum transcoder transcoder = crtc_state->cpu_transcoder;
@@ -8082,11 +8303,11 @@ int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
struct intel_crtc_state *pipe_config;
- pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
+ pipe_config = intel_crtc_state_alloc(crtc);
if (!pipe_config)
return -ENOMEM;
- pipe_config->base.crtc = &crtc->base;
+ pipe_config->cpu_transcoder = (enum transcoder)pipe;
pipe_config->pixel_multiplier = 1;
pipe_config->dpll = *dpll;
@@ -8246,11 +8467,11 @@ static void i8xx_compute_dpll(struct intel_crtc *crtc,
static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
+ const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
u32 crtc_vtotal, crtc_vblank_end;
int vsyncshift = 0;
@@ -8308,7 +8529,7 @@ static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state)
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
@@ -8322,7 +8543,7 @@ static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
if (IS_GEN(dev_priv, 2))
@@ -8344,39 +8565,39 @@ static void intel_get_pipe_timings(struct intel_crtc *crtc,
u32 tmp;
tmp = I915_READ(HTOTAL(cpu_transcoder));
- pipe_config->base.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
- pipe_config->base.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
+ pipe_config->hw.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
+ pipe_config->hw.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
if (!transcoder_is_dsi(cpu_transcoder)) {
tmp = I915_READ(HBLANK(cpu_transcoder));
- pipe_config->base.adjusted_mode.crtc_hblank_start =
+ pipe_config->hw.adjusted_mode.crtc_hblank_start =
(tmp & 0xffff) + 1;
- pipe_config->base.adjusted_mode.crtc_hblank_end =
+ pipe_config->hw.adjusted_mode.crtc_hblank_end =
((tmp >> 16) & 0xffff) + 1;
}
tmp = I915_READ(HSYNC(cpu_transcoder));
- pipe_config->base.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
- pipe_config->base.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
+ pipe_config->hw.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
+ pipe_config->hw.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
tmp = I915_READ(VTOTAL(cpu_transcoder));
- pipe_config->base.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
- pipe_config->base.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
+ pipe_config->hw.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
+ pipe_config->hw.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
if (!transcoder_is_dsi(cpu_transcoder)) {
tmp = I915_READ(VBLANK(cpu_transcoder));
- pipe_config->base.adjusted_mode.crtc_vblank_start =
+ pipe_config->hw.adjusted_mode.crtc_vblank_start =
(tmp & 0xffff) + 1;
- pipe_config->base.adjusted_mode.crtc_vblank_end =
+ pipe_config->hw.adjusted_mode.crtc_vblank_end =
((tmp >> 16) & 0xffff) + 1;
}
tmp = I915_READ(VSYNC(cpu_transcoder));
- pipe_config->base.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
- pipe_config->base.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
+ pipe_config->hw.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
+ pipe_config->hw.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
if (intel_pipe_is_interlaced(pipe_config)) {
- pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
- pipe_config->base.adjusted_mode.crtc_vtotal += 1;
- pipe_config->base.adjusted_mode.crtc_vblank_end += 1;
+ pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
+ pipe_config->hw.adjusted_mode.crtc_vtotal += 1;
+ pipe_config->hw.adjusted_mode.crtc_vblank_end += 1;
}
}
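
The repeated "(tmp & 0xffff) + 1" decodes above reflect how the timing registers are packed: two 16-bit fields per 32-bit register, each stored as its value minus one (e.g. HTOTAL holds hdisplay-1 in bits 15:0 and htotal-1 in bits 31:16). A round-trip sketch of that encoding:

#include <assert.h>
#include <stdint.h>

static uint32_t pack_timings(uint32_t lo, uint32_t hi)
{
	return ((hi - 1) << 16) | ((lo - 1) & 0xffff);
}

static void unpack_timings(uint32_t tmp, uint32_t *lo, uint32_t *hi)
{
	*lo = (tmp & 0xffff) + 1;	/* the decode used above */
	*hi = ((tmp >> 16) & 0xffff) + 1;
}

int main(void)
{
	uint32_t hdisplay, htotal;

	unpack_timings(pack_timings(1920, 2200), &hdisplay, &htotal);
	assert(hdisplay == 1920 && htotal == 2200);
	return 0;
}
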
@@ -8391,27 +8612,27 @@ static void intel_get_pipe_src_size(struct intel_crtc *crtc,
pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
- pipe_config->base.mode.vdisplay = pipe_config->pipe_src_h;
- pipe_config->base.mode.hdisplay = pipe_config->pipe_src_w;
+ pipe_config->hw.mode.vdisplay = pipe_config->pipe_src_h;
+ pipe_config->hw.mode.hdisplay = pipe_config->pipe_src_w;
}
void intel_mode_from_pipe_config(struct drm_display_mode *mode,
struct intel_crtc_state *pipe_config)
{
- mode->hdisplay = pipe_config->base.adjusted_mode.crtc_hdisplay;
- mode->htotal = pipe_config->base.adjusted_mode.crtc_htotal;
- mode->hsync_start = pipe_config->base.adjusted_mode.crtc_hsync_start;
- mode->hsync_end = pipe_config->base.adjusted_mode.crtc_hsync_end;
+ mode->hdisplay = pipe_config->hw.adjusted_mode.crtc_hdisplay;
+ mode->htotal = pipe_config->hw.adjusted_mode.crtc_htotal;
+ mode->hsync_start = pipe_config->hw.adjusted_mode.crtc_hsync_start;
+ mode->hsync_end = pipe_config->hw.adjusted_mode.crtc_hsync_end;
- mode->vdisplay = pipe_config->base.adjusted_mode.crtc_vdisplay;
- mode->vtotal = pipe_config->base.adjusted_mode.crtc_vtotal;
- mode->vsync_start = pipe_config->base.adjusted_mode.crtc_vsync_start;
- mode->vsync_end = pipe_config->base.adjusted_mode.crtc_vsync_end;
+ mode->vdisplay = pipe_config->hw.adjusted_mode.crtc_vdisplay;
+ mode->vtotal = pipe_config->hw.adjusted_mode.crtc_vtotal;
+ mode->vsync_start = pipe_config->hw.adjusted_mode.crtc_vsync_start;
+ mode->vsync_end = pipe_config->hw.adjusted_mode.crtc_vsync_end;
- mode->flags = pipe_config->base.adjusted_mode.flags;
+ mode->flags = pipe_config->hw.adjusted_mode.flags;
mode->type = DRM_MODE_TYPE_DRIVER;
- mode->clock = pipe_config->base.adjusted_mode.crtc_clock;
+ mode->clock = pipe_config->hw.adjusted_mode.crtc_clock;
mode->hsync = drm_mode_hsync(mode);
mode->vrefresh = drm_mode_vrefresh(mode);
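
drm_mode_vrefresh() derives the refresh rate from the timings filled in above, roughly the pixel clock divided by the total pixel count per frame; the real helper also corrects for interlace, doublescan and vscan, which this sketch ignores:

#include <stdio.h>

/* Approximation of drm_mode_vrefresh(): clock is in kHz, so scale to
 * Hz before dividing by pixels per frame, with simple rounding. */
static int mode_vrefresh(int clock_khz, int htotal, int vtotal)
{
	if (htotal <= 0 || vtotal <= 0)
		return 0;
	return (clock_khz * 1000 + (htotal * vtotal) / 2) /
	       (htotal * vtotal);
}

int main(void)
{
	/* 1920x1080@60: 148.5 MHz dotclock, htotal 2200, vtotal 1125 */
	printf("%d Hz\n", mode_vrefresh(148500, 2200, 1125));
	return 0;
}
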
@@ -8420,7 +8641,7 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode,
static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 pipeconf;
@@ -8457,7 +8678,7 @@ static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
}
}
- if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
+ if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
if (INTEL_GEN(dev_priv) < 4 ||
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
@@ -8473,6 +8694,8 @@ static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
+ pipeconf |= PIPECONF_FRAME_START_DELAY(0);
+
I915_WRITE(PIPECONF(crtc->pipe), pipeconf);
POSTING_READ(PIPECONF(crtc->pipe));
}
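
The POSTING_READ() after the PIPECONF write flushes the write-posted MMIO bus: reading any register from the same device forces buffered writes to complete, and the value read is thrown away. In miniature:

#include <stdint.h>

static volatile uint32_t fake_mmio[1];

static void mmio_write(volatile uint32_t *reg, uint32_t val)
{
	*reg = val;	/* may be buffered on a write-posted bus */
}

/* The read exists purely for its ordering side effect; nothing is
 * done with the result, exactly like POSTING_READ() above. */
static void posting_read(volatile uint32_t *reg)
{
	(void)*reg;
}

int main(void)
{
	mmio_write(&fake_mmio[0], 0xdeadbeef);
	posting_read(&fake_mmio[0]);
	return 0;
}
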
@@ -8866,7 +9089,7 @@ bdw_get_pipemisc_output_format(struct intel_crtc *crtc)
static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct intel_plane *plane = to_intel_plane(crtc->base.primary);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
@@ -8990,7 +9213,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
* but in case the pipe is enabled w/o any ports we need a sane
* default.
*/
- pipe_config->base.adjusted_mode.crtc_clock =
+ pipe_config->hw.adjusted_mode.crtc_clock =
pipe_config->port_clock / pipe_config->pixel_multiplier;
ret = true;
@@ -9506,7 +9729,7 @@ void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
u32 val;
@@ -9534,7 +9757,7 @@ static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state)
if (crtc_state->dither)
val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
- if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
+ if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
val |= PIPECONF_INTERLACED_ILK;
else
val |= PIPECONF_PROGRESSIVE;
@@ -9554,13 +9777,15 @@ static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state)
val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
+ val |= PIPECONF_FRAME_START_DELAY(0);
+
I915_WRITE(PIPECONF(pipe), val);
POSTING_READ(PIPECONF(pipe));
}
static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 val = 0;
@@ -9568,7 +9793,7 @@ static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state)
if (IS_HASWELL(dev_priv) && crtc_state->dither)
val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
- if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
+ if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
val |= PIPECONF_INTERLACED_ILK;
else
val |= PIPECONF_PROGRESSIVE;
@@ -9583,7 +9808,7 @@ static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state)
static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 val = 0;
@@ -9769,7 +9994,7 @@ static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_atomic_state *state =
- to_intel_atomic_state(crtc_state->base.state);
+ to_intel_atomic_state(crtc_state->uapi.state);
const struct intel_limit *limit;
int refclk = 120000;
@@ -9982,7 +10207,9 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
case PLANE_CTL_TILED_Y:
plane_config->tiling = I915_TILING_Y;
if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
- fb->modifier = I915_FORMAT_MOD_Y_TILED_CCS;
+ fb->modifier = INTEL_GEN(dev_priv) >= 12 ?
+ I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS :
+ I915_FORMAT_MOD_Y_TILED_CCS;
else
fb->modifier = I915_FORMAT_MOD_Y_TILED;
break;
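
The readout above now has to report the gen12 render-compression CCS modifier instead of the legacy Y-tiled CCS one on newer hardware. A minimal standalone sketch of that selection logic, with the modifier tokens stubbed for illustration (the driver itself uses INTEL_GEN() and the I915_FORMAT_MOD_* values from drm_fourcc.h):

#include <stdbool.h>

/* Stand-ins for the I915_FORMAT_MOD_* tokens, for illustration only. */
enum fb_modifier {
	MOD_Y_TILED,
	MOD_Y_TILED_CCS,
	MOD_Y_TILED_GEN12_RC_CCS,
};

/* Mirror of the readout branch above: render decompression enabled on
 * gen12+ means the new RC CCS layout, on older gens the legacy CCS. */
static enum fb_modifier pick_y_tiled_modifier(int gen, bool rc_enabled)
{
	if (!rc_enabled)
		return MOD_Y_TILED;

	return gen >= 12 ? MOD_Y_TILED_GEN12_RC_CCS : MOD_Y_TILED_CCS;
}
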
@@ -10195,7 +10422,7 @@ static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_atomic_state *state =
- to_intel_atomic_state(crtc_state->base.state);
+ to_intel_atomic_state(crtc_state->uapi.state);
if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) ||
INTEL_GEN(dev_priv) >= 11) {
@@ -10408,6 +10635,9 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
case TRANS_DDI_EDP_INPUT_C_ONOFF:
trans_pipe = PIPE_C;
break;
+ case TRANS_DDI_EDP_INPUT_D_ONOFF:
+ trans_pipe = PIPE_D;
+ break;
}
if (trans_pipe == crtc->pipe) {
@@ -10496,16 +10726,21 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
struct intel_shared_dpll *pll;
enum port port;
u32 tmp;
- tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));
-
- if (INTEL_GEN(dev_priv) >= 12)
- port = TGL_TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
- else
- port = TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
+ if (transcoder_is_dsi(cpu_transcoder)) {
+ port = (cpu_transcoder == TRANSCODER_DSI_A) ?
+ PORT_A : PORT_B;
+ } else {
+ tmp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
+ if (INTEL_GEN(dev_priv) >= 12)
+ port = TGL_TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
+ else
+ port = TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
+ }
if (INTEL_GEN(dev_priv) >= 11)
icelake_get_ddi_pll(dev_priv, port, pipe_config);
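
DSI transcoders have no TRANS_DDI_FUNC_CTL to read, so the hunk above derives the port from the transcoder itself and only goes through the register for non-DSI transcoders. A reduced sketch of that decision; the enums and the register decode here are hypothetical stand-ins, not the real TRANS_DDI_FUNC_CTL_VAL_TO_PORT() encoding:

enum transcoder_sketch { TRANS_A, TRANS_DSI_A, TRANS_DSI_C };
enum port_sketch { PORT_A_S, PORT_B_S };

static int is_dsi(enum transcoder_sketch t)
{
	return t == TRANS_DSI_A || t == TRANS_DSI_C;
}

/* Hypothetical decode standing in for TRANS_DDI_FUNC_CTL_VAL_TO_PORT(). */
static enum port_sketch decode_ddi_port(unsigned int func_ctl)
{
	return (func_ctl >> 28) & 1 ? PORT_B_S : PORT_A_S;
}

static enum port_sketch get_port(enum transcoder_sketch t,
				 unsigned int func_ctl)
{
	if (is_dsi(t))
		return t == TRANS_DSI_A ? PORT_A_S : PORT_B_S;

	return decode_ddi_port(func_ctl);
}
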
@@ -10561,7 +10796,7 @@ static enum transcoder transcoder_master_readout(struct drm_i915_private *dev_pr
static void icelake_get_trans_port_sync_config(struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
u32 transcoders;
enum transcoder cpu_transcoder;
@@ -10603,8 +10838,6 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
u64 power_domain_mask;
bool active;
- intel_crtc_init_scalers(crtc, pipe_config);
-
pipe_config->master_transcoder = INVALID_TRANSCODER;
power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
@@ -10730,8 +10963,8 @@ out:
static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv =
- to_i915(plane_state->base.plane->dev);
- const struct drm_framebuffer *fb = plane_state->base.fb;
+ to_i915(plane_state->uapi.plane->dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
const struct drm_i915_gem_object *obj = intel_fb_obj(fb);
u32 base;
@@ -10745,8 +10978,8 @@ static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
static u32 intel_cursor_position(const struct intel_plane_state *plane_state)
{
- int x = plane_state->base.dst.x1;
- int y = plane_state->base.dst.y1;
+ int x = plane_state->uapi.dst.x1;
+ int y = plane_state->uapi.dst.y1;
u32 pos = 0;
if (x < 0) {
@@ -10767,9 +11000,9 @@ static u32 intel_cursor_position(const struct intel_plane_state *plane_state)
static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state)
{
const struct drm_mode_config *config =
- &plane_state->base.plane->dev->mode_config;
- int width = drm_rect_width(&plane_state->base.dst);
- int height = drm_rect_height(&plane_state->base.dst);
+ &plane_state->uapi.plane->dev->mode_config;
+ int width = drm_rect_width(&plane_state->uapi.dst);
+ int height = drm_rect_height(&plane_state->uapi.dst);
return width > 0 && width <= config->cursor_width &&
height > 0 && height <= config->cursor_height;
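
The generic cursor size check is just a bounds test against the per-device maxima kept in mode_config. A self-contained sketch of the same shape (struct name is illustrative):

#include <stdbool.h>

struct cursor_limits { int max_w, max_h; };

/* Same test as intel_cursor_size_ok() above: both dimensions must be
 * positive and within the device limits. */
static bool cursor_size_ok(const struct cursor_limits *lim, int w, int h)
{
	return w > 0 && w <= lim->max_w &&
	       h > 0 && h <= lim->max_h;
}
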
@@ -10778,8 +11011,8 @@ static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state)
static int intel_cursor_check_surface(struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv =
- to_i915(plane_state->base.plane->dev);
- unsigned int rotation = plane_state->base.rotation;
+ to_i915(plane_state->uapi.plane->dev);
+ unsigned int rotation = plane_state->hw.rotation;
int src_x, src_y;
u32 offset;
int ret;
@@ -10788,11 +11021,11 @@ static int intel_cursor_check_surface(struct intel_plane_state *plane_state)
if (ret)
return ret;
- if (!plane_state->base.visible)
+ if (!plane_state->uapi.visible)
return 0;
- src_x = plane_state->base.src.x1 >> 16;
- src_y = plane_state->base.src.y1 >> 16;
+ src_x = plane_state->uapi.src.x1 >> 16;
+ src_y = plane_state->uapi.src.y1 >> 16;
intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
@@ -10807,14 +11040,14 @@ static int intel_cursor_check_surface(struct intel_plane_state *plane_state)
* Put the final coordinates back so that the src
* coordinate checks will see the right values.
*/
- drm_rect_translate_to(&plane_state->base.src,
+ drm_rect_translate_to(&plane_state->uapi.src,
src_x << 16, src_y << 16);
/* ILK+ do this automagically in hardware */
if (HAS_GMCH(dev_priv) && rotation & DRM_MODE_ROTATE_180) {
- const struct drm_framebuffer *fb = plane_state->base.fb;
- int src_w = drm_rect_width(&plane_state->base.src) >> 16;
- int src_h = drm_rect_height(&plane_state->base.src) >> 16;
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ int src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
+ int src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
offset += (src_h * src_w - 1) * fb->format->cpp[0];
}
@@ -10829,7 +11062,7 @@ static int intel_cursor_check_surface(struct intel_plane_state *plane_state)
static int intel_check_cursor(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state)
{
- const struct drm_framebuffer *fb = plane_state->base.fb;
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
int ret;
if (fb && fb->modifier != DRM_FORMAT_MOD_LINEAR) {
@@ -10837,8 +11070,8 @@ static int intel_check_cursor(struct intel_crtc_state *crtc_state,
return -EINVAL;
}
- ret = drm_atomic_helper_check_plane_state(&plane_state->base,
- &crtc_state->base,
+ ret = drm_atomic_helper_check_plane_state(&plane_state->uapi,
+ &crtc_state->uapi,
DRM_PLANE_HELPER_NO_SCALING,
DRM_PLANE_HELPER_NO_SCALING,
true, true);
@@ -10846,14 +11079,14 @@ static int intel_check_cursor(struct intel_crtc_state *crtc_state,
return ret;
/* Use the unclipped src/dst rectangles, which we program to hw */
- plane_state->base.src = drm_plane_state_src(&plane_state->base);
- plane_state->base.dst = drm_plane_state_dest(&plane_state->base);
+ plane_state->uapi.src = drm_plane_state_src(&plane_state->uapi);
+ plane_state->uapi.dst = drm_plane_state_dest(&plane_state->uapi);
ret = intel_cursor_check_surface(plane_state);
if (ret)
return ret;
- if (!plane_state->base.visible)
+ if (!plane_state->uapi.visible)
return 0;
ret = intel_plane_check_src_coordinates(plane_state);
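
The cursor hardware is programmed with the full requested rectangle, which is why the code above puts the unclipped src/dst back after the generic helper has clipped for visibility. A sketch of rebuilding the unclipped destination from the requested position/size, in the spirit of drm_plane_state_dest() (struct names are illustrative):

struct rect_s { int x1, y1, x2, y2; };
struct plane_req_s { int crtc_x, crtc_y, crtc_w, crtc_h; };

/* Rebuild the destination rect straight from the userspace request,
 * ignoring any clipping that visibility computation applied. */
static struct rect_s unclipped_dst(const struct plane_req_s *req)
{
	struct rect_s r = {
		.x1 = req->crtc_x,
		.y1 = req->crtc_y,
		.x2 = req->crtc_x + req->crtc_w,
		.y2 = req->crtc_y + req->crtc_h,
	};

	return r;
}
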
@@ -10891,7 +11124,7 @@ static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state,
static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state)
{
- int width = drm_rect_width(&plane_state->base.dst);
+ int width = drm_rect_width(&plane_state->uapi.dst);
/*
* 845g/865g are only limited by the width of their cursors,
@@ -10903,7 +11136,7 @@ static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state)
static int i845_check_cursor(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state)
{
- const struct drm_framebuffer *fb = plane_state->base.fb;
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
int ret;
ret = intel_check_cursor(crtc_state, plane_state);
@@ -10917,12 +11150,12 @@ static int i845_check_cursor(struct intel_crtc_state *crtc_state,
/* Check for which cursor types we support */
if (!i845_cursor_size_ok(plane_state)) {
DRM_DEBUG("Cursor dimension %dx%d not supported\n",
- drm_rect_width(&plane_state->base.dst),
- drm_rect_height(&plane_state->base.dst));
+ drm_rect_width(&plane_state->uapi.dst),
+ drm_rect_height(&plane_state->uapi.dst));
return -EINVAL;
}
- WARN_ON(plane_state->base.visible &&
+ WARN_ON(plane_state->uapi.visible &&
plane_state->color_plane[0].stride != fb->pitches[0]);
switch (fb->pitches[0]) {
@@ -10950,9 +11183,9 @@ static void i845_update_cursor(struct intel_plane *plane,
u32 cntl = 0, base = 0, pos = 0, size = 0;
unsigned long irqflags;
- if (plane_state && plane_state->base.visible) {
- unsigned int width = drm_rect_width(&plane_state->base.dst);
- unsigned int height = drm_rect_height(&plane_state->base.dst);
+ if (plane_state && plane_state->uapi.visible) {
+ unsigned int width = drm_rect_width(&plane_state->uapi.dst);
+ unsigned int height = drm_rect_height(&plane_state->uapi.dst);
cntl = plane_state->ctl |
i845_cursor_ctl_crtc(crtc_state);
@@ -11025,7 +11258,7 @@ i9xx_cursor_max_stride(struct intel_plane *plane,
static u32 i9xx_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 cntl = 0;
@@ -11048,13 +11281,13 @@ static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv =
- to_i915(plane_state->base.plane->dev);
+ to_i915(plane_state->uapi.plane->dev);
u32 cntl = 0;
if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
cntl |= MCURSOR_TRICKLE_FEED_DISABLE;
- switch (drm_rect_width(&plane_state->base.dst)) {
+ switch (drm_rect_width(&plane_state->uapi.dst)) {
case 64:
cntl |= MCURSOR_MODE_64_ARGB_AX;
break;
@@ -11065,11 +11298,11 @@ static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
cntl |= MCURSOR_MODE_256_ARGB_AX;
break;
default:
- MISSING_CASE(drm_rect_width(&plane_state->base.dst));
+ MISSING_CASE(drm_rect_width(&plane_state->uapi.dst));
return 0;
}
- if (plane_state->base.rotation & DRM_MODE_ROTATE_180)
+ if (plane_state->hw.rotation & DRM_MODE_ROTATE_180)
cntl |= MCURSOR_ROTATE_180;
return cntl;
@@ -11078,9 +11311,9 @@ static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv =
- to_i915(plane_state->base.plane->dev);
- int width = drm_rect_width(&plane_state->base.dst);
- int height = drm_rect_height(&plane_state->base.dst);
+ to_i915(plane_state->uapi.plane->dev);
+ int width = drm_rect_width(&plane_state->uapi.dst);
+ int height = drm_rect_height(&plane_state->uapi.dst);
if (!intel_cursor_size_ok(plane_state))
return false;
@@ -11102,7 +11335,7 @@ static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state)
* cursors.
*/
if (HAS_CUR_FBC(dev_priv) &&
- plane_state->base.rotation & DRM_MODE_ROTATE_0) {
+ plane_state->hw.rotation & DRM_MODE_ROTATE_0) {
if (height < 8 || height > width)
return false;
} else {
@@ -11116,9 +11349,9 @@ static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state)
static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state)
{
- struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- const struct drm_framebuffer *fb = plane_state->base.fb;
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
enum pipe pipe = plane->pipe;
int ret;
@@ -11133,19 +11366,19 @@ static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
/* Check for which cursor types we support */
if (!i9xx_cursor_size_ok(plane_state)) {
DRM_DEBUG("Cursor dimension %dx%d not supported\n",
- drm_rect_width(&plane_state->base.dst),
- drm_rect_height(&plane_state->base.dst));
+ drm_rect_width(&plane_state->uapi.dst),
+ drm_rect_height(&plane_state->uapi.dst));
return -EINVAL;
}
- WARN_ON(plane_state->base.visible &&
+ WARN_ON(plane_state->uapi.visible &&
plane_state->color_plane[0].stride != fb->pitches[0]);
if (fb->pitches[0] !=
- drm_rect_width(&plane_state->base.dst) * fb->format->cpp[0]) {
+ drm_rect_width(&plane_state->uapi.dst) * fb->format->cpp[0]) {
DRM_DEBUG_KMS("Invalid cursor stride (%u) (cursor width %d)\n",
fb->pitches[0],
- drm_rect_width(&plane_state->base.dst));
+ drm_rect_width(&plane_state->uapi.dst));
return -EINVAL;
}
@@ -11160,7 +11393,7 @@ static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
* Refuse to put the cursor into that compromised position.
*/
if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C &&
- plane_state->base.visible && plane_state->base.dst.x1 < 0) {
+ plane_state->uapi.visible && plane_state->uapi.dst.x1 < 0) {
DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
return -EINVAL;
}
@@ -11179,9 +11412,9 @@ static void i9xx_update_cursor(struct intel_plane *plane,
u32 cntl = 0, base = 0, pos = 0, fbc_ctl = 0;
unsigned long irqflags;
- if (plane_state && plane_state->base.visible) {
- unsigned width = drm_rect_width(&plane_state->base.dst);
- unsigned height = drm_rect_height(&plane_state->base.dst);
+ if (plane_state && plane_state->uapi.visible) {
+ unsigned width = drm_rect_width(&plane_state->uapi.dst);
+ unsigned height = drm_rect_height(&plane_state->uapi.dst);
cntl = plane_state->ctl |
i9xx_cursor_ctl_crtc(crtc_state);
@@ -11436,9 +11669,9 @@ found:
goto fail;
}
- crtc_state->base.active = crtc_state->base.enable = true;
+ crtc_state->uapi.active = true;
- ret = drm_atomic_set_mode_for_crtc(&crtc_state->base,
+ ret = drm_atomic_set_mode_for_crtc(&crtc_state->uapi,
&load_detect_mode);
if (ret)
goto fail;
@@ -11647,11 +11880,37 @@ static void ironlake_pch_clock_get(struct intel_crtc *crtc,
* we may need some idea for the dotclock anyway.
* Calculate one based on the FDI configuration.
*/
- pipe_config->base.adjusted_mode.crtc_clock =
+ pipe_config->hw.adjusted_mode.crtc_clock =
intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
&pipe_config->fdi_m_n);
}
+static void intel_crtc_state_reset(struct intel_crtc_state *crtc_state,
+ struct intel_crtc *crtc)
+{
+ memset(crtc_state, 0, sizeof(*crtc_state));
+
+ __drm_atomic_helper_crtc_state_reset(&crtc_state->uapi, &crtc->base);
+
+ crtc_state->cpu_transcoder = INVALID_TRANSCODER;
+ crtc_state->master_transcoder = INVALID_TRANSCODER;
+ crtc_state->hsw_workaround_pipe = INVALID_PIPE;
+ crtc_state->output_format = INTEL_OUTPUT_FORMAT_INVALID;
+ crtc_state->scaler_state.scaler_id = -1;
+}
+
+static struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc)
+{
+ struct intel_crtc_state *crtc_state;
+
+ crtc_state = kmalloc(sizeof(*crtc_state), GFP_KERNEL);
+
+ if (crtc_state)
+ intel_crtc_state_reset(crtc_state, crtc);
+
+ return crtc_state;
+}
+
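
Note the design choice in intel_crtc_state_alloc(): plain kmalloc() paired with an explicit reset, rather than kzalloc(), because an all-zero state is not a valid default here; several fields need "invalid" sentinels (scaler id 0 would name a real scaler, transcoder 0 a real transcoder). A userspace-compilable sketch of the same pattern:

#include <stdlib.h>
#include <string.h>

struct state_sketch {
	int cpu_transcoder;	/* -1 == invalid */
	int master_transcoder;	/* -1 == invalid */
	int scaler_id;		/* -1 == no scaler assigned */
};

static void state_reset(struct state_sketch *s)
{
	memset(s, 0, sizeof(*s));

	/* Seed the fields whose "unset" value is not zero. */
	s->cpu_transcoder = -1;
	s->master_transcoder = -1;
	s->scaler_id = -1;
}

static struct state_sketch *state_alloc(void)
{
	struct state_sketch *s = malloc(sizeof(*s));

	if (s)
		state_reset(s);

	return s;
}
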
/* Returns the currently programmed mode of the given encoder. */
struct drm_display_mode *
intel_encoder_current_mode(struct intel_encoder *encoder)
@@ -11671,14 +11930,12 @@ intel_encoder_current_mode(struct intel_encoder *encoder)
if (!mode)
return NULL;
- crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
+ crtc_state = intel_crtc_state_alloc(crtc);
if (!crtc_state) {
kfree(mode);
return NULL;
}
- crtc_state->base.crtc = &crtc->base;
-
if (!dev_priv->display.get_pipe_config(crtc, crtc_state)) {
kfree(crtc_state);
kfree(mode);
@@ -11716,18 +11973,18 @@ static bool intel_wm_need_update(const struct intel_plane_state *cur,
struct intel_plane_state *new)
{
/* Update watermarks on tiling or size changes. */
- if (new->base.visible != cur->base.visible)
+ if (new->uapi.visible != cur->uapi.visible)
return true;
- if (!cur->base.fb || !new->base.fb)
+ if (!cur->hw.fb || !new->hw.fb)
return false;
- if (cur->base.fb->modifier != new->base.fb->modifier ||
- cur->base.rotation != new->base.rotation ||
- drm_rect_width(&new->base.src) != drm_rect_width(&cur->base.src) ||
- drm_rect_height(&new->base.src) != drm_rect_height(&cur->base.src) ||
- drm_rect_width(&new->base.dst) != drm_rect_width(&cur->base.dst) ||
- drm_rect_height(&new->base.dst) != drm_rect_height(&cur->base.dst))
+ if (cur->hw.fb->modifier != new->hw.fb->modifier ||
+ cur->hw.rotation != new->hw.rotation ||
+ drm_rect_width(&new->uapi.src) != drm_rect_width(&cur->uapi.src) ||
+ drm_rect_height(&new->uapi.src) != drm_rect_height(&cur->uapi.src) ||
+ drm_rect_width(&new->uapi.dst) != drm_rect_width(&cur->uapi.dst) ||
+ drm_rect_height(&new->uapi.dst) != drm_rect_height(&cur->uapi.dst))
return true;
return false;
@@ -11735,10 +11992,10 @@ static bool intel_wm_need_update(const struct intel_plane_state *cur,
static bool needs_scaling(const struct intel_plane_state *state)
{
- int src_w = drm_rect_width(&state->base.src) >> 16;
- int src_h = drm_rect_height(&state->base.src) >> 16;
- int dst_w = drm_rect_width(&state->base.dst);
- int dst_h = drm_rect_height(&state->base.dst);
+ int src_w = drm_rect_width(&state->uapi.src) >> 16;
+ int src_h = drm_rect_height(&state->uapi.src) >> 16;
+ int dst_w = drm_rect_width(&state->uapi.dst);
+ int dst_h = drm_rect_height(&state->uapi.dst);
return (src_w != dst_w || src_h != dst_h);
}
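
needs_scaling() relies on the DRM convention that plane source rectangles are 16.16 fixed point while destination rectangles are whole pixels, hence the >> 16 before comparing. A self-contained sketch of the test:

#include <stdbool.h>

struct rect16 { int x1, y1, x2, y2; };	/* src: 16.16 fixed point */
struct recti  { int x1, y1, x2, y2; };	/* dst: integer pixels */

static bool needs_scaling_sketch(const struct rect16 *src,
				 const struct recti *dst)
{
	int src_w = (src->x2 - src->x1) >> 16;	/* drop the fraction */
	int src_h = (src->y2 - src->y1) >> 16;
	int dst_w = dst->x2 - dst->x1;
	int dst_h = dst->y2 - dst->y1;

	return src_w != dst_w || src_h != dst_h;
}
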
@@ -11748,12 +12005,12 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat
const struct intel_plane_state *old_plane_state,
struct intel_plane_state *plane_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
- struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
bool mode_changed = needs_modeset(crtc_state);
- bool was_crtc_enabled = old_crtc_state->base.active;
- bool is_crtc_enabled = crtc_state->base.active;
+ bool was_crtc_enabled = old_crtc_state->hw.active;
+ bool is_crtc_enabled = crtc_state->hw.active;
bool turn_off, turn_on, visible, was_visible;
int ret;
@@ -11763,8 +12020,8 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat
return ret;
}
- was_visible = old_plane_state->base.visible;
- visible = plane_state->base.visible;
+ was_visible = old_plane_state->uapi.visible;
+ visible = plane_state->uapi.visible;
if (!was_crtc_enabled && WARN_ON(was_visible))
was_visible = false;
@@ -11780,7 +12037,7 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat
* only combine the results from all planes in the current place?
*/
if (!is_crtc_enabled) {
- plane_state->base.visible = visible = false;
+ plane_state->uapi.visible = visible = false;
crtc_state->active_planes &= ~BIT(plane->id);
crtc_state->data_rate[plane->id] = 0;
crtc_state->min_cdclk[plane->id] = 0;
@@ -11921,9 +12178,9 @@ static int icl_add_linked_planes(struct intel_atomic_state *state)
static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->base.state);
+ struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
struct intel_plane *plane, *linked;
struct intel_plane_state *plane_state;
int i;
@@ -11940,7 +12197,7 @@ static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
continue;
plane_state->planar_linked_plane = NULL;
- if (plane_state->planar_slave && !plane_state->base.visible) {
+ if (plane_state->planar_slave && !plane_state->uapi.visible) {
crtc_state->active_planes &= ~BIT(plane->id);
crtc_state->update_planes |= BIT(plane->id);
}
@@ -11986,6 +12243,25 @@ static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
crtc_state->active_planes |= BIT(linked->id);
crtc_state->update_planes |= BIT(linked->id);
DRM_DEBUG_KMS("Using %s as Y plane for %s\n", linked->base.name, plane->base.name);
+
+ /* Copy parameters to slave plane */
+ linked_state->ctl = plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE;
+ linked_state->color_ctl = plane_state->color_ctl;
+ memcpy(linked_state->color_plane, plane_state->color_plane,
+ sizeof(linked_state->color_plane));
+
+ intel_plane_copy_uapi_to_hw_state(linked_state, plane_state);
+ linked_state->uapi.src = plane_state->uapi.src;
+ linked_state->uapi.dst = plane_state->uapi.dst;
+
+ if (icl_is_hdr_plane(dev_priv, plane->id)) {
+ if (linked->id == PLANE_SPRITE5)
+ plane_state->cus_ctl |= PLANE_CUS_PLANE_7;
+ else if (linked->id == PLANE_SPRITE4)
+ plane_state->cus_ctl |= PLANE_CUS_PLANE_6;
+ else
+ MISSING_CASE(linked->id);
+ }
}
return 0;
@@ -11993,9 +12269,9 @@ static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
struct intel_atomic_state *state =
- to_intel_atomic_state(new_crtc_state->base.state);
+ to_intel_atomic_state(new_crtc_state->uapi.state);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
@@ -12004,9 +12280,9 @@ static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state)
static int icl_add_sync_mode_crtcs(struct intel_crtc_state *crtc_state)
{
- struct drm_crtc *crtc = crtc_state->base.crtc;
- struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->base.state);
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct drm_crtc *crtc = crtc_state->uapi.crtc;
+ struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
struct drm_connector *master_connector, *connector;
struct drm_connector_state *connector_state;
struct drm_connector_list_iter conn_iter;
@@ -12030,8 +12306,8 @@ static int icl_add_sync_mode_crtcs(struct intel_crtc_state *crtc_state)
continue;
if (!connector->has_tile)
continue;
- if (crtc_state->base.mode.hdisplay != connector->tile_h_size ||
- crtc_state->base.mode.vdisplay != connector->tile_v_size)
+ if (crtc_state->hw.mode.hdisplay != connector->tile_h_size ||
+ crtc_state->hw.mode.vdisplay != connector->tile_v_size)
return 0;
if (connector->tile_h_loc == connector->num_h_tile - 1 &&
connector->tile_v_loc == connector->num_v_tile - 1)
@@ -12080,7 +12356,7 @@ static int icl_add_sync_mode_crtcs(struct intel_crtc_state *crtc_state)
BIT(crtc_state->cpu_transcoder);
DRM_DEBUG_KMS("Master Transcoder = %s added for Slave CRTC = %d, slave transcoder bitmask = %d\n",
transcoder_name(crtc_state->master_transcoder),
- crtc_state->base.crtc->base.id,
+ crtc_state->uapi.crtc->base.id,
master_pipe_config->sync_mode_slaves_mask);
}
@@ -12097,10 +12373,10 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state,
int ret;
if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv) &&
- mode_changed && !crtc_state->base.active)
+ mode_changed && !crtc_state->hw.active)
crtc_state->update_wm_post = true;
- if (mode_changed && crtc_state->base.enable &&
+ if (mode_changed && crtc_state->hw.enable &&
dev_priv->display.crtc_compute_clock &&
!WARN_ON(crtc_state->shared_dpll)) {
ret = dev_priv->display.crtc_compute_clock(crtc, crtc_state);
@@ -12113,10 +12389,10 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state,
* when C8 planes are getting enabled/disabled.
*/
if (c8_planes_changed(crtc_state))
- crtc_state->base.color_mgmt_changed = true;
+ crtc_state->uapi.color_mgmt_changed = true;
if (mode_changed || crtc_state->update_pipe ||
- crtc_state->base.color_mgmt_changed) {
+ crtc_state->uapi.color_mgmt_changed) {
ret = intel_color_check(crtc_state);
if (ret)
return ret;
@@ -12229,7 +12505,7 @@ compute_baseline_pipe_bpp(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct drm_atomic_state *state = pipe_config->base.state;
+ struct drm_atomic_state *state = pipe_config->uapi.state;
struct drm_connector *connector;
struct drm_connector_state *connector_state;
int bpp, i;
@@ -12354,14 +12630,14 @@ static const char *output_formats(enum intel_output_format format)
static void intel_dump_plane_state(const struct intel_plane_state *plane_state)
{
- struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
- const struct drm_framebuffer *fb = plane_state->base.fb;
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
struct drm_format_name_buf format_name;
if (!fb) {
DRM_DEBUG_KMS("[PLANE:%d:%s] fb: [NOFB], visible: %s\n",
plane->base.base.id, plane->base.name,
- yesno(plane_state->base.visible));
+ yesno(plane_state->uapi.visible));
return;
}
@@ -12369,20 +12645,20 @@ static void intel_dump_plane_state(const struct intel_plane_state *plane_state)
plane->base.base.id, plane->base.name,
fb->base.id, fb->width, fb->height,
drm_get_format_name(fb->format->format, &format_name),
- yesno(plane_state->base.visible));
+ yesno(plane_state->uapi.visible));
DRM_DEBUG_KMS("\trotation: 0x%x, scaler: %d\n",
- plane_state->base.rotation, plane_state->scaler_id);
- if (plane_state->base.visible)
+ plane_state->hw.rotation, plane_state->scaler_id);
+ if (plane_state->uapi.visible)
DRM_DEBUG_KMS("\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n",
- DRM_RECT_FP_ARG(&plane_state->base.src),
- DRM_RECT_ARG(&plane_state->base.dst));
+ DRM_RECT_FP_ARG(&plane_state->uapi.src),
+ DRM_RECT_ARG(&plane_state->uapi.dst));
}
static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
struct intel_atomic_state *state,
const char *context)
{
- struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct intel_plane_state *plane_state;
struct intel_plane *plane;
@@ -12391,14 +12667,14 @@ static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
DRM_DEBUG_KMS("[CRTC:%d:%s] enable: %s %s\n",
crtc->base.base.id, crtc->base.name,
- yesno(pipe_config->base.enable), context);
+ yesno(pipe_config->hw.enable), context);
- if (!pipe_config->base.enable)
+ if (!pipe_config->hw.enable)
goto dump_planes;
snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
DRM_DEBUG_KMS("active: %s, output_types: %s (0x%x), output format: %s\n",
- yesno(pipe_config->base.active),
+ yesno(pipe_config->hw.active),
buf, pipe_config->output_types,
output_formats(pipe_config->output_format));
@@ -12438,10 +12714,10 @@ static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
intel_dump_infoframe(dev_priv, &pipe_config->infoframes.hdmi);
DRM_DEBUG_KMS("requested mode:\n");
- drm_mode_debug_printmodeline(&pipe_config->base.mode);
+ drm_mode_debug_printmodeline(&pipe_config->hw.mode);
DRM_DEBUG_KMS("adjusted mode:\n");
- drm_mode_debug_printmodeline(&pipe_config->base.adjusted_mode);
- intel_dump_crtc_timings(&pipe_config->base.adjusted_mode);
+ drm_mode_debug_printmodeline(&pipe_config->hw.adjusted_mode);
+ intel_dump_crtc_timings(&pipe_config->hw.adjusted_mode);
DRM_DEBUG_KMS("port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
pipe_config->port_clock,
pipe_config->pipe_src_w, pipe_config->pipe_src_h,
@@ -12561,22 +12837,59 @@ static bool check_digital_port_conflicts(struct intel_atomic_state *state)
return ret;
}
+static void
+intel_crtc_copy_uapi_to_hw_state_nomodeset(struct intel_crtc_state *crtc_state)
+{
+ intel_crtc_copy_color_blobs(crtc_state);
+}
+
+static void
+intel_crtc_copy_uapi_to_hw_state(struct intel_crtc_state *crtc_state)
+{
+ crtc_state->hw.enable = crtc_state->uapi.enable;
+ crtc_state->hw.active = crtc_state->uapi.active;
+ crtc_state->hw.mode = crtc_state->uapi.mode;
+ crtc_state->hw.adjusted_mode = crtc_state->uapi.adjusted_mode;
+ intel_crtc_copy_uapi_to_hw_state_nomodeset(crtc_state);
+}
+
+static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state)
+{
+ crtc_state->uapi.enable = crtc_state->hw.enable;
+ crtc_state->uapi.active = crtc_state->hw.active;
+ WARN_ON(drm_atomic_set_mode_for_crtc(&crtc_state->uapi, &crtc_state->hw.mode) < 0);
+
+ crtc_state->uapi.adjusted_mode = crtc_state->hw.adjusted_mode;
+
+ /* copy color blobs to uapi */
+ drm_property_replace_blob(&crtc_state->uapi.degamma_lut,
+ crtc_state->hw.degamma_lut);
+ drm_property_replace_blob(&crtc_state->uapi.gamma_lut,
+ crtc_state->hw.gamma_lut);
+ drm_property_replace_blob(&crtc_state->uapi.ctm,
+ crtc_state->hw.ctm);
+}
+
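
These two helpers are the heart of the split: uapi holds what userspace last committed, hw what the pipe is (to be) programmed with. Commits copy uapi into hw; only readout and verification go the other way. A trimmed-down sketch of the two directions, with the state reduced to a couple of representative fields:

#include <stdbool.h>

struct mode_s { int hdisplay, vdisplay; };

struct crtc_state_s {
	struct { bool enable, active; struct mode_s mode; } uapi;
	struct { bool enable, active; struct mode_s mode; } hw;
};

/* Commit direction: make the hardware view follow the uapi request. */
static void copy_uapi_to_hw(struct crtc_state_s *s)
{
	s->hw.enable = s->uapi.enable;
	s->hw.active = s->uapi.active;
	s->hw.mode = s->uapi.mode;
}

/* Readout direction: after probing real hardware state, make the uapi
 * view consistent so the first userspace commit starts from the truth. */
static void copy_hw_to_uapi(struct crtc_state_s *s)
{
	s->uapi.enable = s->hw.enable;
	s->uapi.active = s->hw.active;
	s->uapi.mode = s->hw.mode;
}
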
static int
-clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
+intel_crtc_prepare_cleared_state(struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv =
- to_i915(crtc_state->base.crtc->dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_crtc_state *saved_state;
- saved_state = kzalloc(sizeof(*saved_state), GFP_KERNEL);
+ saved_state = intel_crtc_state_alloc(crtc);
if (!saved_state)
return -ENOMEM;
+ /* free the old crtc_state->hw members */
+ intel_crtc_free_hw_state(crtc_state);
+
/* FIXME: before the switch to atomic started, a new pipe_config was
* kzalloc'd. Code that depends on any field being zero should be
* fixed, so that the crtc_state can be safely duplicated. For now,
* only fields that are known to not cause problems are preserved. */
+ saved_state->uapi = crtc_state->uapi;
saved_state->scaler_state = crtc_state->scaler_state;
saved_state->shared_dpll = crtc_state->shared_dpll;
saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
@@ -12594,20 +12907,19 @@ clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
saved_state->sync_mode_slaves_mask =
crtc_state->sync_mode_slaves_mask;
- /* Keep base drm_crtc_state intact, only clear our extended struct */
- BUILD_BUG_ON(offsetof(struct intel_crtc_state, base));
- memcpy(&crtc_state->base + 1, &saved_state->base + 1,
- sizeof(*crtc_state) - sizeof(crtc_state->base));
-
+ memcpy(crtc_state, saved_state, sizeof(*crtc_state));
kfree(saved_state);
+
+ intel_crtc_copy_uapi_to_hw_state(crtc_state);
+
return 0;
}
static int
intel_modeset_pipe_config(struct intel_crtc_state *pipe_config)
{
- struct drm_crtc *crtc = pipe_config->base.crtc;
- struct drm_atomic_state *state = pipe_config->base.state;
+ struct drm_crtc *crtc = pipe_config->uapi.crtc;
+ struct drm_atomic_state *state = pipe_config->uapi.state;
struct intel_encoder *encoder;
struct drm_connector *connector;
struct drm_connector_state *connector_state;
@@ -12615,10 +12927,6 @@ intel_modeset_pipe_config(struct intel_crtc_state *pipe_config)
int i;
bool retry = true;
- ret = clear_intel_crtc_state(pipe_config);
- if (ret)
- return ret;
-
pipe_config->cpu_transcoder =
(enum transcoder) to_intel_crtc(crtc)->pipe;
@@ -12627,13 +12935,13 @@ intel_modeset_pipe_config(struct intel_crtc_state *pipe_config)
* positive or negative polarity is requested, treat this as meaning
* negative polarity.
*/
- if (!(pipe_config->base.adjusted_mode.flags &
+ if (!(pipe_config->hw.adjusted_mode.flags &
(DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
- pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
+ pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
- if (!(pipe_config->base.adjusted_mode.flags &
+ if (!(pipe_config->hw.adjusted_mode.flags &
(DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
- pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
+ pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
ret = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
pipe_config);
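
The polarity fixup just above defaults any sync axis with neither polarity bit set to negative. Sketch with the flag values stubbed (the real ones are the DRM_MODE_FLAG_* bits):

#define PHSYNC (1u << 0)	/* stand-ins for DRM_MODE_FLAG_* */
#define NHSYNC (1u << 1)
#define PVSYNC (1u << 2)
#define NVSYNC (1u << 3)

/* If neither polarity was requested for an axis, pick negative. */
static unsigned int default_sync_polarity(unsigned int flags)
{
	if (!(flags & (PHSYNC | NHSYNC)))
		flags |= NHSYNC;
	if (!(flags & (PVSYNC | NVSYNC)))
		flags |= NVSYNC;

	return flags;
}
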
@@ -12650,7 +12958,7 @@ intel_modeset_pipe_config(struct intel_crtc_state *pipe_config)
* computation to clearly distinguish it from the adjusted mode, which
* can be changed by the connectors in the below retry loop.
*/
- drm_mode_get_hv_timing(&pipe_config->base.mode,
+ drm_mode_get_hv_timing(&pipe_config->hw.mode,
&pipe_config->pipe_src_w,
&pipe_config->pipe_src_h);
@@ -12683,7 +12991,7 @@ encoder_retry:
pipe_config->pixel_multiplier = 1;
/* Fill in default crtc timings, allow encoders to overwrite them. */
- drm_mode_set_crtcinfo(&pipe_config->base.adjusted_mode,
+ drm_mode_set_crtcinfo(&pipe_config->hw.adjusted_mode,
CRTC_STEREO_DOUBLE);
/* Set the crtc_state defaults for trans_port_sync */
@@ -12717,7 +13025,7 @@ encoder_retry:
/* Set default port clock if not overwritten by the encoder. Needs to be
* done afterwards in case the encoder adjusts the mode. */
if (!pipe_config->port_clock)
- pipe_config->port_clock = pipe_config->base.adjusted_mode.crtc_clock
+ pipe_config->port_clock = pipe_config->hw.adjusted_mode.crtc_clock
* pipe_config->pixel_multiplier;
ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
@@ -12746,6 +13054,12 @@ encoder_retry:
DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
+ /*
+ * Make drm_calc_timestamping_constants in
+ * drm_atomic_helper_update_legacy_modeset_state() happy
+ */
+ pipe_config->uapi.adjusted_mode = pipe_config->hw.adjusted_mode;
+
return 0;
}
@@ -12884,13 +13198,13 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
const struct intel_crtc_state *pipe_config,
bool fastset)
{
- struct drm_i915_private *dev_priv = to_i915(current_config->base.crtc->dev);
- struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(current_config->uapi.crtc->dev);
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
bool ret = true;
u32 bp_gamma = 0;
bool fixup_inherited = fastset &&
- (current_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED) &&
- !(pipe_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED);
+ (current_config->hw.mode.private_flags & I915_MODE_FLAG_INHERITED) &&
+ !(pipe_config->hw.mode.private_flags & I915_MODE_FLAG_INHERITED);
if (fixup_inherited && !fastboot_enabled(dev_priv)) {
DRM_DEBUG_KMS("initial modeset and fastboot not set\n");
@@ -13079,19 +13393,19 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_X(output_types);
- PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay);
- PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal);
- PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_start);
- PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_end);
- PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_start);
- PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_end);
+ PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hdisplay);
+ PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_htotal);
+ PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_start);
+ PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_end);
+ PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_start);
+ PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_end);
- PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vdisplay);
- PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vtotal);
- PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_start);
- PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_end);
- PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_start);
- PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end);
+ PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vdisplay);
+ PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vtotal);
+ PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_start);
+ PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_end);
+ PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_start);
+ PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_end);
PIPE_CONF_CHECK_I(pixel_multiplier);
PIPE_CONF_CHECK_I(output_format);
@@ -13108,17 +13422,17 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);
- PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
+ PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
DRM_MODE_FLAG_INTERLACE);
if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
- PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
+ PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
DRM_MODE_FLAG_PHSYNC);
- PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
+ PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
DRM_MODE_FLAG_NHSYNC);
- PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
+ PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
DRM_MODE_FLAG_PVSYNC);
- PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
+ PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
DRM_MODE_FLAG_NVSYNC);
}
@@ -13157,7 +13471,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
bp_gamma = intel_color_get_gamma_bit_precision(pipe_config);
if (bp_gamma)
- PIPE_CONF_CHECK_COLOR_LUT(gamma_mode, base.gamma_lut, bp_gamma);
+ PIPE_CONF_CHECK_COLOR_LUT(gamma_mode, hw.gamma_lut, bp_gamma);
}
@@ -13202,7 +13516,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5)
PIPE_CONF_CHECK_I(pipe_bpp);
- PIPE_CONF_CHECK_CLOCK_FUZZY(base.adjusted_mode.crtc_clock);
+ PIPE_CONF_CHECK_CLOCK_FUZZY(hw.adjusted_mode.crtc_clock);
PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
PIPE_CONF_CHECK_I(min_voltage_level);
@@ -13217,6 +13531,10 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_I(sync_mode_slaves_mask);
PIPE_CONF_CHECK_I(master_transcoder);
+ PIPE_CONF_CHECK_I(dsc.compression_enable);
+ PIPE_CONF_CHECK_I(dsc.dsc_split);
+ PIPE_CONF_CHECK_I(dsc.compressed_bpp);
+
#undef PIPE_CONF_CHECK_X
#undef PIPE_CONF_CHECK_I
#undef PIPE_CONF_CHECK_BOOL
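
For reference, the PIPE_CONF_CHECK_I()-family macros compared above all follow one pattern: compare a field of the current and the computed state, complain on mismatch, and fold the result into the running verdict. A simplified, hypothetical reconstruction of that pattern (not the driver's exact macro):

#include <stdio.h>
#include <stdbool.h>

#define CHECK_I(ok, cur, new, field) do { \
	if ((cur)->field != (new)->field) { \
		fprintf(stderr, "mismatch in " #field ": %d vs %d\n", \
			(int)(cur)->field, (int)(new)->field); \
		(ok) = false; \
	} \
} while (0)

struct cfg_s { int pixel_multiplier; int pipe_bpp; };

static bool compare_cfg(const struct cfg_s *cur, const struct cfg_s *new)
{
	bool ok = true;

	CHECK_I(ok, cur, new, pixel_multiplier);
	CHECK_I(ok, cur, new, pipe_bpp);

	return ok;
}
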
@@ -13236,7 +13554,7 @@ static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
if (pipe_config->has_pch_encoder) {
int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
&pipe_config->fdi_m_n);
- int dotclock = pipe_config->base.adjusted_mode.crtc_clock;
+ int dotclock = pipe_config->hw.adjusted_mode.crtc_clock;
/*
* FDI already provided one idea for the dotclock.
@@ -13264,7 +13582,7 @@ static void verify_wm_state(struct intel_crtc *crtc,
const enum pipe pipe = crtc->pipe;
int plane, level, max_level = ilk_wm_max_level(dev_priv);
- if (INTEL_GEN(dev_priv) < 9 || !new_crtc_state->base.active)
+ if (INTEL_GEN(dev_priv) < 9 || !new_crtc_state->hw.active)
return;
hw = kzalloc(sizeof(*hw), GFP_KERNEL);
@@ -13469,16 +13787,14 @@ verify_crtc_state(struct intel_crtc *crtc,
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_encoder *encoder;
- struct intel_crtc_state *pipe_config;
- struct drm_atomic_state *state;
+ struct intel_crtc_state *pipe_config = old_crtc_state;
+ struct drm_atomic_state *state = old_crtc_state->uapi.state;
bool active;
- state = old_crtc_state->base.state;
- __drm_atomic_helper_crtc_destroy_state(&old_crtc_state->base);
- pipe_config = old_crtc_state;
- memset(pipe_config, 0, sizeof(*pipe_config));
- pipe_config->base.crtc = &crtc->base;
- pipe_config->base.state = state;
+ __drm_atomic_helper_crtc_destroy_state(&old_crtc_state->uapi);
+ intel_crtc_free_hw_state(old_crtc_state);
+ intel_crtc_state_reset(old_crtc_state, crtc);
+ old_crtc_state->uapi.state = state;
DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.base.id, crtc->base.name);
@@ -13486,23 +13802,26 @@ verify_crtc_state(struct intel_crtc *crtc,
/* we keep both pipes enabled on 830 */
if (IS_I830(dev_priv))
- active = new_crtc_state->base.active;
+ active = new_crtc_state->hw.active;
- I915_STATE_WARN(new_crtc_state->base.active != active,
- "crtc active state doesn't match with hw state "
- "(expected %i, found %i)\n", new_crtc_state->base.active, active);
+ I915_STATE_WARN(new_crtc_state->hw.active != active,
+ "crtc active state doesn't match with hw state "
+ "(expected %i, found %i)\n",
+ new_crtc_state->hw.active, active);
- I915_STATE_WARN(crtc->active != new_crtc_state->base.active,
- "transitional active state does not match atomic hw state "
- "(expected %i, found %i)\n", new_crtc_state->base.active, crtc->active);
+ I915_STATE_WARN(crtc->active != new_crtc_state->hw.active,
+ "transitional active state does not match atomic hw state "
+ "(expected %i, found %i)\n",
+ new_crtc_state->hw.active, crtc->active);
for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
enum pipe pipe;
active = encoder->get_hw_state(encoder, &pipe);
- I915_STATE_WARN(active != new_crtc_state->base.active,
- "[ENCODER:%i] active %i with crtc active %i\n",
- encoder->base.base.id, active, new_crtc_state->base.active);
+ I915_STATE_WARN(active != new_crtc_state->hw.active,
+ "[ENCODER:%i] active %i with crtc active %i\n",
+ encoder->base.base.id, active,
+ new_crtc_state->hw.active);
I915_STATE_WARN(active && crtc->pipe != pipe,
"Encoder connected to wrong pipe %c\n",
@@ -13514,7 +13833,7 @@ verify_crtc_state(struct intel_crtc *crtc,
intel_crtc_compute_pixel_rate(pipe_config);
- if (!new_crtc_state->base.active)
+ if (!new_crtc_state->hw.active)
return;
intel_pipe_config_sanity_check(dev_priv, pipe_config);
@@ -13537,7 +13856,7 @@ intel_verify_planes(struct intel_atomic_state *state)
for_each_new_intel_plane_in_state(state, plane,
plane_state, i)
assert_plane(plane, plane_state->planar_slave ||
- plane_state->base.visible);
+ plane_state->uapi.visible);
}
static void
@@ -13576,7 +13895,7 @@ verify_single_dpll_state(struct drm_i915_private *dev_priv,
crtc_mask = drm_crtc_mask(&crtc->base);
- if (new_crtc_state->base.active)
+ if (new_crtc_state->hw.active)
I915_STATE_WARN(!(pll->active_mask & crtc_mask),
"pll active mismatch (expected pipe %c in active mask 0x%02x)\n",
pipe_name(drm_crtc_index(&crtc->base)), pll->active_mask);
@@ -13655,10 +13974,10 @@ intel_modeset_verify_disabled(struct drm_i915_private *dev_priv,
static void
intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct drm_display_mode *adjusted_mode =
- &crtc_state->base.adjusted_mode;
+ &crtc_state->hw.adjusted_mode;
drm_calc_timestamping_constants(&crtc->base, adjusted_mode);
@@ -13740,7 +14059,7 @@ static int haswell_mode_set_planes_workaround(struct intel_atomic_state *state)
/* look at all crtc's that are going to be enabled in during modeset */
for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
- if (!crtc_state->base.active ||
+ if (!crtc_state->hw.active ||
!needs_modeset(crtc_state))
continue;
@@ -13765,7 +14084,7 @@ static int haswell_mode_set_planes_workaround(struct intel_atomic_state *state)
crtc_state->hsw_workaround_pipe = INVALID_PIPE;
- if (!crtc_state->base.active ||
+ if (!crtc_state->hw.active ||
needs_modeset(crtc_state))
continue;
@@ -13802,12 +14121,12 @@ static int intel_modeset_checks(struct intel_atomic_state *state)
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
- if (new_crtc_state->base.active)
+ if (new_crtc_state->hw.active)
state->active_pipes |= BIT(crtc->pipe);
else
state->active_pipes &= ~BIT(crtc->pipe);
- if (old_crtc_state->base.active != new_crtc_state->base.active)
+ if (old_crtc_state->hw.active != new_crtc_state->hw.active)
state->active_pipe_changes |= BIT(crtc->pipe);
}
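
The bookkeeping above maintains two masks: which pipes end up active, and which pipes flipped their active state in this commit. Self-contained sketch:

#include <stdbool.h>
#include <stdint.h>

struct pipe_masks { uint32_t active_pipes, active_pipe_changes; };

static void track_pipe(struct pipe_masks *m, int pipe,
		       bool was_active, bool is_active)
{
	if (is_active)
		m->active_pipes |= 1u << pipe;
	else
		m->active_pipes &= ~(1u << pipe);

	/* Record pipes whose active state changes in this commit. */
	if (was_active != is_active)
		m->active_pipe_changes |= 1u << pipe;
}
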
@@ -13852,7 +14171,7 @@ static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_sta
if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true))
return;
- new_crtc_state->base.mode_changed = false;
+ new_crtc_state->uapi.mode_changed = false;
new_crtc_state->update_pipe = true;
/*
@@ -13995,9 +14314,9 @@ static int intel_atomic_check(struct drm_device *dev,
/* Catch I915_MODE_FLAG_INHERITED */
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
- if (new_crtc_state->base.mode.private_flags !=
- old_crtc_state->base.mode.private_flags)
- new_crtc_state->base.mode_changed = true;
+ if (new_crtc_state->hw.mode.private_flags !=
+ old_crtc_state->hw.mode.private_flags)
+ new_crtc_state->uapi.mode_changed = true;
}
ret = drm_atomic_helper_check_modeset(dev, &state->base);
@@ -14006,14 +14325,24 @@ static int intel_atomic_check(struct drm_device *dev,
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
- if (!needs_modeset(new_crtc_state))
+ if (!needs_modeset(new_crtc_state)) {
+ /* Light copy */
+ intel_crtc_copy_uapi_to_hw_state_nomodeset(new_crtc_state);
+
continue;
+ }
+
- if (!new_crtc_state->base.enable) {
+ if (!new_crtc_state->uapi.enable) {
+ intel_crtc_copy_uapi_to_hw_state(new_crtc_state);
any_ms = true;
continue;
}
+ ret = intel_crtc_prepare_cleared_state(new_crtc_state);
+ if (ret)
+ goto fail;
+
ret = intel_modeset_pipe_config(new_crtc_state);
if (ret)
goto fail;
@@ -14111,7 +14440,7 @@ void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- if (!IS_GEN(dev_priv, 2))
+ if (!IS_GEN(dev_priv, 2) || crtc_state->active_planes)
intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
if (crtc_state->has_pch_encoder) {
@@ -14125,7 +14454,7 @@ void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
const struct intel_crtc_state *new_crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
/*
@@ -14159,6 +14488,7 @@ static void commit_pipe_config(struct intel_atomic_state *state,
struct intel_crtc_state *old_crtc_state,
struct intel_crtc_state *new_crtc_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
bool modeset = needs_modeset(new_crtc_state);
@@ -14167,7 +14497,7 @@ static void commit_pipe_config(struct intel_atomic_state *state,
* CRTC was enabled.
*/
if (!modeset) {
- if (new_crtc_state->base.color_mgmt_changed ||
+ if (new_crtc_state->uapi.color_mgmt_changed ||
new_crtc_state->update_pipe)
intel_color_commit(new_crtc_state);
@@ -14182,8 +14512,7 @@ static void commit_pipe_config(struct intel_atomic_state *state,
}
if (dev_priv->display.atomic_update_watermarks)
- dev_priv->display.atomic_update_watermarks(state,
- new_crtc_state);
+ dev_priv->display.atomic_update_watermarks(state, crtc);
}
static void intel_update_crtc(struct intel_crtc *crtc,
@@ -14200,20 +14529,20 @@ static void intel_update_crtc(struct intel_crtc *crtc,
if (modeset) {
intel_crtc_update_active_timings(new_crtc_state);
- dev_priv->display.crtc_enable(new_crtc_state, state);
+ dev_priv->display.crtc_enable(state, crtc);
/* vblanks work again, re-enable pipe CRC. */
intel_crtc_enable_pipe_crc(crtc);
} else {
if (new_crtc_state->preload_luts &&
- (new_crtc_state->base.color_mgmt_changed ||
+ (new_crtc_state->uapi.color_mgmt_changed ||
new_crtc_state->update_pipe))
intel_color_load_luts(new_crtc_state);
- intel_pre_plane_update(old_crtc_state, new_crtc_state);
+ intel_pre_plane_update(state, crtc);
if (new_crtc_state->update_pipe)
- intel_encoders_update_pipe(crtc, new_crtc_state, state);
+ intel_encoders_update_pipe(state, crtc);
}
if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc)
@@ -14240,13 +14569,13 @@ static void intel_update_crtc(struct intel_crtc *crtc,
* of enabling them on the CRTC's first fastset.
*/
if (new_crtc_state->update_pipe && !modeset &&
- old_crtc_state->base.mode.private_flags & I915_MODE_FLAG_INHERITED)
+ old_crtc_state->hw.mode.private_flags & I915_MODE_FLAG_INHERITED)
intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
}
static struct intel_crtc *intel_get_slave_crtc(const struct intel_crtc_state *new_crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(new_crtc_state->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(new_crtc_state->uapi.crtc->dev);
enum transcoder slave_transcoder;
WARN_ON(!is_power_of_2(new_crtc_state->sync_mode_slaves_mask));
@@ -14271,97 +14600,59 @@ static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
*/
intel_crtc_disable_pipe_crc(crtc);
- dev_priv->display.crtc_disable(old_crtc_state, state);
+ dev_priv->display.crtc_disable(state, crtc);
crtc->active = false;
intel_fbc_disable(crtc);
intel_disable_shared_dpll(old_crtc_state);
- /*
- * Underruns don't always raise interrupts,
- * so check manually.
- */
- intel_check_cpu_fifo_underruns(dev_priv);
- intel_check_pch_fifo_underruns(dev_priv);
-
/* FIXME unify this for all platforms */
- if (!new_crtc_state->base.active &&
+ if (!new_crtc_state->hw.active &&
!HAS_GMCH(dev_priv) &&
dev_priv->display.initial_watermarks)
- dev_priv->display.initial_watermarks(state,
- new_crtc_state);
-}
-
-static void intel_trans_port_sync_modeset_disables(struct intel_atomic_state *state,
- struct intel_crtc *crtc,
- struct intel_crtc_state *old_crtc_state,
- struct intel_crtc_state *new_crtc_state)
-{
- struct intel_crtc *slave_crtc = intel_get_slave_crtc(new_crtc_state);
- struct intel_crtc_state *new_slave_crtc_state =
- intel_atomic_get_new_crtc_state(state, slave_crtc);
- struct intel_crtc_state *old_slave_crtc_state =
- intel_atomic_get_old_crtc_state(state, slave_crtc);
-
- WARN_ON(!slave_crtc || !new_slave_crtc_state ||
- !old_slave_crtc_state);
-
- /* Disable Slave first */
- intel_pre_plane_update(old_slave_crtc_state, new_slave_crtc_state);
- if (old_slave_crtc_state->base.active)
- intel_old_crtc_state_disables(state,
- old_slave_crtc_state,
- new_slave_crtc_state,
- slave_crtc);
-
- /* Disable Master */
- intel_pre_plane_update(old_crtc_state, new_crtc_state);
- if (old_crtc_state->base.active)
- intel_old_crtc_state_disables(state,
- old_crtc_state,
- new_crtc_state,
- crtc);
+ dev_priv->display.initial_watermarks(state, crtc);
}
static void intel_commit_modeset_disables(struct intel_atomic_state *state)
{
struct intel_crtc_state *new_crtc_state, *old_crtc_state;
struct intel_crtc *crtc;
+ u32 handled = 0;
int i;
- /*
- * Disable CRTC/pipes in reverse order because some features(MST in
- * TGL+) requires master and slave relationship between pipes, so it
- * should always pick the lowest pipe as master as it will be enabled
- * first and disable in the reverse order so the master will be the
- * last one to be disabled.
- */
- for_each_oldnew_intel_crtc_in_state_reverse(state, crtc, old_crtc_state,
- new_crtc_state, i) {
+ /* Only disable port sync slaves */
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
if (!needs_modeset(new_crtc_state))
continue;
+ if (!old_crtc_state->hw.active)
+ continue;
+
/* In case of Transcoder port Sync, master and slave CRTCs can be
* assigned in any order, and we need to make sure that
* slave CRTCs are disabled first and the master CRTC last, since
* slave vblanks are masked until the master's vblank.
*/
- if (is_trans_port_sync_mode(new_crtc_state)) {
- if (is_trans_port_sync_master(new_crtc_state))
- intel_trans_port_sync_modeset_disables(state,
- crtc,
- old_crtc_state,
- new_crtc_state);
- else
- continue;
- } else {
- intel_pre_plane_update(old_crtc_state, new_crtc_state);
+ if (!is_trans_port_sync_slave(old_crtc_state))
+ continue;
- if (old_crtc_state->base.active)
- intel_old_crtc_state_disables(state,
- old_crtc_state,
- new_crtc_state,
- crtc);
- }
+ intel_pre_plane_update(state, crtc);
+ intel_old_crtc_state_disables(state, old_crtc_state,
+ new_crtc_state, crtc);
+ handled |= BIT(crtc->pipe);
+ }
+
+ /* Disable everything else left on */
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ if (!needs_modeset(new_crtc_state) ||
+ (handled & BIT(crtc->pipe)))
+ continue;
+
+ intel_pre_plane_update(state, crtc);
+ if (old_crtc_state->hw.active)
+ intel_old_crtc_state_disables(state, old_crtc_state,
+ new_crtc_state, crtc);
}
}
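
The rework above replaces the reverse-order disable walk with two passes over the same list: pass one tears down only the port-sync slaves and records their pipes in a bitmask, pass two disables whatever was not handled, so a slave is always gone before its master. A self-contained sketch of that shape (pipe model and predicates stubbed for illustration):

#include <stdbool.h>
#include <stdint.h>

#define NUM_PIPES 4

struct pipe_sketch {
	bool needs_disable;
	bool is_sync_slave;
	bool active;
};

static void disable_pipe(struct pipe_sketch *p)
{
	p->active = false;
}

static void commit_disables_sketch(struct pipe_sketch pipes[NUM_PIPES])
{
	uint32_t handled = 0;
	int i;

	/* Pass 1: port sync slaves only, remembered in `handled`. */
	for (i = 0; i < NUM_PIPES; i++) {
		if (!pipes[i].needs_disable || !pipes[i].is_sync_slave)
			continue;

		disable_pipe(&pipes[i]);
		handled |= 1u << i;
	}

	/* Pass 2: everything else that still needs to go. */
	for (i = 0; i < NUM_PIPES; i++) {
		if (!pipes[i].needs_disable || (handled & (1u << i)))
			continue;

		disable_pipe(&pipes[i]);
	}
}
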
@@ -14372,7 +14663,7 @@ static void intel_commit_modeset_enables(struct intel_atomic_state *state)
int i;
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
- if (!new_crtc_state->base.active)
+ if (!new_crtc_state->hw.active)
continue;
intel_update_crtc(crtc, state, old_crtc_state,
@@ -14387,7 +14678,7 @@ static void intel_crtc_enable_trans_port_sync(struct intel_crtc *crtc,
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
intel_crtc_update_active_timings(new_crtc_state);
- dev_priv->display.crtc_enable(new_crtc_state, state);
+ dev_priv->display.crtc_enable(state, crtc);
intel_crtc_enable_pipe_crc(crtc);
}
@@ -14437,7 +14728,7 @@ static void intel_post_crtc_enable_updates(struct intel_crtc *crtc,
* of enabling them on the CRTC's first fastset.
*/
if (new_crtc_state->update_pipe && !modeset &&
- old_crtc_state->base.mode.private_flags & I915_MODE_FLAG_INHERITED)
+ old_crtc_state->hw.mode.private_flags & I915_MODE_FLAG_INHERITED)
intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
}
@@ -14492,17 +14783,19 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state)
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc *crtc;
struct intel_crtc_state *old_crtc_state, *new_crtc_state;
- unsigned int updated = 0;
- bool progress;
- int i;
u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
u8 required_slices = state->wm_results.ddb.enabled_slices;
struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
+ u8 dirty_pipes = 0;
+ int i;
- for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i)
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
/* ignore allocations for CRTCs that have been turned off. */
- if (new_crtc_state->base.active)
+ if (!needs_modeset(new_crtc_state) && new_crtc_state->hw.active)
entries[i] = old_crtc_state->wm.skl.ddb;
+ if (new_crtc_state->hw.active)
+ dirty_pipes |= BIT(crtc->pipe);
+ }
/* If a 2nd DBuf slice is required, enable it here */
if (INTEL_GEN(dev_priv) >= 11 && required_slices > hw_enabled_slices)
@@ -14514,15 +14807,13 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state)
* never overlap with each other in between CRTC updates. Otherwise we'll
* cause pipe underruns and other bad stuff.
*/
- do {
- progress = false;
-
- for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ while (dirty_pipes) {
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
enum pipe pipe = crtc->pipe;
- bool vbl_wait = false;
bool modeset = needs_modeset(new_crtc_state);
- if (updated & BIT(crtc->pipe) || !new_crtc_state->base.active)
+ if ((dirty_pipes & BIT(pipe)) == 0)
continue;
if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
@@ -14530,20 +14821,8 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state)
INTEL_NUM_PIPES(dev_priv), i))
continue;
- updated |= BIT(pipe);
entries[i] = new_crtc_state->wm.skl.ddb;
-
- /*
- * If this is an already active pipe, it's DDB changed,
- * and this isn't the last pipe that needs updating
- * then we need to wait for a vblank to pass for the
- * new ddb allocation to take effect.
- */
- if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb,
- &old_crtc_state->wm.skl.ddb) &&
- !modeset &&
- state->wm_results.dirty_pipes != updated)
- vbl_wait = true;
+ dirty_pipes &= ~BIT(pipe);
if (modeset && is_trans_port_sync_mode(new_crtc_state)) {
if (is_trans_port_sync_master(new_crtc_state))
@@ -14558,12 +14837,18 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state)
new_crtc_state);
}
- if (vbl_wait)
+ /*
+ * If this is an already active pipe, its DDB changed,
+ * and this isn't the last pipe that needs updating
+ * then we need to wait for a vblank to pass for the
+ * new ddb allocation to take effect.
+ */
+ if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb,
+ &old_crtc_state->wm.skl.ddb) &&
+ !modeset && dirty_pipes)
intel_wait_for_vblank(dev_priv, pipe);
-
- progress = true;
}
- } while (progress);
+ }
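The rewritten loop above drops the old progress flag in favor of a dirty_pipes bitmask: each pass flushes every pipe whose new DDB allocation no longer overlaps any other pipe's still-current allocation, waiting a vblank when more pipes remain. A minimal userspace sketch of that ordering rule (pipe names and DDB ranges invented for illustration, not driver code):

#include <stdbool.h>
#include <stdio.h>

struct ddb { int start, end; };	/* [start, end) */

static bool overlaps(struct ddb a, struct ddb b)
{
	return a.start < b.end && b.start < a.end;
}

int main(void)
{
	/* pipe A grows into space that pipe B is vacating */
	struct ddb cur[2] = { { 0, 200 }, { 200, 512 } };
	struct ddb tgt[2] = { { 0, 300 }, { 300, 512 } };
	unsigned int dirty_pipes = 0x3;

	while (dirty_pipes) {
		for (int i = 0; i < 2; i++) {
			bool blocked = false;

			if (!(dirty_pipes & (1u << i)))
				continue;

			for (int j = 0; j < 2; j++)
				if (j != i && overlaps(tgt[i], cur[j]))
					blocked = true;
			if (blocked)
				continue;

			cur[i] = tgt[i];	/* the "update_crtc" step */
			dirty_pipes &= ~(1u << i);
			printf("flushed pipe %c%s\n", 'A' + i,
			       dirty_pipes ? " (wait a vblank first)" : "");
		}
	}
	return 0;
}

Here pipe A is deferred on the first pass because its target range still overlaps B's current allocation; B flushes first, then A follows, matching the driver's retry-until-empty scheme.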
/* If the 2nd DBuf slice is no longer required, disable it */
if (INTEL_GEN(dev_priv) >= 11 && required_slices < hw_enabled_slices)
@@ -14684,12 +14969,13 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
bool modeset = needs_modeset(new_crtc_state);
/* Complete events for pipes that are now disabled here. */
- if (modeset && !new_crtc_state->base.active && new_crtc_state->base.event) {
+ if (modeset && !new_crtc_state->hw.active && new_crtc_state->uapi.event) {
spin_lock_irq(&dev->event_lock);
- drm_crtc_send_vblank_event(&crtc->base, new_crtc_state->base.event);
+ drm_crtc_send_vblank_event(&crtc->base,
+ new_crtc_state->uapi.event);
spin_unlock_irq(&dev->event_lock);
- new_crtc_state->base.event = NULL;
+ new_crtc_state->uapi.event = NULL;
}
}
@@ -14720,10 +15006,10 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
drm_atomic_helper_wait_for_flip_done(dev, &state->base);
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
- if (new_crtc_state->base.active &&
+ if (new_crtc_state->hw.active &&
!needs_modeset(new_crtc_state) &&
!new_crtc_state->preload_luts &&
- (new_crtc_state->base.color_mgmt_changed ||
+ (new_crtc_state->uapi.color_mgmt_changed ||
new_crtc_state->update_pipe))
intel_color_load_luts(new_crtc_state);
}
@@ -14735,14 +15021,25 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
*
* TODO: Move this (and other cleanup) to an async worker eventually.
*/
- for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ /*
+ * Gen2 reports pipe underruns whenever all planes are disabled.
+ * So re-enable underrun reporting after some planes get enabled.
+ *
+ * We do this before .optimize_watermarks() so that we have a
+ * chance of catching underruns with the intermediate watermarks
+ * vs. the new plane configuration.
+ */
+ if (IS_GEN(dev_priv, 2) && planes_enabling(old_crtc_state, new_crtc_state))
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
+
if (dev_priv->display.optimize_watermarks)
- dev_priv->display.optimize_watermarks(state,
- new_crtc_state);
+ dev_priv->display.optimize_watermarks(state, crtc);
}
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
- intel_post_plane_update(old_crtc_state);
+ intel_post_plane_update(state, crtc);
if (put_domains[i])
modeset_put_power_domains(dev_priv, put_domains[i]);
@@ -14750,6 +15047,10 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
}
+ /* Underruns don't always raise interrupts, so check manually */
+ intel_check_cpu_fifo_underruns(dev_priv);
+ intel_check_pch_fifo_underruns(dev_priv);
+
if (state->modeset)
intel_verify_planes(state);
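The Gen2 comment above leans on a planes_enabling() predicate introduced by this series; a plausible sketch of it, assuming it keys off the active plane bitmasks (treat the exact condition as an assumption):

static bool planes_enabling(const struct intel_crtc_state *old_crtc_state,
			    const struct intel_crtc_state *new_crtc_state)
{
	/*
	 * Planes count as "enabling" when at least one is active afterwards
	 * and either none were active before or a modeset re-enables them.
	 */
	return (!old_crtc_state->active_planes ||
		needs_modeset(new_crtc_state)) &&
	       new_crtc_state->active_planes;
}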
@@ -14823,8 +15124,8 @@ static void intel_atomic_track_fbs(struct intel_atomic_state *state)
for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
new_plane_state, i)
- intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->base.fb),
- to_intel_frontbuffer(new_plane_state->base.fb),
+ intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
+ to_intel_frontbuffer(new_plane_state->hw.fb),
plane->frontbuffer_bit);
}
@@ -14991,9 +15292,9 @@ static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
static int intel_plane_pin_fb(struct intel_plane_state *plane_state)
{
- struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- struct drm_framebuffer *fb = plane_state->base.fb;
+ struct drm_framebuffer *fb = plane_state->hw.fb;
struct i915_vma *vma;
if (plane->id == PLANE_CURSOR &&
@@ -15056,9 +15357,9 @@ intel_prepare_plane_fb(struct drm_plane *plane,
struct intel_plane_state *new_plane_state =
to_intel_plane_state(_new_plane_state);
struct intel_atomic_state *intel_state =
- to_intel_atomic_state(new_plane_state->base.state);
+ to_intel_atomic_state(new_plane_state->uapi.state);
struct drm_i915_private *dev_priv = to_i915(plane->dev);
- struct drm_framebuffer *fb = new_plane_state->base.fb;
+ struct drm_framebuffer *fb = new_plane_state->hw.fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
int ret;
@@ -15089,9 +15390,9 @@ intel_prepare_plane_fb(struct drm_plane *plane,
}
}
- if (new_plane_state->base.fence) { /* explicit fencing */
+ if (new_plane_state->uapi.fence) { /* explicit fencing */
ret = i915_sw_fence_await_dma_fence(&intel_state->commit_ready,
- new_plane_state->base.fence,
+ new_plane_state->uapi.fence,
I915_FENCE_TIMEOUT,
GFP_KERNEL);
if (ret < 0)
@@ -15112,9 +15413,9 @@ intel_prepare_plane_fb(struct drm_plane *plane,
return ret;
fb_obj_bump_render_priority(obj);
- intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_DIRTYFB);
+ i915_gem_object_flush_frontbuffer(obj, ORIGIN_DIRTYFB);
- if (!new_plane_state->base.fence) { /* implicit fencing */
+ if (!new_plane_state->uapi.fence) { /* implicit fencing */
struct dma_fence *fence;
ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
@@ -15126,13 +15427,13 @@ intel_prepare_plane_fb(struct drm_plane *plane,
fence = dma_resv_get_excl_rcu(obj->base.resv);
if (fence) {
- add_rps_boost_after_vblank(new_plane_state->base.crtc,
+ add_rps_boost_after_vblank(new_plane_state->hw.crtc,
fence);
dma_fence_put(fence);
}
} else {
- add_rps_boost_after_vblank(new_plane_state->base.crtc,
- new_plane_state->base.fence);
+ add_rps_boost_after_vblank(new_plane_state->hw.crtc,
+ new_plane_state->uapi.fence);
}
/*
@@ -15165,7 +15466,7 @@ intel_cleanup_plane_fb(struct drm_plane *plane,
struct intel_plane_state *old_plane_state =
to_intel_plane_state(_old_plane_state);
struct intel_atomic_state *intel_state =
- to_intel_atomic_state(old_plane_state->base.state);
+ to_intel_atomic_state(old_plane_state->uapi.state);
struct drm_i915_private *dev_priv = to_i915(plane->dev);
if (intel_state->rps_interactive) {
@@ -15229,8 +15530,12 @@ static bool i965_plane_format_mod_supported(struct drm_plane *_plane,
case DRM_FORMAT_RGB565:
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_ABGR8888:
case DRM_FORMAT_XRGB2101010:
case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_ARGB2101010:
+ case DRM_FORMAT_ABGR2101010:
case DRM_FORMAT_XBGR16161616F:
return modifier == DRM_FORMAT_MOD_LINEAR ||
modifier == I915_FORMAT_MOD_X_TILED;
@@ -15288,7 +15593,7 @@ intel_legacy_cursor_update(struct drm_plane *_plane,
* When crtc is inactive or there is a modeset pending,
* wait for it to complete in the slowpath
*/
- if (!crtc_state->base.active || needs_modeset(crtc_state) ||
+ if (!crtc_state->hw.active || needs_modeset(crtc_state) ||
crtc_state->update_pipe)
goto slow;
@@ -15297,8 +15602,8 @@ intel_legacy_cursor_update(struct drm_plane *_plane,
* the plane. This prevents our async update's changes from getting
* overridden by a previous synchronous update's state.
*/
- if (old_plane_state->base.commit &&
- !try_wait_for_completion(&old_plane_state->base.commit->hw_done))
+ if (old_plane_state->uapi.commit &&
+ !try_wait_for_completion(&old_plane_state->uapi.commit->hw_done))
goto slow;
/*
@@ -15306,12 +15611,12 @@ intel_legacy_cursor_update(struct drm_plane *_plane,
* take the slowpath. Only changing fb or position should be
* in the fastpath.
*/
- if (old_plane_state->base.crtc != &crtc->base ||
- old_plane_state->base.src_w != src_w ||
- old_plane_state->base.src_h != src_h ||
- old_plane_state->base.crtc_w != crtc_w ||
- old_plane_state->base.crtc_h != crtc_h ||
- !old_plane_state->base.fb != !fb)
+ if (old_plane_state->uapi.crtc != &crtc->base ||
+ old_plane_state->uapi.src_w != src_w ||
+ old_plane_state->uapi.src_h != src_h ||
+ old_plane_state->uapi.crtc_w != crtc_w ||
+ old_plane_state->uapi.crtc_h != crtc_h ||
+ !old_plane_state->uapi.fb != !fb)
goto slow;
new_plane_state = to_intel_plane_state(intel_plane_duplicate_state(&plane->base));
@@ -15324,16 +15629,16 @@ intel_legacy_cursor_update(struct drm_plane *_plane,
goto out_free;
}
- drm_atomic_set_fb_for_plane(&new_plane_state->base, fb);
+ drm_atomic_set_fb_for_plane(&new_plane_state->uapi, fb);
- new_plane_state->base.src_x = src_x;
- new_plane_state->base.src_y = src_y;
- new_plane_state->base.src_w = src_w;
- new_plane_state->base.src_h = src_h;
- new_plane_state->base.crtc_x = crtc_x;
- new_plane_state->base.crtc_y = crtc_y;
- new_plane_state->base.crtc_w = crtc_w;
- new_plane_state->base.crtc_h = crtc_h;
+ new_plane_state->uapi.src_x = src_x;
+ new_plane_state->uapi.src_y = src_y;
+ new_plane_state->uapi.src_w = src_w;
+ new_plane_state->uapi.src_h = src_h;
+ new_plane_state->uapi.crtc_x = crtc_x;
+ new_plane_state->uapi.crtc_y = crtc_y;
+ new_plane_state->uapi.crtc_w = crtc_w;
+ new_plane_state->uapi.crtc_h = crtc_h;
ret = intel_plane_atomic_check_with_state(crtc_state, new_crtc_state,
old_plane_state, new_plane_state);
@@ -15344,13 +15649,14 @@ intel_legacy_cursor_update(struct drm_plane *_plane,
if (ret)
goto out_free;
- intel_frontbuffer_flush(to_intel_frontbuffer(new_plane_state->base.fb), ORIGIN_FLIP);
- intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->base.fb),
- to_intel_frontbuffer(new_plane_state->base.fb),
+ intel_frontbuffer_flush(to_intel_frontbuffer(new_plane_state->hw.fb),
+ ORIGIN_FLIP);
+ intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
+ to_intel_frontbuffer(new_plane_state->hw.fb),
plane->frontbuffer_bit);
/* Swap plane state */
- plane->base.state = &new_plane_state->base;
+ plane->base.state = &new_plane_state->uapi;
/*
* We cannot swap crtc_state as it may be in use by an atomic commit or
@@ -15364,7 +15670,7 @@ intel_legacy_cursor_update(struct drm_plane *_plane,
*/
crtc_state->active_planes = new_crtc_state->active_planes;
- if (new_plane_state->base.visible)
+ if (new_plane_state->uapi.visible)
intel_update_plane(plane, crtc_state, new_plane_state);
else
intel_disable_plane(plane, crtc_state);
@@ -15373,11 +15679,11 @@ intel_legacy_cursor_update(struct drm_plane *_plane,
out_free:
if (new_crtc_state)
- intel_crtc_destroy_state(&crtc->base, &new_crtc_state->base);
+ intel_crtc_destroy_state(&crtc->base, &new_crtc_state->uapi);
if (ret)
- intel_plane_destroy_state(&plane->base, &new_plane_state->base);
+ intel_plane_destroy_state(&plane->base, &new_plane_state->uapi);
else
- intel_plane_destroy_state(&plane->base, &old_plane_state->base);
+ intel_plane_destroy_state(&plane->base, &old_plane_state->uapi);
return ret;
slow:
@@ -15419,7 +15725,6 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
const struct drm_plane_funcs *plane_funcs;
unsigned int supported_rotations;
unsigned int possible_crtcs;
- const u64 *modifiers;
const u32 *formats;
int num_formats;
int ret, zpos;
@@ -15451,7 +15756,10 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
fbc->possible_framebuffer_bits |= plane->frontbuffer_bit;
}
- if (INTEL_GEN(dev_priv) >= 4) {
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ formats = vlv_primary_formats;
+ num_formats = ARRAY_SIZE(vlv_primary_formats);
+ } else if (INTEL_GEN(dev_priv) >= 4) {
/*
* WaFP16GammaEnabling:ivb
* "Workaround : When using the 64-bit format, the plane
@@ -15472,51 +15780,45 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
formats = i965_primary_formats;
num_formats = ARRAY_SIZE(i965_primary_formats);
}
- modifiers = i9xx_format_modifiers;
-
- plane->max_stride = i9xx_plane_max_stride;
- plane->update_plane = i9xx_update_plane;
- plane->disable_plane = i9xx_disable_plane;
- plane->get_hw_state = i9xx_plane_get_hw_state;
- plane->check_plane = i9xx_plane_check;
-
- if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
- plane->min_cdclk = hsw_plane_min_cdclk;
- else if (IS_IVYBRIDGE(dev_priv))
- plane->min_cdclk = ivb_plane_min_cdclk;
- else if (IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv))
- plane->min_cdclk = vlv_plane_min_cdclk;
- else
- plane->min_cdclk = i9xx_plane_min_cdclk;
-
- plane_funcs = &i965_plane_funcs;
} else {
formats = i8xx_primary_formats;
num_formats = ARRAY_SIZE(i8xx_primary_formats);
- modifiers = i9xx_format_modifiers;
+ }
+
+ if (INTEL_GEN(dev_priv) >= 4)
+ plane_funcs = &i965_plane_funcs;
+ else
+ plane_funcs = &i8xx_plane_funcs;
- plane->max_stride = i9xx_plane_max_stride;
- plane->update_plane = i9xx_update_plane;
- plane->disable_plane = i9xx_disable_plane;
- plane->get_hw_state = i9xx_plane_get_hw_state;
- plane->check_plane = i9xx_plane_check;
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ plane->min_cdclk = vlv_plane_min_cdclk;
+ else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+ plane->min_cdclk = hsw_plane_min_cdclk;
+ else if (IS_IVYBRIDGE(dev_priv))
+ plane->min_cdclk = ivb_plane_min_cdclk;
+ else
plane->min_cdclk = i9xx_plane_min_cdclk;
- plane_funcs = &i8xx_plane_funcs;
- }
+ plane->max_stride = i9xx_plane_max_stride;
+ plane->update_plane = i9xx_update_plane;
+ plane->disable_plane = i9xx_disable_plane;
+ plane->get_hw_state = i9xx_plane_get_hw_state;
+ plane->check_plane = i9xx_plane_check;
possible_crtcs = BIT(pipe);
if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
possible_crtcs, plane_funcs,
- formats, num_formats, modifiers,
+ formats, num_formats,
+ i9xx_format_modifiers,
DRM_PLANE_TYPE_PRIMARY,
"primary %c", pipe_name(pipe));
else
ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
possible_crtcs, plane_funcs,
- formats, num_formats, modifiers,
+ formats, num_formats,
+ i9xx_format_modifiers,
DRM_PLANE_TYPE_PRIMARY,
"plane %c",
plane_name(plane->i9xx_plane));
@@ -15620,28 +15922,6 @@ fail:
return ERR_PTR(ret);
}
-static void intel_crtc_init_scalers(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc_scaler_state *scaler_state =
- &crtc_state->scaler_state;
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- int i;
-
- crtc->num_scalers = RUNTIME_INFO(dev_priv)->num_scalers[crtc->pipe];
- if (!crtc->num_scalers)
- return;
-
- for (i = 0; i < crtc->num_scalers; i++) {
- struct intel_scaler *scaler = &scaler_state->scalers[i];
-
- scaler->in_use = 0;
- scaler->mode = 0;
- }
-
- scaler_state->scaler_id = -1;
-}
-
#define INTEL_CRTC_FUNCS \
.gamma_set = drm_atomic_helper_legacy_gamma_set, \
.set_config = drm_atomic_helper_set_config, \
@@ -15709,33 +15989,53 @@ static const struct drm_crtc_funcs i8xx_crtc_funcs = {
.disable_vblank = i8xx_disable_vblank,
};
+static struct intel_crtc *intel_crtc_alloc(void)
+{
+ struct intel_crtc_state *crtc_state;
+ struct intel_crtc *crtc;
+
+ crtc = kzalloc(sizeof(*crtc), GFP_KERNEL);
+ if (!crtc)
+ return ERR_PTR(-ENOMEM);
+
+ crtc_state = intel_crtc_state_alloc(crtc);
+ if (!crtc_state) {
+ kfree(crtc);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ crtc->base.state = &crtc_state->uapi;
+ crtc->config = crtc_state;
+
+ return crtc;
+}
+
+static void intel_crtc_free(struct intel_crtc *crtc)
+{
+ intel_crtc_destroy_state(&crtc->base, crtc->base.state);
+ kfree(crtc);
+}
+
static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
{
+ struct intel_plane *primary, *cursor;
const struct drm_crtc_funcs *funcs;
- struct intel_crtc *intel_crtc;
- struct intel_crtc_state *crtc_state = NULL;
- struct intel_plane *primary = NULL;
- struct intel_plane *cursor = NULL;
+ struct intel_crtc *crtc;
int sprite, ret;
- intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
- if (!intel_crtc)
- return -ENOMEM;
+ crtc = intel_crtc_alloc();
+ if (IS_ERR(crtc))
+ return PTR_ERR(crtc);
- crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
- if (!crtc_state) {
- ret = -ENOMEM;
- goto fail;
- }
- __drm_atomic_helper_crtc_reset(&intel_crtc->base, &crtc_state->base);
- intel_crtc->config = crtc_state;
+ crtc->pipe = pipe;
+ crtc->num_scalers = RUNTIME_INFO(dev_priv)->num_scalers[pipe];
primary = intel_primary_plane_create(dev_priv, pipe);
if (IS_ERR(primary)) {
ret = PTR_ERR(primary);
goto fail;
}
- intel_crtc->plane_ids_mask |= BIT(primary->id);
+ crtc->plane_ids_mask |= BIT(primary->id);
for_each_sprite(dev_priv, pipe, sprite) {
struct intel_plane *plane;
@@ -15745,7 +16045,7 @@ static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
ret = PTR_ERR(plane);
goto fail;
}
- intel_crtc->plane_ids_mask |= BIT(plane->id);
+ crtc->plane_ids_mask |= BIT(plane->id);
}
cursor = intel_cursor_plane_create(dev_priv, pipe);
@@ -15753,7 +16053,7 @@ static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
ret = PTR_ERR(cursor);
goto fail;
}
- intel_crtc->plane_ids_mask |= BIT(cursor->id);
+ crtc->plane_ids_mask |= BIT(cursor->id);
if (HAS_GMCH(dev_priv)) {
if (IS_CHERRYVIEW(dev_priv) ||
@@ -15774,42 +16074,32 @@ static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
funcs = &ilk_crtc_funcs;
}
- ret = drm_crtc_init_with_planes(&dev_priv->drm, &intel_crtc->base,
+ ret = drm_crtc_init_with_planes(&dev_priv->drm, &crtc->base,
&primary->base, &cursor->base,
funcs, "pipe %c", pipe_name(pipe));
if (ret)
goto fail;
- intel_crtc->pipe = pipe;
-
- /* initialize shared scalers */
- intel_crtc_init_scalers(intel_crtc, crtc_state);
-
BUG_ON(pipe >= ARRAY_SIZE(dev_priv->pipe_to_crtc_mapping) ||
dev_priv->pipe_to_crtc_mapping[pipe] != NULL);
- dev_priv->pipe_to_crtc_mapping[pipe] = intel_crtc;
+ dev_priv->pipe_to_crtc_mapping[pipe] = crtc;
if (INTEL_GEN(dev_priv) < 9) {
enum i9xx_plane_id i9xx_plane = primary->i9xx_plane;
BUG_ON(i9xx_plane >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
dev_priv->plane_to_crtc_mapping[i9xx_plane] != NULL);
- dev_priv->plane_to_crtc_mapping[i9xx_plane] = intel_crtc;
+ dev_priv->plane_to_crtc_mapping[i9xx_plane] = crtc;
}
- intel_color_init(intel_crtc);
+ intel_color_init(crtc);
- WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);
+ WARN_ON(drm_crtc_index(&crtc->base) != crtc->pipe);
return 0;
fail:
- /*
- * drm_mode_config_cleanup() will free up any
- * crtcs/planes already initialized.
- */
- kfree(crtc_state);
- kfree(intel_crtc);
+ intel_crtc_free(crtc);
return ret;
}
@@ -16310,26 +16600,23 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
}
stride_alignment = intel_fb_stride_alignment(fb, i);
-
- /*
- * Display WA #0531: skl,bxt,kbl,glk
- *
- * Render decompression and plane width > 3840
- * combined with horizontal panning requires the
- * plane stride to be a multiple of 4. We'll just
- * require the entire fb to accommodate that to avoid
- * potential runtime errors at plane configuration time.
- */
- if (IS_GEN(dev_priv, 9) && i == 0 && fb->width > 3840 &&
- is_ccs_modifier(fb->modifier))
- stride_alignment *= 4;
-
if (fb->pitches[i] & (stride_alignment - 1)) {
DRM_DEBUG_KMS("plane %d pitch (%d) must be at least %u byte aligned\n",
i, fb->pitches[i], stride_alignment);
goto err;
}
+ if (is_gen12_ccs_plane(fb, i)) {
+ int ccs_aux_stride = gen12_ccs_aux_stride(fb, i);
+
+ if (fb->pitches[i] != ccs_aux_stride) {
+ DRM_DEBUG_KMS("ccs aux plane %d pitch (%d) must be %d\n",
+ i,
+ fb->pitches[i], ccs_aux_stride);
+ goto err;
+ }
+ }
+
fb->obj[i] = &obj->base;
}
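The new gen12 check rejects CCS aux planes whose pitch deviates from the derived value; a sketch of the presumed relation, assuming 64 bytes of CCS data per 512-byte unit of the paired main plane's stride (the exact ratio is an assumption here):

static int sketch_gen12_ccs_aux_stride(const struct drm_framebuffer *fb,
				       int ccs_plane)
{
	/* each CCS plane shadows the main (color) plane just before it */
	int main_plane = ccs_plane - 1;

	return DIV_ROUND_UP(fb->pitches[main_plane], 512) * 64;
}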
@@ -16601,8 +16888,6 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
} else if (IS_IVYBRIDGE(dev_priv)) {
/* FIXME: detect B0+ stepping and use auto training */
dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
- } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
- dev_priv->display.fdi_link_train = hsw_fdi_link_train;
}
if (INTEL_GEN(dev_priv) >= 9)
@@ -16692,7 +16977,7 @@ retry:
/* Write calculated watermark values back */
for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
crtc_state->wm.need_postvbl_update = true;
- dev_priv->display.optimize_watermarks(intel_state, crtc_state);
+ dev_priv->display.optimize_watermarks(intel_state, crtc);
to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm;
}
@@ -16724,8 +17009,7 @@ static int intel_initial_commit(struct drm_device *dev)
{
struct drm_atomic_state *state = NULL;
struct drm_modeset_acquire_ctx ctx;
- struct drm_crtc *crtc;
- struct drm_crtc_state *crtc_state;
+ struct intel_crtc *crtc;
int ret = 0;
state = drm_atomic_state_alloc(dev);
@@ -16737,15 +17021,17 @@ static int intel_initial_commit(struct drm_device *dev)
retry:
state->acquire_ctx = &ctx;
- drm_for_each_crtc(crtc, dev) {
- crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ for_each_intel_crtc(dev, crtc) {
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_crtc_state(state, crtc);
+
if (IS_ERR(crtc_state)) {
ret = PTR_ERR(crtc_state);
goto out;
}
- if (crtc_state->active) {
- ret = drm_atomic_add_affected_planes(state, crtc);
+ if (crtc_state->hw.active) {
+ ret = drm_atomic_add_affected_planes(state, &crtc->base);
if (ret)
goto out;
@@ -16755,7 +17041,7 @@ retry:
* having a proper LUT loaded. Remove once we
* have readout for pipe gamma enable.
*/
- crtc_state->color_mgmt_changed = true;
+ crtc_state->uapi.color_mgmt_changed = true;
}
}
@@ -17078,31 +17364,76 @@ static bool has_pch_trancoder(struct drm_i915_private *dev_priv,
(HAS_PCH_LPT_H(dev_priv) && pch_transcoder == PIPE_A);
}
-static void intel_sanitize_crtc(struct intel_crtc *crtc,
- struct drm_modeset_acquire_ctx *ctx)
+static void intel_sanitize_frame_start_delay(const struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- /* Clear any frame start delays used for debugging left by the BIOS */
- if (crtc->active && !transcoder_is_dsi(cpu_transcoder)) {
+ if (INTEL_GEN(dev_priv) >= 9 ||
+ IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
+ i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder);
+ u32 val;
+
+ if (transcoder_is_dsi(cpu_transcoder))
+ return;
+
+ val = I915_READ(reg);
+ val &= ~HSW_FRAME_START_DELAY_MASK;
+ val |= HSW_FRAME_START_DELAY(0);
+ I915_WRITE(reg, val);
+ } else {
i915_reg_t reg = PIPECONF(cpu_transcoder);
+ u32 val;
+
+ val = I915_READ(reg);
+ val &= ~PIPECONF_FRAME_START_DELAY_MASK;
+ val |= PIPECONF_FRAME_START_DELAY(0);
+ I915_WRITE(reg, val);
+ }
+
+ if (!crtc_state->has_pch_encoder)
+ return;
+
+ if (HAS_PCH_IBX(dev_priv)) {
+ i915_reg_t reg = PCH_TRANSCONF(crtc->pipe);
+ u32 val;
+
+ val = I915_READ(reg);
+ val &= ~TRANS_FRAME_START_DELAY_MASK;
+ val |= TRANS_FRAME_START_DELAY(0);
+ I915_WRITE(reg, val);
+ } else {
+ enum pipe pch_transcoder = intel_crtc_pch_transcoder(crtc);
+ i915_reg_t reg = TRANS_CHICKEN2(pch_transcoder);
+ u32 val;
- I915_WRITE(reg,
- I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
+ val = I915_READ(reg);
+ val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
+ val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
+ I915_WRITE(reg, val);
}
+}
- if (crtc_state->base.active) {
+static void intel_sanitize_crtc(struct intel_crtc *crtc,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
+
+ if (crtc_state->hw.active) {
struct intel_plane *plane;
+ /* Clear any frame start delays used for debugging left by the BIOS */
+ intel_sanitize_frame_start_delay(crtc_state);
+
/* Disable everything but the primary plane */
for_each_intel_plane_on_crtc(dev, crtc, plane) {
const struct intel_plane_state *plane_state =
to_intel_plane_state(plane->base.state);
- if (plane_state->base.visible &&
+ if (plane_state->uapi.visible &&
plane->base.type != DRM_PLANE_TYPE_PRIMARY)
intel_plane_disable_noatomic(crtc, plane);
}
@@ -17119,10 +17450,10 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc,
/* Adjust the state of the output pipe according to whether we
* have active connectors/encoders. */
- if (crtc_state->base.active && !intel_crtc_has_encoders(crtc))
- intel_crtc_disable_noatomic(&crtc->base, ctx);
+ if (crtc_state->hw.active && !intel_crtc_has_encoders(crtc))
+ intel_crtc_disable_noatomic(crtc, ctx);
- if (crtc_state->base.active || HAS_GMCH(dev_priv)) {
+ if (crtc_state->hw.active || HAS_GMCH(dev_priv)) {
/*
* We start out with underrun reporting disabled to avoid races.
* For correct bookkeeping mark this on active crtcs.
@@ -17153,7 +17484,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc,
static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
/*
* Some SNB BIOSen (e.g. ASUS K53SV) are known to misprogram
@@ -17166,7 +17497,7 @@ static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
* road.
*/
return IS_GEN(dev_priv, 6) &&
- crtc_state->base.active &&
+ crtc_state->hw.active &&
crtc_state->shared_dpll &&
crtc_state->port_clock == 0;
}
@@ -17183,7 +17514,7 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
* encoder is active and trying to read from a pipe) and the
* pipe itself being active. */
bool has_active_crtc = crtc_state &&
- crtc_state->base.active;
+ crtc_state->hw.active;
if (crtc_state && has_bogus_dpll_config(crtc_state)) {
DRM_DEBUG_KMS("BIOS has misprogrammed the hardware. Disabling pipe %c\n",
@@ -17287,22 +17618,22 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
- __drm_atomic_helper_crtc_destroy_state(&crtc_state->base);
- memset(crtc_state, 0, sizeof(*crtc_state));
- __drm_atomic_helper_crtc_reset(&crtc->base, &crtc_state->base);
+ __drm_atomic_helper_crtc_destroy_state(&crtc_state->uapi);
+ intel_crtc_free_hw_state(crtc_state);
+ intel_crtc_state_reset(crtc_state, crtc);
- crtc_state->base.active = crtc_state->base.enable =
+ crtc_state->hw.active = crtc_state->hw.enable =
dev_priv->display.get_pipe_config(crtc, crtc_state);
- crtc->base.enabled = crtc_state->base.enable;
- crtc->active = crtc_state->base.active;
+ crtc->base.enabled = crtc_state->hw.enable;
+ crtc->active = crtc_state->hw.active;
- if (crtc_state->base.active)
+ if (crtc_state->hw.active)
dev_priv->active_pipes |= BIT(crtc->pipe);
DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n",
crtc->base.base.id, crtc->base.name,
- enableddisabled(crtc_state->base.active));
+ enableddisabled(crtc_state->hw.active));
}
readout_plane_state(dev_priv);
@@ -17324,7 +17655,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
- if (crtc_state->base.active &&
+ if (crtc_state->hw.active &&
crtc_state->shared_dpll == pll)
pll->state.crtc_mask |= 1 << crtc->pipe;
}
@@ -17369,15 +17700,15 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
crtc = to_intel_crtc(encoder->base.crtc);
crtc_state = crtc ? to_intel_crtc_state(crtc->base.state) : NULL;
- if (crtc_state && crtc_state->base.active) {
+ if (crtc_state && crtc_state->hw.active) {
/*
* This has to be done during hardware readout
* because anything calling .crtc_disable may
* rely on the connector_mask being accurate.
*/
- crtc_state->base.connector_mask |=
+ crtc_state->uapi.connector_mask |=
drm_connector_mask(&connector->base);
- crtc_state->base.encoder_mask |=
+ crtc_state->uapi.encoder_mask |=
drm_encoder_mask(&encoder->base);
}
} else {
@@ -17398,16 +17729,15 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
struct intel_plane *plane;
int min_cdclk = 0;
- if (crtc_state->base.active) {
- struct drm_display_mode mode;
+ if (crtc_state->hw.active) {
+ struct drm_display_mode *mode = &crtc_state->hw.mode;
- intel_mode_from_pipe_config(&crtc_state->base.adjusted_mode,
+ intel_mode_from_pipe_config(&crtc_state->hw.adjusted_mode,
crtc_state);
- mode = crtc_state->base.adjusted_mode;
- mode.hdisplay = crtc_state->pipe_src_w;
- mode.vdisplay = crtc_state->pipe_src_h;
- WARN_ON(drm_atomic_set_mode_for_crtc(&crtc_state->base, &mode));
+ *mode = crtc_state->hw.adjusted_mode;
+ mode->hdisplay = crtc_state->pipe_src_w;
+ mode->vdisplay = crtc_state->pipe_src_h;
/*
* The initial mode needs to be set in order to keep
@@ -17418,11 +17748,13 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
* set a flag to indicate that a full recalculation is
* needed on the next commit.
*/
- crtc_state->base.mode.private_flags = I915_MODE_FLAG_INHERITED;
+ mode->private_flags = I915_MODE_FLAG_INHERITED;
intel_crtc_compute_pixel_rate(crtc_state);
intel_crtc_update_active_timings(crtc_state);
+
+ intel_crtc_copy_hw_to_uapi_state(crtc_state);
}
for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
@@ -17433,14 +17765,14 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
* FIXME don't have the fb yet, so can't
* use intel_plane_data_rate() :(
*/
- if (plane_state->base.visible)
+ if (plane_state->uapi.visible)
crtc_state->data_rate[plane->id] =
4 * crtc_state->pixel_rate;
/*
* FIXME don't have the fb yet, so can't
* use plane->min_cdclk() :(
*/
- if (plane_state->base.visible && plane->min_cdclk) {
+ if (plane_state->uapi.visible && plane->min_cdclk) {
if (crtc_state->double_wide ||
INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
crtc_state->min_cdclk[plane->id] =
@@ -17454,7 +17786,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
crtc_state->min_cdclk[plane->id]);
}
- if (crtc_state->base.active) {
+ if (crtc_state->hw.active) {
min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
if (WARN_ON(min_cdclk < 0))
min_cdclk = 0;
@@ -17577,7 +17909,6 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
struct drm_modeset_acquire_ctx *ctx)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc_state *crtc_state;
struct intel_encoder *encoder;
struct intel_crtc *crtc;
intel_wakeref_t wakeref;
@@ -17610,11 +17941,12 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
* waits, so we need vblank interrupts restored beforehand.
*/
for_each_intel_crtc(&dev_priv->drm, crtc) {
- crtc_state = to_intel_crtc_state(crtc->base.state);
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
drm_crtc_vblank_reset(&crtc->base);
- if (crtc_state->base.active)
+ if (crtc_state->hw.active)
intel_crtc_vblank_on(crtc_state);
}
@@ -17624,7 +17956,9 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
intel_sanitize_encoder(encoder);
for_each_intel_crtc(&dev_priv->drm, crtc) {
- crtc_state = to_intel_crtc_state(crtc->base.state);
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+
intel_sanitize_crtc(crtc, ctx);
intel_dump_pipe_config(crtc_state, NULL, "[setup_hw_state]");
}
@@ -17657,17 +17991,16 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
}
for_each_intel_crtc(dev, crtc) {
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
u64 put_domains;
- crtc_state = to_intel_crtc_state(crtc->base.state);
put_domains = modeset_get_crtc_power_domains(crtc_state);
if (WARN_ON(put_domains))
modeset_put_power_domains(dev_priv, put_domains);
}
intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
-
- intel_fbc_init_pipe_state(dev_priv);
}
void intel_display_resume(struct drm_device *dev)
@@ -17743,6 +18076,13 @@ void intel_modeset_driver_remove(struct drm_i915_private *i915)
*/
intel_hpd_poll_fini(i915);
+ /*
+ * MST topology needs to be suspended so we don't have any calls to
+ * fbdev after it's finalized. MST will be destroyed later as part of
+ * drm_mode_config_cleanup()
+ */
+ intel_dp_mst_suspend(i915);
+
/* poll work can call into fbdev, hence clean that up afterwards */
intel_fbdev_fini(i915);
diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
index f417e0948001..0fef9263cddc 100644
--- a/drivers/gpu/drm/i915/display/intel_display.h
+++ b/drivers/gpu/drm/i915/display/intel_display.h
@@ -332,8 +332,11 @@ enum phy_fia {
(__s) < RUNTIME_INFO(__dev_priv)->num_sprites[(__p)]; \
(__s)++)
-#define for_each_port_masked(__port, __ports_mask) \
- for ((__port) = PORT_A; (__port) < I915_MAX_PORTS; (__port)++) \
+#define for_each_port(__port) \
+ for ((__port) = PORT_A; (__port) < I915_MAX_PORTS; (__port)++)
+
+#define for_each_port_masked(__port, __ports_mask) \
+ for_each_port(__port) \
for_each_if((__ports_mask) & BIT(__port))
#define for_each_phy_masked(__phy, __phys_mask) \
@@ -377,6 +380,13 @@ enum phy_fia {
&(dev)->mode_config.encoder_list, \
base.head)
+#define for_each_intel_encoder_mask(dev, intel_encoder, encoder_mask) \
+ list_for_each_entry(intel_encoder, \
+ &(dev)->mode_config.encoder_list, \
+ base.head) \
+ for_each_if((encoder_mask) & \
+ drm_encoder_mask(&intel_encoder->base))
+
#define for_each_intel_dp(dev, intel_encoder) \
for_each_intel_encoder(dev, intel_encoder) \
for_each_if(intel_encoder_is_dp(intel_encoder))
@@ -446,10 +456,18 @@ enum phy_fia {
#define intel_atomic_crtc_state_for_each_plane_state( \
plane, plane_state, \
crtc_state) \
- for_each_intel_plane_mask(((crtc_state)->base.state->dev), (plane), \
- ((crtc_state)->base.plane_mask)) \
+ for_each_intel_plane_mask(((crtc_state)->uapi.state->dev), (plane), \
+ ((crtc_state)->uapi.plane_mask)) \
for_each_if ((plane_state = \
- to_intel_plane_state(__drm_atomic_get_current_plane_state((crtc_state)->base.state, &plane->base))))
+ to_intel_plane_state(__drm_atomic_get_current_plane_state((crtc_state)->uapi.state, &plane->base))))
+
+#define for_each_new_intel_connector_in_state(__state, connector, new_connector_state, __i) \
+ for ((__i) = 0; \
+ (__i) < (__state)->base.num_connector; \
+ (__i)++) \
+ for_each_if ((__state)->base.connectors[__i].ptr && \
+ ((connector) = to_intel_connector((__state)->base.connectors[__i].ptr), \
+ (new_connector_state) = to_intel_digital_connector_state((__state)->base.connectors[__i].new_state), 1))
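A hypothetical usage sketch of the connector iterator defined above; the function and field choices are illustrative only:

static void example_walk_connectors(struct intel_atomic_state *state,
				    struct intel_encoder *encoder)
{
	struct intel_digital_connector_state *conn_state;
	struct intel_connector *connector;
	int i;

	for_each_new_intel_connector_in_state(state, connector, conn_state, i) {
		if (conn_state->base.best_encoder != &encoder->base)
			continue;

		/* this connector's new state targets our encoder */
	}
}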
void intel_link_compute_m_n(u16 bpp, int nlanes,
int pixel_clock, int link_clock,
@@ -467,6 +485,7 @@ enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port);
bool is_trans_port_sync_mode(const struct intel_crtc_state *state);
void intel_plane_destroy(struct drm_plane *plane);
+void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state);
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe);
void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe);
enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc);
@@ -499,9 +518,8 @@ enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv,
enum port port);
int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
- enum pipe pipe);
u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc);
+void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state);
int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
@@ -547,7 +565,6 @@ bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
struct dpll *best_clock);
int chv_calc_dpll_params(int refclk, struct dpll *pll_clock);
-bool intel_crtc_active(struct intel_crtc *crtc);
bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state);
void hsw_enable_ips(const struct intel_crtc_state *crtc_state);
void hsw_disable_ips(const struct intel_crtc_state *crtc_state);
@@ -561,6 +578,8 @@ void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_center);
int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
+void skylake_scaler_disable(const struct intel_crtc_state *old_crtc_state);
+void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state);
u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state);
u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state);
@@ -582,6 +601,10 @@ intel_display_capture_error_state(struct drm_i915_private *dev_priv);
void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
struct intel_display_error_state *error);
+bool
+intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info,
+ uint64_t modifier);
+
/* modesetting */
void intel_modeset_init_hw(struct drm_i915_private *i915);
int intel_modeset_init(struct drm_i915_private *i915);
@@ -603,9 +626,10 @@ void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
enum pipe pipe, bool state);
#define assert_fdi_rx_pll_enabled(d, p) assert_fdi_rx_pll(d, p, true)
#define assert_fdi_rx_pll_disabled(d, p) assert_fdi_rx_pll(d, p, false)
-void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, bool state);
-#define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
-#define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
+void assert_pipe(struct drm_i915_private *dev_priv,
+ enum transcoder cpu_transcoder, bool state);
+#define assert_pipe_enabled(d, t) assert_pipe(d, t, true)
+#define assert_pipe_disabled(d, t) assert_pipe(d, t, false)
/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
* WARN_ON()) for hw state sanity checks to check for unexpected conditions
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index ce1b64f4dd44..679457156797 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -418,7 +418,8 @@ icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
int pw_idx = power_well->desc->hsw.idx;
enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx);
u32 val;
- int wa_idx_max;
+
+ WARN_ON(!IS_ICELAKE(dev_priv));
val = I915_READ(regs->driver);
I915_WRITE(regs->driver, val | HSW_PWR_WELL_CTL_REQ(pw_idx));
@@ -430,14 +431,8 @@ icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
hsw_wait_for_power_well_enable(dev_priv, power_well);
- /* Display WA #1178: icl, tgl */
- if (IS_TIGERLAKE(dev_priv))
- wa_idx_max = ICL_PW_CTL_IDX_AUX_C;
- else
- wa_idx_max = ICL_PW_CTL_IDX_AUX_B;
-
- if (!IS_ELKHARTLAKE(dev_priv) &&
- pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= wa_idx_max &&
+ /* Display WA #1178: icl */
+ if (pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B &&
!intel_bios_is_port_edp(dev_priv, (enum port)phy)) {
val = I915_READ(ICL_AUX_ANAOVRD1(pw_idx));
val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS;
@@ -454,10 +449,10 @@ icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx);
u32 val;
- if (INTEL_GEN(dev_priv) < 12) {
- val = I915_READ(ICL_PORT_CL_DW12(phy));
- I915_WRITE(ICL_PORT_CL_DW12(phy), val & ~ICL_LANE_ENABLE_AUX);
- }
+ WARN_ON(!IS_ICELAKE(dev_priv));
+
+ val = I915_READ(ICL_PORT_CL_DW12(phy));
+ I915_WRITE(ICL_PORT_CL_DW12(phy), val & ~ICL_LANE_ENABLE_AUX);
val = I915_READ(regs->driver);
I915_WRITE(regs->driver, val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
@@ -3688,6 +3683,151 @@ static const struct i915_power_well_desc icl_power_wells[] = {
},
};
+static const struct i915_power_well_desc ehl_power_wells[] = {
+ {
+ .name = "always-on",
+ .always_on = true,
+ .domains = POWER_DOMAIN_MASK,
+ .ops = &i9xx_always_on_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ },
+ {
+ .name = "power well 1",
+ /* Handled by the DMC firmware */
+ .always_on = true,
+ .domains = 0,
+ .ops = &hsw_power_well_ops,
+ .id = SKL_DISP_PW_1,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_PW_1,
+ .hsw.has_fuses = true,
+ },
+ },
+ {
+ .name = "DC off",
+ .domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS,
+ .ops = &gen9_dc_off_power_well_ops,
+ .id = SKL_DISP_DC_OFF,
+ },
+ {
+ .name = "power well 2",
+ .domains = ICL_PW_2_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = SKL_DISP_PW_2,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_PW_2,
+ .hsw.has_fuses = true,
+ },
+ },
+ {
+ .name = "power well 3",
+ .domains = ICL_PW_3_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_PW_3,
+ .hsw.irq_pipe_mask = BIT(PIPE_B),
+ .hsw.has_vga = true,
+ .hsw.has_fuses = true,
+ },
+ },
+ {
+ .name = "DDI A IO",
+ .domains = ICL_DDI_IO_A_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_ddi_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_DDI_A,
+ },
+ },
+ {
+ .name = "DDI B IO",
+ .domains = ICL_DDI_IO_B_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_ddi_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_DDI_B,
+ },
+ },
+ {
+ .name = "DDI C IO",
+ .domains = ICL_DDI_IO_C_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_ddi_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_DDI_C,
+ },
+ },
+ {
+ .name = "DDI D IO",
+ .domains = ICL_DDI_IO_D_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_ddi_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_DDI_D,
+ },
+ },
+ {
+ .name = "AUX A",
+ .domains = ICL_AUX_A_IO_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_AUX_A,
+ },
+ },
+ {
+ .name = "AUX B",
+ .domains = ICL_AUX_B_IO_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_AUX_B,
+ },
+ },
+ {
+ .name = "AUX C",
+ .domains = ICL_AUX_C_TC1_IO_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_AUX_C,
+ },
+ },
+ {
+ .name = "AUX D",
+ .domains = ICL_AUX_D_TC2_IO_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_AUX_D,
+ },
+ },
+ {
+ .name = "power well 4",
+ .domains = ICL_PW_4_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_PW_4,
+ .hsw.has_fuses = true,
+ .hsw.irq_pipe_mask = BIT(PIPE_C),
+ },
+ },
+};
+
static const struct i915_power_well_desc tgl_power_wells[] = {
{
.name = "always-on",
@@ -3832,7 +3972,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
{
.name = "AUX A",
.domains = TGL_AUX_A_IO_POWER_DOMAINS,
- .ops = &icl_combo_phy_aux_power_well_ops,
+ .ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
@@ -3842,7 +3982,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
{
.name = "AUX B",
.domains = TGL_AUX_B_IO_POWER_DOMAINS,
- .ops = &icl_combo_phy_aux_power_well_ops,
+ .ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
@@ -3852,7 +3992,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
{
.name = "AUX C",
.domains = TGL_AUX_C_IO_POWER_DOMAINS,
- .ops = &icl_combo_phy_aux_power_well_ops,
+ .ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
@@ -4162,6 +4302,8 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
*/
if (IS_GEN(dev_priv, 12)) {
err = set_power_wells(power_domains, tgl_power_wells);
+ } else if (IS_ELKHARTLAKE(dev_priv)) {
+ err = set_power_wells(power_domains, ehl_power_wells);
} else if (IS_GEN(dev_priv, 11)) {
err = set_power_wells(power_domains, icl_power_wells);
} else if (IS_CANNONLAKE(dev_priv)) {
@@ -4781,6 +4923,56 @@ static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
intel_combo_phy_uninit(dev_priv);
}
+struct buddy_page_mask {
+ u32 page_mask;
+ u8 type;
+ u8 num_channels;
+};
+
+static const struct buddy_page_mask tgl_buddy_page_masks[] = {
+ { .num_channels = 1, .type = INTEL_DRAM_LPDDR4, .page_mask = 0xE },
+ { .num_channels = 1, .type = INTEL_DRAM_DDR4, .page_mask = 0xF },
+ { .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1C },
+ { .num_channels = 2, .type = INTEL_DRAM_DDR4, .page_mask = 0x1F },
+ {}
+};
+
+static const struct buddy_page_mask wa_1409767108_buddy_page_masks[] = {
+ { .num_channels = 1, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1 },
+ { .num_channels = 1, .type = INTEL_DRAM_DDR4, .page_mask = 0x1 },
+ { .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x3 },
+ { .num_channels = 2, .type = INTEL_DRAM_DDR4, .page_mask = 0x3 },
+ {}
+};
+
+static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv)
+{
+ enum intel_dram_type type = dev_priv->dram_info.type;
+ u8 num_channels = dev_priv->dram_info.num_channels;
+ const struct buddy_page_mask *table;
+ int i;
+
+ if (IS_TGL_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_A0))
+ /* Wa_1409767108: tgl */
+ table = wa_1409767108_buddy_page_masks;
+ else
+ table = tgl_buddy_page_masks;
+
+ for (i = 0; table[i].page_mask != 0; i++)
+ if (table[i].num_channels == num_channels &&
+ table[i].type == type)
+ break;
+
+ if (table[i].page_mask == 0) {
+ DRM_DEBUG_DRIVER("Unknown memory configuration; disabling address buddy logic.\n");
+ I915_WRITE(BW_BUDDY1_CTL, BW_BUDDY_DISABLE);
+ I915_WRITE(BW_BUDDY2_CTL, BW_BUDDY_DISABLE);
+ } else {
+ I915_WRITE(BW_BUDDY1_PAGE_MASK, table[i].page_mask);
+ I915_WRITE(BW_BUDDY2_PAGE_MASK, table[i].page_mask);
+ }
+}
+
static void icl_display_core_init(struct drm_i915_private *dev_priv,
bool resume)
{
@@ -4813,6 +5005,10 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv,
/* 6. Setup MBUS. */
icl_mbus_init(dev_priv);
+ /* 7. Program arbiter BW_BUDDY registers */
+ if (INTEL_GEN(dev_priv) >= 12)
+ tgl_bw_buddy_init(dev_priv);
+
if (resume && dev_priv->csr.dmc_payload)
intel_csr_load_program(dev_priv);
}
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h
index 1da04f3e0fb3..2608a65af7fa 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.h
+++ b/drivers/gpu/drm/i915/display/intel_display_power.h
@@ -28,7 +28,7 @@ enum intel_display_power_domain {
POWER_DOMAIN_TRANSCODER_C,
POWER_DOMAIN_TRANSCODER_D,
POWER_DOMAIN_TRANSCODER_EDP,
- /* VDSC/joining for TRANSCODER_EDP (ICL) or TRANSCODER_A (TGL) */
+ /* VDSC/joining for eDP/DSI transcoder (ICL) or pipe A (TGL) */
POWER_DOMAIN_TRANSCODER_VDSC_PW2,
POWER_DOMAIN_TRANSCODER_DSI_A,
POWER_DOMAIN_TRANSCODER_DSI_C,
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index 1a7334dbe802..83ea04149b77 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -523,7 +523,24 @@ struct intel_atomic_state {
};
struct intel_plane_state {
- struct drm_plane_state base;
+ struct drm_plane_state uapi;
+
+ /*
+ * actual hardware state, the state we program to the hardware.
+ * The following members are used to verify the hardware state:
+ * - During initial hw readout, they need to be copied from uapi.
+ */
+ struct {
+ struct drm_crtc *crtc;
+ struct drm_framebuffer *fb;
+
+ u16 alpha;
+ uint16_t pixel_blend_mode;
+ unsigned int rotation;
+ enum drm_color_encoding color_encoding;
+ enum drm_color_range color_range;
+ } hw;
+
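The duplicated members above imply a copy step when the uapi state is committed; a sketch along the lines of the helper this series adds (the name here is illustrative):

static void copy_uapi_to_hw_state(struct intel_plane_state *plane_state)
{
	plane_state->hw.crtc = plane_state->uapi.crtc;
	plane_state->hw.fb = plane_state->uapi.fb;
	plane_state->hw.alpha = plane_state->uapi.alpha;
	plane_state->hw.pixel_blend_mode = plane_state->uapi.pixel_blend_mode;
	plane_state->hw.rotation = plane_state->uapi.rotation;
	plane_state->hw.color_encoding = plane_state->uapi.color_encoding;
	plane_state->hw.color_range = plane_state->uapi.color_range;
}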
struct i915_ggtt_view view;
struct i915_vma *vma;
unsigned long flags;
@@ -546,6 +563,9 @@ struct intel_plane_state {
/* plane color control register */
u32 color_ctl;
+ /* chroma upsampler control register */
+ u32 cus_ctl;
+
/*
* scaler_id
* = -1 : not using a scaler
@@ -757,7 +777,33 @@ enum intel_output_format {
};
struct intel_crtc_state {
- struct drm_crtc_state base;
+ /*
+ * uapi (drm) state. This is the software state shown to userspace.
+ * In particular, the following members are used for bookkeeping:
+ * - crtc
+ * - state
+ * - *_changed
+ * - event
+ * - commit
+ * - mode_blob
+ */
+ struct drm_crtc_state uapi;
+
+ /*
+ * actual hardware state, the state we program to the hardware.
+ * The following members are used to verify the hardware state:
+ * - enable
+ * - active
+ * - mode / adjusted_mode
+ * - color property blobs.
+ *
+ * During initial hw readout, they need to be copied to uapi.
+ */
+ struct {
+ bool active, enable;
+ struct drm_property_blob *degamma_lut, *gamma_lut, *ctm;
+ struct drm_display_mode mode, adjusted_mode;
+ } hw;
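Per the comment, readout flows the other way (hw -> uapi); a sketch in the spirit of the intel_crtc_copy_hw_to_uapi_state() call this patch adds, assuming the blob members are moved with reference counting:

static void copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state)
{
	crtc_state->uapi.enable = crtc_state->hw.enable;
	crtc_state->uapi.active = crtc_state->hw.active;
	crtc_state->uapi.mode = crtc_state->hw.mode;
	crtc_state->uapi.adjusted_mode = crtc_state->hw.adjusted_mode;

	/* color blobs: drop the old uapi reference, take a new one */
	drm_property_replace_blob(&crtc_state->uapi.degamma_lut,
				  crtc_state->hw.degamma_lut);
	drm_property_replace_blob(&crtc_state->uapi.gamma_lut,
				  crtc_state->hw.gamma_lut);
	drm_property_replace_blob(&crtc_state->uapi.ctm,
				  crtc_state->hw.ctm);
}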
/**
* quirks - bitfield with hw state readout quirks
@@ -1080,9 +1126,6 @@ struct intel_plane {
void (*update_plane)(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state);
- void (*update_slave)(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state);
void (*disable_plane)(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state);
bool (*get_hw_state)(struct intel_plane *plane, enum pipe *pipe);
@@ -1113,12 +1156,12 @@ struct cxsr_latency {
#define to_intel_atomic_state(x) container_of(x, struct intel_atomic_state, base)
#define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
-#define to_intel_crtc_state(x) container_of(x, struct intel_crtc_state, base)
+#define to_intel_crtc_state(x) container_of(x, struct intel_crtc_state, uapi)
#define to_intel_connector(x) container_of(x, struct intel_connector, base)
#define to_intel_encoder(x) container_of(x, struct intel_encoder, base)
#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
#define to_intel_plane(x) container_of(x, struct intel_plane, base)
-#define to_intel_plane_state(x) container_of(x, struct intel_plane_state, base)
+#define to_intel_plane_state(x) container_of(x, struct intel_plane_state, uapi)
#define intel_fb_obj(x) ((x) ? to_intel_bo((x)->obj[0]) : NULL)
struct intel_hdmi {
@@ -1528,6 +1571,24 @@ intel_atomic_get_new_crtc_state(struct intel_atomic_state *state,
&crtc->base));
}
+static inline struct intel_digital_connector_state *
+intel_atomic_get_new_connector_state(struct intel_atomic_state *state,
+ struct intel_connector *connector)
+{
+ return to_intel_digital_connector_state(
+ drm_atomic_get_new_connector_state(&state->base,
+ &connector->base));
+}
+
+static inline struct intel_digital_connector_state *
+intel_atomic_get_old_connector_state(struct intel_atomic_state *state,
+ struct intel_connector *connector)
+{
+ return to_intel_digital_connector_state(
+ drm_atomic_get_old_connector_state(&state->base,
+ &connector->base));
+}
+
/* intel_display.c */
static inline bool
intel_crtc_has_type(const struct intel_crtc_state *crtc_state,
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 7cbdac14d34b..2f31d226c6eb 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -1889,32 +1889,15 @@ static bool intel_dp_supports_fec(struct intel_dp *intel_dp,
drm_dp_sink_supports_fec(intel_dp->fec_capable);
}
-static bool intel_dp_source_supports_dsc(struct intel_dp *intel_dp,
- const struct intel_crtc_state *pipe_config)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-
- if (!INTEL_INFO(dev_priv)->display.has_dsc)
- return false;
-
- /* On TGL, DSC is supported on all Pipes */
- if (INTEL_GEN(dev_priv) >= 12)
- return true;
-
- if (INTEL_GEN(dev_priv) >= 10 &&
- pipe_config->cpu_transcoder != TRANSCODER_A)
- return true;
-
- return false;
-}
-
static bool intel_dp_supports_dsc(struct intel_dp *intel_dp,
- const struct intel_crtc_state *pipe_config)
+ const struct intel_crtc_state *crtc_state)
{
- if (!intel_dp_is_edp(intel_dp) && !pipe_config->fec_enable)
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+
+ if (!intel_dp_is_edp(intel_dp) && !crtc_state->fec_enable)
return false;
- return intel_dp_source_supports_dsc(intel_dp, pipe_config) &&
+ return intel_dsc_source_support(encoder, crtc_state) &&
drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd);
}
@@ -1999,7 +1982,7 @@ intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
struct intel_crtc_state *pipe_config,
const struct link_config_limits *limits)
{
- struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+ struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
int bpp, clock, lane_count;
int mode_rate, link_clock, link_avail;
@@ -2046,6 +2029,63 @@ static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc)
return 0;
}
+#define DSC_SUPPORTED_VERSION_MIN 1
+
+static int intel_dp_dsc_compute_params(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
+ u8 line_buf_depth;
+ int ret;
+
+ ret = intel_dsc_compute_params(encoder, crtc_state);
+ if (ret)
+ return ret;
+
+ /*
+ * Slice Height of 8 works for all currently available panels. So start
+ * with that if pic_height is an integral multiple of 8. Eventually add
+ * logic to try multiple slice heights.
+ */
+ if (vdsc_cfg->pic_height % 8 == 0)
+ vdsc_cfg->slice_height = 8;
+ else if (vdsc_cfg->pic_height % 4 == 0)
+ vdsc_cfg->slice_height = 4;
+ else
+ vdsc_cfg->slice_height = 2;
+
+ vdsc_cfg->dsc_version_major =
+ (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] &
+ DP_DSC_MAJOR_MASK) >> DP_DSC_MAJOR_SHIFT;
+ vdsc_cfg->dsc_version_minor =
+ min(DSC_SUPPORTED_VERSION_MIN,
+ (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] &
+ DP_DSC_MINOR_MASK) >> DP_DSC_MINOR_SHIFT);
+
+ vdsc_cfg->convert_rgb = intel_dp->dsc_dpcd[DP_DSC_DEC_COLOR_FORMAT_CAP - DP_DSC_SUPPORT] &
+ DP_DSC_RGB;
+
+ line_buf_depth = drm_dp_dsc_sink_line_buf_depth(intel_dp->dsc_dpcd);
+ if (!line_buf_depth) {
+ DRM_DEBUG_KMS("DSC Sink Line Buffer Depth invalid\n");
+ return -EINVAL;
+ }
+
+ if (vdsc_cfg->dsc_version_minor == 2)
+ vdsc_cfg->line_buf_depth = (line_buf_depth == DSC_1_2_MAX_LINEBUF_DEPTH_BITS) ?
+ DSC_1_2_MAX_LINEBUF_DEPTH_VAL : line_buf_depth;
+ else
+ vdsc_cfg->line_buf_depth = (line_buf_depth > DSC_1_1_MAX_LINEBUF_DEPTH_BITS) ?
+ DSC_1_1_MAX_LINEBUF_DEPTH_BITS : line_buf_depth;
+
+ vdsc_cfg->block_pred_enable =
+ intel_dp->dsc_dpcd[DP_DSC_BLK_PREDICTION_SUPPORT - DP_DSC_SUPPORT] &
+ DP_DSC_BLK_PREDICTION_IS_SUPPORTED;
+
+ return drm_dsc_compute_rc_parameters(vdsc_cfg);
+}
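As a worked example of the slice height pick above: a 3840x2160 mode has pic_height 2160, and 2160 % 8 == 0, so slice_height becomes 8 (270 slice rows); a 1050-line panel fails both the % 8 and % 4 tests and falls back to slice_height 2. For the line buffer depth, a DSC 1.1 sink is clamped to DSC_1_1_MAX_LINEBUF_DEPTH_BITS, while a 1.2 sink reporting the 1.2 maximum is mapped to the special DSC_1_2_MAX_LINEBUF_DEPTH_VAL encoding.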
+
static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state,
@@ -2053,7 +2093,7 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
- struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+ struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
u8 dsc_max_bpc;
int pipe_bpp;
int ret;
@@ -2132,7 +2172,7 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
}
}
- ret = intel_dp_compute_dsc_params(intel_dp, pipe_config);
+ ret = intel_dp_dsc_compute_params(&dig_port->base, pipe_config);
if (ret < 0) {
DRM_DEBUG_KMS("Cannot compute valid DSC parameters for Input Bpp = %d "
"Compressed BPP = %d\n",
@@ -2164,7 +2204,7 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
- struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+ struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct link_config_limits limits;
int common_len;
@@ -2252,8 +2292,8 @@ intel_dp_ycbcr420_config(struct intel_dp *intel_dp,
{
const struct drm_display_info *info = &connector->display_info;
const struct drm_display_mode *adjusted_mode =
- &crtc_state->base.adjusted_mode;
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ &crtc_state->hw.adjusted_mode;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
int ret;
if (!drm_mode_is_420_only(info, adjusted_mode) ||
@@ -2281,7 +2321,7 @@ bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state,
const struct intel_digital_connector_state *intel_conn_state =
to_intel_digital_connector_state(conn_state);
const struct drm_display_mode *adjusted_mode =
- &crtc_state->base.adjusted_mode;
+ &crtc_state->hw.adjusted_mode;
/*
* Our YCbCr output is always limited range.
@@ -2308,17 +2348,28 @@ bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state,
}
}
+static bool intel_dp_port_has_audio(struct drm_i915_private *dev_priv,
+ enum port port)
+{
+ if (IS_G4X(dev_priv))
+ return false;
+ if (INTEL_GEN(dev_priv) < 12 && port == PORT_A)
+ return false;
+
+ return true;
+}
+
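
The only behavioral change relative to the old "IS_G4X(dev_priv) || port == PORT_A" check is that port A now reports audio on Gen 12+. A runnable restatement with plain stand-in parameters (hypothetical helper, not the kernel's):

	#include <assert.h>
	#include <stdbool.h>

	enum port { PORT_A, PORT_B };

	static bool port_has_audio(bool is_g4x, int gen, enum port port)
	{
		if (is_g4x)
			return false;
		if (gen < 12 && port == PORT_A)
			return false;
		return true;
	}

	int main(void)
	{
		assert(!port_has_audio(true, 4, PORT_B));   /* G4X: never */
		assert(!port_has_audio(false, 11, PORT_A)); /* ICL port A: still no */
		assert(port_has_audio(false, 12, PORT_A));  /* TGL+ port A: new */
		assert(port_has_audio(false, 9, PORT_B));   /* other ports unchanged */
		return 0;
	}
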
int
intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+ struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct intel_lspcon *lspcon = enc_to_intel_lspcon(&encoder->base);
enum port port = encoder->port;
- struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc);
struct intel_connector *intel_connector = intel_dp->attached_connector;
struct intel_digital_connector_state *intel_conn_state =
to_intel_digital_connector_state(conn_state);
@@ -2341,7 +2392,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
return ret;
pipe_config->has_drrs = false;
- if (IS_G4X(dev_priv) || port == PORT_A)
+ if (!intel_dp_port_has_audio(dev_priv, port))
pipe_config->has_audio = false;
else if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
pipe_config->has_audio = intel_dp->has_audio;
@@ -2433,8 +2484,8 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
enum port port = encoder->port;
- struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
- const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
+ const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
intel_dp_set_link_params(intel_dp, pipe_config->port_clock,
pipe_config->lane_count,
@@ -3031,7 +3082,7 @@ static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
static void ironlake_edp_pll_on(struct intel_dp *intel_dp,
const struct intel_crtc_state *pipe_config)
{
- struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
assert_pipe_disabled(dev_priv, crtc->pipe);
@@ -3071,7 +3122,7 @@ static void ironlake_edp_pll_on(struct intel_dp *intel_dp,
static void ironlake_edp_pll_off(struct intel_dp *intel_dp,
const struct intel_crtc_state *old_crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
assert_pipe_disabled(dev_priv, crtc->pipe);
@@ -3231,7 +3282,7 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
u32 tmp, flags = 0;
enum port port = encoder->port;
- struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
if (encoder->type == INTEL_OUTPUT_EDP)
pipe_config->output_types |= BIT(INTEL_OUTPUT_EDP);
@@ -3266,7 +3317,7 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
flags |= DRM_MODE_FLAG_NVSYNC;
}
- pipe_config->base.adjusted_mode.flags |= flags;
+ pipe_config->hw.adjusted_mode.flags |= flags;
if (IS_G4X(dev_priv) && tmp & DP_COLOR_RANGE_16_235)
pipe_config->limited_color_range = true;
@@ -3283,7 +3334,7 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
pipe_config->port_clock = 270000;
}
- pipe_config->base.adjusted_mode.crtc_clock =
+ pipe_config->hw.adjusted_mode.crtc_clock =
intel_dotclock_calculate(pipe_config->port_clock,
&pipe_config->dp_m_n);
@@ -3498,7 +3549,7 @@ static void intel_enable_dp(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
- struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
u32 dp_reg = I915_READ(intel_dp->output_reg);
enum pipe pipe = crtc->pipe;
intel_wakeref_t wakeref;
@@ -3631,7 +3682,7 @@ static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
lockdep_assert_held(&dev_priv->pps_mutex);
@@ -4153,7 +4204,7 @@ intel_dp_link_down(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
- struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
enum port port = encoder->port;
u32 DP = intel_dp->DP;
@@ -5076,7 +5127,7 @@ int intel_dp_retrain_link(struct intel_encoder *encoder,
WARN_ON(!intel_crtc_has_dp_encoder(crtc_state));
- if (!crtc_state->base.active)
+ if (!crtc_state->hw.active)
return 0;
if (conn_state->commit &&
@@ -5482,7 +5533,7 @@ static bool intel_combo_phy_connected(struct drm_i915_private *dev_priv,
return I915_READ(SDEISR) & SDE_DDI_HOTPLUG_ICP(phy);
}
-static bool icl_digital_port_connected(struct intel_encoder *encoder)
+static bool icp_digital_port_connected(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
@@ -5520,9 +5571,9 @@ static bool __intel_digital_port_connected(struct intel_encoder *encoder)
return g4x_digital_port_connected(encoder);
}
- if (INTEL_GEN(dev_priv) >= 11)
- return icl_digital_port_connected(encoder);
- else if (IS_GEN(dev_priv, 10) || IS_GEN9_BC(dev_priv))
+ if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
+ return icp_digital_port_connected(encoder);
+ else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
return spt_digital_port_connected(encoder);
else if (IS_GEN9_LP(dev_priv))
return bxt_digital_port_connected(encoder);
@@ -6909,7 +6960,7 @@ static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
int refresh_rate)
{
struct intel_dp *intel_dp = dev_priv->drrs.dp;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
if (refresh_rate <= 0) {
@@ -6942,7 +6993,7 @@ static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
return;
}
- if (!crtc_state->base.active) {
+ if (!crtc_state->hw.active) {
DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
return;
}
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 03d1cba0b696..7aa0975c33b7 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -42,13 +42,13 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
struct drm_connector_state *conn_state,
struct link_config_limits *limits)
{
- struct drm_atomic_state *state = crtc_state->base.state;
+ struct drm_atomic_state *state = crtc_state->uapi.state;
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
struct intel_dp *intel_dp = &intel_mst->primary->dp;
struct intel_connector *connector =
to_intel_connector(conn_state->connector);
const struct drm_display_mode *adjusted_mode =
- &crtc_state->base.adjusted_mode;
+ &crtc_state->hw.adjusted_mode;
void *port = connector->port;
bool constant_n = drm_dp_has_quirk(&intel_dp->desc,
DP_DPCD_QUIRK_CONSTANT_N);
@@ -99,7 +99,7 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
struct intel_digital_connector_state *intel_conn_state =
to_intel_digital_connector_state(conn_state);
const struct drm_display_mode *adjusted_mode =
- &pipe_config->base.adjusted_mode;
+ &pipe_config->hw.adjusted_mode;
void *port = connector->port;
struct link_config_limits limits;
int ret;
@@ -168,7 +168,6 @@ intel_dp_mst_atomic_check(struct drm_connector *connector,
struct intel_connector *intel_connector =
to_intel_connector(connector);
struct drm_crtc *new_crtc = new_conn_state->crtc;
- struct drm_crtc_state *crtc_state;
struct drm_dp_mst_topology_mgr *mgr;
int ret;
@@ -183,11 +182,16 @@ intel_dp_mst_atomic_check(struct drm_connector *connector,
* connector
*/
if (new_crtc) {
- crtc_state = drm_atomic_get_new_crtc_state(state, new_crtc);
+ struct intel_atomic_state *intel_state =
+ to_intel_atomic_state(state);
+ struct intel_crtc *intel_crtc = to_intel_crtc(new_crtc);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(intel_state,
+ intel_crtc);
if (!crtc_state ||
- !drm_atomic_crtc_needs_modeset(crtc_state) ||
- crtc_state->enable)
+ !drm_atomic_crtc_needs_modeset(&crtc_state->uapi) ||
+ crtc_state->uapi.enable)
return 0;
}
@@ -231,8 +235,32 @@ static void intel_mst_post_disable_dp(struct intel_encoder *encoder,
struct intel_dp *intel_dp = &intel_dig_port->dp;
struct intel_connector *connector =
to_intel_connector(old_conn_state->connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ bool last_mst_stream;
- intel_ddi_disable_pipe_clock(old_crtc_state);
+ intel_dp->active_mst_links--;
+ last_mst_stream = intel_dp->active_mst_links == 0;
+
+ intel_crtc_vblank_off(old_crtc_state);
+
+ intel_disable_pipe(old_crtc_state);
+
+ intel_ddi_disable_transcoder_func(old_crtc_state);
+
+ if (INTEL_GEN(dev_priv) >= 9)
+ skylake_scaler_disable(old_crtc_state);
+ else
+ ironlake_pfit_disable(old_crtc_state);
+
+ /*
+ * From TGL spec: "If multi-stream slave transcoder: Configure
+ * Transcoder Clock Select to direct no clock to the transcoder"
+ *
+ * From older GENs spec: "Configure Transcoder Clock Select to direct
+ * no clock to the transcoder"
+ */
+ if (INTEL_GEN(dev_priv) < 12 || !last_mst_stream)
+ intel_ddi_disable_pipe_clock(old_crtc_state);
/* this can fail */
drm_dp_check_act_status(&intel_dp->mst_mgr);
@@ -248,14 +276,10 @@ static void intel_mst_post_disable_dp(struct intel_encoder *encoder,
drm_dp_send_power_updown_phy(&intel_dp->mst_mgr, connector->port,
false);
- intel_dp->active_mst_links--;
-
intel_mst->connector = NULL;
- if (intel_dp->active_mst_links == 0) {
- intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
+ if (last_mst_stream)
intel_dig_port->base.post_disable(&intel_dig_port->base,
old_crtc_state, NULL);
- }
DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links);
}
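
Moving the active_mst_links decrement to the top matters because last_mst_stream must be accurate for every teardown step that follows, including the Gen 12 pipe-clock special case. A simplified userspace model of the per-stream versus port-wide split (names illustrative, hardware steps reduced to prints):

	#include <stdbool.h>
	#include <stdio.h>

	struct mst_port { int active_links; };

	static void stream_pre_enable(struct mst_port *p)
	{
		bool first_mst_stream = p->active_links++ == 0;

		if (first_mst_stream)
			printf("port-wide pre_enable (PLL, DDI, sink DPMS on)\n");
		printf("per-stream setup, links now %d\n", p->active_links);
	}

	static void stream_post_disable(struct mst_port *p)
	{
		bool last_mst_stream = --p->active_links == 0;

		printf("per-stream teardown, links left %d\n", p->active_links);
		if (last_mst_stream)
			printf("port-wide post_disable (sink DPMS off, DDI, PLL)\n");
	}

	int main(void)
	{
		struct mst_port p = { 0 };

		stream_pre_enable(&p);   /* first stream: port-wide + per-stream */
		stream_pre_enable(&p);   /* second stream: per-stream only */
		stream_post_disable(&p); /* per-stream only */
		stream_post_disable(&p); /* last stream: adds port-wide teardown */
		return 0;
	}
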
@@ -273,20 +297,6 @@ static void intel_mst_pre_pll_enable_dp(struct intel_encoder *encoder,
pipe_config, NULL);
}
-static void intel_mst_post_pll_disable_dp(struct intel_encoder *encoder,
- const struct intel_crtc_state *old_crtc_state,
- const struct drm_connector_state *old_conn_state)
-{
- struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
- struct intel_digital_port *intel_dig_port = intel_mst->primary;
- struct intel_dp *intel_dp = &intel_dig_port->dp;
-
- if (intel_dp->active_mst_links == 0)
- intel_dig_port->base.post_pll_disable(&intel_dig_port->base,
- old_crtc_state,
- old_conn_state);
-}
-
static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
@@ -299,21 +309,23 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
to_intel_connector(conn_state->connector);
int ret;
u32 temp;
+ bool first_mst_stream;
/* MST encoders are bound to a crtc, not to a connector,
* force the mapping here for get_hw_state.
*/
connector->encoder = encoder;
intel_mst->connector = connector;
+ first_mst_stream = intel_dp->active_mst_links == 0;
DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links);
- if (intel_dp->active_mst_links == 0)
+ if (first_mst_stream)
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
drm_dp_send_power_updown_phy(&intel_dp->mst_mgr, connector->port, true);
- if (intel_dp->active_mst_links == 0)
+ if (first_mst_stream)
intel_dig_port->base.pre_enable(&intel_dig_port->base,
pipe_config, NULL);
@@ -330,7 +342,15 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr);
- intel_ddi_enable_pipe_clock(pipe_config);
+ /*
+ * Before Gen 12 this is not done as part of
+ * intel_dig_port->base.pre_enable() and should be done here. For
+ * Gen 12+ the step in which this should be done is different for the
+ * first MST stream, so it's done on the DDI for the first stream and
+ * here for the following ones.
+ */
+ if (INTEL_GEN(dev_priv) < 12 || !first_mst_stream)
+ intel_ddi_enable_pipe_clock(pipe_config);
intel_ddi_set_dp_msa(pipe_config, conn_state);
}
@@ -633,7 +653,6 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum
intel_encoder->disable = intel_mst_disable_dp;
intel_encoder->post_disable = intel_mst_post_disable_dp;
intel_encoder->pre_pll_enable = intel_mst_pre_pll_enable_dp;
- intel_encoder->post_pll_disable = intel_mst_post_pll_disable_dp;
intel_encoder->pre_enable = intel_mst_pre_enable_dp;
intel_encoder->enable = intel_mst_enable_dp;
intel_encoder->get_hw_state = intel_dp_mst_enc_get_hw_state;
diff --git a/drivers/gpu/drm/i915/display/intel_dpio_phy.c b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
index 556d1b30f06a..704f38681c4b 100644
--- a/drivers/gpu/drm/i915/display/intel_dpio_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
@@ -739,7 +739,7 @@ void chv_data_lane_soft_reset(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
enum pipe pipe = crtc->pipe;
u32 val;
@@ -783,7 +783,7 @@ void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
{
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
enum dpio_channel ch = vlv_dport_to_channel(dport);
enum pipe pipe = crtc->pipe;
unsigned int lane_mask =
@@ -864,7 +864,7 @@ void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
enum dpio_channel ch = vlv_dport_to_channel(dport);
enum pipe pipe = crtc->pipe;
int data, i, stagger;
@@ -953,7 +953,7 @@ void chv_phy_post_pll_disable(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum pipe pipe = to_intel_crtc(old_crtc_state->base.crtc)->pipe;
+ enum pipe pipe = to_intel_crtc(old_crtc_state->uapi.crtc)->pipe;
u32 val;
vlv_dpio_get(dev_priv);
@@ -1016,7 +1016,7 @@ void vlv_phy_pre_pll_enable(struct intel_encoder *encoder,
{
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
enum dpio_channel port = vlv_dport_to_channel(dport);
enum pipe pipe = crtc->pipe;
@@ -1046,7 +1046,7 @@ void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder,
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
enum dpio_channel port = vlv_dport_to_channel(dport);
enum pipe pipe = crtc->pipe;
u32 val;
@@ -1075,7 +1075,7 @@ void vlv_phy_reset_lanes(struct intel_encoder *encoder,
{
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
enum dpio_channel port = vlv_dport_to_channel(dport);
enum pipe pipe = crtc->pipe;
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
index 3ce0a023eee0..728a4b045de7 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
@@ -136,7 +136,7 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv,
*/
void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_shared_dpll *pll = crtc_state->shared_dpll;
@@ -163,7 +163,7 @@ void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state)
*/
void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_shared_dpll *pll = crtc_state->shared_dpll;
unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
@@ -208,7 +208,7 @@ out:
*/
void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_shared_dpll *pll = crtc_state->shared_dpll;
unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
@@ -842,7 +842,7 @@ hsw_ddi_hdmi_get_dpll(struct intel_atomic_state *state,
static struct intel_shared_dpll *
hsw_ddi_dp_get_dpll(struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
struct intel_shared_dpll *pll;
enum intel_dpll_id pll_id;
int clock = crtc_state->port_clock;
@@ -1751,7 +1751,7 @@ static bool
bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
struct bxt_clk_div *clk_div)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct dpll best_clock;
/* Calculate HDMI div */
@@ -2274,7 +2274,7 @@ static bool
cnl_ddi_calculate_wrpll(struct intel_crtc_state *crtc_state,
struct skl_wrpll_params *wrpll_params)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
u32 afe_clock = crtc_state->port_clock * 5;
u32 ref_clock;
u32 dco_min = 7998000;
@@ -2553,7 +2553,7 @@ static const struct skl_wrpll_params tgl_tbt_pll_24MHz_values = {
static bool icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
struct skl_wrpll_params *pll_params)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
const struct icl_combo_pll_params *params =
dev_priv->cdclk.hw.ref == 24000 ?
icl_dp_combo_pll_24MHz_values :
@@ -2575,7 +2575,7 @@ static bool icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
struct skl_wrpll_params *pll_params)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
if (INTEL_GEN(dev_priv) >= 12) {
switch (dev_priv->cdclk.hw.ref) {
@@ -2612,7 +2612,7 @@ static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state,
struct intel_encoder *encoder,
struct intel_dpll_hw_state *pll_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
u32 cfgcr0, cfgcr1;
struct skl_wrpll_params pll_params = { 0 };
bool ret;
@@ -2744,7 +2744,7 @@ static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
struct intel_dpll_hw_state *pll_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
int refclk_khz = dev_priv->cdclk.hw.ref;
int clock = crtc_state->port_clock;
u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c
index bb5a0e91b370..ada006a690df 100644
--- a/drivers/gpu/drm/i915/display/intel_dsb.c
+++ b/drivers/gpu/drm/i915/display/intel_dsb.c
@@ -102,43 +102,50 @@ intel_dsb_get(struct intel_crtc *crtc)
struct intel_dsb *dsb = &crtc->dsb;
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
+ u32 *buf;
intel_wakeref_t wakeref;
if (!HAS_DSB(i915))
return dsb;
- if (atomic_add_return(1, &dsb->refcount) != 1)
+ if (dsb->refcount++ != 0)
return dsb;
- dsb->id = DSB1;
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
obj = i915_gem_object_create_internal(i915, DSB_BUF_SIZE);
if (IS_ERR(obj)) {
DRM_ERROR("Gem object creation failed\n");
- goto err;
+ goto out;
}
- vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
+ vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
if (IS_ERR(vma)) {
DRM_ERROR("Vma creation failed\n");
i915_gem_object_put(obj);
- atomic_dec(&dsb->refcount);
- goto err;
+ goto out;
}
- dsb->cmd_buf = i915_gem_object_pin_map(vma->obj, I915_MAP_WC);
- if (IS_ERR(dsb->cmd_buf)) {
+ buf = i915_gem_object_pin_map(vma->obj, I915_MAP_WC);
+ if (IS_ERR(buf)) {
DRM_ERROR("Command buffer creation failed\n");
- i915_vma_unpin_and_release(&vma, 0);
- dsb->cmd_buf = NULL;
- atomic_dec(&dsb->refcount);
- goto err;
+ goto out;
}
+
+ dsb->id = DSB1;
dsb->vma = vma;
+ dsb->cmd_buf = buf;
+
+out:
+ /*
+ * On error dsb->cmd_buf will continue to be NULL, making the writes
+ * pass-through. Leave the dangling ref to be removed later by the
+ * corresponding intel_dsb_put(): the important error message will
+ * already be logged above.
+ */
-err:
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+
return dsb;
}
@@ -158,10 +165,10 @@ void intel_dsb_put(struct intel_dsb *dsb)
if (!HAS_DSB(i915))
return;
- if (WARN_ON(atomic_read(&dsb->refcount) == 0))
+ if (WARN_ON(dsb->refcount == 0))
return;
- if (atomic_dec_and_test(&dsb->refcount)) {
+ if (--dsb->refcount == 0) {
i915_vma_unpin_and_release(&dsb->vma, I915_VMA_RELEASE_MAP);
dsb->cmd_buf = NULL;
dsb->free_pos = 0;
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.h b/drivers/gpu/drm/i915/display/intel_dsb.h
index 6f95c8e909e6..395ef9ce558e 100644
--- a/drivers/gpu/drm/i915/display/intel_dsb.h
+++ b/drivers/gpu/drm/i915/display/intel_dsb.h
@@ -22,7 +22,7 @@ enum dsb_id {
};
struct intel_dsb {
- atomic_t refcount;
+ long refcount;
enum dsb_id id;
u32 *cmd_buf;
struct i915_vma *vma;
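
Dropping atomic_t here is sound only because intel_dsb_get() and intel_dsb_put() already run under the modeset locks, so the counter can never race. A toy model of the counting under that assumption (locking and the real buffer mapping elided):

	#include <assert.h>
	#include <stddef.h>

	struct dsb { long refcount; void *cmd_buf; };

	static void dsb_get(struct dsb *dsb)
	{
		if (dsb->refcount++ != 0)
			return;			/* already set up by the first user */
		dsb->cmd_buf = &dsb->refcount;	/* stand-in for the pinned map */
	}

	static void dsb_put(struct dsb *dsb)
	{
		assert(dsb->refcount > 0);
		if (--dsb->refcount == 0)
			dsb->cmd_buf = NULL;	/* last user tears down */
	}

	int main(void)
	{
		struct dsb d = { 0, NULL };

		dsb_get(&d);
		dsb_get(&d);
		dsb_put(&d);
		assert(d.cmd_buf != NULL);	/* one user still holds it */
		dsb_put(&d);
		assert(d.cmd_buf == NULL);	/* released on the final put */
		return 0;
	}
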
diff --git a/drivers/gpu/drm/i915/display/intel_dvo.c b/drivers/gpu/drm/i915/display/intel_dvo.c
index bcfbcb743e7d..a74dc5b915d1 100644
--- a/drivers/gpu/drm/i915/display/intel_dvo.c
+++ b/drivers/gpu/drm/i915/display/intel_dvo.c
@@ -178,9 +178,9 @@ static void intel_dvo_get_config(struct intel_encoder *encoder,
else
flags |= DRM_MODE_FLAG_NVSYNC;
- pipe_config->base.adjusted_mode.flags |= flags;
+ pipe_config->hw.adjusted_mode.flags |= flags;
- pipe_config->base.adjusted_mode.crtc_clock = pipe_config->port_clock;
+ pipe_config->hw.adjusted_mode.crtc_clock = pipe_config->port_clock;
}
static void intel_disable_dvo(struct intel_encoder *encoder,
@@ -207,8 +207,8 @@ static void intel_enable_dvo(struct intel_encoder *encoder,
u32 temp = I915_READ(dvo_reg);
intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev,
- &pipe_config->base.mode,
- &pipe_config->base.adjusted_mode);
+ &pipe_config->hw.mode,
+ &pipe_config->hw.adjusted_mode);
I915_WRITE(dvo_reg, temp | DVO_ENABLE);
I915_READ(dvo_reg);
@@ -253,7 +253,7 @@ static int intel_dvo_compute_config(struct intel_encoder *encoder,
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
const struct drm_display_mode *fixed_mode =
intel_dvo->attached_connector->panel.fixed_mode;
- struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+ struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
/*
* If we have timings from the BIOS for the panel, put them in
@@ -277,8 +277,8 @@ static void intel_dvo_pre_enable(struct intel_encoder *encoder,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
- const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
+ const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
enum pipe pipe = crtc->pipe;
u32 dvo_val;
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index 20616639b8ab..a1048ece541e 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -50,11 +50,6 @@ static inline bool fbc_supported(struct drm_i915_private *dev_priv)
return HAS_FBC(dev_priv);
}
-static inline bool no_fbc_on_multiple_pipes(struct drm_i915_private *dev_priv)
-{
- return INTEL_GEN(dev_priv) <= 3;
-}
-
/*
* On platforms where the CRTC's x:0/y:0 coordinates don't match the
* frontbuffer's x:0/y:0 coordinates we lie to the hardware about the plane's
@@ -73,7 +68,7 @@ static unsigned int get_crtc_fence_y_offset(struct intel_fbc *fbc)
* write to the PLANE_SIZE register. For BDW-, the hardware looks at the value
* we wrote to PIPESRC.
*/
-static void intel_fbc_get_plane_source_size(struct intel_fbc_state_cache *cache,
+static void intel_fbc_get_plane_source_size(const struct intel_fbc_state_cache *cache,
int *width, int *height)
{
if (width)
@@ -83,7 +78,7 @@ static void intel_fbc_get_plane_source_size(struct intel_fbc_state_cache *cache,
}
static int intel_fbc_calculate_cfb_size(struct drm_i915_private *dev_priv,
- struct intel_fbc_state_cache *cache)
+ const struct intel_fbc_state_cache *cache)
{
int lines;
@@ -143,8 +138,10 @@ static void i8xx_fbc_activate(struct drm_i915_private *dev_priv)
u32 fbc_ctl2;
/* Set it up... */
- fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
+ fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM;
fbc_ctl2 |= FBC_CTL_PLANE(params->crtc.i9xx_plane);
+ if (params->fence_id >= 0)
+ fbc_ctl2 |= FBC_CTL_CPU_FENCE;
I915_WRITE(FBC_CONTROL2, fbc_ctl2);
I915_WRITE(FBC_FENCE_OFF, params->crtc.fence_y_offset);
}
@@ -156,7 +153,8 @@ static void i8xx_fbc_activate(struct drm_i915_private *dev_priv)
if (IS_I945GM(dev_priv))
fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
- fbc_ctl |= params->vma->fence->id;
+ if (params->fence_id >= 0)
+ fbc_ctl |= params->fence_id;
I915_WRITE(FBC_CONTROL, fbc_ctl);
}
@@ -176,8 +174,8 @@ static void g4x_fbc_activate(struct drm_i915_private *dev_priv)
else
dpfc_ctl |= DPFC_CTL_LIMIT_1X;
- if (params->flags & PLANE_HAS_FENCE) {
- dpfc_ctl |= DPFC_CTL_FENCE_EN | params->vma->fence->id;
+ if (params->fence_id >= 0) {
+ dpfc_ctl |= DPFC_CTL_FENCE_EN | params->fence_id;
I915_WRITE(DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
} else {
I915_WRITE(DPFC_FENCE_YOFF, 0);
@@ -234,14 +232,14 @@ static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
break;
}
- if (params->flags & PLANE_HAS_FENCE) {
+ if (params->fence_id >= 0) {
dpfc_ctl |= DPFC_CTL_FENCE_EN;
if (IS_GEN(dev_priv, 5))
- dpfc_ctl |= params->vma->fence->id;
+ dpfc_ctl |= params->fence_id;
if (IS_GEN(dev_priv, 6)) {
I915_WRITE(SNB_DPFC_CTL_SA,
SNB_CPU_FENCE_ENABLE |
- params->vma->fence->id);
+ params->fence_id);
I915_WRITE(DPFC_CPU_FENCE_OFFSET,
params->crtc.fence_y_offset);
}
@@ -253,8 +251,6 @@ static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
}
I915_WRITE(ILK_DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
- I915_WRITE(ILK_FBC_RT_BASE,
- i915_ggtt_offset(params->vma) | ILK_FBC_RT_VALID);
/* enable it... */
I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
@@ -285,13 +281,12 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
int threshold = dev_priv->fbc.threshold;
/* Display WA #0529: skl, kbl, bxt. */
- if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv)) {
+ if (IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv)) {
u32 val = I915_READ(CHICKEN_MISC_4);
val &= ~(FBC_STRIDE_OVERRIDE | FBC_STRIDE_MASK);
- if (i915_gem_object_get_tiling(params->vma->obj) !=
- I915_TILING_X)
+ if (params->gen9_wa_cfb_stride)
val |= FBC_STRIDE_OVERRIDE | params->gen9_wa_cfb_stride;
I915_WRITE(CHICKEN_MISC_4, val);
@@ -317,11 +312,11 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
break;
}
- if (params->flags & PLANE_HAS_FENCE) {
+ if (params->fence_id >= 0) {
dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
I915_WRITE(SNB_DPFC_CTL_SA,
SNB_CPU_FENCE_ENABLE |
- params->vma->fence->id);
+ params->fence_id);
I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset);
} else {
I915_WRITE(SNB_DPFC_CTL_SA,0);
@@ -367,6 +362,7 @@ static void intel_fbc_hw_activate(struct drm_i915_private *dev_priv)
struct intel_fbc *fbc = &dev_priv->fbc;
fbc->active = true;
+ fbc->activated = true;
if (INTEL_GEN(dev_priv) >= 7)
gen7_fbc_activate(dev_priv);
@@ -419,29 +415,10 @@ static void intel_fbc_deactivate(struct drm_i915_private *dev_priv,
fbc->no_fbc_reason = reason;
}
-static bool multiple_pipes_ok(struct intel_crtc *crtc,
- struct intel_plane_state *plane_state)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct intel_fbc *fbc = &dev_priv->fbc;
- enum pipe pipe = crtc->pipe;
-
- /* Don't even bother tracking anything we don't need. */
- if (!no_fbc_on_multiple_pipes(dev_priv))
- return true;
-
- if (plane_state->base.visible)
- fbc->visible_pipes_mask |= (1 << pipe);
- else
- fbc->visible_pipes_mask &= ~(1 << pipe);
-
- return (fbc->visible_pipes_mask & ~(1 << pipe)) != 0;
-}
-
static int find_compression_threshold(struct drm_i915_private *dev_priv,
struct drm_mm_node *node,
- int size,
- int fb_cpp)
+ unsigned int size,
+ unsigned int fb_cpp)
{
int compression_threshold = 1;
int ret;
@@ -487,18 +464,15 @@ again:
}
}
-static int intel_fbc_alloc_cfb(struct intel_crtc *crtc)
+static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv,
+ unsigned int size, unsigned int fb_cpp)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_fbc *fbc = &dev_priv->fbc;
struct drm_mm_node *uninitialized_var(compressed_llb);
- int size, fb_cpp, ret;
+ int ret;
WARN_ON(drm_mm_node_allocated(&fbc->compressed_fb));
- size = intel_fbc_calculate_cfb_size(dev_priv, &fbc->state_cache);
- fb_cpp = fbc->state_cache.fb.format->cpp[0];
-
ret = find_compression_threshold(dev_priv, &fbc->compressed_fb,
size, fb_cpp);
if (!ret)
@@ -656,46 +630,55 @@ static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
}
static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state,
- struct intel_plane_state *plane_state)
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_fbc *fbc = &dev_priv->fbc;
struct intel_fbc_state_cache *cache = &fbc->state_cache;
- struct drm_framebuffer *fb = plane_state->base.fb;
+ struct drm_framebuffer *fb = plane_state->hw.fb;
- cache->vma = NULL;
- cache->flags = 0;
+ cache->plane.visible = plane_state->uapi.visible;
+ if (!cache->plane.visible)
+ return;
- cache->crtc.mode_flags = crtc_state->base.adjusted_mode.flags;
+ cache->crtc.mode_flags = crtc_state->hw.adjusted_mode.flags;
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
cache->crtc.hsw_bdw_pixel_rate = crtc_state->pixel_rate;
- cache->plane.rotation = plane_state->base.rotation;
+ cache->plane.rotation = plane_state->hw.rotation;
/*
* Src coordinates are already rotated by 270 degrees for
* the 90/270 degree plane rotation cases (to match the
* GTT mapping), hence no need to account for rotation here.
*/
- cache->plane.src_w = drm_rect_width(&plane_state->base.src) >> 16;
- cache->plane.src_h = drm_rect_height(&plane_state->base.src) >> 16;
- cache->plane.visible = plane_state->base.visible;
+ cache->plane.src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
+ cache->plane.src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
cache->plane.adjusted_x = plane_state->color_plane[0].x;
cache->plane.adjusted_y = plane_state->color_plane[0].y;
- cache->plane.y = plane_state->base.src.y1 >> 16;
+ cache->plane.y = plane_state->uapi.src.y1 >> 16;
- cache->plane.pixel_blend_mode = plane_state->base.pixel_blend_mode;
-
- if (!cache->plane.visible)
- return;
+ cache->plane.pixel_blend_mode = plane_state->hw.pixel_blend_mode;
cache->fb.format = fb->format;
cache->fb.stride = fb->pitches[0];
- cache->vma = plane_state->vma;
- cache->flags = plane_state->flags;
- if (WARN_ON(cache->flags & PLANE_HAS_FENCE && !cache->vma->fence))
- cache->flags &= ~PLANE_HAS_FENCE;
+ WARN_ON(plane_state->flags & PLANE_HAS_FENCE &&
+ !plane_state->vma->fence);
+
+ if (plane_state->flags & PLANE_HAS_FENCE &&
+ plane_state->vma->fence)
+ cache->fence_id = plane_state->vma->fence->id;
+ else
+ cache->fence_id = -1;
+}
+
+static bool intel_fbc_cfb_size_changed(struct drm_i915_private *dev_priv)
+{
+ struct intel_fbc *fbc = &dev_priv->fbc;
+
+ return intel_fbc_calculate_cfb_size(dev_priv, &fbc->state_cache) >
+ fbc->compressed_fb.size * fbc->threshold;
}
static bool intel_fbc_can_activate(struct intel_crtc *crtc)
@@ -704,6 +687,11 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
struct intel_fbc *fbc = &dev_priv->fbc;
struct intel_fbc_state_cache *cache = &fbc->state_cache;
+ if (!cache->plane.visible) {
+ fbc->no_fbc_reason = "primary plane not visible";
+ return false;
+ }
+
/* We don't need to use a state cache here since this information is
* global for all CRTCs.
*/
@@ -712,11 +700,6 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
return false;
}
- if (!cache->vma) {
- fbc->no_fbc_reason = "primary plane not visible";
- return false;
- }
-
if (cache->crtc.mode_flags & DRM_MODE_FLAG_INTERLACE) {
fbc->no_fbc_reason = "incompatible mode";
return false;
@@ -740,7 +723,7 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
* For now this will effectively disable FBC with 90/270 degree
* rotation.
*/
- if (!(cache->flags & PLANE_HAS_FENCE)) {
+ if (cache->fence_id < 0) {
fbc->no_fbc_reason = "framebuffer not tiled or fenced";
return false;
}
@@ -783,8 +766,7 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
* we didn't get any invalidate/deactivate calls, but this would require
* a lot of tracking just for a specific case. If we conclude it's an
* important case, we can implement it later. */
- if (intel_fbc_calculate_cfb_size(dev_priv, &fbc->state_cache) >
- fbc->compressed_fb.size * fbc->threshold) {
+ if (intel_fbc_cfb_size_changed(dev_priv)) {
fbc->no_fbc_reason = "CFB requirements changed";
return false;
}
@@ -794,7 +776,7 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
* having a Y offset that isn't divisible by 4 causes FIFO underrun
* and screen flicker.
*/
- if (IS_GEN_RANGE(dev_priv, 9, 10) &&
+ if (INTEL_GEN(dev_priv) >= 9 &&
(fbc->state_cache.plane.adjusted_y & 3)) {
fbc->no_fbc_reason = "plane Y offset is misaligned";
return false;
@@ -837,8 +819,7 @@ static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
* zero. */
memset(params, 0, sizeof(*params));
- params->vma = cache->vma;
- params->flags = cache->flags;
+ params->fence_id = cache->fence_id;
params->crtc.pipe = crtc->pipe;
params->crtc.i9xx_plane = to_intel_plane(crtc->base.primary)->i9xx_plane;
@@ -849,39 +830,88 @@ static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
params->cfb_size = intel_fbc_calculate_cfb_size(dev_priv, cache);
- if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv))
- params->gen9_wa_cfb_stride = DIV_ROUND_UP(cache->plane.src_w,
- 32 * fbc->threshold) * 8;
+ params->gen9_wa_cfb_stride = cache->gen9_wa_cfb_stride;
+
+ params->plane_visible = cache->plane.visible;
+}
+
+static bool intel_fbc_can_flip_nuke(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct intel_fbc *fbc = &dev_priv->fbc;
+ const struct intel_fbc_state_cache *cache = &fbc->state_cache;
+ const struct intel_fbc_reg_params *params = &fbc->params;
+
+ if (drm_atomic_crtc_needs_modeset(&crtc_state->uapi))
+ return false;
+
+ if (!params->plane_visible)
+ return false;
+
+ if (!intel_fbc_can_activate(crtc))
+ return false;
+
+ if (params->fb.format != cache->fb.format)
+ return false;
+
+ if (params->fb.stride != cache->fb.stride)
+ return false;
+
+ if (params->cfb_size != intel_fbc_calculate_cfb_size(dev_priv, cache))
+ return false;
+
+ if (params->gen9_wa_cfb_stride != cache->gen9_wa_cfb_stride)
+ return false;
+
+ return true;
}
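
intel_fbc_can_flip_nuke() is what lets FBC stay enabled across a page flip: when nothing the hardware was programmed with would change, the deactivate/reactivate cycle (and the WA #1198 vblank wait below) can be skipped. A condensed standalone model, with the intel_fbc_can_activate() check elided and the fields reduced to stand-ins:

	#include <assert.h>
	#include <stdbool.h>

	struct fbc_cfg {
		const void *format;	/* stand-in for the fb format pointer */
		int stride, cfb_size, gen9_wa_cfb_stride;
		bool visible;
	};

	static bool can_flip_nuke(const struct fbc_cfg *params,
				  const struct fbc_cfg *cache, bool needs_modeset)
	{
		if (needs_modeset || !params->visible)
			return false;
		return params->format == cache->format &&
		       params->stride == cache->stride &&
		       params->cfb_size == cache->cfb_size &&
		       params->gen9_wa_cfb_stride == cache->gen9_wa_cfb_stride;
	}

	int main(void)
	{
		static const int fmt;
		struct fbc_cfg hw = { &fmt, 1024, 4096, 0, true };
		struct fbc_cfg next = hw;

		assert(can_flip_nuke(&hw, &next, false));  /* identical: keep FBC */
		next.stride = 2048;
		assert(!can_flip_nuke(&hw, &next, false)); /* stride changed: nuke */
		return 0;
	}
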
-void intel_fbc_pre_update(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state,
- struct intel_plane_state *plane_state)
+bool intel_fbc_pre_update(struct intel_crtc *crtc,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_fbc *fbc = &dev_priv->fbc;
const char *reason = "update pending";
+ bool need_vblank_wait = false;
if (!fbc_supported(dev_priv))
- return;
+ return need_vblank_wait;
mutex_lock(&fbc->lock);
- if (!multiple_pipes_ok(crtc, plane_state)) {
- reason = "more than one pipe active";
- goto deactivate;
- }
-
- if (!fbc->enabled || fbc->crtc != crtc)
+ if (fbc->crtc != crtc)
goto unlock;
intel_fbc_update_state_cache(crtc, crtc_state, plane_state);
fbc->flip_pending = true;
-deactivate:
- intel_fbc_deactivate(dev_priv, reason);
+ if (!intel_fbc_can_flip_nuke(crtc_state)) {
+ intel_fbc_deactivate(dev_priv, reason);
+
+ /*
+ * Display WA #1198: glk+
+ * Need an extra vblank wait between FBC disable and most plane
+ * updates. Bspec says this is only needed for plane disable, but
+ * that is not true. Touching most plane registers will cause the
+ * corruption to appear. Also SKL/derivatives do not seem to be
+ * affected.
+ *
+ * TODO: could optimize this a bit by sampling the frame
+ * counter when we disable FBC (if it was already done earlier)
+ * and skipping the extra vblank wait before the plane update
+ * if at least one frame has already passed.
+ */
+ if (fbc->activated &&
+ (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)))
+ need_vblank_wait = true;
+ fbc->activated = false;
+ }
unlock:
mutex_unlock(&fbc->lock);
+
+ return need_vblank_wait;
}
/**
@@ -897,14 +927,13 @@ static void __intel_fbc_disable(struct drm_i915_private *dev_priv)
struct intel_crtc *crtc = fbc->crtc;
WARN_ON(!mutex_is_locked(&fbc->lock));
- WARN_ON(!fbc->enabled);
+ WARN_ON(!fbc->crtc);
WARN_ON(fbc->active);
DRM_DEBUG_KMS("Disabling FBC on pipe %c\n", pipe_name(crtc->pipe));
__intel_fbc_cleanup_cfb(dev_priv);
- fbc->enabled = false;
fbc->crtc = NULL;
}
@@ -915,11 +944,10 @@ static void __intel_fbc_post_update(struct intel_crtc *crtc)
WARN_ON(!mutex_is_locked(&fbc->lock));
- if (!fbc->enabled || fbc->crtc != crtc)
+ if (fbc->crtc != crtc)
return;
fbc->flip_pending = false;
- WARN_ON(fbc->active);
if (!i915_modparams.enable_fbc) {
intel_fbc_deactivate(dev_priv, "disabled at runtime per module param");
@@ -933,10 +961,9 @@ static void __intel_fbc_post_update(struct intel_crtc *crtc)
if (!intel_fbc_can_activate(crtc))
return;
- if (!fbc->busy_bits) {
- intel_fbc_deactivate(dev_priv, "FBC enabled (active or scheduled)");
+ if (!fbc->busy_bits)
intel_fbc_hw_activate(dev_priv);
- } else
+ else
intel_fbc_deactivate(dev_priv, "frontbuffer write");
}
@@ -955,7 +982,7 @@ void intel_fbc_post_update(struct intel_crtc *crtc)
static unsigned int intel_fbc_get_frontbuffer_bit(struct intel_fbc *fbc)
{
- if (fbc->enabled)
+ if (fbc->crtc)
return to_intel_plane(fbc->crtc->base.primary)->frontbuffer_bit;
else
return fbc->possible_framebuffer_bits;
@@ -977,7 +1004,7 @@ void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
fbc->busy_bits |= intel_fbc_get_frontbuffer_bit(fbc) & frontbuffer_bits;
- if (fbc->enabled && fbc->busy_bits)
+ if (fbc->crtc && fbc->busy_bits)
intel_fbc_deactivate(dev_priv, "frontbuffer write");
mutex_unlock(&fbc->lock);
@@ -998,7 +1025,7 @@ void intel_fbc_flush(struct drm_i915_private *dev_priv,
if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP)
goto out;
- if (!fbc->busy_bits && fbc->enabled &&
+ if (!fbc->busy_bits && fbc->crtc &&
(frontbuffer_bits & intel_fbc_get_frontbuffer_bit(fbc))) {
if (fbc->active)
intel_fbc_recompress(dev_priv);
@@ -1047,12 +1074,12 @@ void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
* to pipe or plane A. */
for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
struct intel_crtc_state *crtc_state;
- struct intel_crtc *crtc = to_intel_crtc(plane_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(plane_state->hw.crtc);
if (!plane->has_fbc)
continue;
- if (!plane_state->base.visible)
+ if (!plane_state->uapi.visible)
continue;
crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
@@ -1081,42 +1108,53 @@ out:
* intel_fbc_disable in the middle, as long as it is deactivated.
*/
void intel_fbc_enable(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state,
- struct intel_plane_state *plane_state)
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_fbc *fbc = &dev_priv->fbc;
+ struct intel_fbc_state_cache *cache = &fbc->state_cache;
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
if (!fbc_supported(dev_priv))
return;
mutex_lock(&fbc->lock);
- if (fbc->enabled) {
- WARN_ON(fbc->crtc == NULL);
- if (fbc->crtc == crtc) {
- WARN_ON(!crtc_state->enable_fbc);
- WARN_ON(fbc->active);
- }
- goto out;
- }
+ if (fbc->crtc) {
+ if (fbc->crtc != crtc ||
+ !intel_fbc_cfb_size_changed(dev_priv))
+ goto out;
- if (!crtc_state->enable_fbc)
- goto out;
+ __intel_fbc_disable(dev_priv);
+ }
WARN_ON(fbc->active);
- WARN_ON(fbc->crtc != NULL);
intel_fbc_update_state_cache(crtc, crtc_state, plane_state);
- if (intel_fbc_alloc_cfb(crtc)) {
+
+ /* FIXME crtc_state->enable_fbc lies :( */
+ if (!cache->plane.visible)
+ goto out;
+
+ if (intel_fbc_alloc_cfb(dev_priv,
+ intel_fbc_calculate_cfb_size(dev_priv, cache),
+ fb->format->cpp[0])) {
+ cache->plane.visible = false;
fbc->no_fbc_reason = "not enough stolen memory";
goto out;
}
+ if ((IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv)) &&
+ fb->modifier != I915_FORMAT_MOD_X_TILED)
+ cache->gen9_wa_cfb_stride =
+ DIV_ROUND_UP(cache->plane.src_w, 32 * fbc->threshold) * 8;
+ else
+ cache->gen9_wa_cfb_stride = 0;
+
DRM_DEBUG_KMS("Enabling FBC on pipe %c\n", pipe_name(crtc->pipe));
fbc->no_fbc_reason = "FBC enabled but not active yet\n";
- fbc->enabled = true;
fbc->crtc = crtc;
out:
mutex_unlock(&fbc->lock);
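
The gen9_wa_cfb_stride cached above implements Display WA #0529 for framebuffers that are not X-tiled. A worked example with assumed inputs, a 3840-pixel-wide plane at compression threshold 1:

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	int main(void)
	{
		unsigned int src_w = 3840, threshold = 1; /* assumed inputs */
		unsigned int stride = DIV_ROUND_UP(src_w, 32 * threshold) * 8;

		printf("gen9_wa_cfb_stride = %u\n", stride); /* prints 960 */
		return 0;
	}
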
@@ -1156,7 +1194,7 @@ void intel_fbc_global_disable(struct drm_i915_private *dev_priv)
return;
mutex_lock(&fbc->lock);
- if (fbc->enabled) {
+ if (fbc->crtc) {
WARN_ON(fbc->crtc->active);
__intel_fbc_disable(dev_priv);
}
@@ -1172,7 +1210,7 @@ static void intel_fbc_underrun_work_fn(struct work_struct *work)
mutex_lock(&fbc->lock);
/* Maybe we were scheduled twice. */
- if (fbc->underrun_detected || !fbc->enabled)
+ if (fbc->underrun_detected || !fbc->crtc)
goto out;
DRM_DEBUG_KMS("Disabling FBC due to FIFO underrun.\n");
@@ -1244,28 +1282,6 @@ void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *dev_priv)
schedule_work(&fbc->underrun_work);
}
-/**
- * intel_fbc_init_pipe_state - initialize FBC's CRTC visibility tracking
- * @dev_priv: i915 device instance
- *
- * The FBC code needs to track CRTC visibility since the older platforms can't
- * have FBC enabled while multiple pipes are used. This function does the
- * initial setup at driver load to make sure FBC is matching the real hardware.
- */
-void intel_fbc_init_pipe_state(struct drm_i915_private *dev_priv)
-{
- struct intel_crtc *crtc;
-
- /* Don't even bother tracking anything if we don't need. */
- if (!no_fbc_on_multiple_pipes(dev_priv))
- return;
-
- for_each_intel_crtc(&dev_priv->drm, crtc)
- if (intel_crtc_active(crtc) &&
- crtc->base.primary->state->visible)
- dev_priv->fbc.visible_pipes_mask |= (1 << crtc->pipe);
-}
-
/*
* The DDX driver changes its behavior depending on the value it reads from
* i915.enable_fbc, so sanitize it by translating the default value into either
@@ -1283,10 +1299,6 @@ static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv)
if (!HAS_FBC(dev_priv))
return 0;
- /* https://bugs.freedesktop.org/show_bug.cgi?id=108085 */
- if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
- return 0;
-
if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9)
return 1;
@@ -1317,7 +1329,6 @@ void intel_fbc_init(struct drm_i915_private *dev_priv)
INIT_WORK(&fbc->underrun_work, intel_fbc_underrun_work_fn);
mutex_init(&fbc->lock);
- fbc->enabled = false;
fbc->active = false;
if (!drm_mm_initialized(&dev_priv->mm.stolen))
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.h b/drivers/gpu/drm/i915/display/intel_fbc.h
index 50272eda8d43..c8a5e5098687 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.h
+++ b/drivers/gpu/drm/i915/display/intel_fbc.h
@@ -19,15 +19,14 @@ struct intel_plane_state;
void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
struct intel_atomic_state *state);
bool intel_fbc_is_active(struct drm_i915_private *dev_priv);
-void intel_fbc_pre_update(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state,
- struct intel_plane_state *plane_state);
+bool intel_fbc_pre_update(struct intel_crtc *crtc,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state);
void intel_fbc_post_update(struct intel_crtc *crtc);
void intel_fbc_init(struct drm_i915_private *dev_priv);
-void intel_fbc_init_pipe_state(struct drm_i915_private *dev_priv);
void intel_fbc_enable(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state,
- struct intel_plane_state *plane_state);
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state);
void intel_fbc_disable(struct intel_crtc *crtc);
void intel_fbc_global_disable(struct drm_i915_private *dev_priv);
void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.c b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
index 84b164f31895..6cb02c912acc 100644
--- a/drivers/gpu/drm/i915/display/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
@@ -229,11 +229,11 @@ static void frontbuffer_release(struct kref *ref)
vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
spin_unlock(&obj->vma.lock);
- obj->frontbuffer = NULL;
+ RCU_INIT_POINTER(obj->frontbuffer, NULL);
spin_unlock(&to_i915(obj->base.dev)->fb_tracking.lock);
i915_gem_object_put(obj);
- kfree(front);
+ kfree_rcu(front, rcu);
}
struct intel_frontbuffer *
@@ -242,11 +242,7 @@ intel_frontbuffer_get(struct drm_i915_gem_object *obj)
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct intel_frontbuffer *front;
- spin_lock(&i915->fb_tracking.lock);
- front = obj->frontbuffer;
- if (front)
- kref_get(&front->ref);
- spin_unlock(&i915->fb_tracking.lock);
+ front = __intel_frontbuffer_get(obj);
if (front)
return front;
@@ -262,13 +258,13 @@ intel_frontbuffer_get(struct drm_i915_gem_object *obj)
i915_active_may_sleep(frontbuffer_retire));
spin_lock(&i915->fb_tracking.lock);
- if (obj->frontbuffer) {
+ if (rcu_access_pointer(obj->frontbuffer)) {
kfree(front);
- front = obj->frontbuffer;
+ front = rcu_dereference_protected(obj->frontbuffer, true);
kref_get(&front->ref);
} else {
i915_gem_object_get(obj);
- obj->frontbuffer = front;
+ rcu_assign_pointer(obj->frontbuffer, front);
}
spin_unlock(&i915->fb_tracking.lock);
diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.h b/drivers/gpu/drm/i915/display/intel_frontbuffer.h
index adc64d61a4a5..6d41f5394425 100644
--- a/drivers/gpu/drm/i915/display/intel_frontbuffer.h
+++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.h
@@ -27,10 +27,10 @@
#include <linux/atomic.h>
#include <linux/kref.h>
+#include "gem/i915_gem_object_types.h"
#include "i915_active.h"
struct drm_i915_private;
-struct drm_i915_gem_object;
enum fb_op_origin {
ORIGIN_GTT,
@@ -45,6 +45,7 @@ struct intel_frontbuffer {
atomic_t bits;
struct i915_active write;
struct drm_i915_gem_object *obj;
+ struct rcu_head rcu;
};
void intel_frontbuffer_flip_prepare(struct drm_i915_private *i915,
@@ -54,6 +55,35 @@ void intel_frontbuffer_flip_complete(struct drm_i915_private *i915,
void intel_frontbuffer_flip(struct drm_i915_private *i915,
unsigned frontbuffer_bits);
+void intel_frontbuffer_put(struct intel_frontbuffer *front);
+
+static inline struct intel_frontbuffer *
+__intel_frontbuffer_get(const struct drm_i915_gem_object *obj)
+{
+ struct intel_frontbuffer *front;
+
+ if (likely(!rcu_access_pointer(obj->frontbuffer)))
+ return NULL;
+
+ rcu_read_lock();
+ do {
+ front = rcu_dereference(obj->frontbuffer);
+ if (!front)
+ break;
+
+ if (unlikely(!kref_get_unless_zero(&front->ref)))
+ continue;
+
+ if (likely(front == rcu_access_pointer(obj->frontbuffer)))
+ break;
+
+ intel_frontbuffer_put(front);
+ } while (1);
+ rcu_read_unlock();
+
+ return front;
+}
+
struct intel_frontbuffer *
intel_frontbuffer_get(struct drm_i915_gem_object *obj);
@@ -119,6 +149,4 @@ void intel_frontbuffer_track(struct intel_frontbuffer *old,
struct intel_frontbuffer *new,
unsigned int frontbuffer_bits);
-void intel_frontbuffer_put(struct intel_frontbuffer *front);
-
#endif /* __INTEL_FRONTBUFFER_H__ */
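
The lookup loop in __intel_frontbuffer_get() has three outcomes worth spelling out: a NULL pointer simply returns NULL; a failed kref_get_unless_zero() means frontbuffer_release() already dropped the last reference, so the load is retried and will observe the NULL installed by the release path; and a successful get on a pointer that was swapped in the meantime is undone with intel_frontbuffer_put() before retrying. The switch to kfree_rcu() is what keeps front->ref valid memory for the duration of the read-side critical section. The primitive everything hinges on is get-unless-zero; a minimal userspace analogue using C11 atomics (an assumed simplification of the kref helper, not the kernel's implementation):

	#include <assert.h>
	#include <stdatomic.h>
	#include <stdbool.h>

	/* Take a reference only while the object is still live. */
	static bool ref_get_unless_zero(atomic_long *ref)
	{
		long v = atomic_load(ref);

		while (v != 0) {
			/* on failure, v is reloaded with the current value */
			if (atomic_compare_exchange_weak(ref, &v, v + 1))
				return true;
		}
		return false;
	}

	int main(void)
	{
		atomic_long live = 1, dead = 0;

		assert(ref_get_unless_zero(&live));  /* 1 -> 2: lookup wins */
		assert(!ref_get_unless_zero(&dead)); /* released: caller retries */
		return 0;
	}
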
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index a448815d8fc2..0fdbd39f6641 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -1870,7 +1870,7 @@ static bool is_hdcp2_supported(struct drm_i915_private *dev_priv)
return false;
return (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv) ||
- IS_KABYLAKE(dev_priv));
+ IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv));
}
void intel_hdcp_component_init(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index f56fffc474fa..685589064d10 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -285,7 +285,7 @@ static void ibx_write_infoframe(struct intel_encoder *encoder,
{
const u32 *data = frame;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
int i;
@@ -321,7 +321,7 @@ static void ibx_read_infoframe(struct intel_encoder *encoder,
void *frame, ssize_t len)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
u32 val, *data = frame;
int i;
@@ -340,7 +340,7 @@ static u32 ibx_infoframes_enabled(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum pipe pipe = to_intel_crtc(pipe_config->base.crtc)->pipe;
+ enum pipe pipe = to_intel_crtc(pipe_config->uapi.crtc)->pipe;
i915_reg_t reg = TVIDEO_DIP_CTL(pipe);
u32 val = I915_READ(reg);
@@ -362,7 +362,7 @@ static void cpt_write_infoframe(struct intel_encoder *encoder,
{
const u32 *data = frame;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
int i;
@@ -401,7 +401,7 @@ static void cpt_read_infoframe(struct intel_encoder *encoder,
void *frame, ssize_t len)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
u32 val, *data = frame;
int i;
@@ -420,7 +420,7 @@ static u32 cpt_infoframes_enabled(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum pipe pipe = to_intel_crtc(pipe_config->base.crtc)->pipe;
+ enum pipe pipe = to_intel_crtc(pipe_config->uapi.crtc)->pipe;
u32 val = I915_READ(TVIDEO_DIP_CTL(pipe));
if ((val & VIDEO_DIP_ENABLE) == 0)
@@ -438,7 +438,7 @@ static void vlv_write_infoframe(struct intel_encoder *encoder,
{
const u32 *data = frame;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
i915_reg_t reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
int i;
@@ -474,7 +474,7 @@ static void vlv_read_infoframe(struct intel_encoder *encoder,
void *frame, ssize_t len)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
u32 val, *data = frame;
int i;
@@ -493,7 +493,7 @@ static u32 vlv_infoframes_enabled(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum pipe pipe = to_intel_crtc(pipe_config->base.crtc)->pipe;
+ enum pipe pipe = to_intel_crtc(pipe_config->uapi.crtc)->pipe;
u32 val = I915_READ(VLV_TVIDEO_DIP_CTL(pipe));
if ((val & VIDEO_DIP_ENABLE) == 0)
@@ -708,7 +708,7 @@ intel_hdmi_compute_avi_infoframe(struct intel_encoder *encoder,
{
struct hdmi_avi_infoframe *frame = &crtc_state->infoframes.avi.avi;
const struct drm_display_mode *adjusted_mode =
- &crtc_state->base.adjusted_mode;
+ &crtc_state->hw.adjusted_mode;
struct drm_connector *connector = conn_state->connector;
int ret;
@@ -804,7 +804,7 @@ intel_hdmi_compute_hdmi_infoframe(struct intel_encoder *encoder,
ret = drm_hdmi_vendor_infoframe_from_display_mode(frame,
conn_state->connector,
- &crtc_state->base.adjusted_mode);
+ &crtc_state->hw.adjusted_mode);
if (WARN_ON(ret))
return false;
@@ -965,7 +965,7 @@ static bool intel_hdmi_set_gcp_infoframe(struct intel_encoder *encoder,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
i915_reg_t reg;
if ((crtc_state->infoframes.enable &
@@ -990,7 +990,7 @@ void intel_hdmi_read_gcp_infoframe(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
i915_reg_t reg;
if ((crtc_state->infoframes.enable &
@@ -1027,7 +1027,7 @@ static void intel_hdmi_compute_gcp_infoframe(struct intel_encoder *encoder,
/* Enable default_phase whenever the display mode is suitably aligned */
if (gcp_default_phase_possible(crtc_state->pipe_bpp,
- &crtc_state->base.adjusted_mode))
+ &crtc_state->hw.adjusted_mode))
crtc_state->infoframes.gcp |= GCP_DEFAULT_PHASE_ENABLE;
}
@@ -1037,7 +1037,7 @@ static void ibx_set_infoframes(struct intel_encoder *encoder,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base);
struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
@@ -1096,7 +1096,7 @@ static void cpt_set_infoframes(struct intel_encoder *encoder,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
@@ -1145,7 +1145,7 @@ static void vlv_set_infoframes(struct intel_encoder *encoder,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
i915_reg_t reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
@@ -1736,9 +1736,9 @@ static void intel_hdmi_prepare(struct intel_encoder *encoder,
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
- const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
+ const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
u32 hdmi_val;
intel_dp_dual_mode_set_tmds_output(intel_hdmi, true);
@@ -1829,7 +1829,7 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
tmp & HDMI_COLOR_RANGE_16_235)
pipe_config->limited_color_range = true;
- pipe_config->base.adjusted_mode.flags |= flags;
+ pipe_config->hw.adjusted_mode.flags |= flags;
if ((tmp & SDVO_COLOR_FORMAT_MASK) == HDMI_COLOR_FORMAT_12bpc)
dotclock = pipe_config->port_clock * 2 / 3;
@@ -1839,7 +1839,7 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
if (pipe_config->pixel_multiplier)
dotclock /= pipe_config->pixel_multiplier;
- pipe_config->base.adjusted_mode.crtc_clock = dotclock;
+ pipe_config->hw.adjusted_mode.crtc_clock = dotclock;
pipe_config->lane_count = 4;
@@ -1860,7 +1860,7 @@ static void intel_enable_hdmi_audio(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
- struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
WARN_ON(!pipe_config->has_hdmi_sink);
DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n",
@@ -1946,7 +1946,7 @@ static void cpt_enable_hdmi(struct intel_encoder *encoder,
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
enum pipe pipe = crtc->pipe;
u32 temp;
@@ -2010,7 +2010,7 @@ static void intel_disable_hdmi(struct intel_encoder *encoder,
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
struct intel_digital_port *intel_dig_port =
hdmi_to_dig_port(intel_hdmi);
- struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
u32 temp;
temp = I915_READ(intel_hdmi->hdmi_reg);
@@ -2210,12 +2210,12 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
int bpc)
{
struct drm_i915_private *dev_priv =
- to_i915(crtc_state->base.crtc->dev);
- struct drm_atomic_state *state = crtc_state->base.state;
+ to_i915(crtc_state->uapi.crtc->dev);
+ struct drm_atomic_state *state = crtc_state->uapi.state;
struct drm_connector_state *connector_state;
struct drm_connector *connector;
const struct drm_display_mode *adjusted_mode =
- &crtc_state->base.adjusted_mode;
+ &crtc_state->hw.adjusted_mode;
int i;
if (HAS_GMCH(dev_priv))
@@ -2240,7 +2240,7 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
for_each_new_connector_in_state(state, connector, connector_state, i) {
const struct drm_display_info *info = &connector->display_info;
- if (connector_state->crtc != crtc_state->base.crtc)
+ if (connector_state->crtc != crtc_state->uapi.crtc)
continue;
if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) {
@@ -2281,7 +2281,7 @@ static bool
intel_hdmi_ycbcr420_config(struct drm_connector *connector,
struct intel_crtc_state *config)
{
- struct intel_crtc *intel_crtc = to_intel_crtc(config->base.crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(config->uapi.crtc);
if (!connector->ycbcr_420_allowed) {
DRM_ERROR("Platform doesn't support YCBCR420 output\n");
@@ -2336,7 +2336,7 @@ static int intel_hdmi_compute_clock(struct intel_encoder *encoder,
{
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
const struct drm_display_mode *adjusted_mode =
- &crtc_state->base.adjusted_mode;
+ &crtc_state->hw.adjusted_mode;
int bpc, clock = adjusted_mode->crtc_clock;
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
@@ -2378,7 +2378,7 @@ static bool intel_hdmi_limited_color_range(const struct intel_crtc_state *crtc_s
const struct intel_digital_connector_state *intel_conn_state =
to_intel_digital_connector_state(conn_state);
const struct drm_display_mode *adjusted_mode =
- &crtc_state->base.adjusted_mode;
+ &crtc_state->hw.adjusted_mode;
/*
* Our YCbCr output is always limited range.
@@ -2406,7 +2406,7 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
{
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+ struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
struct drm_connector *connector = conn_state->connector;
struct drm_scdc *scdc = &connector->display_info.hdmi.scdc;
struct intel_digital_connector_state *intel_conn_state =
@@ -2451,8 +2451,9 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
if (ret)
return ret;
- /* Set user selected PAR to incoming mode's member */
- adjusted_mode->picture_aspect_ratio = conn_state->picture_aspect_ratio;
+ if (conn_state->picture_aspect_ratio)
+ adjusted_mode->picture_aspect_ratio =
+ conn_state->picture_aspect_ratio;
pipe_config->lane_count = 4;
@@ -2872,7 +2873,6 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c
intel_attach_colorspace_property(connector);
drm_connector_attach_content_type_property(connector);
- connector->state->picture_aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
drm_object_attach_property(&connector->base,
@@ -3131,20 +3131,29 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
struct intel_encoder *intel_encoder = &intel_dig_port->base;
struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i2c_adapter *ddc;
enum port port = intel_encoder->port;
struct cec_connector_info conn_info;
DRM_DEBUG_KMS("Adding HDMI connector on [ENCODER:%d:%s]\n",
intel_encoder->base.base.id, intel_encoder->base.name);
+ if (INTEL_GEN(dev_priv) < 12 && WARN_ON(port == PORT_A))
+ return;
+
if (WARN(intel_dig_port->max_lanes < 4,
"Not enough lanes (%d) for HDMI on [ENCODER:%d:%s]\n",
intel_dig_port->max_lanes, intel_encoder->base.base.id,
intel_encoder->base.name))
return;
- drm_connector_init(dev, connector, &intel_hdmi_connector_funcs,
- DRM_MODE_CONNECTOR_HDMIA);
+ intel_hdmi->ddc_bus = intel_hdmi_ddc_pin(dev_priv, port);
+ ddc = intel_gmbus_get_adapter(dev_priv, intel_hdmi->ddc_bus);
+
+ drm_connector_init_with_ddc(dev, connector,
+ &intel_hdmi_connector_funcs,
+ DRM_MODE_CONNECTOR_HDMIA,
+ ddc);
drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs);
connector->interlace_allowed = 1;
@@ -3154,10 +3163,6 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
connector->ycbcr_420_allowed = true;
- intel_hdmi->ddc_bus = intel_hdmi_ddc_pin(dev_priv, port);
-
- if (WARN_ON(port == PORT_A))
- return;
intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port);
if (HAS_DDI(dev_priv))
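The renames in this file all follow one rule, repeated across the series: object identity and userspace-visible request bits (crtc, state, event, mode_changed) move to the new uapi sub-state, while derived display parameters (adjusted_mode, mode, active) move to the hw sub-state. A minimal standalone sketch of that split, using simplified stand-in types rather than the real intel_crtc_state from intel_display_types.h:

    /* Simplified stand-ins only; the real intel_crtc_state carries
     * many more fields in both halves. */
    struct drm_crtc;

    struct display_mode_sketch {
            int crtc_clock;
            unsigned int flags;
    };

    struct crtc_state_sketch {
            struct {
                    struct drm_crtc *crtc;  /* identity: what uapi points at */
                    bool mode_changed;      /* userspace-visible modeset request */
            } uapi;
            struct {
                    bool active;            /* committed hardware state */
                    struct display_mode_sketch adjusted_mode;
            } hw;
    };

Every hunk above fits this pattern: the pipe is looked up through uapi.crtc, while timings and flags are driven from hw.adjusted_mode.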
diff --git a/drivers/gpu/drm/i915/display/intel_lspcon.c b/drivers/gpu/drm/i915/display/intel_lspcon.c
index f8f1308643a9..5145ff8b962b 100644
--- a/drivers/gpu/drm/i915/display/intel_lspcon.c
+++ b/drivers/gpu/drm/i915/display/intel_lspcon.c
@@ -189,7 +189,7 @@ void lspcon_ycbcr420_config(struct drm_connector *connector,
{
const struct drm_display_info *info = &connector->display_info;
const struct drm_display_mode *adjusted_mode =
- &crtc_state->base.adjusted_mode;
+ &crtc_state->hw.adjusted_mode;
if (drm_mode_is_420_only(info, adjusted_mode) &&
connector->ycbcr_420_allowed) {
@@ -475,7 +475,7 @@ void lspcon_set_infoframes(struct intel_encoder *encoder,
struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
struct intel_lspcon *lspcon = &dig_port->lspcon;
const struct drm_display_mode *adjusted_mode =
- &crtc_state->base.adjusted_mode;
+ &crtc_state->hw.adjusted_mode;
if (!lspcon->active) {
DRM_ERROR("Writing infoframes while LSPCON disabled ?\n");
diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c
index b1bc78623647..10696bb99dcf 100644
--- a/drivers/gpu/drm/i915/display/intel_lvds.c
+++ b/drivers/gpu/drm/i915/display/intel_lvds.c
@@ -135,7 +135,7 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
else
flags |= DRM_MODE_FLAG_PVSYNC;
- pipe_config->base.adjusted_mode.flags |= flags;
+ pipe_config->hw.adjusted_mode.flags |= flags;
if (INTEL_GEN(dev_priv) < 5)
pipe_config->gmch_pfit.lvds_border_bits =
@@ -148,7 +148,7 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
pipe_config->gmch_pfit.control |= tmp & PANEL_8TO6_DITHER_ENABLE;
}
- pipe_config->base.adjusted_mode.crtc_clock = pipe_config->port_clock;
+ pipe_config->hw.adjusted_mode.crtc_clock = pipe_config->port_clock;
}
static void intel_lvds_pps_get_hw_state(struct drm_i915_private *dev_priv,
@@ -230,8 +230,8 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder,
{
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
- const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
+ const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
enum pipe pipe = crtc->pipe;
u32 temp;
@@ -392,8 +392,8 @@ static int intel_lvds_compute_config(struct intel_encoder *intel_encoder,
to_lvds_encoder(&intel_encoder->base);
struct intel_connector *intel_connector =
lvds_encoder->attached_connector;
- struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
- struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
+ struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
+ struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc);
unsigned int lvds_bpp;
/* Should never happen!! */
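The readout paths touched here recover the pixel clock from the port clock. For LVDS the mapping is direct (crtc_clock = port_clock); the HDMI and SDVO get_config paths shown earlier additionally scale for 12bpc deep color and divide out the pixel multiplier. A sketch of that arithmetic, with a helper name of ours rather than the driver's:

    /* Dotclock recovery as done in the get_config paths: at 12 bpc the
     * link carries 3/2 bits per pixel, so the dotclock is 2/3 of the
     * port clock; a pixel multiplier then divides it back down. */
    static int recover_dotclock(int port_clock, int pipe_bpp,
                                int pixel_multiplier)
    {
            int dotclock = port_clock;

            if (pipe_bpp == 36)             /* 12 bpc per channel */
                    dotclock = port_clock * 2 / 3;

            if (pixel_multiplier)
                    dotclock /= pixel_multiplier;

            /* e.g. a 445500 kHz 12bpc TMDS clock yields 297000 kHz */
            return dotclock;
    }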
diff --git a/drivers/gpu/drm/i915/display/intel_opregion.c b/drivers/gpu/drm/i915/display/intel_opregion.c
index 969ade623691..e59b4992ba1b 100644
--- a/drivers/gpu/drm/i915/display/intel_opregion.c
+++ b/drivers/gpu/drm/i915/display/intel_opregion.c
@@ -941,6 +941,13 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
if (mboxes & MBOX_ACPI) {
DRM_DEBUG_DRIVER("Public ACPI methods supported\n");
opregion->acpi = base + OPREGION_ACPI_OFFSET;
+ /*
+ * Indicate we handle monitor hotplug events ourselves so we do
+ * not need ACPI notifications for them. Disabling these avoids
+ * triggering the AML code doing the notification, which may be
+ * broken as Windows also seems to disable these.
+ */
+ opregion->acpi->chpd = 1;
}
if (mboxes & MBOX_SWSCI) {
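The new write to opregion->acpi->chpd tells the firmware, through the memory-mapped ACPI OpRegion mailbox, that the driver consumes hotplug events itself. A sketch of the handshake with an invented two-field mailbox; the driver's real opregion_acpi struct is much larger and packed:

    struct acpi_mbox_sketch {
            unsigned int drdy;      /* driver ready */
            unsigned int chpd;      /* 1: driver handles hotplug itself */
    };

    static void claim_hotplug(volatile struct acpi_mbox_sketch *mbox)
    {
            /* Same effect as the added opregion->acpi->chpd = 1: stop
             * the firmware from issuing AML hotplug notifications. */
            mbox->chpd = 1;
    }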
diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
index 848ce07a8ec2..e40c3a0e2cd7 100644
--- a/drivers/gpu/drm/i915/display/intel_overlay.c
+++ b/drivers/gpu/drm/i915/display/intel_overlay.c
@@ -279,12 +279,21 @@ static void intel_overlay_flip_prepare(struct intel_overlay *overlay,
struct i915_vma *vma)
{
enum pipe pipe = overlay->crtc->pipe;
+ struct intel_frontbuffer *from = NULL, *to = NULL;
WARN_ON(overlay->old_vma);
- intel_frontbuffer_track(overlay->vma ? overlay->vma->obj->frontbuffer : NULL,
- vma ? vma->obj->frontbuffer : NULL,
- INTEL_FRONTBUFFER_OVERLAY(pipe));
+ if (overlay->vma)
+ from = intel_frontbuffer_get(overlay->vma->obj);
+ if (vma)
+ to = intel_frontbuffer_get(vma->obj);
+
+ intel_frontbuffer_track(from, to, INTEL_FRONTBUFFER_OVERLAY(pipe));
+
+ if (to)
+ intel_frontbuffer_put(to);
+ if (from)
+ intel_frontbuffer_put(from);
intel_frontbuffer_flip_prepare(overlay->i915,
INTEL_FRONTBUFFER_OVERLAY(pipe));
@@ -668,8 +677,8 @@ static void update_colorkey(struct intel_overlay *overlay,
if (overlay->color_key_enabled)
flags |= DST_KEY_ENABLE;
- if (state->base.visible)
- format = state->base.fb->format->format;
+ if (state->uapi.visible)
+ format = state->hw.fb->format->format;
switch (format) {
case DRM_FORMAT_C8:
@@ -758,15 +767,13 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
- i915_gem_object_lock(new_bo);
vma = i915_gem_object_pin_to_display_plane(new_bo,
0, NULL, PIN_MAPPABLE);
- i915_gem_object_unlock(new_bo);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto out_pin_section;
}
- intel_frontbuffer_flush(new_bo->frontbuffer, ORIGIN_DIRTYFB);
+ i915_gem_object_flush_frontbuffer(new_bo, ORIGIN_DIRTYFB);
if (!overlay->active) {
u32 oconfig;
@@ -1326,12 +1333,14 @@ err_put_bo:
void intel_overlay_setup(struct drm_i915_private *dev_priv)
{
struct intel_overlay *overlay;
+ struct intel_engine_cs *engine;
int ret;
if (!HAS_OVERLAY(dev_priv))
return;
- if (!HAS_ENGINE(dev_priv, RCS0))
+ engine = dev_priv->engine[RCS0];
+ if (!engine || !engine->kernel_context)
return;
overlay = kzalloc(sizeof(*overlay), GFP_KERNEL);
@@ -1339,7 +1348,7 @@ void intel_overlay_setup(struct drm_i915_private *dev_priv)
return;
overlay->i915 = dev_priv;
- overlay->context = dev_priv->engine[RCS0]->kernel_context;
+ overlay->context = engine->kernel_context;
GEM_BUG_ON(!overlay->context);
overlay->color_key = 0x0101fe;
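The overlay flip now takes temporary references on the frontbuffer objects before calling intel_frontbuffer_track(), since a frontbuffer no longer lives as long as its GEM object. The take/track/drop shape of that hunk, abstracted into toy stand-ins for the intel_frontbuffer_* calls:

    struct fb_ref { int refcount; };

    static struct fb_ref *fb_get(struct fb_ref *fb)
    {
            if (fb)
                    fb->refcount++;
            return fb;
    }

    static void fb_put(struct fb_ref *fb)
    {
            if (fb)
                    fb->refcount--;
    }

    static void fb_track(struct fb_ref *from, struct fb_ref *to,
                         unsigned int frontbuffer_bit)
    {
            (void)from; (void)to; (void)frontbuffer_bit;
            /* the real call moves per-bit bookkeeping between the two */
    }

    static void flip_prepare(struct fb_ref *old_fb, struct fb_ref *new_fb,
                             unsigned int bit)
    {
            struct fb_ref *from = fb_get(old_fb);   /* NULL-safe */
            struct fb_ref *to = fb_get(new_fb);

            fb_track(from, to, bit);

            fb_put(to);             /* the references only span the call */
            fb_put(from);
    }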
diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c
index bc14e9c0285a..7b3ec6eb3382 100644
--- a/drivers/gpu/drm/i915/display/intel_panel.c
+++ b/drivers/gpu/drm/i915/display/intel_panel.c
@@ -178,7 +178,7 @@ intel_pch_panel_fitting(struct intel_crtc *intel_crtc,
struct intel_crtc_state *pipe_config,
int fitting_mode)
{
- const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+ const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
int x = 0, y = 0, width = 0, height = 0;
/* Native modes don't need fitting */
@@ -300,7 +300,7 @@ static inline u32 panel_fitter_scaling(u32 source, u32 target)
static void i965_scale_aspect(struct intel_crtc_state *pipe_config,
u32 *pfit_control)
{
- const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+ const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
u32 scaled_width = adjusted_mode->crtc_hdisplay *
pipe_config->pipe_src_h;
u32 scaled_height = pipe_config->pipe_src_w *
@@ -321,7 +321,7 @@ static void i9xx_scale_aspect(struct intel_crtc_state *pipe_config,
u32 *pfit_control, u32 *pfit_pgm_ratios,
u32 *border)
{
- struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+ struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
u32 scaled_width = adjusted_mode->crtc_hdisplay *
pipe_config->pipe_src_h;
u32 scaled_height = pipe_config->pipe_src_w *
@@ -380,7 +380,7 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
{
struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
- struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+ struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
/* Native modes don't need fitting */
if (adjusted_mode->crtc_hdisplay == pipe_config->pipe_src_w &&
@@ -1047,7 +1047,7 @@ static void vlv_enable_backlight(const struct intel_crtc_state *crtc_state,
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
- enum pipe pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
+ enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
u32 ctl, ctl2;
ctl2 = I915_READ(VLV_BLC_PWM_CTL2(pipe));
@@ -1077,7 +1077,7 @@ static void bxt_enable_backlight(const struct intel_crtc_state *crtc_state,
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
- enum pipe pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
+ enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
u32 pwm_ctl, val;
/* Controller 1 uses the utility pin. */
@@ -1189,7 +1189,7 @@ void intel_panel_enable_backlight(const struct intel_crtc_state *crtc_state,
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
- enum pipe pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
+ enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
if (!panel->backlight.present)
return;
@@ -1840,13 +1840,22 @@ static int pwm_setup_backlight(struct intel_connector *connector,
enum pipe pipe)
{
struct drm_device *dev = connector->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_panel *panel = &connector->panel;
+ const char *desc;
int retval;
- /* Get the PWM chip for backlight control */
- panel->backlight.pwm = pwm_get(dev->dev, "pwm_backlight");
+ /* Get the right PWM chip for DSI backlight according to VBT */
+ if (dev_priv->vbt.dsi.config->pwm_blc == PPS_BLC_PMIC) {
+ panel->backlight.pwm = pwm_get(dev->dev, "pwm_pmic_backlight");
+ desc = "PMIC";
+ } else {
+ panel->backlight.pwm = pwm_get(dev->dev, "pwm_soc_backlight");
+ desc = "SoC";
+ }
+
if (IS_ERR(panel->backlight.pwm)) {
- DRM_ERROR("Failed to own the pwm chip\n");
+ DRM_ERROR("Failed to get the %s PWM chip\n", desc);
panel->backlight.pwm = NULL;
return -ENODEV;
}
@@ -1873,6 +1882,7 @@ static int pwm_setup_backlight(struct intel_connector *connector,
CRC_PMIC_PWM_PERIOD_NS);
panel->backlight.enabled = panel->backlight.level != 0;
+ DRM_INFO("Using %s PWM for LCD backlight control\n", desc);
return 0;
}
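pwm_setup_backlight() now picks the PWM consumer name from the VBT instead of hardcoding one. The selection reduces to a two-way lookup; this sketch mirrors the hunk above, where pwm_get() is the kernel's PWM consumer API from linux/pwm.h and the con_id strings are the ones the hunk requests:

    #include <linux/pwm.h>

    static struct pwm_device *get_backlight_pwm(struct device *dev,
                                                bool vbt_wants_pmic,
                                                const char **desc)
    {
            if (vbt_wants_pmic) {
                    *desc = "PMIC";
                    return pwm_get(dev, "pwm_pmic_backlight");
            }

            *desc = "SoC";
            return pwm_get(dev, "pwm_soc_backlight");
    }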
diff --git a/drivers/gpu/drm/i915/display/intel_pipe_crc.c b/drivers/gpu/drm/i915/display/intel_pipe_crc.c
index 6260a2082719..2746512f4466 100644
--- a/drivers/gpu/drm/i915/display/intel_pipe_crc.c
+++ b/drivers/gpu/drm/i915/display/intel_pipe_crc.c
@@ -309,13 +309,13 @@ retry:
goto put_state;
}
- pipe_config->base.mode_changed = pipe_config->has_psr;
+ pipe_config->uapi.mode_changed = pipe_config->has_psr;
pipe_config->crc_enabled = enable;
if (IS_HASWELL(dev_priv) &&
- pipe_config->base.active && crtc->pipe == PIPE_A &&
+ pipe_config->hw.active && crtc->pipe == PIPE_A &&
pipe_config->cpu_transcoder == TRANSCODER_EDP)
- pipe_config->base.mode_changed = true;
+ pipe_config->uapi.mode_changed = true;
ret = drm_atomic_commit(state);
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index 6a9f322d3fca..16e9ff47d519 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -26,6 +26,7 @@
#include "display/intel_dp.h"
#include "i915_drv.h"
+#include "intel_atomic.h"
#include "intel_display_types.h"
#include "intel_psr.h"
#include "intel_sprite.h"
@@ -401,7 +402,9 @@ static void intel_psr_enable_sink(struct intel_dp *intel_dp)
/* Enable ALPM at sink for psr2 */
if (dev_priv->psr.psr2_enabled) {
drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG,
- DP_ALPM_ENABLE);
+ DP_ALPM_ENABLE |
+ DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE);
+
dpcd_val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
} else {
if (dev_priv->psr.link_standby)
@@ -536,11 +539,11 @@ transcoder_has_psr2(struct drm_i915_private *dev_priv, enum transcoder trans)
static u32 intel_get_frame_time_us(const struct intel_crtc_state *cstate)
{
- if (!cstate || !cstate->base.active)
+ if (!cstate || !cstate->hw.active)
return 0;
return DIV_ROUND_UP(1000 * 1000,
- drm_mode_vrefresh(&cstate->base.adjusted_mode));
+ drm_mode_vrefresh(&cstate->hw.adjusted_mode));
}
static void psr2_program_idle_frames(struct drm_i915_private *dev_priv,
@@ -605,9 +608,9 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- int crtc_hdisplay = crtc_state->base.adjusted_mode.crtc_hdisplay;
- int crtc_vdisplay = crtc_state->base.adjusted_mode.crtc_vdisplay;
- int psr_max_h = 0, psr_max_v = 0;
+ int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
+ int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
+ int psr_max_h = 0, psr_max_v = 0, max_bpp = 0;
if (!dev_priv->psr.sink_psr2_support)
return false;
@@ -631,12 +634,15 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
if (INTEL_GEN(dev_priv) >= 12) {
psr_max_h = 5120;
psr_max_v = 3200;
+ max_bpp = 30;
} else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
psr_max_h = 4096;
psr_max_v = 2304;
+ max_bpp = 24;
} else if (IS_GEN(dev_priv, 9)) {
psr_max_h = 3640;
psr_max_v = 2304;
+ max_bpp = 24;
}
if (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v) {
@@ -646,6 +652,12 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
return false;
}
+ if (crtc_state->pipe_bpp > max_bpp) {
+ DRM_DEBUG_KMS("PSR2 not enabled, pipe bpp %d > max supported %d\n",
+ crtc_state->pipe_bpp, max_bpp);
+ return false;
+ }
+
/*
* HW sends SU blocks of size four scan lines, which means the starting
* X coordinate and Y granularity requirements will always be met. We
@@ -672,7 +684,7 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
const struct drm_display_mode *adjusted_mode =
- &crtc_state->base.adjusted_mode;
+ &crtc_state->hw.adjusted_mode;
int psr_setup_time;
if (!CAN_PSR(dev_priv))
@@ -792,7 +804,7 @@ static void intel_psr_enable_locked(struct drm_i915_private *dev_priv,
dev_priv->psr.psr2_enabled = intel_psr2_enabled(dev_priv, crtc_state);
dev_priv->psr.busy_frontbuffer_bits = 0;
- dev_priv->psr.pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
+ dev_priv->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
dev_priv->psr.dc3co_enabled = !!crtc_state->dc3co_exitline;
dev_priv->psr.dc3co_exit_delay = intel_get_frame_time_us(crtc_state);
dev_priv->psr.transcoder = crtc_state->cpu_transcoder;
@@ -924,6 +936,9 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
/* Disable PSR on Sink */
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
+ if (dev_priv->psr.psr2_enabled)
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, 0);
+
dev_priv->psr.enabled = false;
}
@@ -1039,7 +1054,7 @@ unlock:
int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state,
u32 *out_value)
{
- struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
if (!dev_priv->psr.enabled || !new_crtc_state->has_psr)
@@ -1096,7 +1111,7 @@ static int intel_psr_fastset_force(struct drm_i915_private *dev_priv)
struct drm_device *dev = &dev_priv->drm;
struct drm_modeset_acquire_ctx ctx;
struct drm_atomic_state *state;
- struct drm_crtc *crtc;
+ struct intel_crtc *crtc;
int err;
state = drm_atomic_state_alloc(dev);
@@ -1107,21 +1122,18 @@ static int intel_psr_fastset_force(struct drm_i915_private *dev_priv)
state->acquire_ctx = &ctx;
retry:
- drm_for_each_crtc(crtc, dev) {
- struct drm_crtc_state *crtc_state;
- struct intel_crtc_state *intel_crtc_state;
+ for_each_intel_crtc(dev, crtc) {
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_crtc_state(state, crtc);
- crtc_state = drm_atomic_get_crtc_state(state, crtc);
if (IS_ERR(crtc_state)) {
err = PTR_ERR(crtc_state);
goto error;
}
- intel_crtc_state = to_intel_crtc_state(crtc_state);
-
- if (crtc_state->active && intel_crtc_state->has_psr) {
+ if (crtc_state->hw.active && crtc_state->has_psr) {
/* Mark mode as changed to trigger a pipe->update() */
- crtc_state->mode_changed = true;
+ crtc_state->uapi.mode_changed = true;
break;
}
}
@@ -1379,11 +1391,80 @@ void intel_psr_init(struct drm_i915_private *dev_priv)
mutex_init(&dev_priv->psr.lock);
}
-void intel_psr_short_pulse(struct intel_dp *intel_dp)
+static int psr_get_status_and_error_status(struct intel_dp *intel_dp,
+ u8 *status, u8 *error_status)
+{
+ struct drm_dp_aux *aux = &intel_dp->aux;
+ int ret;
+
+ ret = drm_dp_dpcd_readb(aux, DP_PSR_STATUS, status);
+ if (ret != 1)
+ return ret;
+
+ ret = drm_dp_dpcd_readb(aux, DP_PSR_ERROR_STATUS, error_status);
+ if (ret != 1)
+ return ret;
+
+ *status = *status & DP_PSR_SINK_STATE_MASK;
+
+ return 0;
+}
+
+static void psr_alpm_check(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct drm_dp_aux *aux = &intel_dp->aux;
+ struct i915_psr *psr = &dev_priv->psr;
+ u8 val;
+ int r;
+
+ if (!psr->psr2_enabled)
+ return;
+
+ r = drm_dp_dpcd_readb(aux, DP_RECEIVER_ALPM_STATUS, &val);
+ if (r != 1) {
+ DRM_ERROR("Error reading ALPM status\n");
+ return;
+ }
+
+ if (val & DP_ALPM_LOCK_TIMEOUT_ERROR) {
+ intel_psr_disable_locked(intel_dp);
+ psr->sink_not_reliable = true;
+ DRM_DEBUG_KMS("ALPM lock timeout error, disabling PSR\n");
+
+ /* Clearing error */
+ drm_dp_dpcd_writeb(aux, DP_RECEIVER_ALPM_STATUS, val);
+ }
+}
+
+static void psr_capability_changed_check(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct i915_psr *psr = &dev_priv->psr;
u8 val;
+ int r;
+
+ r = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ESI, &val);
+ if (r != 1) {
+ DRM_ERROR("Error reading DP_PSR_ESI\n");
+ return;
+ }
+
+ if (val & DP_PSR_CAPS_CHANGE) {
+ intel_psr_disable_locked(intel_dp);
+ psr->sink_not_reliable = true;
+ DRM_DEBUG_KMS("Sink PSR capability changed, disabling PSR\n");
+
+ /* Clearing it */
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ESI, val);
+ }
+}
+
+void intel_psr_short_pulse(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct i915_psr *psr = &dev_priv->psr;
+ u8 status, error_status;
const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
DP_PSR_LINK_CRC_ERROR;
@@ -1396,38 +1477,34 @@ void intel_psr_short_pulse(struct intel_dp *intel_dp)
if (!psr->enabled || psr->dp != intel_dp)
goto exit;
- if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val) != 1) {
- DRM_ERROR("PSR_STATUS dpcd read failed\n");
+ if (psr_get_status_and_error_status(intel_dp, &status, &error_status)) {
+ DRM_ERROR("Error reading PSR status or error status\n");
goto exit;
}
- if ((val & DP_PSR_SINK_STATE_MASK) == DP_PSR_SINK_INTERNAL_ERROR) {
- DRM_DEBUG_KMS("PSR sink internal error, disabling PSR\n");
+ if (status == DP_PSR_SINK_INTERNAL_ERROR || (error_status & errors)) {
intel_psr_disable_locked(intel_dp);
psr->sink_not_reliable = true;
}
- if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ERROR_STATUS, &val) != 1) {
- DRM_ERROR("PSR_ERROR_STATUS dpcd read failed\n");
- goto exit;
- }
-
- if (val & DP_PSR_RFB_STORAGE_ERROR)
+ if (status == DP_PSR_SINK_INTERNAL_ERROR && !error_status)
+ DRM_DEBUG_KMS("PSR sink internal error, disabling PSR\n");
+ if (error_status & DP_PSR_RFB_STORAGE_ERROR)
DRM_DEBUG_KMS("PSR RFB storage error, disabling PSR\n");
- if (val & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
+ if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
DRM_DEBUG_KMS("PSR VSC SDP uncorrectable error, disabling PSR\n");
- if (val & DP_PSR_LINK_CRC_ERROR)
+ if (error_status & DP_PSR_LINK_CRC_ERROR)
DRM_DEBUG_KMS("PSR Link CRC error, disabling PSR\n");
- if (val & ~errors)
+ if (error_status & ~errors)
DRM_ERROR("PSR_ERROR_STATUS unhandled errors %x\n",
- val & ~errors);
- if (val & errors) {
- intel_psr_disable_locked(intel_dp);
- psr->sink_not_reliable = true;
- }
+ error_status & ~errors);
/* clear status register */
- drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, val);
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, error_status);
+
+ psr_alpm_check(intel_dp);
+ psr_capability_changed_check(intel_dp);
+
exit:
mutex_unlock(&psr->lock);
}
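intel_get_frame_time_us(), reused above for the DC3CO exit delay, is a plain refresh-period conversion. Worked through once: at 60 Hz, DIV_ROUND_UP(1000 * 1000, 60) = 16667 us. A standalone equivalent, with DIV_ROUND_UP matching the kernel macro of the same name:

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    static unsigned int frame_time_us(int vrefresh_hz, bool active)
    {
            if (!active || vrefresh_hz <= 0)
                    return 0;

            return DIV_ROUND_UP(1000 * 1000, vrefresh_hz); /* 60 Hz -> 16667 */
    }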
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
index 5b7f4baf7348..8758ee2a4442 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
@@ -1087,7 +1087,7 @@ static bool intel_sdvo_compute_avi_infoframe(struct intel_sdvo *intel_sdvo,
{
struct hdmi_avi_infoframe *frame = &crtc_state->infoframes.avi.avi;
const struct drm_display_mode *adjusted_mode =
- &crtc_state->base.adjusted_mode;
+ &crtc_state->hw.adjusted_mode;
int ret;
if (!crtc_state->has_hdmi_sink)
@@ -1276,8 +1276,8 @@ static int intel_sdvo_compute_config(struct intel_encoder *encoder,
to_intel_sdvo_connector_state(conn_state);
struct intel_sdvo_connector *intel_sdvo_connector =
to_intel_sdvo_connector(conn_state->connector);
- struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
- struct drm_display_mode *mode = &pipe_config->base.mode;
+ struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
+ struct drm_display_mode *mode = &pipe_config->hw.mode;
DRM_DEBUG_KMS("forcing bpc to 8 for SDVO\n");
pipe_config->pipe_bpp = 8*3;
@@ -1349,9 +1349,9 @@ static int intel_sdvo_compute_config(struct intel_encoder *encoder,
if (IS_TV(intel_sdvo_connector))
i9xx_adjust_sdvo_tv_clock(pipe_config);
- /* Set user selected PAR to incoming mode's member */
- if (intel_sdvo_connector->is_hdmi)
- adjusted_mode->picture_aspect_ratio = conn_state->picture_aspect_ratio;
+ if (conn_state->picture_aspect_ratio)
+ adjusted_mode->picture_aspect_ratio =
+ conn_state->picture_aspect_ratio;
if (!intel_sdvo_compute_avi_infoframe(intel_sdvo,
pipe_config, conn_state)) {
@@ -1429,13 +1429,13 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
- const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
const struct intel_sdvo_connector_state *sdvo_state =
to_intel_sdvo_connector_state(conn_state);
const struct intel_sdvo_connector *intel_sdvo_connector =
to_intel_sdvo_connector(conn_state->connector);
- const struct drm_display_mode *mode = &crtc_state->base.mode;
+ const struct drm_display_mode *mode = &crtc_state->hw.mode;
struct intel_sdvo *intel_sdvo = to_sdvo(intel_encoder);
u32 sdvox;
struct intel_sdvo_in_out_map in_out;
@@ -1629,7 +1629,7 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
flags |= DRM_MODE_FLAG_NVSYNC;
}
- pipe_config->base.adjusted_mode.flags |= flags;
+ pipe_config->hw.adjusted_mode.flags |= flags;
/*
* pixel multiplier readout is tricky: Only on i915g/gm it is stored in
@@ -1649,7 +1649,7 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
if (pipe_config->pixel_multiplier)
dotclock /= pipe_config->pixel_multiplier;
- pipe_config->base.adjusted_mode.crtc_clock = dotclock;
+ pipe_config->hw.adjusted_mode.crtc_clock = dotclock;
/* Cross check the port pixel multiplier with the sdvo encoder state. */
if (intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_CLOCK_RATE_MULT,
@@ -1701,7 +1701,7 @@ static void intel_sdvo_enable_audio(struct intel_sdvo *intel_sdvo,
const struct drm_connector_state *conn_state)
{
const struct drm_display_mode *adjusted_mode =
- &crtc_state->base.adjusted_mode;
+ &crtc_state->hw.adjusted_mode;
struct drm_connector *connector = conn_state->connector;
u8 *eld = connector->eld;
@@ -1723,7 +1723,7 @@ static void intel_disable_sdvo(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
- struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
u32 temp;
if (old_crtc_state->has_audio)
@@ -1785,7 +1785,7 @@ static void intel_enable_sdvo(struct intel_encoder *encoder,
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
- struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc);
u32 temp;
bool input1, input2;
int i;
@@ -2654,7 +2654,6 @@ intel_sdvo_add_hdmi_properties(struct intel_sdvo *intel_sdvo,
intel_attach_broadcast_rgb_property(&connector->base.base);
}
intel_attach_aspect_ratio_property(&connector->base.base);
- connector->base.base.state->picture_aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
}
static struct intel_sdvo_connector *intel_sdvo_connector_alloc(void)
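The HDMI and SDVO compute paths above now apply the same guard: the user's aspect-ratio property overrides the mode only when one was actually set. Because HDMI_PICTURE_ASPECT_NONE is 0 in the kernel's enum hdmi_picture_aspect, an unconditional assignment would clobber a mode-supplied ratio, which is also why the explicit NONE initializers were dropped from both connectors. The shared guard in isolation, with a sketch enum standing in for the real one:

    enum par_sketch { PAR_NONE = 0, PAR_4_3, PAR_16_9 };

    static void apply_user_par(enum par_sketch *mode_par,
                               enum par_sketch user_par)
    {
            if (user_par)           /* anything but NONE */
                    *mode_par = user_par;
    }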
diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c
index 72fda0430062..3f7b8f2ff671 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite.c
+++ b/drivers/gpu/drm/i915/display/intel_sprite.c
@@ -81,9 +81,9 @@ int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
*/
void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- const struct drm_display_mode *adjusted_mode = &new_crtc_state->base.adjusted_mode;
+ const struct drm_display_mode *adjusted_mode = &new_crtc_state->hw.adjusted_mode;
long timeout = msecs_to_jiffies_timeout(1);
int scanline, min, max, vblank_start;
wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base);
@@ -120,7 +120,7 @@ void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state)
crtc->debug.min_vbl = min;
crtc->debug.max_vbl = max;
- trace_i915_pipe_update_start(crtc);
+ trace_intel_pipe_update_start(crtc);
for (;;) {
/*
@@ -173,7 +173,7 @@ void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state)
crtc->debug.start_vbl_time = ktime_get();
crtc->debug.start_vbl_count = intel_crtc_get_vblank_counter(crtc);
- trace_i915_pipe_update_vblank_evaded(crtc);
+ trace_intel_pipe_update_vblank_evaded(crtc);
return;
irq_disable:
@@ -190,27 +190,28 @@ irq_disable:
*/
void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
enum pipe pipe = crtc->pipe;
int scanline_end = intel_get_crtc_scanline(crtc);
u32 end_vbl_count = intel_crtc_get_vblank_counter(crtc);
ktime_t end_vbl_time = ktime_get();
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- trace_i915_pipe_update_end(crtc, end_vbl_count, scanline_end);
+ trace_intel_pipe_update_end(crtc, end_vbl_count, scanline_end);
/* We're still in the vblank-evade critical section, this can't race.
* Would be slightly nice to just grab the vblank count and arm the
* event outside of the critical section - the spinlock might spin for a
* while ... */
- if (new_crtc_state->base.event) {
+ if (new_crtc_state->uapi.event) {
WARN_ON(drm_crtc_vblank_get(&crtc->base) != 0);
spin_lock(&crtc->base.dev->event_lock);
- drm_crtc_arm_vblank_event(&crtc->base, new_crtc_state->base.event);
+ drm_crtc_arm_vblank_event(&crtc->base,
+ new_crtc_state->uapi.event);
spin_unlock(&crtc->base.dev->event_lock);
- new_crtc_state->base.event = NULL;
+ new_crtc_state->uapi.event = NULL;
}
local_irq_enable();
@@ -239,9 +240,9 @@ void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state)
int intel_plane_check_stride(const struct intel_plane_state *plane_state)
{
- struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
- const struct drm_framebuffer *fb = plane_state->base.fb;
- unsigned int rotation = plane_state->base.rotation;
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ unsigned int rotation = plane_state->hw.rotation;
u32 stride, max_stride;
/*
@@ -251,7 +252,7 @@ int intel_plane_check_stride(const struct intel_plane_state *plane_state)
* kick in due the plane being invisible.
*/
if (intel_plane_can_remap(plane_state) &&
- !plane_state->base.visible)
+ !plane_state->uapi.visible)
return 0;
/* FIXME other color planes? */
@@ -271,10 +272,10 @@ int intel_plane_check_stride(const struct intel_plane_state *plane_state)
int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state)
{
- const struct drm_framebuffer *fb = plane_state->base.fb;
- struct drm_rect *src = &plane_state->base.src;
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ struct drm_rect *src = &plane_state->uapi.src;
u32 src_x, src_y, src_w, src_h, hsub, vsub;
- bool rotated = drm_rotation_90_or_270(plane_state->base.rotation);
+ bool rotated = drm_rotation_90_or_270(plane_state->hw.rotation);
/*
* Hardware doesn't handle subpixel coordinates.
@@ -327,8 +328,8 @@ skl_plane_ratio(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state,
unsigned int *num, unsigned int *den)
{
- struct drm_i915_private *dev_priv = to_i915(plane_state->base.plane->dev);
- const struct drm_framebuffer *fb = plane_state->base.fb;
+ struct drm_i915_private *dev_priv = to_i915(plane_state->uapi.plane->dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
if (fb->format->cpp[0] == 8) {
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
@@ -347,7 +348,7 @@ skl_plane_ratio(const struct intel_crtc_state *crtc_state,
static int skl_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
- struct drm_i915_private *dev_priv = to_i915(plane_state->base.plane->dev);
+ struct drm_i915_private *dev_priv = to_i915(plane_state->uapi.plane->dev);
unsigned int pixel_rate = crtc_state->pixel_rate;
unsigned int src_w, src_h, dst_w, dst_h;
unsigned int num, den;
@@ -358,10 +359,10 @@ static int skl_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
den *= 2;
- src_w = drm_rect_width(&plane_state->base.src) >> 16;
- src_h = drm_rect_height(&plane_state->base.src) >> 16;
- dst_w = drm_rect_width(&plane_state->base.dst);
- dst_h = drm_rect_height(&plane_state->base.dst);
+ src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
+ src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
+ dst_w = drm_rect_width(&plane_state->uapi.dst);
+ dst_h = drm_rect_height(&plane_state->uapi.dst);
/* Downscaling limits the maximum pixel rate */
dst_w = min(src_w, dst_w);
@@ -395,28 +396,28 @@ skl_program_scaler(struct intel_plane *plane,
const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- const struct drm_framebuffer *fb = plane_state->base.fb;
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
enum pipe pipe = plane->pipe;
int scaler_id = plane_state->scaler_id;
const struct intel_scaler *scaler =
&crtc_state->scaler_state.scalers[scaler_id];
- int crtc_x = plane_state->base.dst.x1;
- int crtc_y = plane_state->base.dst.y1;
- u32 crtc_w = drm_rect_width(&plane_state->base.dst);
- u32 crtc_h = drm_rect_height(&plane_state->base.dst);
+ int crtc_x = plane_state->uapi.dst.x1;
+ int crtc_y = plane_state->uapi.dst.y1;
+ u32 crtc_w = drm_rect_width(&plane_state->uapi.dst);
+ u32 crtc_h = drm_rect_height(&plane_state->uapi.dst);
u16 y_hphase, uv_rgb_hphase;
u16 y_vphase, uv_rgb_vphase;
int hscale, vscale;
- hscale = drm_rect_calc_hscale(&plane_state->base.src,
- &plane_state->base.dst,
+ hscale = drm_rect_calc_hscale(&plane_state->uapi.src,
+ &plane_state->uapi.dst,
0, INT_MAX);
- vscale = drm_rect_calc_vscale(&plane_state->base.src,
- &plane_state->base.dst,
+ vscale = drm_rect_calc_vscale(&plane_state->uapi.src,
+ &plane_state->uapi.dst,
0, INT_MAX);
/* TODO: handle sub-pixel coordinates */
- if (drm_format_info_is_yuv_semiplanar(fb->format) &&
+ if (intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier) &&
!icl_is_hdr_plane(dev_priv, plane->id)) {
y_hphase = skl_scaler_calc_phase(1, hscale, false);
y_vphase = skl_scaler_calc_phase(1, vscale, false);
@@ -541,10 +542,10 @@ icl_program_input_csc(struct intel_plane *plane,
};
const u16 *csc;
- if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
- csc = input_csc_matrix[plane_state->base.color_encoding];
+ if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
+ csc = input_csc_matrix[plane_state->hw.color_encoding];
else
- csc = input_csc_matrix_lr[plane_state->base.color_encoding];
+ csc = input_csc_matrix_lr[plane_state->hw.color_encoding];
I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 0), ROFF(csc[0]) |
GOFF(csc[1]));
@@ -558,7 +559,7 @@ icl_program_input_csc(struct intel_plane *plane,
I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 0),
PREOFF_YUV_TO_RGB_HI);
- if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
+ if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1), 0);
else
I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1),
@@ -574,7 +575,7 @@ static void
skl_program_plane(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state,
- int color_plane, bool slave, u32 plane_ctl)
+ int color_plane)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum plane_id plane_id = plane->id;
@@ -582,19 +583,20 @@ skl_program_plane(struct intel_plane *plane,
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
u32 surf_addr = plane_state->color_plane[color_plane].offset;
u32 stride = skl_plane_stride(plane_state, color_plane);
+ u32 aux_dist = plane_state->color_plane[1].offset - surf_addr;
u32 aux_stride = skl_plane_stride(plane_state, 1);
- int crtc_x = plane_state->base.dst.x1;
- int crtc_y = plane_state->base.dst.y1;
+ int crtc_x = plane_state->uapi.dst.x1;
+ int crtc_y = plane_state->uapi.dst.y1;
u32 x = plane_state->color_plane[color_plane].x;
u32 y = plane_state->color_plane[color_plane].y;
- u32 src_w = drm_rect_width(&plane_state->base.src) >> 16;
- u32 src_h = drm_rect_height(&plane_state->base.src) >> 16;
- struct intel_plane *linked = plane_state->planar_linked_plane;
- const struct drm_framebuffer *fb = plane_state->base.fb;
- u8 alpha = plane_state->base.alpha >> 8;
+ u32 src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
+ u32 src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ u8 alpha = plane_state->hw.alpha >> 8;
u32 plane_color_ctl = 0;
unsigned long irqflags;
u32 keymsk, keymax;
+ u32 plane_ctl = plane_state->ctl;
plane_ctl |= skl_plane_ctl_crtc(crtc_state);
@@ -623,29 +625,13 @@ skl_program_plane(struct intel_plane *plane,
I915_WRITE_FW(PLANE_STRIDE(pipe, plane_id), stride);
I915_WRITE_FW(PLANE_POS(pipe, plane_id), (crtc_y << 16) | crtc_x);
I915_WRITE_FW(PLANE_SIZE(pipe, plane_id), (src_h << 16) | src_w);
- I915_WRITE_FW(PLANE_AUX_DIST(pipe, plane_id),
- (plane_state->color_plane[1].offset - surf_addr) | aux_stride);
- if (icl_is_hdr_plane(dev_priv, plane_id)) {
- u32 cus_ctl = 0;
-
- if (linked) {
- /* Enable and use MPEG-2 chroma siting */
- cus_ctl = PLANE_CUS_ENABLE |
- PLANE_CUS_HPHASE_0 |
- PLANE_CUS_VPHASE_SIGN_NEGATIVE |
- PLANE_CUS_VPHASE_0_25;
-
- if (linked->id == PLANE_SPRITE5)
- cus_ctl |= PLANE_CUS_PLANE_7;
- else if (linked->id == PLANE_SPRITE4)
- cus_ctl |= PLANE_CUS_PLANE_6;
- else
- MISSING_CASE(linked->id);
- }
+ if (INTEL_GEN(dev_priv) < 12)
+ aux_dist |= aux_stride;
+ I915_WRITE_FW(PLANE_AUX_DIST(pipe, plane_id), aux_dist);
- I915_WRITE_FW(PLANE_CUS_CTL(pipe, plane_id), cus_ctl);
- }
+ if (icl_is_hdr_plane(dev_priv, plane_id))
+ I915_WRITE_FW(PLANE_CUS_CTL(pipe, plane_id), plane_state->cus_ctl);
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
I915_WRITE_FW(PLANE_COLOR_CTL(pipe, plane_id), plane_color_ctl);
@@ -675,7 +661,7 @@ skl_program_plane(struct intel_plane *plane,
I915_WRITE_FW(PLANE_SURF(pipe, plane_id),
intel_plane_ggtt_offset(plane_state) + surf_addr);
- if (!slave && plane_state->scaler_id >= 0)
+ if (plane_state->scaler_id >= 0)
skl_program_scaler(plane, crtc_state, plane_state);
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
@@ -688,24 +674,12 @@ skl_update_plane(struct intel_plane *plane,
{
int color_plane = 0;
- if (plane_state->planar_linked_plane) {
- /* Program the UV plane */
+ if (plane_state->planar_linked_plane && !plane_state->planar_slave)
+ /* Program the UV plane on planar master */
color_plane = 1;
- }
-
- skl_program_plane(plane, crtc_state, plane_state,
- color_plane, false, plane_state->ctl);
-}
-static void
-icl_update_slave(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
-{
- skl_program_plane(plane, crtc_state, plane_state, 0, true,
- plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE);
+ skl_program_plane(plane, crtc_state, plane_state, color_plane);
}
-
static void
skl_disable_plane(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state)
@@ -765,9 +739,9 @@ static void i9xx_plane_linear_gamma(u16 gamma[8])
static void
chv_update_csc(const struct intel_plane_state *plane_state)
{
- struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- const struct drm_framebuffer *fb = plane_state->base.fb;
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
enum plane_id plane_id = plane->id;
/*
* |r| | c0 c1 c2 | |cr|
@@ -793,7 +767,7 @@ chv_update_csc(const struct intel_plane_state *plane_state)
0, 4096, 7601,
},
};
- const s16 *csc = csc_matrix[plane_state->base.color_encoding];
+ const s16 *csc = csc_matrix[plane_state->hw.color_encoding];
/* Seems RGB data bypasses the CSC always */
if (!fb->format->is_yuv)
@@ -824,15 +798,15 @@ chv_update_csc(const struct intel_plane_state *plane_state)
static void
vlv_update_clrc(const struct intel_plane_state *plane_state)
{
- struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- const struct drm_framebuffer *fb = plane_state->base.fb;
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
enum pipe pipe = plane->pipe;
enum plane_id plane_id = plane->id;
int contrast, brightness, sh_scale, sh_sin, sh_cos;
if (fb->format->is_yuv &&
- plane_state->base.color_range == DRM_COLOR_YCBCR_LIMITED_RANGE) {
+ plane_state->hw.color_range == DRM_COLOR_YCBCR_LIMITED_RANGE) {
/*
* Expand limited range to full range:
* Contrast is applied first and is used to expand Y range.
@@ -866,7 +840,7 @@ vlv_plane_ratio(const struct intel_crtc_state *crtc_state,
unsigned int *num, unsigned int *den)
{
u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
- const struct drm_framebuffer *fb = plane_state->base.fb;
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
unsigned int cpp = fb->format->cpp[0];
/*
@@ -952,8 +926,8 @@ static u32 vlv_sprite_ctl_crtc(const struct intel_crtc_state *crtc_state)
static u32 vlv_sprite_ctl(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
- const struct drm_framebuffer *fb = plane_state->base.fb;
- unsigned int rotation = plane_state->base.rotation;
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ unsigned int rotation = plane_state->hw.rotation;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
u32 sprctl;
@@ -972,6 +946,9 @@ static u32 vlv_sprite_ctl(const struct intel_crtc_state *crtc_state,
case DRM_FORMAT_VYUY:
sprctl |= SP_FORMAT_YUV422 | SP_YUV_ORDER_VYUY;
break;
+ case DRM_FORMAT_C8:
+ sprctl |= SP_FORMAT_8BPP;
+ break;
case DRM_FORMAT_RGB565:
sprctl |= SP_FORMAT_BGR565;
break;
@@ -987,6 +964,12 @@ static u32 vlv_sprite_ctl(const struct intel_crtc_state *crtc_state,
case DRM_FORMAT_ABGR2101010:
sprctl |= SP_FORMAT_RGBA1010102;
break;
+ case DRM_FORMAT_XRGB2101010:
+ sprctl |= SP_FORMAT_BGRX1010102;
+ break;
+ case DRM_FORMAT_ARGB2101010:
+ sprctl |= SP_FORMAT_BGRA1010102;
+ break;
case DRM_FORMAT_XBGR8888:
sprctl |= SP_FORMAT_RGBX8888;
break;
@@ -998,7 +981,7 @@ static u32 vlv_sprite_ctl(const struct intel_crtc_state *crtc_state,
return 0;
}
- if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
+ if (plane_state->hw.color_encoding == DRM_COLOR_YCBCR_BT709)
sprctl |= SP_YUV_FORMAT_BT709;
if (fb->modifier == I915_FORMAT_MOD_X_TILED)
@@ -1018,9 +1001,9 @@ static u32 vlv_sprite_ctl(const struct intel_crtc_state *crtc_state,
static void vlv_update_gamma(const struct intel_plane_state *plane_state)
{
- struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- const struct drm_framebuffer *fb = plane_state->base.fb;
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
enum pipe pipe = plane->pipe;
enum plane_id plane_id = plane->id;
u16 gamma[8];
@@ -1052,10 +1035,10 @@ vlv_update_plane(struct intel_plane *plane,
u32 sprsurf_offset = plane_state->color_plane[0].offset;
u32 linear_offset;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
- int crtc_x = plane_state->base.dst.x1;
- int crtc_y = plane_state->base.dst.y1;
- u32 crtc_w = drm_rect_width(&plane_state->base.dst);
- u32 crtc_h = drm_rect_height(&plane_state->base.dst);
+ int crtc_x = plane_state->uapi.dst.x1;
+ int crtc_y = plane_state->uapi.dst.y1;
+ u32 crtc_w = drm_rect_width(&plane_state->uapi.dst);
+ u32 crtc_h = drm_rect_height(&plane_state->uapi.dst);
u32 x = plane_state->color_plane[0].x;
u32 y = plane_state->color_plane[0].y;
unsigned long irqflags;
@@ -1150,7 +1133,7 @@ static void ivb_plane_ratio(const struct intel_crtc_state *crtc_state,
unsigned int *num, unsigned int *den)
{
u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
- const struct drm_framebuffer *fb = plane_state->base.fb;
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
unsigned int cpp = fb->format->cpp[0];
if (hweight8(active_planes) == 2) {
@@ -1186,7 +1169,7 @@ static void ivb_plane_ratio_scaling(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state,
unsigned int *num, unsigned int *den)
{
- const struct drm_framebuffer *fb = plane_state->base.fb;
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
unsigned int cpp = fb->format->cpp[0];
switch (cpp) {
@@ -1244,8 +1227,8 @@ static int ivb_sprite_min_cdclk(const struct intel_crtc_state *crtc_state,
*/
pixel_rate = crtc_state->pixel_rate;
- src_w = drm_rect_width(&plane_state->base.src) >> 16;
- dst_w = drm_rect_width(&plane_state->base.dst);
+ src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
+ dst_w = drm_rect_width(&plane_state->uapi.dst);
if (src_w != dst_w)
ivb_plane_ratio_scaling(crtc_state, plane_state, &num, &den);
@@ -1264,7 +1247,7 @@ static void hsw_plane_ratio(const struct intel_crtc_state *crtc_state,
unsigned int *num, unsigned int *den)
{
u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
- const struct drm_framebuffer *fb = plane_state->base.fb;
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
unsigned int cpp = fb->format->cpp[0];
if (hweight8(active_planes) == 2) {
@@ -1319,8 +1302,8 @@ static u32 ivb_sprite_ctl_crtc(const struct intel_crtc_state *crtc_state)
static bool ivb_need_sprite_gamma(const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv =
- to_i915(plane_state->base.plane->dev);
- const struct drm_framebuffer *fb = plane_state->base.fb;
+ to_i915(plane_state->uapi.plane->dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
return fb->format->cpp[0] == 8 &&
(IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv));
@@ -1330,9 +1313,9 @@ static u32 ivb_sprite_ctl(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv =
- to_i915(plane_state->base.plane->dev);
- const struct drm_framebuffer *fb = plane_state->base.fb;
- unsigned int rotation = plane_state->base.rotation;
+ to_i915(plane_state->uapi.plane->dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ unsigned int rotation = plane_state->hw.rotation;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
u32 sprctl;
@@ -1348,6 +1331,12 @@ static u32 ivb_sprite_ctl(const struct intel_crtc_state *crtc_state,
case DRM_FORMAT_XRGB8888:
sprctl |= SPRITE_FORMAT_RGBX888;
break;
+ case DRM_FORMAT_XBGR2101010:
+ sprctl |= SPRITE_FORMAT_RGBX101010 | SPRITE_RGB_ORDER_RGBX;
+ break;
+ case DRM_FORMAT_XRGB2101010:
+ sprctl |= SPRITE_FORMAT_RGBX101010;
+ break;
case DRM_FORMAT_XBGR16161616F:
sprctl |= SPRITE_FORMAT_RGBX161616 | SPRITE_RGB_ORDER_RGBX;
break;
@@ -1374,10 +1363,10 @@ static u32 ivb_sprite_ctl(const struct intel_crtc_state *crtc_state,
if (!ivb_need_sprite_gamma(plane_state))
sprctl |= SPRITE_INT_GAMMA_DISABLE;
- if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
+ if (plane_state->hw.color_encoding == DRM_COLOR_YCBCR_BT709)
sprctl |= SPRITE_YUV_TO_RGB_CSC_FORMAT_BT709;
- if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
+ if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
sprctl |= SPRITE_YUV_RANGE_CORRECTION_DISABLE;
if (fb->modifier == I915_FORMAT_MOD_X_TILED)
@@ -1421,7 +1410,7 @@ static void ivb_sprite_linear_gamma(const struct intel_plane_state *plane_state,
static void ivb_update_gamma(const struct intel_plane_state *plane_state)
{
- struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum pipe pipe = plane->pipe;
u16 gamma[18];
@@ -1460,14 +1449,14 @@ ivb_update_plane(struct intel_plane *plane,
u32 sprsurf_offset = plane_state->color_plane[0].offset;
u32 linear_offset;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
- int crtc_x = plane_state->base.dst.x1;
- int crtc_y = plane_state->base.dst.y1;
- u32 crtc_w = drm_rect_width(&plane_state->base.dst);
- u32 crtc_h = drm_rect_height(&plane_state->base.dst);
+ int crtc_x = plane_state->uapi.dst.x1;
+ int crtc_y = plane_state->uapi.dst.y1;
+ u32 crtc_w = drm_rect_width(&plane_state->uapi.dst);
+ u32 crtc_h = drm_rect_height(&plane_state->uapi.dst);
u32 x = plane_state->color_plane[0].x;
u32 y = plane_state->color_plane[0].y;
- u32 src_w = drm_rect_width(&plane_state->base.src) >> 16;
- u32 src_h = drm_rect_height(&plane_state->base.src) >> 16;
+ u32 src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
+ u32 src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
u32 sprctl, sprscale = 0;
unsigned long irqflags;
@@ -1566,7 +1555,7 @@ ivb_plane_get_hw_state(struct intel_plane *plane,
static int g4x_sprite_min_cdclk(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
- const struct drm_framebuffer *fb = plane_state->base.fb;
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
unsigned int hscale, pixel_rate;
unsigned int limit, decimate;
@@ -1580,8 +1569,8 @@ static int g4x_sprite_min_cdclk(const struct intel_crtc_state *crtc_state,
pixel_rate = crtc_state->pixel_rate;
/* Horizontal downscaling limits the maximum pixel rate */
- hscale = drm_rect_calc_hscale(&plane_state->base.src,
- &plane_state->base.dst,
+ hscale = drm_rect_calc_hscale(&plane_state->uapi.src,
+ &plane_state->uapi.dst,
0, INT_MAX);
if (hscale < 0x10000)
return pixel_rate;
@@ -1635,9 +1624,9 @@ static u32 g4x_sprite_ctl(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv =
- to_i915(plane_state->base.plane->dev);
- const struct drm_framebuffer *fb = plane_state->base.fb;
- unsigned int rotation = plane_state->base.rotation;
+ to_i915(plane_state->uapi.plane->dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ unsigned int rotation = plane_state->hw.rotation;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
u32 dvscntr;
@@ -1653,6 +1642,12 @@ static u32 g4x_sprite_ctl(const struct intel_crtc_state *crtc_state,
case DRM_FORMAT_XRGB8888:
dvscntr |= DVS_FORMAT_RGBX888;
break;
+ case DRM_FORMAT_XBGR2101010:
+ dvscntr |= DVS_FORMAT_RGBX101010 | DVS_RGB_ORDER_XBGR;
+ break;
+ case DRM_FORMAT_XRGB2101010:
+ dvscntr |= DVS_FORMAT_RGBX101010;
+ break;
case DRM_FORMAT_XBGR16161616F:
dvscntr |= DVS_FORMAT_RGBX161616 | DVS_RGB_ORDER_XBGR;
break;
@@ -1676,10 +1671,10 @@ static u32 g4x_sprite_ctl(const struct intel_crtc_state *crtc_state,
return 0;
}
- if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
+ if (plane_state->hw.color_encoding == DRM_COLOR_YCBCR_BT709)
dvscntr |= DVS_YUV_FORMAT_BT709;
- if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
+ if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
dvscntr |= DVS_YUV_RANGE_CORRECTION_DISABLE;
if (fb->modifier == I915_FORMAT_MOD_X_TILED)
@@ -1698,9 +1693,9 @@ static u32 g4x_sprite_ctl(const struct intel_crtc_state *crtc_state,
static void g4x_update_gamma(const struct intel_plane_state *plane_state)
{
- struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- const struct drm_framebuffer *fb = plane_state->base.fb;
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
enum pipe pipe = plane->pipe;
u16 gamma[8];
int i;
@@ -1730,9 +1725,9 @@ static void ilk_sprite_linear_gamma(u16 gamma[17])
static void ilk_update_gamma(const struct intel_plane_state *plane_state)
{
- struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- const struct drm_framebuffer *fb = plane_state->base.fb;
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
enum pipe pipe = plane->pipe;
u16 gamma[17];
int i;
@@ -1766,14 +1761,14 @@ g4x_update_plane(struct intel_plane *plane,
u32 dvssurf_offset = plane_state->color_plane[0].offset;
u32 linear_offset;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
- int crtc_x = plane_state->base.dst.x1;
- int crtc_y = plane_state->base.dst.y1;
- u32 crtc_w = drm_rect_width(&plane_state->base.dst);
- u32 crtc_h = drm_rect_height(&plane_state->base.dst);
+ int crtc_x = plane_state->uapi.dst.x1;
+ int crtc_y = plane_state->uapi.dst.y1;
+ u32 crtc_w = drm_rect_width(&plane_state->uapi.dst);
+ u32 crtc_h = drm_rect_height(&plane_state->uapi.dst);
u32 x = plane_state->color_plane[0].x;
u32 y = plane_state->color_plane[0].y;
- u32 src_w = drm_rect_width(&plane_state->base.src) >> 16;
- u32 src_h = drm_rect_height(&plane_state->base.src) >> 16;
+ u32 src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
+ u32 src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
u32 dvscntr, dvsscale = 0;
unsigned long irqflags;
@@ -1886,12 +1881,12 @@ static int
g4x_sprite_check_scaling(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state)
{
- const struct drm_framebuffer *fb = plane_state->base.fb;
- const struct drm_rect *src = &plane_state->base.src;
- const struct drm_rect *dst = &plane_state->base.dst;
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ const struct drm_rect *src = &plane_state->uapi.src;
+ const struct drm_rect *dst = &plane_state->uapi.dst;
int src_x, src_w, src_h, crtc_w, crtc_h;
const struct drm_display_mode *adjusted_mode =
- &crtc_state->base.adjusted_mode;
+ &crtc_state->hw.adjusted_mode;
unsigned int stride = plane_state->color_plane[0].stride;
unsigned int cpp = fb->format->cpp[0];
unsigned int width_bytes;
@@ -1947,13 +1942,13 @@ static int
g4x_sprite_check(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state)
{
- struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
int min_scale = DRM_PLANE_HELPER_NO_SCALING;
int max_scale = DRM_PLANE_HELPER_NO_SCALING;
int ret;
- if (intel_fb_scalable(plane_state->base.fb)) {
+ if (intel_fb_scalable(plane_state->hw.fb)) {
if (INTEL_GEN(dev_priv) < 7) {
min_scale = 1;
max_scale = 16 << 16;
@@ -1963,8 +1958,8 @@ g4x_sprite_check(struct intel_crtc_state *crtc_state,
}
}
- ret = drm_atomic_helper_check_plane_state(&plane_state->base,
- &crtc_state->base,
+ ret = drm_atomic_helper_check_plane_state(&plane_state->uapi,
+ &crtc_state->uapi,
min_scale, max_scale,
true, true);
if (ret)
@@ -1974,7 +1969,7 @@ g4x_sprite_check(struct intel_crtc_state *crtc_state,
if (ret)
return ret;
- if (!plane_state->base.visible)
+ if (!plane_state->uapi.visible)
return 0;
ret = intel_plane_check_src_coordinates(plane_state);
@@ -1995,9 +1990,9 @@ g4x_sprite_check(struct intel_crtc_state *crtc_state,
int chv_plane_check_rotation(const struct intel_plane_state *plane_state)
{
- struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- unsigned int rotation = plane_state->base.rotation;
+ unsigned int rotation = plane_state->hw.rotation;
/* CHV ignores the mirror bit when the rotate bit is set :( */
if (IS_CHERRYVIEW(dev_priv) &&
@@ -2020,8 +2015,8 @@ vlv_sprite_check(struct intel_crtc_state *crtc_state,
if (ret)
return ret;
- ret = drm_atomic_helper_check_plane_state(&plane_state->base,
- &crtc_state->base,
+ ret = drm_atomic_helper_check_plane_state(&plane_state->uapi,
+ &crtc_state->uapi,
DRM_PLANE_HELPER_NO_SCALING,
DRM_PLANE_HELPER_NO_SCALING,
true, true);
@@ -2032,7 +2027,7 @@ vlv_sprite_check(struct intel_crtc_state *crtc_state,
if (ret)
return ret;
- if (!plane_state->base.visible)
+ if (!plane_state->uapi.visible)
return 0;
ret = intel_plane_check_src_coordinates(plane_state);
@@ -2047,10 +2042,10 @@ vlv_sprite_check(struct intel_crtc_state *crtc_state,
static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
- struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- const struct drm_framebuffer *fb = plane_state->base.fb;
- unsigned int rotation = plane_state->base.rotation;
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ unsigned int rotation = plane_state->hw.rotation;
struct drm_format_name_buf format_name;
if (!fb)
@@ -2105,12 +2100,13 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
}
/* Y-tiling is not supported in IF-ID Interlace mode */
- if (crtc_state->base.enable &&
- crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE &&
+ if (crtc_state->hw.enable &&
+ crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE &&
(fb->modifier == I915_FORMAT_MOD_Y_TILED ||
fb->modifier == I915_FORMAT_MOD_Yf_TILED ||
fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
- fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS)) {
+ fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS ||
+ fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS)) {
DRM_DEBUG_KMS("Y/Yf tiling not supported in IF-ID mode\n");
return -EINVAL;
}
@@ -2122,9 +2118,9 @@ static int skl_plane_check_dst_coordinates(const struct intel_crtc_state *crtc_s
const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv =
- to_i915(plane_state->base.plane->dev);
- int crtc_x = plane_state->base.dst.x1;
- int crtc_w = drm_rect_width(&plane_state->base.dst);
+ to_i915(plane_state->uapi.plane->dev);
+ int crtc_x = plane_state->uapi.dst.x1;
+ int crtc_w = drm_rect_width(&plane_state->uapi.dst);
int pipe_src_w = crtc_state->pipe_src_w;
/*
@@ -2150,12 +2146,13 @@ static int skl_plane_check_dst_coordinates(const struct intel_crtc_state *crtc_s
static int skl_plane_check_nv12_rotation(const struct intel_plane_state *plane_state)
{
- const struct drm_framebuffer *fb = plane_state->base.fb;
- unsigned int rotation = plane_state->base.rotation;
- int src_w = drm_rect_width(&plane_state->base.src) >> 16;
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ unsigned int rotation = plane_state->hw.rotation;
+ int src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
/* Display WA #1106 */
- if (drm_format_info_is_yuv_semiplanar(fb->format) && src_w & 3 &&
+ if (intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier) &&
+ src_w & 3 &&
(rotation == DRM_MODE_ROTATE_270 ||
rotation == (DRM_MODE_REFLECT_X | DRM_MODE_ROTATE_90))) {
DRM_DEBUG_KMS("src width must be multiple of 4 for rotated planar YUV\n");
@@ -2175,7 +2172,7 @@ static int skl_plane_max_scale(struct drm_i915_private *dev_priv,
* FIXME need to properly check this later.
*/
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv) ||
- !drm_format_info_is_yuv_semiplanar(fb->format))
+ !intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier))
return 0x30000 - 1;
else
return 0x20000 - 1;
@@ -2184,9 +2181,9 @@ static int skl_plane_max_scale(struct drm_i915_private *dev_priv,
static int skl_plane_check(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state)
{
- struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- const struct drm_framebuffer *fb = plane_state->base.fb;
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
int min_scale = DRM_PLANE_HELPER_NO_SCALING;
int max_scale = DRM_PLANE_HELPER_NO_SCALING;
int ret;
@@ -2201,8 +2198,8 @@ static int skl_plane_check(struct intel_crtc_state *crtc_state,
max_scale = skl_plane_max_scale(dev_priv, fb);
}
- ret = drm_atomic_helper_check_plane_state(&plane_state->base,
- &crtc_state->base,
+ ret = drm_atomic_helper_check_plane_state(&plane_state->uapi,
+ &crtc_state->uapi,
min_scale, max_scale,
true, true);
if (ret)
@@ -2212,7 +2209,7 @@ static int skl_plane_check(struct intel_crtc_state *crtc_state,
if (ret)
return ret;
- if (!plane_state->base.visible)
+ if (!plane_state->uapi.visible)
return 0;
ret = skl_plane_check_dst_coordinates(crtc_state, plane_state);
@@ -2228,8 +2225,8 @@ static int skl_plane_check(struct intel_crtc_state *crtc_state,
return ret;
/* HW only has 8 bits of pixel precision, disable plane if invisible */
- if (!(plane_state->base.alpha >> 8))
- plane_state->base.visible = false;
+ if (!(plane_state->hw.alpha >> 8))
+ plane_state->uapi.visible = false;
plane_state->ctl = skl_plane_ctl(crtc_state, plane_state);
@@ -2237,6 +2234,15 @@ static int skl_plane_check(struct intel_crtc_state *crtc_state,
plane_state->color_ctl = glk_plane_color_ctl(crtc_state,
plane_state);
+ if (intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier) &&
+ icl_is_hdr_plane(dev_priv, plane->id))
+ /* Enable and use MPEG-2 chroma siting */
+ plane_state->cus_ctl = PLANE_CUS_ENABLE |
+ PLANE_CUS_HPHASE_0 |
+ PLANE_CUS_VPHASE_SIGN_NEGATIVE | PLANE_CUS_VPHASE_0_25;
+ else
+ plane_state->cus_ctl = 0;
+
return 0;
}
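
The alpha check in the hunk above exists because the 16-bit uapi alpha (DRM_BLEND_ALPHA_OPAQUE is 0xffff) reaches the hardware with only 8 bits of precision, so anything below 1/256 of full opacity rounds to fully transparent and the plane can simply be dropped. A minimal sketch of that truncation (the helper is illustrative, not part of the patch):

#include <linux/types.h>

/* Only the top 8 bits of the 16-bit uapi alpha reach the HW. */
static bool plane_alpha_visible(u16 alpha)
{
	/* e.g. alpha == 0x00ff (just under 1/256 opacity) gives 0,
	 * which is why skl_plane_check() clears uapi.visible above. */
	return alpha >> 8;
}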
@@ -2248,7 +2254,7 @@ static bool has_dst_key_in_primary_plane(struct drm_i915_private *dev_priv)
static void intel_plane_set_ckey(struct intel_plane_state *plane_state,
const struct drm_intel_sprite_colorkey *set)
{
- struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
@@ -2375,6 +2381,8 @@ static const u64 i9xx_plane_format_modifiers[] = {
static const u32 snb_plane_formats[] = {
DRM_FORMAT_XRGB8888,
DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_XBGR2101010,
DRM_FORMAT_XRGB16161616F,
DRM_FORMAT_XBGR16161616F,
DRM_FORMAT_YUYV,
@@ -2384,12 +2392,30 @@ static const u32 snb_plane_formats[] = {
};
static const u32 vlv_plane_formats[] = {
+ DRM_FORMAT_C8,
DRM_FORMAT_RGB565,
- DRM_FORMAT_ABGR8888,
- DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_XRGB8888,
DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_ABGR2101010,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YVYU,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_VYUY,
+};
+
+static const u32 chv_pipe_b_sprite_formats[] = {
+ DRM_FORMAT_C8,
+ DRM_FORMAT_RGB565,
DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XRGB2101010,
DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_ARGB2101010,
DRM_FORMAT_ABGR2101010,
DRM_FORMAT_YUYV,
DRM_FORMAT_YVYU,
@@ -2462,6 +2488,8 @@ static const u32 icl_sdr_y_plane_formats[] = {
DRM_FORMAT_ABGR8888,
DRM_FORMAT_XRGB2101010,
DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_ARGB2101010,
+ DRM_FORMAT_ABGR2101010,
DRM_FORMAT_YUYV,
DRM_FORMAT_YVYU,
DRM_FORMAT_UYVY,
@@ -2483,6 +2511,8 @@ static const u32 icl_sdr_uv_plane_formats[] = {
DRM_FORMAT_ABGR8888,
DRM_FORMAT_XRGB2101010,
DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_ARGB2101010,
+ DRM_FORMAT_ABGR2101010,
DRM_FORMAT_YUYV,
DRM_FORMAT_YVYU,
DRM_FORMAT_UYVY,
@@ -2508,6 +2538,8 @@ static const u32 icl_hdr_plane_formats[] = {
DRM_FORMAT_ABGR8888,
DRM_FORMAT_XRGB2101010,
DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_ARGB2101010,
+ DRM_FORMAT_ABGR2101010,
DRM_FORMAT_XRGB16161616F,
DRM_FORMAT_XBGR16161616F,
DRM_FORMAT_ARGB16161616F,
@@ -2546,7 +2578,8 @@ static const u64 skl_plane_format_modifiers_ccs[] = {
DRM_FORMAT_MOD_INVALID
};
-static const u64 gen12_plane_format_modifiers_noccs[] = {
+static const u64 gen12_plane_format_modifiers_ccs[] = {
+ I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS,
I915_FORMAT_MOD_Y_TILED,
I915_FORMAT_MOD_X_TILED,
DRM_FORMAT_MOD_LINEAR,
@@ -2593,6 +2626,8 @@ static bool snb_sprite_format_mod_supported(struct drm_plane *_plane,
switch (format) {
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_XBGR2101010:
case DRM_FORMAT_XRGB16161616F:
case DRM_FORMAT_XBGR16161616F:
case DRM_FORMAT_YUYV:
@@ -2620,6 +2655,7 @@ static bool vlv_sprite_format_mod_supported(struct drm_plane *_plane,
}
switch (format) {
+ case DRM_FORMAT_C8:
case DRM_FORMAT_RGB565:
case DRM_FORMAT_ABGR8888:
case DRM_FORMAT_ARGB8888:
@@ -2627,6 +2663,8 @@ static bool vlv_sprite_format_mod_supported(struct drm_plane *_plane,
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_XBGR2101010:
case DRM_FORMAT_ABGR2101010:
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_ARGB2101010:
case DRM_FORMAT_YUYV:
case DRM_FORMAT_YVYU:
case DRM_FORMAT_UYVY:
@@ -2671,6 +2709,8 @@ static bool skl_plane_format_mod_supported(struct drm_plane *_plane,
case DRM_FORMAT_RGB565:
case DRM_FORMAT_XRGB2101010:
case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_ARGB2101010:
+ case DRM_FORMAT_ABGR2101010:
case DRM_FORMAT_YUYV:
case DRM_FORMAT_YVYU:
case DRM_FORMAT_UYVY:
@@ -2710,6 +2750,7 @@ static bool gen12_plane_format_mod_supported(struct drm_plane *_plane,
case DRM_FORMAT_MOD_LINEAR:
case I915_FORMAT_MOD_X_TILED:
case I915_FORMAT_MOD_Y_TILED:
+ case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
break;
default:
return false;
@@ -2720,9 +2761,14 @@ static bool gen12_plane_format_mod_supported(struct drm_plane *_plane,
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_ARGB8888:
case DRM_FORMAT_ABGR8888:
+ if (is_ccs_modifier(modifier))
+ return true;
+ /* fall through */
case DRM_FORMAT_RGB565:
case DRM_FORMAT_XRGB2101010:
case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_ARGB2101010:
+ case DRM_FORMAT_ABGR2101010:
case DRM_FORMAT_YUYV:
case DRM_FORMAT_YVYU:
case DRM_FORMAT_UYVY:
@@ -2916,8 +2962,6 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
plane->get_hw_state = skl_plane_get_hw_state;
plane->check_plane = skl_plane_check;
plane->min_cdclk = skl_plane_min_cdclk;
- if (icl_is_nv12_y_plane(plane_id))
- plane->update_slave = icl_update_slave;
if (INTEL_GEN(dev_priv) >= 11)
formats = icl_get_plane_formats(dev_priv, pipe,
@@ -2929,13 +2973,11 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
formats = skl_get_plane_formats(dev_priv, pipe,
plane_id, &num_formats);
+ plane->has_ccs = skl_plane_has_ccs(dev_priv, pipe, plane_id);
if (INTEL_GEN(dev_priv) >= 12) {
- /* TODO: Implement support for gen-12 CCS modifiers */
- plane->has_ccs = false;
- modifiers = gen12_plane_format_modifiers_noccs;
+ modifiers = gen12_plane_format_modifiers_ccs;
plane_funcs = &gen12_plane_funcs;
} else {
- plane->has_ccs = skl_plane_has_ccs(dev_priv, pipe, plane_id);
if (plane->has_ccs)
modifiers = skl_plane_format_modifiers_ccs;
else
@@ -3025,8 +3067,13 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
plane->check_plane = vlv_sprite_check;
plane->min_cdclk = vlv_plane_min_cdclk;
- formats = vlv_plane_formats;
- num_formats = ARRAY_SIZE(vlv_plane_formats);
+ if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
+ formats = chv_pipe_b_sprite_formats;
+ num_formats = ARRAY_SIZE(chv_pipe_b_sprite_formats);
+ } else {
+ formats = vlv_plane_formats;
+ num_formats = ARRAY_SIZE(vlv_plane_formats);
+ }
modifiers = i9xx_plane_format_modifiers;
plane_funcs = &vlv_sprite_funcs;
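
Most of the mechanical churn in this file follows one rule: the old plane_state->base and crtc_state->base are split into a uapi part (the state userspace requested) and a hw part (the state the driver actually programs, which may be derived differently, e.g. for slave planes). An abridged sketch of the plane-state split, showing only the fields the hunks above exercise (illustrative, not the full definition from the series):

#include <linux/types.h>
#include <drm/drm_color_mgmt.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_plane.h>

struct intel_plane_state_sketch {
	/* As requested by userspace; the src/dst rects and the
	 * visible flag stay on this side. */
	struct drm_plane_state uapi;

	/* As programmed into the hardware; seeded from uapi during
	 * state computation and filled directly by readout. */
	struct {
		struct drm_framebuffer *fb;
		u16 alpha;
		unsigned int rotation;
		enum drm_color_encoding color_encoding;
		enum drm_color_range color_range;
	} hw;
};

This is why framebuffer, rotation and color properties read hw.* above, while the coordinate checks handed to drm_atomic_helper_check_plane_state() keep using uapi.*.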
diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c
index 9983fadf6c28..50703536436c 100644
--- a/drivers/gpu/drm/i915/display/intel_tv.c
+++ b/drivers/gpu/drm/i915/display/intel_tv.c
@@ -924,7 +924,7 @@ intel_enable_tv(struct intel_encoder *encoder,
/* Prevents vblank waits from timing out in intel_tv_detect_type() */
intel_wait_for_vblank(dev_priv,
- to_intel_crtc(pipe_config->base.crtc)->pipe);
+ to_intel_crtc(pipe_config->uapi.crtc)->pipe);
I915_WRITE(TV_CTL, I915_READ(TV_CTL) | TV_ENC_ENABLE);
}
@@ -1085,7 +1085,7 @@ intel_tv_get_config(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct drm_display_mode *adjusted_mode =
- &pipe_config->base.adjusted_mode;
+ &pipe_config->hw.adjusted_mode;
struct drm_display_mode mode = {};
u32 tv_ctl, hctl1, hctl3, vctl1, vctl2, tmp;
struct tv_mode tv_mode = {};
@@ -1188,7 +1188,7 @@ intel_tv_compute_config(struct intel_encoder *encoder,
to_intel_tv_connector_state(conn_state);
const struct tv_mode *tv_mode = intel_tv_mode_find(conn_state);
struct drm_display_mode *adjusted_mode =
- &pipe_config->base.adjusted_mode;
+ &pipe_config->hw.adjusted_mode;
int hdisplay = adjusted_mode->crtc_hdisplay;
int vdisplay = adjusted_mode->crtc_vdisplay;
@@ -1417,7 +1417,7 @@ static void intel_tv_pre_enable(struct intel_encoder *encoder,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc);
struct intel_tv *intel_tv = enc_to_tv(encoder);
const struct intel_tv_connector_state *tv_conn_state =
to_intel_tv_connector_state(conn_state);
diff --git a/drivers/gpu/drm/i915/display/intel_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
index 69a7cb1fa121..4d0c23b29248 100644
--- a/drivers/gpu/drm/i915/display/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
@@ -115,6 +115,7 @@ enum bdb_block_id {
BDB_MIPI_CONFIG = 52,
BDB_MIPI_SEQUENCE = 53,
BDB_COMPRESSION_PARAMETERS = 56,
+ BDB_GENERIC_DTD = 58,
BDB_SKIP = 254, /* VBIOS private block, ignore */
};
@@ -368,7 +369,7 @@ struct child_device_config {
u16 dtd_buf_ptr; /* 161 */
u8 edidless_efp:1; /* 161 */
u8 compression_enable:1; /* 198 */
- u8 compression_method:1; /* 198 */
+ u8 compression_method_cps:1; /* 198 */
u8 ganged_edp:1; /* 202 */
u8 reserved0:4;
u8 compression_structure_index:4; /* 198 */
@@ -793,6 +794,35 @@ struct bdb_lfp_backlight_data {
} __packed;
/*
+ * Block 44 - LFP Power Conservation Features Block
+ */
+
+struct als_data_entry {
+ u16 backlight_adjust;
+ u16 lux;
+} __packed;
+
+struct agressiveness_profile_entry {
+ u8 dpst_agressiveness : 4;
+ u8 lace_agressiveness : 4;
+} __packed;
+
+struct bdb_lfp_power {
+ u8 lfp_feature_bits;
+ struct als_data_entry als[5];
+ u8 lace_aggressiveness_profile;
+ u16 dpst;
+ u16 psr;
+ u16 drrs;
+ u16 lace_support;
+ u16 adt;
+ u16 dmrrs;
+ u16 adb;
+ u16 lace_enabled_status;
+ struct agressiveness_profile_entry aggressivenes[16];
+} __packed;
+
+/*
* Block 52 - MIPI Configuration Block
*/
@@ -863,4 +893,34 @@ struct bdb_compression_parameters {
struct dsc_compression_parameters_entry data[16];
} __packed;
+/*
+ * Block 58 - Generic DTD Block
+ */
+
+struct generic_dtd_entry {
+ u32 pixel_clock;
+ u16 hactive;
+ u16 hblank;
+ u16 hfront_porch;
+ u16 hsync;
+ u16 vactive;
+ u16 vblank;
+ u16 vfront_porch;
+ u16 vsync;
+ u16 width_mm;
+ u16 height_mm;
+
+ /* Flags */
+ u8 rsvd_flags:6;
+ u8 vsync_positive_polarity:1;
+ u8 hsync_positive_polarity:1;
+
+ u8 rsvd[3];
+} __packed;
+
+struct bdb_generic_dtd {
+ u16 gdtd_size;
+ struct generic_dtd_entry dtd[]; /* up to 24 DTDs */
+} __packed;
+
#endif /* _INTEL_VBT_DEFS_H_ */
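
Unlike a classic 18-byte DTD, the new block 58 entries store each timing as active/blank/front-porch/sync widths, so a parser has to derive the DRM sync positions itself. A sketch of that arithmetic (the helper name and the kHz unit for pixel_clock are assumptions; the real parser must follow the VBT spec):

#include <drm/drm_modes.h>

static void generic_dtd_to_mode(const struct generic_dtd_entry *dtd,
				struct drm_display_mode *mode)
{
	mode->clock = dtd->pixel_clock;			/* assumed kHz */

	mode->hdisplay = dtd->hactive;
	mode->hsync_start = dtd->hactive + dtd->hfront_porch;
	mode->hsync_end = mode->hsync_start + dtd->hsync;
	mode->htotal = dtd->hactive + dtd->hblank;

	mode->vdisplay = dtd->vactive;
	mode->vsync_start = dtd->vactive + dtd->vfront_porch;
	mode->vsync_end = mode->vsync_start + dtd->vsync;
	mode->vtotal = dtd->vactive + dtd->vblank;

	mode->width_mm = dtd->width_mm;
	mode->height_mm = dtd->height_mm;

	mode->flags = dtd->hsync_positive_polarity ?
		DRM_MODE_FLAG_PHSYNC : DRM_MODE_FLAG_NHSYNC;
	mode->flags |= dtd->vsync_positive_polarity ?
		DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC;
}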
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c
index 896b0c334f5e..6bab08db5d75 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.c
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.c
@@ -10,6 +10,7 @@
#include "i915_drv.h"
#include "intel_display_types.h"
+#include "intel_dsi.h"
#include "intel_vdsc.h"
enum ROW_INDEX_BPP {
@@ -30,10 +31,8 @@ enum COLUMN_INDEX_BPC {
MAX_COLUMN_INDEX
};
-#define DSC_SUPPORTED_VERSION_MIN 1
-
/* From DSC_v1.11 spec, rc_parameter_Set syntax element typically constant */
-static u16 rc_buf_thresh[] = {
+static const u16 rc_buf_thresh[] = {
896, 1792, 2688, 3584, 4480, 5376, 6272, 6720, 7168, 7616,
7744, 7872, 8000, 8064
};
@@ -53,7 +52,7 @@ struct rc_parameters {
* Selected Rate Control Related Parameter Recommended Values
* from DSC_v1.11 spec & C Model release: DSC_model_20161212
*/
-static struct rc_parameters rc_params[][MAX_COLUMN_INDEX] = {
+static const struct rc_parameters rc_parameters[][MAX_COLUMN_INDEX] = {
{
/* 6BPP/8BPC */
{ 768, 15, 6144, 3, 13, 11, 11, {
@@ -319,63 +318,84 @@ static int get_column_index_for_rc_params(u8 bits_per_component)
}
}
-int intel_dp_compute_dsc_params(struct intel_dp *intel_dp,
- struct intel_crtc_state *pipe_config)
+static const struct rc_parameters *get_rc_params(u16 compressed_bpp,
+ u8 bits_per_component)
+{
+ int row_index, column_index;
+
+ row_index = get_row_index_for_rc_params(compressed_bpp);
+ if (row_index < 0)
+ return NULL;
+
+ column_index = get_column_index_for_rc_params(bits_per_component);
+ if (column_index < 0)
+ return NULL;
+
+ return &rc_parameters[row_index][column_index];
+}
+
+bool intel_dsc_source_support(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ const struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ enum pipe pipe = crtc->pipe;
+
+ if (!INTEL_INFO(i915)->display.has_dsc)
+ return false;
+
+ /* On TGL, DSC is supported on all Pipes */
+ if (INTEL_GEN(i915) >= 12)
+ return true;
+
+ if (INTEL_GEN(i915) >= 10 &&
+ (pipe != PIPE_A ||
+ (cpu_transcoder == TRANSCODER_EDP ||
+ cpu_transcoder == TRANSCODER_DSI_0 ||
+ cpu_transcoder == TRANSCODER_DSI_1)))
+ return true;
+
+ return false;
+}
+
+static bool is_pipe_dsc(const struct intel_crtc_state *crtc_state)
+{
+ const struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ const struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+
+ if (INTEL_GEN(i915) >= 12)
+ return true;
+
+ if (cpu_transcoder == TRANSCODER_EDP ||
+ cpu_transcoder == TRANSCODER_DSI_0 ||
+ cpu_transcoder == TRANSCODER_DSI_1)
+ return false;
+
+ /* There's no pipe A DSC engine on ICL */
+ WARN_ON(crtc->pipe == PIPE_A);
+
+ return true;
+}
+
+int intel_dsc_compute_params(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config)
{
struct drm_dsc_config *vdsc_cfg = &pipe_config->dsc.config;
u16 compressed_bpp = pipe_config->dsc.compressed_bpp;
+ const struct rc_parameters *rc_params;
u8 i = 0;
- int row_index = 0;
- int column_index = 0;
- u8 line_buf_depth = 0;
- vdsc_cfg->pic_width = pipe_config->base.adjusted_mode.crtc_hdisplay;
- vdsc_cfg->pic_height = pipe_config->base.adjusted_mode.crtc_vdisplay;
+ vdsc_cfg->pic_width = pipe_config->hw.adjusted_mode.crtc_hdisplay;
+ vdsc_cfg->pic_height = pipe_config->hw.adjusted_mode.crtc_vdisplay;
vdsc_cfg->slice_width = DIV_ROUND_UP(vdsc_cfg->pic_width,
pipe_config->dsc.slice_count);
- /*
- * Slice Height of 8 works for all currently available panels. So start
- * with that if pic_height is an integral multiple of 8.
- * Eventually add logic to try multiple slice heights.
- */
- if (vdsc_cfg->pic_height % 8 == 0)
- vdsc_cfg->slice_height = 8;
- else if (vdsc_cfg->pic_height % 4 == 0)
- vdsc_cfg->slice_height = 4;
- else
- vdsc_cfg->slice_height = 2;
-
- /* Values filled from DSC Sink DPCD */
- vdsc_cfg->dsc_version_major =
- (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] &
- DP_DSC_MAJOR_MASK) >> DP_DSC_MAJOR_SHIFT;
- vdsc_cfg->dsc_version_minor =
- min(DSC_SUPPORTED_VERSION_MIN,
- (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] &
- DP_DSC_MINOR_MASK) >> DP_DSC_MINOR_SHIFT);
-
- vdsc_cfg->convert_rgb = intel_dp->dsc_dpcd[DP_DSC_DEC_COLOR_FORMAT_CAP - DP_DSC_SUPPORT] &
- DP_DSC_RGB;
-
- line_buf_depth = drm_dp_dsc_sink_line_buf_depth(intel_dp->dsc_dpcd);
- if (!line_buf_depth) {
- DRM_DEBUG_KMS("DSC Sink Line Buffer Depth invalid\n");
- return -EINVAL;
- }
- if (vdsc_cfg->dsc_version_minor == 2)
- vdsc_cfg->line_buf_depth = (line_buf_depth == DSC_1_2_MAX_LINEBUF_DEPTH_BITS) ?
- DSC_1_2_MAX_LINEBUF_DEPTH_VAL : line_buf_depth;
- else
- vdsc_cfg->line_buf_depth = (line_buf_depth > DSC_1_1_MAX_LINEBUF_DEPTH_BITS) ?
- DSC_1_1_MAX_LINEBUF_DEPTH_BITS : line_buf_depth;
/* Gen 11 does not support YCbCr */
vdsc_cfg->simple_422 = false;
/* Gen 11 does not support VBR */
vdsc_cfg->vbr_enable = false;
- vdsc_cfg->block_pred_enable =
- intel_dp->dsc_dpcd[DP_DSC_BLK_PREDICTION_SUPPORT - DP_DSC_SUPPORT] &
- DP_DSC_BLK_PREDICTION_IS_SUPPORTED;
/* Gen 11 only supports integral values of bpp */
vdsc_cfg->bits_per_pixel = compressed_bpp << 4;
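
The shift encodes bits_per_pixel as U6.4 fixed point: the low four bits are a fractional part that gen11 always leaves at zero, and the state readout added further below undoes it with a right shift. A sketch of the two conversions the patch open-codes:

#include <linux/types.h>

/* PPS bits_per_pixel is U6.4 fixed point. */
static inline u16 dsc_bpp_to_u6_4(u16 bpp) { return bpp << 4; } /* 8 -> 0x80 */
static inline u16 dsc_u6_4_to_bpp(u16 val) { return val >> 4; } /* 0xc0 -> 12 */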
@@ -399,39 +419,29 @@ int intel_dp_compute_dsc_params(struct intel_dp *intel_dp,
vdsc_cfg->rc_buf_thresh[13] = 0x7D;
}
- row_index = get_row_index_for_rc_params(compressed_bpp);
- column_index =
- get_column_index_for_rc_params(vdsc_cfg->bits_per_component);
-
- if (row_index < 0 || column_index < 0)
+ rc_params = get_rc_params(compressed_bpp, vdsc_cfg->bits_per_component);
+ if (!rc_params)
return -EINVAL;
- vdsc_cfg->first_line_bpg_offset =
- rc_params[row_index][column_index].first_line_bpg_offset;
- vdsc_cfg->initial_xmit_delay =
- rc_params[row_index][column_index].initial_xmit_delay;
- vdsc_cfg->initial_offset =
- rc_params[row_index][column_index].initial_offset;
- vdsc_cfg->flatness_min_qp =
- rc_params[row_index][column_index].flatness_min_qp;
- vdsc_cfg->flatness_max_qp =
- rc_params[row_index][column_index].flatness_max_qp;
- vdsc_cfg->rc_quant_incr_limit0 =
- rc_params[row_index][column_index].rc_quant_incr_limit0;
- vdsc_cfg->rc_quant_incr_limit1 =
- rc_params[row_index][column_index].rc_quant_incr_limit1;
+ vdsc_cfg->first_line_bpg_offset = rc_params->first_line_bpg_offset;
+ vdsc_cfg->initial_xmit_delay = rc_params->initial_xmit_delay;
+ vdsc_cfg->initial_offset = rc_params->initial_offset;
+ vdsc_cfg->flatness_min_qp = rc_params->flatness_min_qp;
+ vdsc_cfg->flatness_max_qp = rc_params->flatness_max_qp;
+ vdsc_cfg->rc_quant_incr_limit0 = rc_params->rc_quant_incr_limit0;
+ vdsc_cfg->rc_quant_incr_limit1 = rc_params->rc_quant_incr_limit1;
for (i = 0; i < DSC_NUM_BUF_RANGES; i++) {
vdsc_cfg->rc_range_params[i].range_min_qp =
- rc_params[row_index][column_index].rc_range_params[i].range_min_qp;
+ rc_params->rc_range_params[i].range_min_qp;
vdsc_cfg->rc_range_params[i].range_max_qp =
- rc_params[row_index][column_index].rc_range_params[i].range_max_qp;
+ rc_params->rc_range_params[i].range_max_qp;
/*
* Range BPG Offset uses 2's complement and is only 6 bits. So
* mask it to get only 6 bits.
*/
vdsc_cfg->rc_range_params[i].range_bpg_offset =
- rc_params[row_index][column_index].rc_range_params[i].range_bpg_offset &
+ rc_params->rc_range_params[i].range_bpg_offset &
DSC_RANGE_BPG_OFFSET_MASK;
}
@@ -453,41 +463,42 @@ int intel_dp_compute_dsc_params(struct intel_dp *intel_dp,
vdsc_cfg->initial_scale_value = (vdsc_cfg->rc_model_size << 3) /
(vdsc_cfg->rc_model_size - vdsc_cfg->initial_offset);
- return drm_dsc_compute_rc_parameters(vdsc_cfg);
+ return 0;
}
enum intel_display_power_domain
intel_dsc_power_domain(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->base.crtc->dev);
- enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
/*
- * On ICL VDSC/joining for eDP transcoder uses a separate power well,
- * PW2. This requires POWER_DOMAIN_TRANSCODER_VDSC_PW2 power domain.
- * For any other transcoder, VDSC/joining uses the power well associated
- * with the pipe/transcoder in use. Hence another reference on the
- * transcoder power domain will suffice.
+ * VDSC/joining uses a separate power well, PW2, and requires
+ * POWER_DOMAIN_TRANSCODER_VDSC_PW2 power domain in two cases:
+ *
+ * - ICL eDP/DSI transcoder
+ * - TGL pipe A
*
- * On TGL we have the same mapping, but for transcoder A (the special
- * TRANSCODER_EDP is gone).
+ * For any other pipe, VDSC/joining uses the power well associated with
+ * the pipe in use. Hence another reference on the pipe power domain
+ * will suffice. (Except no VDSC/joining on ICL pipe A.)
*/
- if (INTEL_GEN(i915) >= 12 && cpu_transcoder == TRANSCODER_A)
- return POWER_DOMAIN_TRANSCODER_VDSC_PW2;
- else if (cpu_transcoder == TRANSCODER_EDP)
+ if (INTEL_GEN(i915) >= 12 && pipe == PIPE_A)
return POWER_DOMAIN_TRANSCODER_VDSC_PW2;
+ else if (is_pipe_dsc(crtc_state))
+ return POWER_DOMAIN_PIPE(pipe);
else
- return POWER_DOMAIN_TRANSCODER(cpu_transcoder);
+ return POWER_DOMAIN_TRANSCODER_VDSC_PW2;
}
-static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state)
+static void intel_dsc_pps_configure(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
enum pipe pipe = crtc->pipe;
- enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 pps_val = 0;
u32 rc_buf_thresh_dword[4];
u32 rc_range_params_dword[8];
@@ -508,7 +519,7 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
if (vdsc_cfg->vbr_enable)
pps_val |= DSC_VBR_ENABLE;
DRM_INFO("PPS0 = 0x%08x\n", pps_val);
- if (cpu_transcoder == TRANSCODER_EDP) {
+ if (!is_pipe_dsc(crtc_state)) {
I915_WRITE(DSCA_PICTURE_PARAMETER_SET_0, pps_val);
/*
* If 2 VDSC instances are needed, configure PPS for second
@@ -527,7 +538,7 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
pps_val = 0;
pps_val |= DSC_BPP(vdsc_cfg->bits_per_pixel);
DRM_INFO("PPS1 = 0x%08x\n", pps_val);
- if (cpu_transcoder == TRANSCODER_EDP) {
+ if (!is_pipe_dsc(crtc_state)) {
I915_WRITE(DSCA_PICTURE_PARAMETER_SET_1, pps_val);
/*
* If 2 VDSC instances are needed, configure PPS for second
@@ -547,7 +558,7 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
pps_val |= DSC_PIC_HEIGHT(vdsc_cfg->pic_height) |
DSC_PIC_WIDTH(vdsc_cfg->pic_width / num_vdsc_instances);
DRM_INFO("PPS2 = 0x%08x\n", pps_val);
- if (cpu_transcoder == TRANSCODER_EDP) {
+ if (!is_pipe_dsc(crtc_state)) {
I915_WRITE(DSCA_PICTURE_PARAMETER_SET_2, pps_val);
/*
* If 2 VDSC instances are needed, configure PPS for second
@@ -567,7 +578,7 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
pps_val |= DSC_SLICE_HEIGHT(vdsc_cfg->slice_height) |
DSC_SLICE_WIDTH(vdsc_cfg->slice_width);
DRM_INFO("PPS3 = 0x%08x\n", pps_val);
- if (cpu_transcoder == TRANSCODER_EDP) {
+ if (!is_pipe_dsc(crtc_state)) {
I915_WRITE(DSCA_PICTURE_PARAMETER_SET_3, pps_val);
/*
* If 2 VDSC instances are needed, configure PPS for second
@@ -587,7 +598,7 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
pps_val |= DSC_INITIAL_XMIT_DELAY(vdsc_cfg->initial_xmit_delay) |
DSC_INITIAL_DEC_DELAY(vdsc_cfg->initial_dec_delay);
DRM_INFO("PPS4 = 0x%08x\n", pps_val);
- if (cpu_transcoder == TRANSCODER_EDP) {
+ if (!is_pipe_dsc(crtc_state)) {
I915_WRITE(DSCA_PICTURE_PARAMETER_SET_4, pps_val);
/*
* If 2 VDSC instances are needed, configure PPS for second
@@ -607,7 +618,7 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
pps_val |= DSC_SCALE_INC_INT(vdsc_cfg->scale_increment_interval) |
DSC_SCALE_DEC_INT(vdsc_cfg->scale_decrement_interval);
DRM_INFO("PPS5 = 0x%08x\n", pps_val);
- if (cpu_transcoder == TRANSCODER_EDP) {
+ if (!is_pipe_dsc(crtc_state)) {
I915_WRITE(DSCA_PICTURE_PARAMETER_SET_5, pps_val);
/*
* If 2 VDSC instances are needed, configure PPS for second
@@ -629,7 +640,7 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
DSC_FLATNESS_MIN_QP(vdsc_cfg->flatness_min_qp) |
DSC_FLATNESS_MAX_QP(vdsc_cfg->flatness_max_qp);
DRM_INFO("PPS6 = 0x%08x\n", pps_val);
- if (cpu_transcoder == TRANSCODER_EDP) {
+ if (!is_pipe_dsc(crtc_state)) {
I915_WRITE(DSCA_PICTURE_PARAMETER_SET_6, pps_val);
/*
* If 2 VDSC instances are needed, configure PPS for second
@@ -649,7 +660,7 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
pps_val |= DSC_SLICE_BPG_OFFSET(vdsc_cfg->slice_bpg_offset) |
DSC_NFL_BPG_OFFSET(vdsc_cfg->nfl_bpg_offset);
DRM_INFO("PPS7 = 0x%08x\n", pps_val);
- if (cpu_transcoder == TRANSCODER_EDP) {
+ if (!is_pipe_dsc(crtc_state)) {
I915_WRITE(DSCA_PICTURE_PARAMETER_SET_7, pps_val);
/*
* If 2 VDSC instances are needed, configure PPS for second
@@ -669,7 +680,7 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
pps_val |= DSC_FINAL_OFFSET(vdsc_cfg->final_offset) |
DSC_INITIAL_OFFSET(vdsc_cfg->initial_offset);
DRM_INFO("PPS8 = 0x%08x\n", pps_val);
- if (cpu_transcoder == TRANSCODER_EDP) {
+ if (!is_pipe_dsc(crtc_state)) {
I915_WRITE(DSCA_PICTURE_PARAMETER_SET_8, pps_val);
/*
* If 2 VDSC instances are needed, configure PPS for second
@@ -689,7 +700,7 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
pps_val |= DSC_RC_MODEL_SIZE(DSC_RC_MODEL_SIZE_CONST) |
DSC_RC_EDGE_FACTOR(DSC_RC_EDGE_FACTOR_CONST);
DRM_INFO("PPS9 = 0x%08x\n", pps_val);
- if (cpu_transcoder == TRANSCODER_EDP) {
+ if (!is_pipe_dsc(crtc_state)) {
I915_WRITE(DSCA_PICTURE_PARAMETER_SET_9, pps_val);
/*
* If 2 VDSC instances are needed, configure PPS for second
@@ -711,7 +722,7 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
DSC_RC_TARGET_OFF_HIGH(DSC_RC_TGT_OFFSET_HI_CONST) |
DSC_RC_TARGET_OFF_LOW(DSC_RC_TGT_OFFSET_LO_CONST);
DRM_INFO("PPS10 = 0x%08x\n", pps_val);
- if (cpu_transcoder == TRANSCODER_EDP) {
+ if (!is_pipe_dsc(crtc_state)) {
I915_WRITE(DSCA_PICTURE_PARAMETER_SET_10, pps_val);
/*
* If 2 VDSC instances are needed, configure PPS for second
@@ -734,7 +745,7 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
DSC_SLICE_ROW_PER_FRAME(vdsc_cfg->pic_height /
vdsc_cfg->slice_height);
DRM_INFO("PPS16 = 0x%08x\n", pps_val);
- if (cpu_transcoder == TRANSCODER_EDP) {
+ if (!is_pipe_dsc(crtc_state)) {
I915_WRITE(DSCA_PICTURE_PARAMETER_SET_16, pps_val);
/*
* If 2 VDSC instances are needed, configure PPS for second
@@ -758,7 +769,7 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
DRM_INFO(" RC_BUF_THRESH%d = 0x%08x\n", i,
rc_buf_thresh_dword[i / 4]);
}
- if (cpu_transcoder == TRANSCODER_EDP) {
+ if (!is_pipe_dsc(crtc_state)) {
I915_WRITE(DSCA_RC_BUF_THRESH_0, rc_buf_thresh_dword[0]);
I915_WRITE(DSCA_RC_BUF_THRESH_0_UDW, rc_buf_thresh_dword[1]);
I915_WRITE(DSCA_RC_BUF_THRESH_1, rc_buf_thresh_dword[2]);
@@ -807,7 +818,7 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
DRM_INFO(" RC_RANGE_PARAM_%d = 0x%08x\n", i,
rc_range_params_dword[i / 2]);
}
- if (cpu_transcoder == TRANSCODER_EDP) {
+ if (!is_pipe_dsc(crtc_state)) {
I915_WRITE(DSCA_RC_RANGE_PARAMETERS_0,
rc_range_params_dword[0]);
I915_WRITE(DSCA_RC_RANGE_PARAMETERS_0_UDW,
@@ -880,8 +891,75 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
}
}
-static void intel_dp_write_dsc_pps_sdp(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state)
+void intel_dsc_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ enum pipe pipe = crtc->pipe;
+ enum intel_display_power_domain power_domain;
+ intel_wakeref_t wakeref;
+ u32 dss_ctl1, dss_ctl2, val;
+
+ if (!intel_dsc_source_support(encoder, crtc_state))
+ return;
+
+ power_domain = intel_dsc_power_domain(crtc_state);
+
+ wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+ if (!wakeref)
+ return;
+
+ if (!is_pipe_dsc(crtc_state)) {
+ dss_ctl1 = I915_READ(DSS_CTL1);
+ dss_ctl2 = I915_READ(DSS_CTL2);
+ } else {
+ dss_ctl1 = I915_READ(ICL_PIPE_DSS_CTL1(pipe));
+ dss_ctl2 = I915_READ(ICL_PIPE_DSS_CTL2(pipe));
+ }
+
+ crtc_state->dsc.compression_enable = dss_ctl2 & LEFT_BRANCH_VDSC_ENABLE;
+ if (!crtc_state->dsc.compression_enable)
+ goto out;
+
+ crtc_state->dsc.dsc_split = (dss_ctl2 & RIGHT_BRANCH_VDSC_ENABLE) &&
+ (dss_ctl1 & JOINER_ENABLE);
+
+ /* FIXME: add more state readout as needed */
+
+ /* PPS1 */
+ if (!is_pipe_dsc(crtc_state))
+ val = I915_READ(DSCA_PICTURE_PARAMETER_SET_1);
+ else
+ val = I915_READ(ICL_DSC0_PICTURE_PARAMETER_SET_1(pipe));
+ vdsc_cfg->bits_per_pixel = val;
+ crtc_state->dsc.compressed_bpp = vdsc_cfg->bits_per_pixel >> 4;
+out:
+ intel_display_power_put(dev_priv, power_domain, wakeref);
+}
+
+static void intel_dsc_dsi_pps_write(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct mipi_dsi_device *dsi;
+ struct drm_dsc_picture_parameter_set pps;
+ enum port port;
+
+ drm_dsc_pps_payload_pack(&pps, vdsc_cfg);
+
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi = intel_dsi->dsi_hosts[port]->device;
+
+ mipi_dsi_picture_parameter_set(dsi, &pps);
+ mipi_dsi_compression_mode(dsi, true);
+ }
+}
+
+static void intel_dsc_dp_pps_write(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
@@ -902,7 +980,7 @@ static void intel_dp_write_dsc_pps_sdp(struct intel_encoder *encoder,
void intel_dsc_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum pipe pipe = crtc->pipe;
i915_reg_t dss_ctl1_reg, dss_ctl2_reg;
@@ -916,11 +994,14 @@ void intel_dsc_enable(struct intel_encoder *encoder,
intel_display_power_get(dev_priv,
intel_dsc_power_domain(crtc_state));
- intel_configure_pps_for_dsc_encoder(encoder, crtc_state);
+ intel_dsc_pps_configure(encoder, crtc_state);
- intel_dp_write_dsc_pps_sdp(encoder, crtc_state);
+ if (encoder->type == INTEL_OUTPUT_DSI)
+ intel_dsc_dsi_pps_write(encoder, crtc_state);
+ else
+ intel_dsc_dp_pps_write(encoder, crtc_state);
- if (crtc_state->cpu_transcoder == TRANSCODER_EDP) {
+ if (!is_pipe_dsc(crtc_state)) {
dss_ctl1_reg = DSS_CTL1;
dss_ctl2_reg = DSS_CTL2;
} else {
@@ -938,7 +1019,7 @@ void intel_dsc_enable(struct intel_encoder *encoder,
void intel_dsc_disable(const struct intel_crtc_state *old_crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
i915_reg_t dss_ctl1_reg, dss_ctl2_reg;
@@ -947,7 +1028,7 @@ void intel_dsc_disable(const struct intel_crtc_state *old_crtc_state)
if (!old_crtc_state->dsc.compression_enable)
return;
- if (old_crtc_state->cpu_transcoder == TRANSCODER_EDP) {
+ if (!is_pipe_dsc(old_crtc_state)) {
dss_ctl1_reg = DSS_CTL1;
dss_ctl2_reg = DSS_CTL2;
} else {
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.h b/drivers/gpu/drm/i915/display/intel_vdsc.h
index 90d3f6017fcb..e56a3254c214 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.h
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.h
@@ -6,15 +6,20 @@
#ifndef __INTEL_VDSC_H__
#define __INTEL_VDSC_H__
+#include <linux/types.h>
+
struct intel_encoder;
struct intel_crtc_state;
-struct intel_dp;
+bool intel_dsc_source_support(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
void intel_dsc_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
void intel_dsc_disable(const struct intel_crtc_state *crtc_state);
-int intel_dp_compute_dsc_params(struct intel_dp *intel_dp,
- struct intel_crtc_state *pipe_config);
+int intel_dsc_compute_params(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config);
+void intel_dsc_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state);
enum intel_display_power_domain
intel_dsc_power_domain(const struct intel_crtc_state *crtc_state);
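
With the DPCD probing removed from intel_dsc_compute_params() (and the final drm_dsc_compute_rc_parameters() call also moved out, per the intel_vdsc.c hunk above that now returns 0), the helper is encoder-agnostic and the caller supplies the sink side. A sketch of the resulting caller flow; the capability values below are placeholders, not what the DP or DSI paths actually program:

#include <drm/drm_dsc.h>
#include "intel_display_types.h"
#include "intel_vdsc.h"

static int compute_dsc_sketch(struct intel_encoder *encoder,
			      struct intel_crtc_state *pipe_config)
{
	struct drm_dsc_config *vdsc_cfg = &pipe_config->dsc.config;
	int ret;

	/* Generic timing and RC parameters shared by DP and DSI. */
	ret = intel_dsc_compute_params(encoder, pipe_config);
	if (ret)
		return ret;

	/* Encoder-specific sink capabilities: DP derives these from
	 * the DPCD, DSI from fixed panel data (placeholders here). */
	vdsc_cfg->dsc_version_major = 1;
	vdsc_cfg->dsc_version_minor = 1;
	vdsc_cfg->convert_rgb = true;
	vdsc_cfg->line_buf_depth = 9;
	vdsc_cfg->block_pred_enable = false;

	/* Derive the remaining rate-control parameters. */
	return drm_dsc_compute_rc_parameters(vdsc_cfg);
}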
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
index 0ca49b1604c6..21e820299107 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
@@ -261,9 +261,9 @@ static int intel_dsi_compute_config(struct intel_encoder *encoder,
struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi,
base);
struct intel_connector *intel_connector = intel_dsi->attached_connector;
- struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
const struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
- struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+ struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
int ret;
DRM_DEBUG_KMS("\n");
@@ -624,7 +624,7 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
enum port port;
@@ -746,7 +746,7 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder,
const struct drm_connector_state *conn_state)
{
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
- struct drm_crtc *crtc = pipe_config->base.crtc;
+ struct drm_crtc *crtc = pipe_config->uapi.crtc;
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum pipe pipe = intel_crtc->pipe;
@@ -882,8 +882,8 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
}
static void intel_dsi_post_disable(struct intel_encoder *encoder,
- const struct intel_crtc_state *pipe_config,
- const struct drm_connector_state *conn_state)
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
@@ -892,6 +892,12 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder,
DRM_DEBUG_KMS("\n");
+ if (IS_GEN9_LP(dev_priv)) {
+ intel_crtc_vblank_off(old_crtc_state);
+
+ skylake_scaler_disable(old_crtc_state);
+ }
+
if (is_vid_mode(intel_dsi)) {
for_each_dsi_port(port, intel_dsi->ports)
vlv_dsi_wait_for_fifo_empty(intel_dsi, port);
@@ -1032,9 +1038,9 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_display_mode *adjusted_mode =
- &pipe_config->base.adjusted_mode;
+ &pipe_config->hw.adjusted_mode;
struct drm_display_mode *adjusted_mode_sw;
- struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
unsigned int lane_count = intel_dsi->lane_count;
unsigned int bpp, fmt;
@@ -1045,7 +1051,7 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
crtc_hblank_start_sw, crtc_hblank_end_sw;
/* FIXME: hw readout should not depend on SW state */
- adjusted_mode_sw = &crtc->config->base.adjusted_mode;
+ adjusted_mode_sw = &crtc->config->hw.adjusted_mode;
/*
* At least one port is active, as encoder->get_config is called only if
@@ -1204,7 +1210,7 @@ static void intel_dsi_get_config(struct intel_encoder *encoder,
}
if (pclk) {
- pipe_config->base.adjusted_mode.crtc_clock = pclk;
+ pipe_config->hw.adjusted_mode.crtc_clock = pclk;
pipe_config->port_clock = pclk;
}
}
@@ -1315,9 +1321,9 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
- const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+ const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
enum port port;
unsigned int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
u32 val, tmp;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
index b9f504ba3b32..34be4c0ee7c5 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
@@ -20,33 +20,31 @@ static void __do_clflush(struct drm_i915_gem_object *obj)
{
GEM_BUG_ON(!i915_gem_object_has_pages(obj));
drm_clflush_sg(obj->mm.pages);
- intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_CPU);
+
+ i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
}
static int clflush_work(struct dma_fence_work *base)
{
struct clflush *clflush = container_of(base, typeof(*clflush), base);
- struct drm_i915_gem_object *obj = fetch_and_zero(&clflush->obj);
+ struct drm_i915_gem_object *obj = clflush->obj;
int err;
err = i915_gem_object_pin_pages(obj);
if (err)
- goto put;
+ return err;
__do_clflush(obj);
i915_gem_object_unpin_pages(obj);
-put:
- i915_gem_object_put(obj);
- return err;
+ return 0;
}
static void clflush_release(struct dma_fence_work *base)
{
struct clflush *clflush = container_of(base, typeof(*clflush), base);
- if (clflush->obj)
- i915_gem_object_put(clflush->obj);
+ i915_gem_object_put(clflush->obj);
}
static const struct dma_fence_work_ops clflush_ops = {
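
The clflush hunks give the object reference a single owner: clflush_work() now only borrows clflush->obj (no fetch_and_zero(), no put on the error path), and the one i915_gem_object_put() in clflush_release() runs unconditionally for both outcomes. In miniature, with illustrative names (not from the patch):

#include <linux/kref.h>
#include <linux/slab.h>

struct item { struct kref ref; };

static void item_free(struct kref *ref)
{
	kfree(container_of(ref, struct item, ref));
}

/* Work callback: borrows the reference, never consumes it, so the
 * error path needs no special handling. */
static int item_work(struct item *it, int (*op)(struct item *))
{
	return op(it);
}

/* Release callback: the single, unconditional point where the
 * reference is dropped. */
static void item_release(struct item *it)
{
	kref_put(&it->ref, item_free);
}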
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index 337ba17b1e0e..dc90b044a217 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -69,7 +69,9 @@
#include <drm/i915_drm.h>
+#include "gt/intel_context.h"
#include "gt/intel_engine_heartbeat.h"
+#include "gt/intel_engine_pm.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_lrc_reg.h"
#include "gt/intel_ring.h"
@@ -169,12 +171,80 @@ lookup_user_engine(struct i915_gem_context *ctx,
return i915_gem_context_get_engine(ctx, idx);
}
+static struct i915_address_space *
+context_get_vm_rcu(struct i915_gem_context *ctx)
+{
+ GEM_BUG_ON(!rcu_access_pointer(ctx->vm));
+
+ do {
+ struct i915_address_space *vm;
+
+ /*
+ * We do not allow downgrading from full-ppgtt [to a shared
+ * global gtt], so ctx->vm cannot become NULL.
+ */
+ vm = rcu_dereference(ctx->vm);
+ if (!kref_get_unless_zero(&vm->ref))
+ continue;
+
+ /*
+ * This ppgtt may have been reallocated between
+ * the read and the kref, and reassigned to a third
+ * context. In order to avoid inadvertent sharing
+ * of this ppgtt with that third context (and not
+ * src), we have to confirm that we have the same
+ * ppgtt after passing through the strong memory
+ * barrier implied by a successful
+ * kref_get_unless_zero().
+ *
+ * Once we have acquired the current ppgtt of ctx,
+ * we no longer care if it is released from ctx, as
+ * it cannot be reallocated elsewhere.
+ */
+
+ if (vm == rcu_access_pointer(ctx->vm))
+ return rcu_pointer_handoff(vm);
+
+ i915_vm_put(vm);
+ } while (1);
+}
+
+static void intel_context_set_gem(struct intel_context *ce,
+ struct i915_gem_context *ctx)
+{
+ GEM_BUG_ON(rcu_access_pointer(ce->gem_context));
+ RCU_INIT_POINTER(ce->gem_context, ctx);
+
+ if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags))
+ ce->ring = __intel_context_ring_size(SZ_16K);
+
+ if (rcu_access_pointer(ctx->vm)) {
+ struct i915_address_space *vm;
+
+ rcu_read_lock();
+ vm = context_get_vm_rcu(ctx); /* hmm */
+ rcu_read_unlock();
+
+ i915_vm_put(ce->vm);
+ ce->vm = vm;
+ }
+
+ GEM_BUG_ON(ce->timeline);
+ if (ctx->timeline)
+ ce->timeline = intel_timeline_get(ctx->timeline);
+
+ if (ctx->sched.priority >= I915_PRIORITY_NORMAL &&
+ intel_engine_has_semaphores(ce->engine))
+ __set_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
+}
+
static void __free_engines(struct i915_gem_engines *e, unsigned int count)
{
while (count--) {
if (!e->engines[count])
continue;
+ RCU_INIT_POINTER(e->engines[count]->gem_context, NULL);
intel_context_put(e->engines[count]);
}
kfree(e);
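
context_get_vm_rcu() above is the standard RCU lookup-then-pin idiom: dereference under rcu_read_lock(), attempt kref_get_unless_zero(), then re-check the pointer after the full barrier a successful kref_get_unless_zero() implies, to catch the object being freed and recycled into another owner in between. A standalone sketch with generic names (obj/holder are illustrative stand-ins for i915_address_space/i915_gem_context):

#include <linux/kref.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct obj { struct kref ref; };
struct holder { struct obj __rcu *ptr; };

static void obj_free(struct kref *ref)
{
	kfree(container_of(ref, struct obj, ref));
}

static struct obj *holder_get_obj_rcu(struct holder *h)
{
	struct obj *o;

	rcu_read_lock();
	for (;;) {
		o = rcu_dereference(h->ptr);
		if (!o)
			break;		/* nothing published */

		/* Fails once the refcount has already hit zero; the
		 * publisher is assumed to replace h->ptr eventually
		 * (as i915 guarantees for ctx->vm), so retry. */
		if (!kref_get_unless_zero(&o->ref))
			continue;

		/* A successful kref_get_unless_zero() implies a full
		 * barrier: confirm h->ptr still names the object we
		 * pinned, in case it was freed and reallocated. */
		if (o == rcu_access_pointer(h->ptr))
			break;		/* pinned the live object */

		kref_put(&o->ref, obj_free);	/* lost the race */
	}
	rcu_read_unlock();

	return o;
}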
@@ -211,12 +281,14 @@ static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx)
GEM_BUG_ON(engine->legacy_idx >= I915_NUM_ENGINES);
GEM_BUG_ON(e->engines[engine->legacy_idx]);
- ce = intel_context_create(ctx, engine);
+ ce = intel_context_create(engine);
if (IS_ERR(ce)) {
__free_engines(e, e->num_engines + 1);
return ERR_CAST(ce);
}
+ intel_context_set_gem(ce, ctx);
+
e->engines[engine->legacy_idx] = ce;
e->num_engines = max(e->num_engines, engine->legacy_idx);
}
@@ -236,14 +308,10 @@ static void i915_gem_context_free(struct i915_gem_context *ctx)
free_engines(rcu_access_pointer(ctx->engines));
mutex_destroy(&ctx->engines_mutex);
- kfree(ctx->jump_whitelist);
-
if (ctx->timeline)
intel_timeline_put(ctx->timeline);
- kfree(ctx->name);
put_pid(ctx->pid);
-
mutex_destroy(&ctx->mutex);
kfree_rcu(ctx, rcu);
@@ -389,15 +457,6 @@ static void kill_context(struct i915_gem_context *ctx)
struct intel_context *ce;
/*
- * If we are already banned, it was due to a guilty request causing
- * a reset and the entire context being evicted from the GPU.
- */
- if (i915_gem_context_is_banned(ctx))
- return;
-
- i915_gem_context_set_banned(ctx);
-
- /*
* Map the user's engine back to the actual engines; one virtual
* engine will be mapped to multiple engines, and using ctx->engine[]
* the same engine may have multiple instances in the user's map.
@@ -407,6 +466,9 @@ static void kill_context(struct i915_gem_context *ctx)
for_each_gem_engine(ce, __context_engines_static(ctx), it) {
struct intel_engine_cs *engine;
+ if (intel_context_set_banned(ce))
+ continue;
+
/*
* Check the current active state of this context; if we
* are currently executing on the GPU we need to evict
@@ -427,11 +489,29 @@ static void kill_context(struct i915_gem_context *ctx)
}
}
+static void set_closed_name(struct i915_gem_context *ctx)
+{
+ char *s;
+
+ /* Replace '[]' with '<>' to indicate closed in debug prints */
+
+ s = strrchr(ctx->name, '[');
+ if (!s)
+ return;
+
+ *s = '<';
+
+ s = strchr(s + 1, ']');
+ if (s)
+ *s = '>';
+}
+
static void context_close(struct i915_gem_context *ctx)
{
struct i915_address_space *vm;
i915_gem_context_set_closed(ctx);
+ set_closed_name(ctx);
mutex_lock(&ctx->mutex);
@@ -529,9 +609,6 @@ __create_context(struct drm_i915_private *i915)
for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;
- ctx->jump_whitelist = NULL;
- ctx->jump_whitelist_cmds = 0;
-
spin_lock(&i915->gem.contexts.lock);
list_add_tail(&ctx->link, &i915->gem.contexts.list);
spin_unlock(&i915->gem.contexts.lock);
@@ -661,37 +738,6 @@ i915_gem_create_context(struct drm_i915_private *i915, unsigned int flags)
return ctx;
}
-static void
-destroy_kernel_context(struct i915_gem_context **ctxp)
-{
- struct i915_gem_context *ctx;
-
- /* Keep the context ref so that we can free it immediately ourselves */
- ctx = i915_gem_context_get(fetch_and_zero(ctxp));
- GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
-
- context_close(ctx);
- i915_gem_context_free(ctx);
-}
-
-struct i915_gem_context *
-i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio)
-{
- struct i915_gem_context *ctx;
-
- ctx = i915_gem_create_context(i915, 0);
- if (IS_ERR(ctx))
- return ctx;
-
- i915_gem_context_clear_bannable(ctx);
- i915_gem_context_set_persistence(ctx);
- ctx->sched.priority = I915_USER_PRIORITY(prio);
-
- GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
-
- return ctx;
-}
-
static void init_contexts(struct i915_gem_contexts *gc)
{
spin_lock_init(&gc->lock);
@@ -701,32 +747,16 @@ static void init_contexts(struct i915_gem_contexts *gc)
init_llist_head(&gc->free_list);
}
-int i915_gem_init_contexts(struct drm_i915_private *i915)
+void i915_gem_init__contexts(struct drm_i915_private *i915)
{
- struct i915_gem_context *ctx;
-
- /* Reassure ourselves we are only called once */
- GEM_BUG_ON(i915->kernel_context);
-
init_contexts(&i915->gem.contexts);
-
- /* lowest priority; idle task */
- ctx = i915_gem_context_create_kernel(i915, I915_PRIORITY_MIN);
- if (IS_ERR(ctx)) {
- DRM_ERROR("Failed to create default global context\n");
- return PTR_ERR(ctx);
- }
- i915->kernel_context = ctx;
-
DRM_DEBUG_DRIVER("%s context support initialized\n",
DRIVER_CAPS(i915)->has_logical_contexts ?
"logical" : "fake");
- return 0;
}
void i915_gem_driver_release__contexts(struct drm_i915_private *i915)
{
- destroy_kernel_context(&i915->kernel_context);
flush_work(&i915->gem.contexts.free_work);
}
@@ -757,12 +787,8 @@ static int gem_context_register(struct i915_gem_context *ctx,
mutex_unlock(&ctx->mutex);
ctx->pid = get_task_pid(current, PIDTYPE_PID);
- ctx->name = kasprintf(GFP_KERNEL, "%s[%d]",
- current->comm, pid_nr(ctx->pid));
- if (!ctx->name) {
- ret = -ENOMEM;
- goto err_pid;
- }
+ snprintf(ctx->name, sizeof(ctx->name), "%s[%d]",
+ current->comm, pid_nr(ctx->pid));
/* And finally expose ourselves to userspace via the idr */
mutex_lock(&fpriv->context_idr_lock);
@@ -771,8 +797,6 @@ static int gem_context_register(struct i915_gem_context *ctx,
if (ret >= 0)
goto out;
- kfree(fetch_and_zero(&ctx->name));
-err_pid:
put_pid(fetch_and_zero(&ctx->pid));
out:
return ret;
@@ -801,7 +825,6 @@ int i915_gem_context_open(struct drm_i915_private *i915,
if (err < 0)
goto err_ctx;
- GEM_BUG_ON(i915_gem_context_is_kernel(ctx));
GEM_BUG_ON(err > 0);
return 0;
@@ -1012,7 +1035,7 @@ static int get_ppgtt(struct drm_i915_file_private *file_priv,
return -ENODEV;
rcu_read_lock();
- vm = i915_vm_get(ctx->vm);
+ vm = context_get_vm_rcu(ctx);
rcu_read_unlock();
ret = mutex_lock_interruptible(&file_priv->vm_idr_lock);
@@ -1049,7 +1072,7 @@ static void set_ppgtt_barrier(void *data)
static int emit_ppgtt_update(struct i915_request *rq, void *data)
{
- struct i915_address_space *vm = rq->hw_context->vm;
+ struct i915_address_space *vm = rq->context->vm;
struct intel_engine_cs *engine = rq->engine;
u32 base = engine->mmio_base;
u32 *cs;
@@ -1096,9 +1119,6 @@ static int emit_ppgtt_update(struct i915_request *rq, void *data)
}
*cs++ = MI_NOOP;
intel_ring_advance(rq, cs);
- } else {
- /* ppGTT is not part of the legacy context image */
- gen6_ppgtt_pin(i915_vm_to_ppgtt(vm));
}
return 0;
@@ -1106,10 +1126,20 @@ static int emit_ppgtt_update(struct i915_request *rq, void *data)
static bool skip_ppgtt_update(struct intel_context *ce, void *data)
{
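+ /* The vm is picked up at first allocation; nothing to update yet */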
+ if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags))
+ return true;
+
if (HAS_LOGICAL_RING_CONTEXTS(ce->engine->i915))
- return !ce->state;
- else
- return !atomic_read(&ce->pin_count);
+ return false;
+
+ if (!atomic_read(&ce->pin_count))
+ return true;
+
+ /* ppGTT is not part of the legacy context image */
+ if (gen6_ppgtt_pin(i915_vm_to_ppgtt(ce->vm)))
+ return true;
+
+ return false;
}
static int set_ppgtt(struct drm_i915_file_private *file_priv,
@@ -1217,7 +1247,7 @@ gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu)
if (!intel_context_is_pinned(ce))
return 0;
- rq = i915_request_create(ce->engine->kernel_context);
+ rq = intel_engine_create_kernel_request(ce->engine);
if (IS_ERR(rq))
return PTR_ERR(rq);
@@ -1485,12 +1515,14 @@ set_engines__load_balance(struct i915_user_extension __user *base, void *data)
}
}
- ce = intel_execlists_create_virtual(set->ctx, siblings, n);
+ ce = intel_execlists_create_virtual(siblings, n);
if (IS_ERR(ce)) {
err = PTR_ERR(ce);
goto out_siblings;
}
+ intel_context_set_gem(ce, set->ctx);
+
if (cmpxchg(&set->engines->engines[idx], NULL, ce)) {
intel_context_put(ce);
err = -EEXIST;
@@ -1660,12 +1692,14 @@ set_engines(struct i915_gem_context *ctx,
return -ENOENT;
}
- ce = intel_context_create(ctx, engine);
+ ce = intel_context_create(engine);
if (IS_ERR(ce)) {
__free_engines(set.engines, n);
return PTR_ERR(ce);
}
+ intel_context_set_gem(ce, ctx);
+
set.engines->engines[n] = ce;
}
set.engines->num_engines = num_engines;
@@ -1806,6 +1840,44 @@ set_persistence(struct i915_gem_context *ctx,
return __context_set_persistence(ctx, args->value);
}
+static void __apply_priority(struct intel_context *ce, void *arg)
+{
+ struct i915_gem_context *ctx = arg;
+
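+ /* Only normal-or-higher priority contexts may busywait on semaphores */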
+ if (!intel_engine_has_semaphores(ce->engine))
+ return;
+
+ if (ctx->sched.priority >= I915_PRIORITY_NORMAL)
+ intel_context_set_use_semaphores(ce);
+ else
+ intel_context_clear_use_semaphores(ce);
+}
+
+static int set_priority(struct i915_gem_context *ctx,
+ const struct drm_i915_gem_context_param *args)
+{
+ s64 priority = args->value;
+
+ if (args->size)
+ return -EINVAL;
+
+ if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY))
+ return -ENODEV;
+
+ if (priority > I915_CONTEXT_MAX_USER_PRIORITY ||
+ priority < I915_CONTEXT_MIN_USER_PRIORITY)
+ return -EINVAL;
+
+ if (priority > I915_CONTEXT_DEFAULT_PRIORITY &&
+ !capable(CAP_SYS_NICE))
+ return -EPERM;
+
+ ctx->sched.priority = I915_USER_PRIORITY(priority);
+ context_apply_all(ctx, __apply_priority, ctx);
+
+ return 0;
+}
+
static int ctx_setparam(struct drm_i915_file_private *fpriv,
struct i915_gem_context *ctx,
struct drm_i915_gem_context_param *args)
@@ -1852,23 +1924,7 @@ static int ctx_setparam(struct drm_i915_file_private *fpriv,
break;
case I915_CONTEXT_PARAM_PRIORITY:
- {
- s64 priority = args->value;
-
- if (args->size)
- ret = -EINVAL;
- else if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY))
- ret = -ENODEV;
- else if (priority > I915_CONTEXT_MAX_USER_PRIORITY ||
- priority < I915_CONTEXT_MIN_USER_PRIORITY)
- ret = -EINVAL;
- else if (priority > I915_CONTEXT_DEFAULT_PRIORITY &&
- !capable(CAP_SYS_NICE))
- ret = -EPERM;
- else
- ctx->sched.priority =
- I915_USER_PRIORITY(priority);
- }
+ ret = set_priority(ctx, args);
break;
case I915_CONTEXT_PARAM_SSEU:
@@ -1948,20 +2004,23 @@ static int clone_engines(struct i915_gem_context *dst,
*/
if (intel_engine_is_virtual(engine))
clone->engines[n] =
- intel_execlists_clone_virtual(dst, engine);
+ intel_execlists_clone_virtual(engine);
else
- clone->engines[n] = intel_context_create(dst, engine);
+ clone->engines[n] = intel_context_create(engine);
if (IS_ERR_OR_NULL(clone->engines[n])) {
__free_engines(clone, n);
goto err_unlock;
}
+
+ intel_context_set_gem(clone->engines[n], dst);
}
clone->num_engines = n;
user_engines = i915_gem_context_user_engines(src);
i915_gem_context_unlock_engines(src);
- free_engines(dst->engines);
+ /* Serialised by constructor */
+ free_engines(__context_engines_static(dst));
RCU_INIT_POINTER(dst->engines, clone);
if (user_engines)
i915_gem_context_set_user_engines(dst);
@@ -1996,7 +2055,8 @@ static int clone_sseu(struct i915_gem_context *dst,
unsigned long n;
int err;
- clone = dst->engines; /* no locking required; sole access */
+ /* No locking required; sole access during construction */
+ clone = __context_engines_static(dst);
if (e->num_engines != clone->num_engines) {
err = -EINVAL;
goto unlock;
@@ -2041,47 +2101,21 @@ static int clone_vm(struct i915_gem_context *dst,
struct i915_address_space *vm;
int err = 0;
- rcu_read_lock();
- do {
- vm = rcu_dereference(src->vm);
- if (!vm)
- break;
-
- if (!kref_get_unless_zero(&vm->ref))
- continue;
-
- /*
- * This ppgtt may have be reallocated between
- * the read and the kref, and reassigned to a third
- * context. In order to avoid inadvertent sharing
- * of this ppgtt with that third context (and not
- * src), we have to confirm that we have the same
- * ppgtt after passing through the strong memory
- * barrier implied by a successful
- * kref_get_unless_zero().
- *
- * Once we have acquired the current ppgtt of src,
- * we no longer care if it is released from src, as
- * it cannot be reallocated elsewhere.
- */
-
- if (vm == rcu_access_pointer(src->vm))
- break;
+ if (!rcu_access_pointer(src->vm))
+ return 0;
- i915_vm_put(vm);
- } while (1);
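+ /* context_get_vm_rcu() returns a stable reference to src's vm */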
+ rcu_read_lock();
+ vm = context_get_vm_rcu(src);
rcu_read_unlock();
- if (vm) {
- if (!mutex_lock_interruptible(&dst->mutex)) {
- __assign_ppgtt(dst, vm);
- mutex_unlock(&dst->mutex);
- } else {
- err = -EINTR;
- }
- i915_vm_put(vm);
+ if (!mutex_lock_interruptible(&dst->mutex)) {
+ __assign_ppgtt(dst, vm);
+ mutex_unlock(&dst->mutex);
+ } else {
+ err = -EINTR;
}
+ i915_vm_put(vm);
return err;
}
@@ -2167,8 +2201,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
ext_data.fpriv = file->driver_priv;
if (client_is_banned(ext_data.fpriv)) {
DRM_DEBUG("client %s[%d] banned from creating ctx\n",
- current->comm,
- pid_nr(get_task_pid(current, PIDTYPE_PID)));
+ current->comm, task_pid_nr(current));
return -EIO;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.h b/drivers/gpu/drm/i915/gem/i915_gem_context.h
index 18e50a769a6e..14f3cc1b7583 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.h
@@ -91,26 +91,6 @@ static inline void i915_gem_context_clear_persistence(struct i915_gem_context *c
clear_bit(UCONTEXT_PERSISTENCE, &ctx->user_flags);
}
-static inline bool i915_gem_context_is_banned(const struct i915_gem_context *ctx)
-{
- return test_bit(CONTEXT_BANNED, &ctx->flags);
-}
-
-static inline void i915_gem_context_set_banned(struct i915_gem_context *ctx)
-{
- set_bit(CONTEXT_BANNED, &ctx->flags);
-}
-
-static inline bool i915_gem_context_force_single_submission(const struct i915_gem_context *ctx)
-{
- return test_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ctx->flags);
-}
-
-static inline void i915_gem_context_set_force_single_submission(struct i915_gem_context *ctx)
-{
- __set_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ctx->flags);
-}
-
static inline bool
i915_gem_context_user_engines(const struct i915_gem_context *ctx)
{
@@ -129,31 +109,8 @@ i915_gem_context_clear_user_engines(struct i915_gem_context *ctx)
clear_bit(CONTEXT_USER_ENGINES, &ctx->flags);
}
-static inline bool
-i915_gem_context_nopreempt(const struct i915_gem_context *ctx)
-{
- return test_bit(CONTEXT_NOPREEMPT, &ctx->flags);
-}
-
-static inline void
-i915_gem_context_set_nopreempt(struct i915_gem_context *ctx)
-{
- set_bit(CONTEXT_NOPREEMPT, &ctx->flags);
-}
-
-static inline void
-i915_gem_context_clear_nopreempt(struct i915_gem_context *ctx)
-{
- clear_bit(CONTEXT_NOPREEMPT, &ctx->flags);
-}
-
-static inline bool i915_gem_context_is_kernel(struct i915_gem_context *ctx)
-{
- return !ctx->file_priv;
-}
-
/* i915_gem_context.c */
-int __must_check i915_gem_init_contexts(struct drm_i915_private *i915);
+void i915_gem_init__contexts(struct drm_i915_private *i915);
void i915_gem_driver_release__contexts(struct drm_i915_private *i915);
int i915_gem_context_open(struct drm_i915_private *i915,
@@ -178,9 +135,6 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
-struct i915_gem_context *
-i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio);
-
static inline struct i915_gem_context *
i915_gem_context_get(struct i915_gem_context *ctx)
{
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
index 3870dd5daaa0..017ca803ab47 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
@@ -100,15 +100,6 @@ struct i915_gem_context {
*/
struct pid *pid;
- /**
- * @name: arbitrary name
- *
- * A name is constructed for the context from the creator's process
- * name, pid and user handle in order to uniquely identify the
- * context in messages.
- */
- const char *name;
-
/** link: place with &drm_i915_private.context_list */
struct list_head link;
struct llist_node free_link;
@@ -143,11 +134,8 @@ struct i915_gem_context {
* @flags: small set of booleans
*/
unsigned long flags;
-#define CONTEXT_BANNED 0
-#define CONTEXT_CLOSED 1
-#define CONTEXT_FORCE_SINGLE_SUBMISSION 2
-#define CONTEXT_USER_ENGINES 3
-#define CONTEXT_NOPREEMPT 4
+#define CONTEXT_CLOSED 0
+#define CONTEXT_USER_ENGINES 1
struct mutex mutex;
@@ -177,12 +165,14 @@ struct i915_gem_context {
*/
struct radix_tree_root handles_vma;
- /** jump_whitelist: Bit array for tracking cmds during cmdparsing
- * Guarded by struct_mutex
+ /**
+ * @name: arbitrary name, used for user debug
+ *
+ * A name is constructed for the context from the creator's process
+ * name, pid and user handle in order to uniquely identify the
+ * context in messages.
*/
- unsigned long *jump_whitelist;
- /** jump_whitelist_cmds: No of cmd slots available */
- u32 jump_whitelist_cmds;
+ char name[TASK_COMM_LEN + 8];
};
#endif /* __I915_GEM_CONTEXT_TYPES_H__ */
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
index 9937b4c341f1..0cc40e77bbd2 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
@@ -12,6 +12,8 @@
#include "i915_gem_ioctls.h"
#include "i915_gem_object.h"
#include "i915_vma.h"
+#include "i915_gem_lmem.h"
+#include "i915_gem_mman.h"
static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj)
{
@@ -148,9 +150,17 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
GEM_BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
obj->read_domains |= I915_GEM_DOMAIN_GTT;
if (write) {
+ struct i915_vma *vma;
+
obj->read_domains = I915_GEM_DOMAIN_GTT;
obj->write_domain = I915_GEM_DOMAIN_GTT;
obj->mm.dirty = true;
+
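+ /* Mark each bound GGTT vma as written so the GGTT is flushed later */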
+ spin_lock(&obj->vma.lock);
+ for_each_ggtt_vma(vma, obj)
+ if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
+ i915_vma_set_ggtt_write(vma);
+ spin_unlock(&obj->vma.lock);
}
i915_gem_object_unpin_pages(obj);
@@ -175,138 +185,34 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level)
{
- struct i915_vma *vma;
int ret;
- assert_object_held(obj);
-
if (obj->cache_level == cache_level)
return 0;
- /* Inspect the list of currently bound VMA and unbind any that would
- * be invalid given the new cache-level. This is principally to
- * catch the issue of the CS prefetch crossing page boundaries and
- * reading an invalid PTE on older architectures.
- */
-restart:
- list_for_each_entry(vma, &obj->vma.list, obj_link) {
- if (!drm_mm_node_allocated(&vma->node))
- continue;
-
- if (i915_vma_is_pinned(vma)) {
- DRM_DEBUG("can not change the cache level of pinned objects\n");
- return -EBUSY;
- }
-
- if (!i915_vma_is_closed(vma) &&
- i915_gem_valid_gtt_space(vma, cache_level))
- continue;
-
- ret = i915_vma_unbind(vma);
- if (ret)
- return ret;
-
- /* As unbinding may affect other elements in the
- * obj->vma_list (due to side-effects from retiring
- * an active vma), play safe and restart the iterator.
- */
- goto restart;
- }
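+ /* Wait for the object to be idle before changing its cache domain */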
+ ret = i915_gem_object_wait(obj,
+ I915_WAIT_INTERRUPTIBLE |
+ I915_WAIT_ALL,
+ MAX_SCHEDULE_TIMEOUT);
+ if (ret)
+ return ret;
- /* We can reuse the existing drm_mm nodes but need to change the
- * cache-level on the PTE. We could simply unbind them all and
- * rebind with the correct cache-level on next use. However since
- * we already have a valid slot, dma mapping, pages etc, we may as
- * rewrite the PTE in the belief that doing so tramples upon less
- * state and so involves less work.
- */
- if (atomic_read(&obj->bind_count)) {
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ ret = i915_gem_object_lock_interruptible(obj);
+ if (ret)
+ return ret;
- /* Before we change the PTE, the GPU must not be accessing it.
- * If we wait upon the object, we know that all the bound
- * VMA are no longer active.
- */
- ret = i915_gem_object_wait(obj,
- I915_WAIT_INTERRUPTIBLE |
- I915_WAIT_ALL,
- MAX_SCHEDULE_TIMEOUT);
- if (ret)
- return ret;
-
- if (!HAS_LLC(i915) && cache_level != I915_CACHE_NONE) {
- intel_wakeref_t wakeref =
- intel_runtime_pm_get(&i915->runtime_pm);
-
- /*
- * Access to snoopable pages through the GTT is
- * incoherent and on some machines causes a hard
- * lockup. Relinquish the CPU mmaping to force
- * userspace to refault in the pages and we can
- * then double check if the GTT mapping is still
- * valid for that pointer access.
- */
- ret = mutex_lock_interruptible(&i915->ggtt.vm.mutex);
- if (ret) {
- intel_runtime_pm_put(&i915->runtime_pm,
- wakeref);
- return ret;
- }
-
- if (obj->userfault_count)
- __i915_gem_object_release_mmap(obj);
-
- /*
- * As we no longer need a fence for GTT access,
- * we can relinquish it now (and so prevent having
- * to steal a fence from someone else on the next
- * fence request). Note GPU activity would have
- * dropped the fence as all snoopable access is
- * supposed to be linear.
- */
- for_each_ggtt_vma(vma, obj) {
- ret = i915_vma_revoke_fence(vma);
- if (ret)
- break;
- }
- mutex_unlock(&i915->ggtt.vm.mutex);
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- if (ret)
- return ret;
- } else {
- /*
- * We either have incoherent backing store and
- * so no GTT access or the architecture is fully
- * coherent. In such cases, existing GTT mmaps
- * ignore the cache bit in the PTE and we can
- * rewrite it without confusing the GPU or having
- * to force userspace to fault back in its mmaps.
- */
- }
-
- list_for_each_entry(vma, &obj->vma.list, obj_link) {
- if (!drm_mm_node_allocated(&vma->node))
- continue;
-
- /* Wait for an earlier async bind, need to rewrite it */
- ret = i915_vma_sync(vma);
- if (ret)
- return ret;
-
- ret = i915_vma_bind(vma, cache_level, PIN_UPDATE, NULL);
- if (ret)
- return ret;
- }
+ /* Always invalidate stale cachelines */
+ if (obj->cache_level != cache_level) {
+ i915_gem_object_set_cache_coherency(obj, cache_level);
+ obj->cache_dirty = true;
}
- list_for_each_entry(vma, &obj->vma.list, obj_link) {
- if (i915_vm_has_cache_coloring(vma->vm))
- vma->node.color = cache_level;
- }
- i915_gem_object_set_cache_coherency(obj, cache_level);
- obj->cache_dirty = true; /* Always invalidate stale cachelines */
+ i915_gem_object_unlock(obj);
- return 0;
+ /* The cache-level will be applied when each vma is rebound. */
+ return i915_gem_object_unbind(obj,
+ I915_GEM_OBJECT_UNBIND_ACTIVE |
+ I915_GEM_OBJECT_UNBIND_BARRIER);
}
int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
@@ -387,20 +293,7 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
goto out;
}
- if (obj->cache_level == level)
- goto out;
-
- ret = i915_gem_object_wait(obj,
- I915_WAIT_INTERRUPTIBLE,
- MAX_SCHEDULE_TIMEOUT);
- if (ret)
- goto out;
-
- ret = i915_gem_object_lock_interruptible(obj);
- if (ret == 0) {
- ret = i915_gem_object_set_cache_level(obj, level);
- i915_gem_object_unlock(obj);
- }
+ ret = i915_gem_object_set_cache_level(obj, level);
out:
i915_gem_object_put(obj);
@@ -419,10 +312,13 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
const struct i915_ggtt_view *view,
unsigned int flags)
{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct i915_vma *vma;
int ret;
- assert_object_held(obj);
+ /* Frame buffer must be in LMEM (no migration yet) */
+ if (HAS_LMEM(i915) && !i915_gem_object_is_lmem(obj))
+ return ERR_PTR(-EINVAL);
/*
* The display engine is not coherent with the LLC cache on gen6. As
@@ -435,7 +331,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
* with that bit in the PTE to main memory with just one PIPE_CONTROL.
*/
ret = i915_gem_object_set_cache_level(obj,
- HAS_WT(to_i915(obj->base.dev)) ?
+ HAS_WT(i915) ?
I915_CACHE_WT : I915_CACHE_NONE);
if (ret)
return ERR_PTR(ret);
@@ -462,13 +358,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
- __i915_gem_object_flush_for_display(obj);
-
- /*
- * It should now be out of any other write domains, and we can update
- * the domain values for our changes.
- */
- obj->read_domains |= I915_GEM_DOMAIN_GTT;
+ i915_gem_object_flush_if_display(obj);
return vma;
}
@@ -479,8 +369,11 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
struct i915_vma *vma;
GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+ if (!atomic_read(&obj->bind_count))
+ return;
mutex_lock(&i915->ggtt.vm.mutex);
+ spin_lock(&obj->vma.lock);
for_each_ggtt_vma(vma, obj) {
if (!drm_mm_node_allocated(&vma->node))
continue;
@@ -488,6 +381,7 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
GEM_BUG_ON(vma->vm != &i915->ggtt.vm);
list_move_tail(&vma->vm_link, &vma->vm->bound_list);
}
+ spin_unlock(&obj->vma.lock);
mutex_unlock(&i915->ggtt.vm.mutex);
if (i915_gem_object_is_shrinkable(obj)) {
@@ -664,7 +558,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
i915_gem_object_unlock(obj);
if (write_domain)
- intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CPU);
+ i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);
out_unpin:
i915_gem_object_unpin_pages(obj);
@@ -784,7 +678,7 @@ int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj,
}
out:
- intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CPU);
+ i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);
obj->mm.dirty = true;
/* return with the pages pinned */
return 0;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index f0998f1225af..cbd2bcade3c8 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -25,6 +25,7 @@
#include "i915_gem_clflush.h"
#include "i915_gem_context.h"
#include "i915_gem_ioctls.h"
+#include "i915_sw_fence_work.h"
#include "i915_trace.h"
enum {
@@ -228,6 +229,7 @@ struct i915_execbuffer {
struct i915_request *request; /** our request to build */
struct i915_vma *batch; /** identity of the batch obj/vma */
+ struct i915_vma *trampoline; /** trampoline used for chaining */
/** actual size of execobj[] as we may extend it for the cmdparser */
unsigned int buffer_count;
@@ -253,7 +255,6 @@ struct i915_execbuffer {
bool has_fence : 1;
bool needs_unfenced : 1;
- struct intel_context *ce;
struct i915_request *rq;
u32 *rq_cmd;
unsigned int rq_size;
@@ -277,25 +278,6 @@ struct i915_execbuffer {
#define exec_entry(EB, VMA) (&(EB)->exec[(VMA)->exec_flags - (EB)->flags])
-/*
- * Used to convert any address to canonical form.
- * Starting from gen8, some commands (e.g. STATE_BASE_ADDRESS,
- * MI_LOAD_REGISTER_MEM and others, see Broadwell PRM Vol2a) require the
- * addresses to be in a canonical form:
- * "GraphicsAddress[63:48] are ignored by the HW and assumed to be in correct
- * canonical form [63:48] == [47]."
- */
-#define GEN8_HIGH_ADDRESS_BIT 47
-static inline u64 gen8_canonical_addr(u64 address)
-{
- return sign_extend64(address, GEN8_HIGH_ADDRESS_BIT);
-}
-
-static inline u64 gen8_noncanonical_addr(u64 address)
-{
- return address & GENMASK_ULL(GEN8_HIGH_ADDRESS_BIT, 0);
-}
-
static inline bool eb_use_cmdparser(const struct i915_execbuffer *eb)
{
return intel_engine_requires_cmd_parser(eb->engine) ||
@@ -748,9 +730,6 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
unsigned int i, batch;
int err;
- if (unlikely(i915_gem_context_is_banned(eb->gem_context)))
- return -EIO;
-
INIT_LIST_HEAD(&eb->relocs);
INIT_LIST_HEAD(&eb->unbound);
@@ -886,9 +865,6 @@ static void eb_destroy(const struct i915_execbuffer *eb)
{
GEM_BUG_ON(eb->reloc_cache.rq);
- if (eb->reloc_cache.ce)
- intel_context_put(eb->reloc_cache.ce);
-
if (eb->lut_size > 0)
kfree(eb->buckets);
}
@@ -912,7 +888,6 @@ static void reloc_cache_init(struct reloc_cache *cache,
cache->has_fence = cache->gen < 4;
cache->needs_unfenced = INTEL_INFO(i915)->unfenced_needs_alignment;
cache->node.flags = 0;
- cache->ce = NULL;
cache->rq = NULL;
cache->rq_size = 0;
}
@@ -1182,7 +1157,7 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
if (err)
goto err_unmap;
- rq = intel_context_create_request(cache->ce);
+ rq = i915_request_create(eb->context);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_unpin;
@@ -1246,36 +1221,9 @@ static u32 *reloc_gpu(struct i915_execbuffer *eb,
if (unlikely(!cache->rq)) {
int err;
- /* If we need to copy for the cmdparser, we will stall anyway */
- if (eb_use_cmdparser(eb))
- return ERR_PTR(-EWOULDBLOCK);
-
if (!intel_engine_can_store_dword(eb->engine))
return ERR_PTR(-ENODEV);
- if (!cache->ce) {
- struct intel_context *ce;
-
- /*
- * The CS pre-parser can pre-fetch commands across
- * memory sync points and starting gen12 it is able to
- * pre-fetch across BB_START and BB_END boundaries
- * (within the same context). We therefore use a
- * separate context gen12+ to guarantee that the reloc
- * writes land before the parser gets to the target
- * memory location.
- */
- if (cache->gen >= 12)
- ce = intel_context_create(eb->context->gem_context,
- eb->engine);
- else
- ce = intel_context_get(eb->context);
- if (IS_ERR(ce))
- return ERR_CAST(ce);
-
- cache->ce = ce;
- }
-
err = __reloc_gpu_alloc(eb, vma, len);
if (unlikely(err))
return ERR_PTR(err);
@@ -1943,15 +1891,15 @@ err_skip:
return err;
}
-static bool i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
+static int i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
{
if (exec->flags & __I915_EXEC_ILLEGAL_FLAGS)
- return false;
+ return -EINVAL;
/* Kernel clipping was a DRI1 misfeature */
if (!(exec->flags & I915_EXEC_FENCE_ARRAY)) {
if (exec->num_cliprects || exec->cliprects_ptr)
- return false;
+ return -EINVAL;
}
if (exec->DR4 == 0xffffffff) {
@@ -1959,12 +1907,12 @@ static bool i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
exec->DR4 = 0;
}
if (exec->DR1 || exec->DR4)
- return false;
+ return -EINVAL;
if ((exec->batch_start_offset | exec->batch_len) & 0x7)
- return false;
+ return -EINVAL;
- return true;
+ return 0;
}
static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
@@ -1993,99 +1941,179 @@ static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
}
static struct i915_vma *
-shadow_batch_pin(struct i915_execbuffer *eb, struct drm_i915_gem_object *obj)
+shadow_batch_pin(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
+ unsigned int flags)
{
- struct drm_i915_private *dev_priv = eb->i915;
- struct i915_vma * const vma = *eb->vma;
- struct i915_address_space *vm;
- u64 flags;
+ struct i915_vma *vma;
+ int err;
- /*
- * PPGTT backed shadow buffers must be mapped RO, to prevent
- * post-scan tampering
- */
- if (CMDPARSER_USES_GGTT(dev_priv)) {
- flags = PIN_GLOBAL;
- vm = &dev_priv->ggtt.vm;
- } else if (vma->vm->has_read_only) {
- flags = PIN_USER;
- vm = vma->vm;
- i915_gem_object_set_readonly(obj);
- } else {
- DRM_DEBUG("Cannot prevent post-scan tampering without RO capable vm\n");
- return ERR_PTR(-EINVAL);
- }
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma))
+ return vma;
+
+ err = i915_vma_pin(vma, 0, 0, flags);
+ if (err)
+ return ERR_PTR(err);
- return i915_gem_object_pin(obj, vm, NULL, 0, 0, flags);
+ return vma;
}
-static struct i915_vma *eb_parse(struct i915_execbuffer *eb)
+struct eb_parse_work {
+ struct dma_fence_work base;
+ struct intel_engine_cs *engine;
+ struct i915_vma *batch;
+ struct i915_vma *shadow;
+ struct i915_vma *trampoline;
+ unsigned int batch_offset;
+ unsigned int batch_length;
+};
+
+static int __eb_parse(struct dma_fence_work *work)
{
- struct intel_engine_pool_node *pool;
- struct i915_vma *vma;
- u64 batch_start;
- u64 shadow_batch_start;
+ struct eb_parse_work *pw = container_of(work, typeof(*pw), base);
+
+ return intel_engine_cmd_parser(pw->engine,
+ pw->batch,
+ pw->batch_offset,
+ pw->batch_length,
+ pw->shadow,
+ pw->trampoline);
+}
+
+static const struct dma_fence_work_ops eb_parse_ops = {
+ .name = "eb_parse",
+ .work = __eb_parse,
+};
+
+static int eb_parse_pipeline(struct i915_execbuffer *eb,
+ struct i915_vma *shadow,
+ struct i915_vma *trampoline)
+{
+ struct eb_parse_work *pw;
int err;
- pool = intel_engine_get_pool(eb->engine, eb->batch_len);
- if (IS_ERR(pool))
- return ERR_CAST(pool);
+ pw = kzalloc(sizeof(*pw), GFP_KERNEL);
+ if (!pw)
+ return -ENOMEM;
- vma = shadow_batch_pin(eb, pool->obj);
- if (IS_ERR(vma))
- goto err;
+ dma_fence_work_init(&pw->base, &eb_parse_ops);
- batch_start = gen8_canonical_addr(eb->batch->node.start) +
- eb->batch_start_offset;
+ pw->engine = eb->engine;
+ pw->batch = eb->batch;
+ pw->batch_offset = eb->batch_start_offset;
+ pw->batch_length = eb->batch_len;
+ pw->shadow = shadow;
+ pw->trampoline = trampoline;
- shadow_batch_start = gen8_canonical_addr(vma->node.start);
+ dma_resv_lock(pw->batch->resv, NULL);
- err = intel_engine_cmd_parser(eb->gem_context,
- eb->engine,
- eb->batch->obj,
- batch_start,
- eb->batch_start_offset,
- eb->batch_len,
- pool->obj,
- shadow_batch_start);
+ err = dma_resv_reserve_shared(pw->batch->resv, 1);
+ if (err)
+ goto err_batch_unlock;
- if (err) {
- i915_vma_unpin(vma);
+ /* Wait for all writes (and relocs) into the batch to complete */
+ err = i915_sw_fence_await_reservation(&pw->base.chain,
+ pw->batch->resv, NULL, false,
+ 0, I915_FENCE_GFP);
+ if (err < 0)
+ goto err_batch_unlock;
+
+ /* Keep the batch alive and unwritten as we parse */
+ dma_resv_add_shared_fence(pw->batch->resv, &pw->base.dma);
+
+ dma_resv_unlock(pw->batch->resv);
+ /* Force execution to wait for completion of the parser */
+ dma_resv_lock(shadow->resv, NULL);
+ dma_resv_add_excl_fence(shadow->resv, &pw->base.dma);
+ dma_resv_unlock(shadow->resv);
+
+ dma_fence_work_commit(&pw->base);
+ return 0;
+
+err_batch_unlock:
+ dma_resv_unlock(pw->batch->resv);
+ kfree(pw);
+ return err;
+}
+
+static int eb_parse(struct i915_execbuffer *eb)
+{
+ struct intel_engine_pool_node *pool;
+ struct i915_vma *shadow, *trampoline;
+ unsigned int len;
+ int err;
+
+ if (!eb_use_cmdparser(eb))
+ return 0;
+
+ len = eb->batch_len;
+ if (!CMDPARSER_USES_GGTT(eb->i915)) {
/*
- * Unsafe GGTT-backed buffers can still be submitted safely
- * as non-secure.
- * For PPGTT backing however, we have no choice but to forcibly
- * reject unsafe buffers
+ * ppGTT backed shadow buffers must be mapped RO, to prevent
+ * post-scan tampering
*/
- if (CMDPARSER_USES_GGTT(eb->i915) && (err == -EACCES))
- /* Execute original buffer non-secure */
- vma = NULL;
- else
- vma = ERR_PTR(err);
+ if (!eb->context->vm->has_read_only) {
+ DRM_DEBUG("Cannot prevent post-scan tampering without RO capable vm\n");
+ return -EINVAL;
+ }
+ } else {
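+ /* Reserve space for the trampoline jump after the parsed batch */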
+ len += I915_CMD_PARSER_TRAMPOLINE_SIZE;
+ }
+
+ pool = intel_engine_get_pool(eb->engine, len);
+ if (IS_ERR(pool))
+ return PTR_ERR(pool);
+
+ shadow = shadow_batch_pin(pool->obj, eb->context->vm, PIN_USER);
+ if (IS_ERR(shadow)) {
+ err = PTR_ERR(shadow);
goto err;
}
+ i915_gem_object_set_readonly(shadow->obj);
+
+ trampoline = NULL;
+ if (CMDPARSER_USES_GGTT(eb->i915)) {
+ trampoline = shadow;
+
+ shadow = shadow_batch_pin(pool->obj,
+ &eb->engine->gt->ggtt->vm,
+ PIN_GLOBAL);
+ if (IS_ERR(shadow)) {
+ err = PTR_ERR(shadow);
+ shadow = trampoline;
+ goto err_shadow;
+ }
+
+ eb->batch_flags |= I915_DISPATCH_SECURE;
+ }
+
+ err = eb_parse_pipeline(eb, shadow, trampoline);
+ if (err)
+ goto err_trampoline;
- eb->vma[eb->buffer_count] = i915_vma_get(vma);
+ eb->vma[eb->buffer_count] = i915_vma_get(shadow);
eb->flags[eb->buffer_count] =
__EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_REF;
- vma->exec_flags = &eb->flags[eb->buffer_count];
+ shadow->exec_flags = &eb->flags[eb->buffer_count];
eb->buffer_count++;
+ eb->trampoline = trampoline;
eb->batch_start_offset = 0;
- eb->batch = vma;
+ eb->batch = shadow;
- if (CMDPARSER_USES_GGTT(eb->i915))
- eb->batch_flags |= I915_DISPATCH_SECURE;
-
- /* eb->batch_len unchanged */
-
- vma->private = pool;
- return vma;
+ shadow->private = pool;
+ return 0;
+err_trampoline:
+ if (trampoline)
+ i915_vma_unpin(trampoline);
+err_shadow:
+ i915_vma_unpin(shadow);
err:
intel_engine_pool_put(pool);
- return vma;
+ return err;
}
static void
@@ -2134,7 +2162,17 @@ static int eb_submit(struct i915_execbuffer *eb)
if (err)
return err;
- if (i915_gem_context_nopreempt(eb->gem_context))
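+ /*
+ * Chain into the trampoline placed after the parsed batch inside
+ * the shadow buffer (see intel_engine_cmd_parser()).
+ */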
+ if (eb->trampoline) {
+ GEM_BUG_ON(eb->batch_start_offset);
+ err = eb->engine->emit_bb_start(eb->request,
+ eb->trampoline->node.start +
+ eb->batch_len,
+ 0, 0);
+ if (err)
+ return err;
+ }
+
+ if (intel_context_nopreempt(eb->context))
eb->request->flags |= I915_REQUEST_NOPREEMPT;
return 0;
@@ -2220,6 +2258,9 @@ static int __eb_pin_engine(struct i915_execbuffer *eb, struct intel_context *ce)
if (err)
return err;
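+
+ /* A banned context is denied any further execution */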
+ if (unlikely(intel_context_is_banned(ce)))
+ return -EIO;
+
/*
* Pinning the contexts may generate requests in order to acquire
* GGTT space, so do this first before we reserve a seqno for
@@ -2515,6 +2556,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
eb.buffer_count = args->buffer_count;
eb.batch_start_offset = args->batch_start_offset;
eb.batch_len = args->batch_len;
+ eb.trampoline = NULL;
eb.batch_flags = 0;
if (args->flags & I915_EXEC_SECURE) {
@@ -2606,15 +2648,9 @@ i915_gem_do_execbuffer(struct drm_device *dev,
if (eb.batch_len == 0)
eb.batch_len = eb.batch->size - eb.batch_start_offset;
- if (eb_use_cmdparser(&eb)) {
- struct i915_vma *vma;
-
- vma = eb_parse(&eb);
- if (IS_ERR(vma)) {
- err = PTR_ERR(vma);
- goto err_vma;
- }
- }
+ err = eb_parse(&eb);
+ if (err)
+ goto err_vma;
/*
* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
@@ -2694,6 +2730,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
err = eb_submit(&eb);
err_request:
add_to_client(eb.request, file);
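+ /* Take a reference so eb.request stays valid past i915_request_add() */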
+ i915_request_get(eb.request);
i915_request_add(eb.request);
if (fences)
@@ -2709,6 +2746,7 @@ err_request:
fput(out_fence->file);
}
}
+ i915_request_put(eb.request);
err_batch_unpin:
if (eb.batch_flags & I915_DISPATCH_SECURE)
@@ -2718,6 +2756,8 @@ err_batch_unpin:
err_vma:
if (eb.exec)
eb_release_vmas(&eb);
+ if (eb.trampoline)
+ i915_vma_unpin(eb.trampoline);
mutex_unlock(&dev->struct_mutex);
err_engine:
eb_unpin_engine(&eb);
@@ -2787,8 +2827,9 @@ i915_gem_execbuffer_ioctl(struct drm_device *dev, void *data,
exec2.flags = I915_EXEC_RENDER;
i915_execbuffer2_set_context_id(exec2, 0);
- if (!i915_gem_check_execbuffer(&exec2))
- return -EINVAL;
+ err = i915_gem_check_execbuffer(&exec2);
+ if (err)
+ return err;
/* Copy in the exec list from userland */
exec_list = kvmalloc_array(count, sizeof(*exec_list),
@@ -2865,8 +2906,9 @@ i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
}
- if (!i915_gem_check_execbuffer(args))
- return -EINVAL;
+ err = i915_gem_check_execbuffer(args);
+ if (err)
+ return err;
/* Allocate an extra slot for use by the command parser */
exec2_list = kvmalloc_array(count + 1, eb_element_size(),
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ioctls.h b/drivers/gpu/drm/i915/gem/i915_gem_ioctls.h
index ddc7f2a52b3e..87d8b27f426d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ioctls.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ioctls.h
@@ -28,8 +28,8 @@ int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
int i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
-int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file);
+int i915_gem_mmap_offset_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
int i915_gem_pread_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
index 0e2bf6b7e143..520cc9cac471 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
@@ -79,9 +79,6 @@ __i915_gem_lmem_object_create(struct intel_memory_region *mem,
struct drm_i915_private *i915 = mem->i915;
struct drm_i915_gem_object *obj;
- if (size > BIT(mem->mm.max_order) * mem->mm.chunk_size)
- return ERR_PTR(-E2BIG);
-
obj = i915_gem_object_alloc();
if (!obj)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index e3002849844b..879fff8adc48 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -5,6 +5,7 @@
*/
#include <linux/mman.h>
+#include <linux/pfn_t.h>
#include <linux/sizes.h>
#include "gt/intel_gt.h"
@@ -14,7 +15,9 @@
#include "i915_gem_gtt.h"
#include "i915_gem_ioctls.h"
#include "i915_gem_object.h"
+#include "i915_gem_mman.h"
#include "i915_trace.h"
+#include "i915_user_extensions.h"
#include "i915_vma.h"
static inline bool
@@ -144,6 +147,9 @@ static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj)
* 3 - Remove implicit set-domain(GTT) and synchronisation on initial
* pagefault; swapin remains transparent.
*
+ * 4 - Support multiple fault handlers per object depending on object's
+ * backing storage (a.k.a. MMAP_OFFSET).
+ *
* Restrictions:
*
* * snoopable objects cannot be accessed via the GTT. It can cause machine
@@ -171,7 +177,7 @@ static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj)
*/
int i915_gem_mmap_gtt_version(void)
{
- return 3;
+ return 4;
}
static inline struct i915_ggtt_view
@@ -197,29 +203,83 @@ compute_partial_view(const struct drm_i915_gem_object *obj,
return view;
}
-/**
- * i915_gem_fault - fault a page into the GTT
- * @vmf: fault info
- *
- * The fault handler is set up by drm_gem_mmap() when a object is GTT mapped
- * from userspace. The fault handler takes care of binding the object to
- * the GTT (if needed), allocating and programming a fence register (again,
- * only if needed based on whether the old reg is still valid or the object
- * is tiled) and inserting a new PTE into the faulting process.
- *
- * Note that the faulting process may involve evicting existing objects
- * from the GTT and/or fence registers to make room. So performance may
- * suffer if the GTT working set is large or there are few fence registers
- * left.
- *
- * The current feature set supported by i915_gem_fault() and thus GTT mmaps
- * is exposed via I915_PARAM_MMAP_GTT_VERSION (see i915_gem_mmap_gtt_version).
- */
-vm_fault_t i915_gem_fault(struct vm_fault *vmf)
+static vm_fault_t i915_error_to_vmf_fault(int err)
+{
+ switch (err) {
+ default:
+ WARN_ONCE(err, "unhandled error in %s: %i\n", __func__, err);
+ /* fallthrough */
+ case -EIO: /* shmemfs failure from swap device */
+ case -EFAULT: /* purged object */
+ case -ENODEV: /* bad object, how did you get here! */
+ return VM_FAULT_SIGBUS;
+
+ case -ENOSPC: /* shmemfs allocation failure */
+ case -ENOMEM: /* our allocation failure */
+ return VM_FAULT_OOM;
+
+ case 0:
+ case -EAGAIN:
+ case -ERESTARTSYS:
+ case -EINTR:
+ case -EBUSY:
+ /*
+ * EBUSY is ok: this just means that another thread
+ * already did the job.
+ */
+ return VM_FAULT_NOPAGE;
+ }
+}
+
+static vm_fault_t vm_fault_cpu(struct vm_fault *vmf)
+{
+ struct vm_area_struct *area = vmf->vma;
+ struct i915_mmap_offset *mmo = area->vm_private_data;
+ struct drm_i915_gem_object *obj = mmo->obj;
+ unsigned long i, size = area->vm_end - area->vm_start;
+ bool write = area->vm_flags & VM_WRITE;
+ vm_fault_t ret = VM_FAULT_SIGBUS;
+ int err;
+
+ if (!i915_gem_object_has_struct_page(obj))
+ return ret;
+
+ /* Sanity check that we allow writing into this object */
+ if (i915_gem_object_is_readonly(obj) && write)
+ return ret;
+
+ err = i915_gem_object_pin_pages(obj);
+ if (err)
+ return i915_error_to_vmf_fault(err);
+
+ /* PTEs are revoked in obj->ops->put_pages() */
+ for (i = 0; i < size >> PAGE_SHIFT; i++) {
+ struct page *page = i915_gem_object_get_page(obj, i);
+
+ ret = vmf_insert_pfn(area,
+ (unsigned long)area->vm_start + i * PAGE_SIZE,
+ page_to_pfn(page));
+ if (ret != VM_FAULT_NOPAGE)
+ break;
+ }
+
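+ /* Mark the object dirty so its backing store is written back on release */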
+ if (write) {
+ GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+ obj->cache_dirty = true; /* XXX flush after PAT update? */
+ obj->mm.dirty = true;
+ }
+
+ i915_gem_object_unpin_pages(obj);
+
+ return ret;
+}
+
+static vm_fault_t vm_fault_gtt(struct vm_fault *vmf)
{
#define MIN_CHUNK_PAGES (SZ_1M >> PAGE_SHIFT)
struct vm_area_struct *area = vmf->vma;
- struct drm_i915_gem_object *obj = to_intel_bo(area->vm_private_data);
+ struct i915_mmap_offset *mmo = area->vm_private_data;
+ struct drm_i915_gem_object *obj = mmo->obj;
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *i915 = to_i915(dev);
struct intel_runtime_pm *rpm = &i915->runtime_pm;
@@ -312,6 +372,9 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
list_add(&obj->userfault_link, &i915->ggtt.userfault_list);
mutex_unlock(&i915->ggtt.vm.mutex);
+ /* Track the mmo associated with the fenced vma */
+ vma->mmo = mmo;
+
if (IS_ACTIVE(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND))
intel_wakeref_auto(&i915->ggtt.userfault_wakeref,
msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND));
@@ -332,67 +395,36 @@ err_rpm:
intel_runtime_pm_put(rpm, wakeref);
i915_gem_object_unpin_pages(obj);
err:
- switch (ret) {
- default:
- WARN_ONCE(ret, "unhandled error in %s: %i\n", __func__, ret);
- /* fallthrough */
- case -EIO: /* shmemfs failure from swap device */
- case -EFAULT: /* purged object */
- case -ENODEV: /* bad object, how did you get here! */
- return VM_FAULT_SIGBUS;
-
- case -ENOSPC: /* shmemfs allocation failure */
- case -ENOMEM: /* our allocation failure */
- return VM_FAULT_OOM;
-
- case 0:
- case -EAGAIN:
- case -ERESTARTSYS:
- case -EINTR:
- case -EBUSY:
- /*
- * EBUSY is ok: this just means that another thread
- * already did the job.
- */
- return VM_FAULT_NOPAGE;
- }
+ return i915_error_to_vmf_fault(ret);
}
-void __i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
+void __i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
{
struct i915_vma *vma;
GEM_BUG_ON(!obj->userfault_count);
- obj->userfault_count = 0;
- list_del(&obj->userfault_link);
- drm_vma_node_unmap(&obj->base.vma_node,
- obj->base.dev->anon_inode->i_mapping);
-
for_each_ggtt_vma(vma, obj)
- i915_vma_unset_userfault(vma);
+ i915_vma_revoke_mmap(vma);
+
+ GEM_BUG_ON(obj->userfault_count);
}
-/**
- * i915_gem_object_release_mmap - remove physical page mappings
- * @obj: obj in question
- *
- * Preserve the reservation of the mmapping with the DRM core code, but
- * relinquish ownership of the pages back to the system.
- *
+/*
* It is vital that we remove the page mapping if we have mapped a tiled
* object through the GTT and then lose the fence register due to
* resource pressure. Similarly if the object has been moved out of the
* aperture, then pages mapped into userspace must be revoked. Removing the
* mapping will then trigger a page fault on the next user access, allowing
- * fixup by i915_gem_fault().
+ * fixup by vm_fault_gtt().
*/
-void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
+static void i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
intel_wakeref_t wakeref;
- /* Serialisation between user GTT access and our code depends upon
+ /*
+ * Serialisation between user GTT access and our code depends upon
* revoking the CPU's PTE whilst the mutex is held. The next user
* pagefault then has to wait until we release the mutex.
*
@@ -406,9 +438,10 @@ void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
if (!obj->userfault_count)
goto out;
- __i915_gem_object_release_mmap(obj);
+ __i915_gem_object_release_mmap_gtt(obj);
- /* Ensure that the CPU's PTE are revoked and there are not outstanding
+ /*
+ * Ensure that the CPU's PTE are revoked and there are not outstanding
* memory transactions from userspace before we return. The TLB
* flushing implied above by changing the PTE above *should* be
* sufficient, an extra barrier here just provides us with a bit
@@ -422,54 +455,149 @@ out:
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}
-static int create_mmap_offset(struct drm_i915_gem_object *obj)
+void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
+{
+ struct i915_mmap_offset *mmo;
+
+ spin_lock(&obj->mmo.lock);
+ list_for_each_entry(mmo, &obj->mmo.offsets, offset) {
+ /*
+ * vma_node_unmap for GTT mmaps is already handled in
+ * __i915_gem_object_release_mmap_gtt()
+ */
+ if (mmo->mmap_type == I915_MMAP_TYPE_GTT)
+ continue;
+
+ spin_unlock(&obj->mmo.lock);
+ drm_vma_node_unmap(&mmo->vma_node,
+ obj->base.dev->anon_inode->i_mapping);
+ spin_lock(&obj->mmo.lock);
+ }
+ spin_unlock(&obj->mmo.lock);
+}
+
+/**
+ * i915_gem_object_release_mmap - remove physical page mappings
+ * @obj: obj in question
+ *
+ * Preserve the reservation of the mmapping with the DRM core code, but
+ * relinquish ownership of the pages back to the system.
+ */
+void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
+{
+ i915_gem_object_release_mmap_gtt(obj);
+ i915_gem_object_release_mmap_offset(obj);
+}
+
+static struct i915_mmap_offset *
+mmap_offset_attach(struct drm_i915_gem_object *obj,
+ enum i915_mmap_type mmap_type,
+ struct drm_file *file)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct intel_gt *gt = &i915->gt;
+ struct i915_mmap_offset *mmo;
int err;
- err = drm_gem_create_mmap_offset(&obj->base);
+ mmo = kmalloc(sizeof(*mmo), GFP_KERNEL);
+ if (!mmo)
+ return ERR_PTR(-ENOMEM);
+
+ mmo->obj = obj;
+ mmo->dev = obj->base.dev;
+ mmo->file = file;
+ mmo->mmap_type = mmap_type;
+ drm_vma_node_reset(&mmo->vma_node);
+
+ err = drm_vma_offset_add(mmo->dev->vma_offset_manager, &mmo->vma_node,
+ obj->base.size / PAGE_SIZE);
if (likely(!err))
- return 0;
+ goto out;
/* Attempt to reap some mmap space from dead objects */
- err = intel_gt_retire_requests_timeout(gt, MAX_SCHEDULE_TIMEOUT);
+ err = intel_gt_retire_requests_timeout(&i915->gt, MAX_SCHEDULE_TIMEOUT);
if (err)
- return err;
+ goto err;
i915_gem_drain_freed_objects(i915);
- return drm_gem_create_mmap_offset(&obj->base);
+ err = drm_vma_offset_add(mmo->dev->vma_offset_manager, &mmo->vma_node,
+ obj->base.size / PAGE_SIZE);
+ if (err)
+ goto err;
+
+out:
+ if (file)
+ drm_vma_node_allow(&mmo->vma_node, file);
+
+ spin_lock(&obj->mmo.lock);
+ list_add(&mmo->offset, &obj->mmo.offsets);
+ spin_unlock(&obj->mmo.lock);
+
+ return mmo;
+
+err:
+ kfree(mmo);
+ return ERR_PTR(err);
}
-int
-i915_gem_mmap_gtt(struct drm_file *file,
- struct drm_device *dev,
- u32 handle,
- u64 *offset)
+static int
+__assign_mmap_offset(struct drm_file *file,
+ u32 handle,
+ enum i915_mmap_type mmap_type,
+ u64 *offset)
{
struct drm_i915_gem_object *obj;
- int ret;
+ struct i915_mmap_offset *mmo;
+ int err;
obj = i915_gem_object_lookup(file, handle);
if (!obj)
return -ENOENT;
- if (i915_gem_object_never_bind_ggtt(obj)) {
- ret = -ENODEV;
+ if (mmap_type == I915_MMAP_TYPE_GTT &&
+ i915_gem_object_never_bind_ggtt(obj)) {
+ err = -ENODEV;
+ goto out;
+ }
+
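+ /* The CPU fault handler requires the object to have struct pages */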
+ if (mmap_type != I915_MMAP_TYPE_GTT &&
+ !i915_gem_object_has_struct_page(obj)) {
+ err = -ENODEV;
goto out;
}
- ret = create_mmap_offset(obj);
- if (ret == 0)
- *offset = drm_vma_node_offset_addr(&obj->base.vma_node);
+ mmo = mmap_offset_attach(obj, mmap_type, file);
+ if (IS_ERR(mmo)) {
+ err = PTR_ERR(mmo);
+ goto out;
+ }
+ *offset = drm_vma_node_offset_addr(&mmo->vma_node);
+ err = 0;
out:
i915_gem_object_put(obj);
- return ret;
+ return err;
+}
+
+int
+i915_gem_dumb_mmap_offset(struct drm_file *file,
+ struct drm_device *dev,
+ u32 handle,
+ u64 *offset)
+{
+ enum i915_mmap_type mmap_type;
+
+ if (boot_cpu_has(X86_FEATURE_PAT))
+ mmap_type = I915_MMAP_TYPE_WC;
+ else if (!i915_ggtt_has_aperture(&to_i915(dev)->ggtt))
+ return -ENODEV;
+ else
+ mmap_type = I915_MMAP_TYPE_GTT;
+
+ return __assign_mmap_offset(file, handle, mmap_type, offset);
}
/**
- * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
+ * i915_gem_mmap_offset_ioctl - prepare an object for GTT mmap'ing
* @dev: DRM device
* @data: GTT mapping ioctl data
* @file: GEM object info
@@ -484,12 +612,179 @@ out:
* userspace.
*/
int
-i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file)
+i915_gem_mmap_offset_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
{
- struct drm_i915_gem_mmap_gtt *args = data;
+ struct drm_i915_private *i915 = to_i915(dev);
+ struct drm_i915_gem_mmap_offset *args = data;
+ enum i915_mmap_type type;
+ int err;
- return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
+ /*
+ * Historically we failed to check args.pad and args.offset
+ * and so we cannot use those fields for user input and we cannot
+ * add -EINVAL for them as the ABI is fixed, i.e. old userspace
+ * may be feeding in garbage in those fields.
+ *
+ * if (args->pad) return -EINVAL; is verboten!
+ */
+
+ err = i915_user_extensions(u64_to_user_ptr(args->extensions),
+ NULL, 0, NULL);
+ if (err)
+ return err;
+
+ switch (args->flags) {
+ case I915_MMAP_OFFSET_GTT:
+ if (!i915_ggtt_has_aperture(&i915->ggtt))
+ return -ENODEV;
+ type = I915_MMAP_TYPE_GTT;
+ break;
+
+ case I915_MMAP_OFFSET_WC:
+ if (!boot_cpu_has(X86_FEATURE_PAT))
+ return -ENODEV;
+ type = I915_MMAP_TYPE_WC;
+ break;
+
+ case I915_MMAP_OFFSET_WB:
+ type = I915_MMAP_TYPE_WB;
+ break;
+
+ case I915_MMAP_OFFSET_UC:
+ if (!boot_cpu_has(X86_FEATURE_PAT))
+ return -ENODEV;
+ type = I915_MMAP_TYPE_UC;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return __assign_mmap_offset(file, args->handle, type, &args->offset);
+}
+
+static void vm_open(struct vm_area_struct *vma)
+{
+ struct i915_mmap_offset *mmo = vma->vm_private_data;
+ struct drm_i915_gem_object *obj = mmo->obj;
+
+ GEM_BUG_ON(!obj);
+ i915_gem_object_get(obj);
+}
+
+static void vm_close(struct vm_area_struct *vma)
+{
+ struct i915_mmap_offset *mmo = vma->vm_private_data;
+ struct drm_i915_gem_object *obj = mmo->obj;
+
+ GEM_BUG_ON(!obj);
+ i915_gem_object_put(obj);
+}
+
+static const struct vm_operations_struct vm_ops_gtt = {
+ .fault = vm_fault_gtt,
+ .open = vm_open,
+ .close = vm_close,
+};
+
+static const struct vm_operations_struct vm_ops_cpu = {
+ .fault = vm_fault_cpu,
+ .open = vm_open,
+ .close = vm_close,
+};
+
+/*
+ * This overcomes the limitation in drm_gem_mmap's assignment of a
+ * drm_gem_object as the vma->vm_private_data, since we need to be
+ * able to resolve multiple mmap offsets which could be tied to a
+ * single gem object.
+ */
+int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct drm_vma_offset_node *node;
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->minor->dev;
+ struct i915_mmap_offset *mmo = NULL;
+ struct drm_gem_object *obj = NULL;
+
+ if (drm_dev_is_unplugged(dev))
+ return -ENODEV;
+
+ drm_vma_offset_lock_lookup(dev->vma_offset_manager);
+ node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
+ vma->vm_pgoff,
+ vma_pages(vma));
+ if (likely(node)) {
+ mmo = container_of(node, struct i915_mmap_offset,
+ vma_node);
+ /*
+ * In our dependency chain, the drm_vma_offset_node
+ * depends on the validity of the mmo, which depends on
+ * the gem object. However the only reference we have
+ * at this point is the mmo (as the parent of the node).
+ * Try to check if the gem object was at least cleared.
+ */
+ if (!mmo || !mmo->obj) {
+ drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
+ return -EINVAL;
+ }
+ /*
+ * Skip 0-refcnted objects, as they are in the process of being
+ * destroyed and will be invalid when the vma manager lock
+ * is released.
+ */
+ obj = &mmo->obj->base;
+ if (!kref_get_unless_zero(&obj->refcount))
+ obj = NULL;
+ }
+ drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
+ if (!obj)
+ return -EINVAL;
+
+ if (!drm_vma_node_is_allowed(node, priv)) {
+ drm_gem_object_put_unlocked(obj);
+ return -EACCES;
+ }
+
+ if (i915_gem_object_is_readonly(to_intel_bo(obj))) {
+ if (vma->vm_flags & VM_WRITE) {
+ drm_gem_object_put_unlocked(obj);
+ return -EINVAL;
+ }
+ vma->vm_flags &= ~VM_MAYWRITE;
+ }
+
+ vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+ vma->vm_private_data = mmo;
+
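+ /* Choose the page protection and fault handler for this mapping type */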
+ switch (mmo->mmap_type) {
+ case I915_MMAP_TYPE_WC:
+ vma->vm_page_prot =
+ pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+ vma->vm_ops = &vm_ops_cpu;
+ break;
+
+ case I915_MMAP_TYPE_WB:
+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+ vma->vm_ops = &vm_ops_cpu;
+ break;
+
+ case I915_MMAP_TYPE_UC:
+ vma->vm_page_prot =
+ pgprot_noncached(vm_get_page_prot(vma->vm_flags));
+ vma->vm_ops = &vm_ops_cpu;
+ break;
+
+ case I915_MMAP_TYPE_GTT:
+ vma->vm_page_prot =
+ pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+ vma->vm_ops = &vm_ops_gtt;
+ break;
+ }
+ vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+
+ return 0;
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.h b/drivers/gpu/drm/i915/gem/i915_gem_mman.h
new file mode 100644
index 000000000000..862e01b7cb69
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.h
@@ -0,0 +1,31 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __I915_GEM_MMAN_H__
+#define __I915_GEM_MMAN_H__
+
+#include <linux/mm_types.h>
+#include <linux/types.h>
+
+struct drm_device;
+struct drm_file;
+struct drm_i915_gem_object;
+struct file;
+struct i915_mmap_offset;
+struct mutex;
+
+int i915_gem_mmap_gtt_version(void);
+int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma);
+
+int i915_gem_dumb_mmap_offset(struct drm_file *file_priv,
+ struct drm_device *dev,
+ u32 handle, u64 *offset);
+
+void __i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj);
+void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj);
+void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj);
+
+#endif
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index a50296cce0d8..46bacc82ddc4 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -22,11 +22,14 @@
*
*/
+#include <linux/sched/mm.h>
+
#include "display/intel_frontbuffer.h"
#include "gt/intel_gt.h"
#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_gem_context.h"
+#include "i915_gem_mman.h"
#include "i915_gem_object.h"
#include "i915_globals.h"
#include "i915_trace.h"
@@ -59,6 +62,9 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
INIT_LIST_HEAD(&obj->lut_list);
+ spin_lock_init(&obj->mmo.lock);
+ INIT_LIST_HEAD(&obj->mmo.offsets);
+
init_rcu_head(&obj->rcu);
obj->ops = ops;
@@ -95,6 +101,7 @@ void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
struct drm_i915_gem_object *obj = to_intel_bo(gem);
struct drm_i915_file_private *fpriv = file->driver_priv;
struct i915_lut_handle *lut, *ln;
+ struct i915_mmap_offset *mmo;
LIST_HEAD(close);
i915_gem_object_lock(obj);
@@ -109,6 +116,17 @@ void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
}
i915_gem_object_unlock(obj);
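+
+ /* Revoke the mmap offsets created through this file handle */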
+ spin_lock(&obj->mmo.lock);
+ list_for_each_entry(mmo, &obj->mmo.offsets, offset) {
+ if (mmo->file != file)
+ continue;
+
+ spin_unlock(&obj->mmo.lock);
+ drm_vma_node_revoke(&mmo->vma_node, file);
+ spin_lock(&obj->mmo.lock);
+ }
+ spin_unlock(&obj->mmo.lock);
+
list_for_each_entry_safe(lut, ln, &close, obj_link) {
struct i915_gem_context *ctx = lut->ctx;
struct i915_vma *vma;
@@ -156,6 +174,8 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
llist_for_each_entry_safe(obj, on, freed, freed) {
+ struct i915_mmap_offset *mmo, *mn;
+
trace_i915_gem_object_destroy(obj);
if (!list_empty(&obj->vma.list)) {
@@ -174,19 +194,28 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
GEM_BUG_ON(vma->obj != obj);
spin_unlock(&obj->vma.lock);
- i915_vma_destroy(vma);
+ __i915_vma_put(vma);
spin_lock(&obj->vma.lock);
}
spin_unlock(&obj->vma.lock);
}
+ i915_gem_object_release_mmap(obj);
+
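+ /* Free the mmap offset nodes; no userspace mappings remain */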
+ list_for_each_entry_safe(mmo, mn, &obj->mmo.offsets, offset) {
+ drm_vma_offset_remove(obj->base.dev->vma_offset_manager,
+ &mmo->vma_node);
+ kfree(mmo);
+ }
+ INIT_LIST_HEAD(&obj->mmo.offsets);
+
GEM_BUG_ON(atomic_read(&obj->bind_count));
GEM_BUG_ON(obj->userfault_count);
GEM_BUG_ON(!list_empty(&obj->lut_list));
atomic_set(&obj->mm.pages_pin_count, 0);
- __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
+ __i915_gem_object_put_pages(obj);
GEM_BUG_ON(i915_gem_object_has_pages(obj));
bitmap_free(obj->bit_17);
@@ -277,18 +306,14 @@ i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj,
switch (obj->write_domain) {
case I915_GEM_DOMAIN_GTT:
- for_each_ggtt_vma(vma, obj)
- intel_gt_flush_ggtt_writes(vma->vm->gt);
-
- intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_CPU);
-
+ spin_lock(&obj->vma.lock);
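+ /* Only flush the GGTT where a GGTT write was actually tracked */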
for_each_ggtt_vma(vma, obj) {
- if (vma->iomap)
- continue;
-
- i915_vma_unset_ggtt_write(vma);
+ if (i915_vma_unset_ggtt_write(vma))
+ intel_gt_flush_ggtt_writes(vma->vm->gt);
}
+ spin_unlock(&obj->vma.lock);
+ i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
break;
case I915_GEM_DOMAIN_WC:
@@ -308,6 +333,30 @@ i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj,
obj->write_domain = 0;
}
+void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
+ enum fb_op_origin origin)
+{
+ struct intel_frontbuffer *front;
+
+ front = __intel_frontbuffer_get(obj);
+ if (front) {
+ intel_frontbuffer_flush(front, origin);
+ intel_frontbuffer_put(front);
+ }
+}
+
+void __i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
+ enum fb_op_origin origin)
+{
+ struct intel_frontbuffer *front;
+
+ front = __intel_frontbuffer_get(obj);
+ if (front) {
+ intel_frontbuffer_invalidate(front, origin);
+ intel_frontbuffer_put(front);
+ }
+}
+
void i915_gem_init__objects(struct drm_i915_private *i915)
{
INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index 458cd51331f1..858f8bf49a04 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -13,8 +13,8 @@
#include <drm/i915_drm.h>
+#include "display/intel_frontbuffer.h"
#include "i915_gem_object_types.h"
-
#include "i915_gem_gtt.h"
void i915_gem_init__objects(struct drm_i915_private *i915);
@@ -132,13 +132,13 @@ void i915_gem_object_unlock_fence(struct drm_i915_gem_object *obj,
static inline void
i915_gem_object_set_readonly(struct drm_i915_gem_object *obj)
{
- obj->base.vma_node.readonly = true;
+ obj->flags |= I915_BO_READONLY;
}
static inline bool
i915_gem_object_is_readonly(const struct drm_i915_gem_object *obj)
{
- return obj->base.vma_node.readonly;
+ return obj->flags & I915_BO_READONLY;
}
static inline bool
@@ -271,10 +271,27 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
+enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock/struct_mutex */
+ I915_MM_NORMAL = 0,
+ /*
+ * Only used by struct_mutex, when called "recursively" from
+ * direct-reclaim-esque paths. Safe because there is only ever one
+ * struct_mutex in the entire system.
+ */
+ I915_MM_SHRINKER = 1,
+ /*
+ * Used for obj->mm.lock when allocating pages. Safe because the object
+ * isn't yet on any LRU, and therefore the shrinker can't deadlock on
+ * it. As soon as the object has pages, obj->mm.lock nests within
+ * fs_reclaim.
+ */
+ I915_MM_GET_PAGES = 1,
+};
+
static inline int __must_check
i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
- might_lock(&obj->mm.lock);
+ might_lock_nested(&obj->mm.lock, I915_MM_GET_PAGES);
if (atomic_inc_not_zero(&obj->mm.pages_pin_count))
return 0;
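/*
 * Editor's note -- an illustrative pairing of the two lockdep subclasses
 * above, distilled from fragments elsewhere in this patch (no new API):
 * page-allocation paths annotate obj->mm.lock with I915_MM_GET_PAGES,
 * while the shrinker takes the default subclass, so lockdep can model
 * obj->mm.lock nesting inside fs_reclaim without false positives.
 */
/* get-pages path (see i915_gem_pages.c below): */
err = mutex_lock_interruptible_nested(&obj->mm.lock, I915_MM_GET_PAGES);
/* shrinker path (see i915_gem_shrinker.c below): */
mutex_lock(&obj->mm.lock);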
@@ -317,13 +334,7 @@ i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
__i915_gem_object_unpin_pages(obj);
}
-enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock/struct_mutex */
- I915_MM_NORMAL = 0,
- I915_MM_SHRINKER /* called "recursively" from direct-reclaim-esque */
-};
-
-int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
- enum i915_mm_subclass subclass);
+int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
void i915_gem_object_writeback(struct drm_i915_gem_object *obj);
@@ -376,9 +387,6 @@ static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
i915_gem_object_unpin_pages(obj);
}
-void __i915_gem_object_release_mmap(struct drm_i915_gem_object *obj);
-void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj);
-
void
i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj,
unsigned int flush_domains);
@@ -463,4 +471,25 @@ int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
unsigned int flags,
const struct i915_sched_attr *attr);
+void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
+ enum fb_op_origin origin);
+void __i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
+ enum fb_op_origin origin);
+
+static inline void
+i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
+ enum fb_op_origin origin)
+{
+ if (unlikely(rcu_access_pointer(obj->frontbuffer)))
+ __i915_gem_object_flush_frontbuffer(obj, origin);
+}
+
+static inline void
+i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
+ enum fb_op_origin origin)
+{
+ if (unlikely(rcu_access_pointer(obj->frontbuffer)))
+ __i915_gem_object_invalidate_frontbuffer(obj, origin);
+}
+
#endif
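/*
 * Editor's note -- a sketch of why the wrappers above are split: the inline
 * half costs a single lockless load via rcu_access_pointer(), so objects
 * without a frontbuffer (the common case) skip the out-of-line get/put
 * entirely. A hypothetical caller mirrors the flush-domain path:
 */
static inline void example_after_cpu_write(struct drm_i915_gem_object *obj)
{
	/* cheap test inline; reference taken only if a frontbuffer exists */
	i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
}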
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index 96008374a412..88e268633fdc 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -63,6 +63,23 @@ struct drm_i915_gem_object_ops {
void (*release)(struct drm_i915_gem_object *obj);
};
+enum i915_mmap_type {
+ I915_MMAP_TYPE_GTT = 0,
+ I915_MMAP_TYPE_WC,
+ I915_MMAP_TYPE_WB,
+ I915_MMAP_TYPE_UC,
+};
+
+struct i915_mmap_offset {
+ struct drm_device *dev;
+ struct drm_vma_offset_node vma_node;
+ struct drm_i915_gem_object *obj;
+ struct drm_file *file;
+ enum i915_mmap_type mmap_type;
+
+ struct list_head offset;
+};
+
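/*
 * Editor's note -- a hypothetical lookup helper (not part of the patch)
 * showing how the list appears to be keyed: one i915_mmap_offset per
 * mmap_type, walked under the new obj->mmo.lock introduced below.
 */
static struct i915_mmap_offset *
lookup_mmo(struct drm_i915_gem_object *obj, enum i915_mmap_type type)
{
	struct i915_mmap_offset *mmo, *found = NULL;

	spin_lock(&obj->mmo.lock);
	list_for_each_entry(mmo, &obj->mmo.offsets, offset) {
		if (mmo->mmap_type == type) {
			found = mmo;
			break;
		}
	}
	spin_unlock(&obj->mmo.lock);

	return found;
}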
struct drm_i915_gem_object {
struct drm_gem_object base;
@@ -118,12 +135,18 @@ struct drm_i915_gem_object {
unsigned int userfault_count;
struct list_head userfault_link;
+ struct {
+ spinlock_t lock; /* Protects access to mmo offsets */
+ struct list_head offsets;
+ } mmo;
+
I915_SELFTEST_DECLARE(struct list_head st_link);
unsigned long flags;
#define I915_BO_ALLOC_CONTIGUOUS BIT(0)
#define I915_BO_ALLOC_VOLATILE BIT(1)
#define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS | I915_BO_ALLOC_VOLATILE)
+#define I915_BO_READONLY BIT(2)
/*
* Is the object to be mapped as read-only to the GPU
@@ -150,7 +173,7 @@ struct drm_i915_gem_object {
*/
u16 write_domain;
- struct intel_frontbuffer *frontbuffer;
+ struct intel_frontbuffer __rcu *frontbuffer;
/** Current tiling stride for the object, if it's tiled. */
unsigned int tiling_and_stride;
@@ -162,7 +185,11 @@ struct drm_i915_gem_object {
atomic_t bind_count;
struct {
- struct mutex lock; /* protects the pages and their use */
+ /*
+ * Protects the pages and their use. Do not use directly, but
+ * instead go through the pin/unpin interfaces.
+ */
+ struct mutex lock;
atomic_t pages_pin_count;
atomic_t shrink_pin;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index 29f4c2850745..75197ca696a8 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -8,6 +8,7 @@
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
#include "i915_gem_lmem.h"
+#include "i915_gem_mman.h"
void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages,
@@ -106,7 +107,7 @@ int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
int err;
- err = mutex_lock_interruptible(&obj->mm.lock);
+ err = mutex_lock_interruptible_nested(&obj->mm.lock, I915_MM_GET_PAGES);
if (err)
return err;
@@ -190,8 +191,7 @@ __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
return pages;
}
-int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
- enum i915_mm_subclass subclass)
+int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
{
struct sg_table *pages;
int err;
@@ -202,12 +202,14 @@ int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
GEM_BUG_ON(atomic_read(&obj->bind_count));
/* May be called by shrinker from within get_pages() (on another bo) */
- mutex_lock_nested(&obj->mm.lock, subclass);
+ mutex_lock(&obj->mm.lock);
if (unlikely(atomic_read(&obj->mm.pages_pin_count))) {
err = -EBUSY;
goto unlock;
}
+ i915_gem_object_release_mmap_offset(obj);
+
/*
* ->put_pages might need to allocate memory for the bit17 swizzle
* array, hence protect them from being reaped by removing them from gtt
@@ -308,7 +310,7 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
if (!i915_gem_object_type_has(obj, flags))
return ERR_PTR(-ENXIO);
- err = mutex_lock_interruptible(&obj->mm.lock);
+ err = mutex_lock_interruptible_nested(&obj->mm.lock, I915_MM_GET_PAGES);
if (err)
return ERR_PTR(err);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
index 8043ff63d73f..b1b7c1b3038a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
@@ -164,7 +164,7 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
if (err)
return err;
- mutex_lock(&obj->mm.lock);
+ mutex_lock_nested(&obj->mm.lock, I915_MM_GET_PAGES);
if (obj->mm.madv != I915_MADV_WILLNEED) {
err = -EFAULT;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
index f88ee1317bb4..c8264eb036bf 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
@@ -13,7 +13,7 @@
void i915_gem_suspend(struct drm_i915_private *i915)
{
- GEM_TRACE("\n");
+ GEM_TRACE("%s\n", dev_name(i915->drm.dev));
intel_wakeref_auto(&i915->ggtt.userfault_wakeref, 0);
flush_workqueue(i915->wq);
@@ -99,30 +99,12 @@ void i915_gem_suspend_late(struct drm_i915_private *i915)
void i915_gem_resume(struct drm_i915_private *i915)
{
- GEM_TRACE("\n");
-
- intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
-
- if (intel_gt_init_hw(&i915->gt))
- goto err_wedged;
+ GEM_TRACE("%s\n", dev_name(i915->drm.dev));
/*
* As we didn't flush the kernel context before suspend, we cannot
* guarantee that the context image is complete. So let's just reset
* it and start again.
*/
- if (intel_gt_resume(&i915->gt))
- goto err_wedged;
-
-out_unlock:
- intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
- return;
-
-err_wedged:
- if (!intel_gt_is_wedged(&i915->gt)) {
- dev_err(i915->drm.dev,
- "Failed to re-initialize GPU, declaring it wedged!\n");
- intel_gt_set_wedged(&i915->gt);
- }
- goto out_unlock;
+ intel_gt_resume(&i915->gt);
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.c b/drivers/gpu/drm/i915/gem/i915_gem_region.c
index 2f7bcfb9c964..d50adac12249 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_region.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_region.c
@@ -85,7 +85,7 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
}
prev_end = offset + block_size;
- };
+ }
sg_page_sizes |= sg->length;
sg_mark_end(sg);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
index f2418a1cfe68..f7e4b39c734f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
@@ -57,7 +57,7 @@ static bool unsafe_drop_pages(struct drm_i915_gem_object *obj,
flags = I915_GEM_OBJECT_UNBIND_ACTIVE;
if (i915_gem_object_unbind(obj, flags) == 0)
- __i915_gem_object_put_pages(obj, I915_MM_SHRINKER);
+ __i915_gem_object_put_pages(obj);
return !i915_gem_object_has_pages(obj);
}
@@ -209,8 +209,7 @@ i915_gem_shrink(struct drm_i915_private *i915,
if (unsafe_drop_pages(obj, shrink)) {
/* May arrive from get_pages on another bo */
- mutex_lock_nested(&obj->mm.lock,
- I915_MM_SHRINKER);
+ mutex_lock(&obj->mm.lock);
if (!i915_gem_object_has_pages(obj)) {
try_to_writeback(obj, shrink);
count += obj->base.size >> PAGE_SHIFT;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index a2d49c04e6a4..afb08a1704a2 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -26,48 +26,49 @@
* for is a boon.
*/
-int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
+int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *i915,
struct drm_mm_node *node, u64 size,
unsigned alignment, u64 start, u64 end)
{
int ret;
- if (!drm_mm_initialized(&dev_priv->mm.stolen))
+ if (!drm_mm_initialized(&i915->mm.stolen))
return -ENODEV;
/* WaSkipStolenMemoryFirstPage:bdw+ */
- if (INTEL_GEN(dev_priv) >= 8 && start < 4096)
+ if (INTEL_GEN(i915) >= 8 && start < 4096)
start = 4096;
- mutex_lock(&dev_priv->mm.stolen_lock);
- ret = drm_mm_insert_node_in_range(&dev_priv->mm.stolen, node,
+ mutex_lock(&i915->mm.stolen_lock);
+ ret = drm_mm_insert_node_in_range(&i915->mm.stolen, node,
size, alignment, 0,
start, end, DRM_MM_INSERT_BEST);
- mutex_unlock(&dev_priv->mm.stolen_lock);
+ mutex_unlock(&i915->mm.stolen_lock);
return ret;
}
-int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
+int i915_gem_stolen_insert_node(struct drm_i915_private *i915,
struct drm_mm_node *node, u64 size,
unsigned alignment)
{
- return i915_gem_stolen_insert_node_in_range(dev_priv, node, size,
+ return i915_gem_stolen_insert_node_in_range(i915, node, size,
alignment, 0, U64_MAX);
}
-void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
+void i915_gem_stolen_remove_node(struct drm_i915_private *i915,
struct drm_mm_node *node)
{
- mutex_lock(&dev_priv->mm.stolen_lock);
+ mutex_lock(&i915->mm.stolen_lock);
drm_mm_remove_node(node);
- mutex_unlock(&dev_priv->mm.stolen_lock);
+ mutex_unlock(&i915->mm.stolen_lock);
}
-static int i915_adjust_stolen(struct drm_i915_private *dev_priv,
+static int i915_adjust_stolen(struct drm_i915_private *i915,
struct resource *dsm)
{
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
+ struct i915_ggtt *ggtt = &i915->ggtt;
+ struct intel_uncore *uncore = ggtt->vm.gt->uncore;
struct resource *r;
if (dsm->start == 0 || dsm->end <= dsm->start)
@@ -79,14 +80,14 @@ static int i915_adjust_stolen(struct drm_i915_private *dev_priv,
*/
/* Make sure we don't clobber the GTT if it's within stolen memory */
- if (INTEL_GEN(dev_priv) <= 4 &&
- !IS_G33(dev_priv) && !IS_PINEVIEW(dev_priv) && !IS_G4X(dev_priv)) {
+ if (INTEL_GEN(i915) <= 4 &&
+ !IS_G33(i915) && !IS_PINEVIEW(i915) && !IS_G4X(i915)) {
struct resource stolen[2] = {*dsm, *dsm};
struct resource ggtt_res;
resource_size_t ggtt_start;
- ggtt_start = I915_READ(PGTBL_CTL);
- if (IS_GEN(dev_priv, 4))
+ ggtt_start = intel_uncore_read(uncore, PGTBL_CTL);
+ if (IS_GEN(i915, 4))
ggtt_start = (ggtt_start & PGTBL_ADDRESS_LO_MASK) |
(ggtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
else
@@ -120,7 +121,7 @@ static int i915_adjust_stolen(struct drm_i915_private *dev_priv,
* kernel. So if the region is already marked as busy, something
* is seriously wrong.
*/
- r = devm_request_mem_region(dev_priv->drm.dev, dsm->start,
+ r = devm_request_mem_region(i915->drm.dev, dsm->start,
resource_size(dsm),
"Graphics Stolen Memory");
if (r == NULL) {
@@ -133,14 +134,14 @@ static int i915_adjust_stolen(struct drm_i915_private *dev_priv,
* reservation starting from 1 instead of 0.
* There's also BIOS with off-by-one on the other end.
*/
- r = devm_request_mem_region(dev_priv->drm.dev, dsm->start + 1,
+ r = devm_request_mem_region(i915->drm.dev, dsm->start + 1,
resource_size(dsm) - 2,
"Graphics Stolen Memory");
/*
* GEN3 firmware likes to smash pci bridges into the stolen
* range. Apparently this works.
*/
- if (r == NULL && !IS_GEN(dev_priv, 3)) {
+ if (!r && !IS_GEN(i915, 3)) {
DRM_ERROR("conflict detected with stolen region: %pR\n",
dsm);
@@ -151,25 +152,27 @@ static int i915_adjust_stolen(struct drm_i915_private *dev_priv,
return 0;
}
-static void i915_gem_cleanup_stolen(struct drm_i915_private *dev_priv)
+static void i915_gem_cleanup_stolen(struct drm_i915_private *i915)
{
- if (!drm_mm_initialized(&dev_priv->mm.stolen))
+ if (!drm_mm_initialized(&i915->mm.stolen))
return;
- drm_mm_takedown(&dev_priv->mm.stolen);
+ drm_mm_takedown(&i915->mm.stolen);
}
-static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv,
+static void g4x_get_stolen_reserved(struct drm_i915_private *i915,
+ struct intel_uncore *uncore,
resource_size_t *base,
resource_size_t *size)
{
- u32 reg_val = I915_READ(IS_GM45(dev_priv) ?
- CTG_STOLEN_RESERVED :
- ELK_STOLEN_RESERVED);
- resource_size_t stolen_top = dev_priv->dsm.end + 1;
+ u32 reg_val = intel_uncore_read(uncore,
+ IS_GM45(i915) ?
+ CTG_STOLEN_RESERVED :
+ ELK_STOLEN_RESERVED);
+ resource_size_t stolen_top = i915->dsm.end + 1;
DRM_DEBUG_DRIVER("%s_STOLEN_RESERVED = %08x\n",
- IS_GM45(dev_priv) ? "CTG" : "ELK", reg_val);
+ IS_GM45(i915) ? "CTG" : "ELK", reg_val);
if ((reg_val & G4X_STOLEN_RESERVED_ENABLE) == 0)
return;
@@ -178,7 +181,7 @@ static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv,
* Whether ILK really reuses the ELK register for this is unclear.
* Let's see if we catch anyone with this supposedly enabled on ILK.
*/
- WARN(IS_GEN(dev_priv, 5), "ILK stolen reserved found? 0x%08x\n",
+ WARN(IS_GEN(i915, 5), "ILK stolen reserved found? 0x%08x\n",
reg_val);
if (!(reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK))
@@ -190,11 +193,12 @@ static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv,
*size = stolen_top - *base;
}
-static void gen6_get_stolen_reserved(struct drm_i915_private *dev_priv,
+static void gen6_get_stolen_reserved(struct drm_i915_private *i915,
+ struct intel_uncore *uncore,
resource_size_t *base,
resource_size_t *size)
{
- u32 reg_val = I915_READ(GEN6_STOLEN_RESERVED);
+ u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);
@@ -222,12 +226,13 @@ static void gen6_get_stolen_reserved(struct drm_i915_private *dev_priv,
}
}
-static void vlv_get_stolen_reserved(struct drm_i915_private *dev_priv,
+static void vlv_get_stolen_reserved(struct drm_i915_private *i915,
+ struct intel_uncore *uncore,
resource_size_t *base,
resource_size_t *size)
{
- u32 reg_val = I915_READ(GEN6_STOLEN_RESERVED);
- resource_size_t stolen_top = dev_priv->dsm.end + 1;
+ u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
+ resource_size_t stolen_top = i915->dsm.end + 1;
DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);
@@ -250,11 +255,12 @@ static void vlv_get_stolen_reserved(struct drm_i915_private *dev_priv,
*base = stolen_top - *size;
}
-static void gen7_get_stolen_reserved(struct drm_i915_private *dev_priv,
+static void gen7_get_stolen_reserved(struct drm_i915_private *i915,
+ struct intel_uncore *uncore,
resource_size_t *base,
resource_size_t *size)
{
- u32 reg_val = I915_READ(GEN6_STOLEN_RESERVED);
+ u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);
@@ -276,11 +282,12 @@ static void gen7_get_stolen_reserved(struct drm_i915_private *dev_priv,
}
}
-static void chv_get_stolen_reserved(struct drm_i915_private *dev_priv,
+static void chv_get_stolen_reserved(struct drm_i915_private *i915,
+ struct intel_uncore *uncore,
resource_size_t *base,
resource_size_t *size)
{
- u32 reg_val = I915_READ(GEN6_STOLEN_RESERVED);
+ u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);
@@ -308,12 +315,13 @@ static void chv_get_stolen_reserved(struct drm_i915_private *dev_priv,
}
}
-static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
+static void bdw_get_stolen_reserved(struct drm_i915_private *i915,
+ struct intel_uncore *uncore,
resource_size_t *base,
resource_size_t *size)
{
- u32 reg_val = I915_READ(GEN6_STOLEN_RESERVED);
- resource_size_t stolen_top = dev_priv->dsm.end + 1;
+ u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
+ resource_size_t stolen_top = i915->dsm.end + 1;
DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);
@@ -328,10 +336,11 @@ static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
}
static void icl_get_stolen_reserved(struct drm_i915_private *i915,
+ struct intel_uncore *uncore,
resource_size_t *base,
resource_size_t *size)
{
- u64 reg_val = intel_uncore_read64(&i915->uncore, GEN6_STOLEN_RESERVED);
+ u64 reg_val = intel_uncore_read64(uncore, GEN6_STOLEN_RESERVED);
DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = 0x%016llx\n", reg_val);
@@ -356,22 +365,23 @@ static void icl_get_stolen_reserved(struct drm_i915_private *i915,
}
}
-static int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
+static int i915_gem_init_stolen(struct drm_i915_private *i915)
{
+ struct intel_uncore *uncore = &i915->uncore;
resource_size_t reserved_base, stolen_top;
resource_size_t reserved_total, reserved_size;
- mutex_init(&dev_priv->mm.stolen_lock);
+ mutex_init(&i915->mm.stolen_lock);
- if (intel_vgpu_active(dev_priv)) {
- dev_notice(dev_priv->drm.dev,
+ if (intel_vgpu_active(i915)) {
+ dev_notice(i915->drm.dev,
"%s, disabling use of stolen memory\n",
"iGVT-g active");
return 0;
}
- if (intel_vtd_active() && INTEL_GEN(dev_priv) < 8) {
- dev_notice(dev_priv->drm.dev,
+ if (intel_vtd_active() && INTEL_GEN(i915) < 8) {
+ dev_notice(i915->drm.dev,
"%s, disabling use of stolen memory\n",
"DMAR active");
return 0;
@@ -380,58 +390,59 @@ static int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
if (resource_size(&intel_graphics_stolen_res) == 0)
return 0;
- dev_priv->dsm = intel_graphics_stolen_res;
+ i915->dsm = intel_graphics_stolen_res;
- if (i915_adjust_stolen(dev_priv, &dev_priv->dsm))
+ if (i915_adjust_stolen(i915, &i915->dsm))
return 0;
- GEM_BUG_ON(dev_priv->dsm.start == 0);
- GEM_BUG_ON(dev_priv->dsm.end <= dev_priv->dsm.start);
+ GEM_BUG_ON(i915->dsm.start == 0);
+ GEM_BUG_ON(i915->dsm.end <= i915->dsm.start);
- stolen_top = dev_priv->dsm.end + 1;
+ stolen_top = i915->dsm.end + 1;
reserved_base = stolen_top;
reserved_size = 0;
- switch (INTEL_GEN(dev_priv)) {
+ switch (INTEL_GEN(i915)) {
case 2:
case 3:
break;
case 4:
- if (!IS_G4X(dev_priv))
+ if (!IS_G4X(i915))
break;
/* fall through */
case 5:
- g4x_get_stolen_reserved(dev_priv,
+ g4x_get_stolen_reserved(i915, uncore,
&reserved_base, &reserved_size);
break;
case 6:
- gen6_get_stolen_reserved(dev_priv,
+ gen6_get_stolen_reserved(i915, uncore,
&reserved_base, &reserved_size);
break;
case 7:
- if (IS_VALLEYVIEW(dev_priv))
- vlv_get_stolen_reserved(dev_priv,
+ if (IS_VALLEYVIEW(i915))
+ vlv_get_stolen_reserved(i915, uncore,
&reserved_base, &reserved_size);
else
- gen7_get_stolen_reserved(dev_priv,
+ gen7_get_stolen_reserved(i915, uncore,
&reserved_base, &reserved_size);
break;
case 8:
case 9:
case 10:
- if (IS_LP(dev_priv))
- chv_get_stolen_reserved(dev_priv,
+ if (IS_LP(i915))
+ chv_get_stolen_reserved(i915, uncore,
&reserved_base, &reserved_size);
else
- bdw_get_stolen_reserved(dev_priv,
+ bdw_get_stolen_reserved(i915, uncore,
&reserved_base, &reserved_size);
break;
default:
- MISSING_CASE(INTEL_GEN(dev_priv));
+ MISSING_CASE(INTEL_GEN(i915));
/* fall-through */
case 11:
case 12:
- icl_get_stolen_reserved(dev_priv, &reserved_base,
+ icl_get_stolen_reserved(i915, uncore,
+ &reserved_base,
&reserved_size);
break;
}
@@ -448,12 +459,12 @@ static int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
reserved_size = 0;
}
- dev_priv->dsm_reserved =
- (struct resource) DEFINE_RES_MEM(reserved_base, reserved_size);
+ i915->dsm_reserved =
+ (struct resource)DEFINE_RES_MEM(reserved_base, reserved_size);
- if (!resource_contains(&dev_priv->dsm, &dev_priv->dsm_reserved)) {
+ if (!resource_contains(&i915->dsm, &i915->dsm_reserved)) {
DRM_ERROR("Stolen reserved area %pR outside stolen memory %pR\n",
- &dev_priv->dsm_reserved, &dev_priv->dsm);
+ &i915->dsm_reserved, &i915->dsm);
return 0;
}
@@ -462,14 +473,14 @@ static int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
reserved_total = stolen_top - reserved_base;
DRM_DEBUG_DRIVER("Memory reserved for graphics device: %lluK, usable: %lluK\n",
- (u64)resource_size(&dev_priv->dsm) >> 10,
- ((u64)resource_size(&dev_priv->dsm) - reserved_total) >> 10);
+ (u64)resource_size(&i915->dsm) >> 10,
+ ((u64)resource_size(&i915->dsm) - reserved_total) >> 10);
- dev_priv->stolen_usable_size =
- resource_size(&dev_priv->dsm) - reserved_total;
+ i915->stolen_usable_size =
+ resource_size(&i915->dsm) - reserved_total;
/* Basic memrange allocator for stolen space. */
- drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->stolen_usable_size);
+ drm_mm_init(&i915->mm.stolen, 0, i915->stolen_usable_size);
return 0;
}
@@ -478,11 +489,11 @@ static struct sg_table *
i915_pages_create_for_stolen(struct drm_device *dev,
resource_size_t offset, resource_size_t size)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *i915 = to_i915(dev);
struct sg_table *st;
struct scatterlist *sg;
- GEM_BUG_ON(range_overflows(offset, size, resource_size(&dev_priv->dsm)));
+ GEM_BUG_ON(range_overflows(offset, size, resource_size(&i915->dsm)));
/* We hide that we have no struct page backing our stolen object
* by wrapping the contiguous physical allocation with a fake
@@ -502,7 +513,7 @@ i915_pages_create_for_stolen(struct drm_device *dev,
sg->offset = 0;
sg->length = size;
- sg_dma_address(sg) = (dma_addr_t)dev_priv->dsm.start + offset;
+ sg_dma_address(sg) = (dma_addr_t)i915->dsm.start + offset;
sg_dma_len(sg) = size;
return st;
@@ -533,16 +544,15 @@ static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj,
static void
i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
{
- struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct drm_mm_node *stolen = fetch_and_zero(&obj->stolen);
GEM_BUG_ON(!stolen);
- i915_gem_stolen_remove_node(dev_priv, stolen);
- kfree(stolen);
+ i915_gem_object_release_memory_region(obj);
- if (obj->mm.region)
- i915_gem_object_release_memory_region(obj);
+ i915_gem_stolen_remove_node(i915, stolen);
+ kfree(stolen);
}
static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
@@ -552,9 +562,8 @@ static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
};
static struct drm_i915_gem_object *
-__i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
- struct drm_mm_node *stolen,
- struct intel_memory_region *mem)
+__i915_gem_object_create_stolen(struct intel_memory_region *mem,
+ struct drm_mm_node *stolen)
{
static struct lock_class_key lock_class;
struct drm_i915_gem_object *obj;
@@ -565,20 +574,19 @@ __i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
if (!obj)
goto err;
- drm_gem_private_object_init(&dev_priv->drm, &obj->base, stolen->size);
+ drm_gem_private_object_init(&mem->i915->drm, &obj->base, stolen->size);
i915_gem_object_init(obj, &i915_gem_object_stolen_ops, &lock_class);
obj->stolen = stolen;
obj->read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
- cache_level = HAS_LLC(dev_priv) ? I915_CACHE_LLC : I915_CACHE_NONE;
+ cache_level = HAS_LLC(mem->i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
i915_gem_object_set_cache_coherency(obj, cache_level);
err = i915_gem_object_pin_pages(obj);
if (err)
goto cleanup;
- if (mem)
- i915_gem_object_init_memory_region(obj, mem, 0);
+ i915_gem_object_init_memory_region(obj, mem, 0);
return obj;
@@ -593,12 +601,12 @@ _i915_gem_object_create_stolen(struct intel_memory_region *mem,
resource_size_t size,
unsigned int flags)
{
- struct drm_i915_private *dev_priv = mem->i915;
+ struct drm_i915_private *i915 = mem->i915;
struct drm_i915_gem_object *obj;
struct drm_mm_node *stolen;
int ret;
- if (!drm_mm_initialized(&dev_priv->mm.stolen))
+ if (!drm_mm_initialized(&i915->mm.stolen))
return ERR_PTR(-ENODEV);
if (size == 0)
@@ -608,30 +616,30 @@ _i915_gem_object_create_stolen(struct intel_memory_region *mem,
if (!stolen)
return ERR_PTR(-ENOMEM);
- ret = i915_gem_stolen_insert_node(dev_priv, stolen, size, 4096);
+ ret = i915_gem_stolen_insert_node(i915, stolen, size, 4096);
if (ret) {
obj = ERR_PTR(ret);
goto err_free;
}
- obj = __i915_gem_object_create_stolen(dev_priv, stolen, mem);
+ obj = __i915_gem_object_create_stolen(mem, stolen);
if (IS_ERR(obj))
goto err_remove;
return obj;
err_remove:
- i915_gem_stolen_remove_node(dev_priv, stolen);
+ i915_gem_stolen_remove_node(i915, stolen);
err_free:
kfree(stolen);
return obj;
}
struct drm_i915_gem_object *
-i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
+i915_gem_object_create_stolen(struct drm_i915_private *i915,
resource_size_t size)
{
- return i915_gem_object_create_region(dev_priv->mm.regions[INTEL_REGION_STOLEN],
+ return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_STOLEN],
size, I915_BO_ALLOC_CONTIGUOUS);
}
@@ -665,18 +673,19 @@ struct intel_memory_region *i915_gem_stolen_setup(struct drm_i915_private *i915)
}
struct drm_i915_gem_object *
-i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv,
+i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *i915,
resource_size_t stolen_offset,
resource_size_t gtt_offset,
resource_size_t size)
{
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
+ struct intel_memory_region *mem = i915->mm.regions[INTEL_REGION_STOLEN];
+ struct i915_ggtt *ggtt = &i915->ggtt;
struct drm_i915_gem_object *obj;
struct drm_mm_node *stolen;
struct i915_vma *vma;
int ret;
- if (!drm_mm_initialized(&dev_priv->mm.stolen))
+ if (!drm_mm_initialized(&i915->mm.stolen))
return ERR_PTR(-ENODEV);
DRM_DEBUG_DRIVER("creating preallocated stolen object: stolen_offset=%pa, gtt_offset=%pa, size=%pa\n",
@@ -694,19 +703,19 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
stolen->start = stolen_offset;
stolen->size = size;
- mutex_lock(&dev_priv->mm.stolen_lock);
- ret = drm_mm_reserve_node(&dev_priv->mm.stolen, stolen);
- mutex_unlock(&dev_priv->mm.stolen_lock);
+ mutex_lock(&i915->mm.stolen_lock);
+ ret = drm_mm_reserve_node(&i915->mm.stolen, stolen);
+ mutex_unlock(&i915->mm.stolen_lock);
if (ret) {
DRM_DEBUG_DRIVER("failed to allocate stolen space\n");
kfree(stolen);
return ERR_PTR(ret);
}
- obj = __i915_gem_object_create_stolen(dev_priv, stolen, NULL);
+ obj = __i915_gem_object_create_stolen(mem, stolen);
if (IS_ERR(obj)) {
DRM_DEBUG_DRIVER("failed to allocate stolen object\n");
- i915_gem_stolen_remove_node(dev_priv, stolen);
+ i915_gem_stolen_remove_node(i915, stolen);
kfree(stolen);
return obj;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
index 1fa592d82af5..6c7825a2dc2a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
@@ -11,6 +11,7 @@
#include "i915_drv.h"
#include "i915_gem.h"
#include "i915_gem_ioctls.h"
+#include "i915_gem_mman.h"
#include "i915_gem_object.h"
/**
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index 4c72d74d6576..e5558af111e2 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -129,9 +129,10 @@ userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
spin_unlock(&mn->lock);
ret = i915_gem_object_unbind(obj,
- I915_GEM_OBJECT_UNBIND_ACTIVE);
+ I915_GEM_OBJECT_UNBIND_ACTIVE |
+ I915_GEM_OBJECT_UNBIND_BARRIER);
if (ret == 0)
- ret = __i915_gem_object_put_pages(obj, I915_MM_SHRINKER);
+ ret = __i915_gem_object_put_pages(obj);
i915_gem_object_put(obj);
if (ret)
return ret;
@@ -459,31 +460,36 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
if (pvec != NULL) {
struct mm_struct *mm = obj->userptr.mm->mm;
unsigned int flags = 0;
+ int locked = 0;
if (!i915_gem_object_is_readonly(obj))
flags |= FOLL_WRITE;
ret = -EFAULT;
if (mmget_not_zero(mm)) {
- down_read(&mm->mmap_sem);
while (pinned < npages) {
+ if (!locked) {
+ down_read(&mm->mmap_sem);
+ locked = 1;
+ }
ret = get_user_pages_remote
(work->task, mm,
obj->userptr.ptr + pinned * PAGE_SIZE,
npages - pinned,
flags,
- pvec + pinned, NULL, NULL);
+ pvec + pinned, NULL, &locked);
if (ret < 0)
break;
pinned += ret;
}
- up_read(&mm->mmap_sem);
+ if (locked)
+ up_read(&mm->mmap_sem);
mmput(mm);
}
}
- mutex_lock(&obj->mm.lock);
+ mutex_lock_nested(&obj->mm.lock, I915_MM_GET_PAGES);
if (obj->userptr.work == &work->work) {
struct sg_table *pages = ERR_PTR(ret);
@@ -773,15 +779,11 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
return -EFAULT;
if (args->flags & I915_USERPTR_READ_ONLY) {
- struct i915_address_space *vm;
-
/*
* On almost all of the older hw, we cannot tell the GPU that
* a page is readonly.
*/
- vm = rcu_dereference_protected(dev_priv->kernel_context->vm,
- true); /* static vm */
- if (!vm || !vm->has_read_only)
+ if (!dev_priv->gt.vm->has_read_only)
return -ENODEV;
}
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c b/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c
index 892d12db6c49..fa16f2c3f3ac 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c
@@ -12,10 +12,14 @@ static void huge_free_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{
unsigned long nreal = obj->scratch / PAGE_SIZE;
- struct scatterlist *sg;
+ struct sgt_iter sgt_iter;
+ struct page *page;
- for (sg = pages->sgl; sg && nreal--; sg = __sg_next(sg))
- __free_page(sg_page(sg));
+ for_each_sgt_page(page, sgt_iter, pages) {
+ __free_page(page);
+ if (!--nreal)
+ break;
+ }
sg_free_table(pages);
kfree(pages);
@@ -70,7 +74,6 @@ static int huge_get_pages(struct drm_i915_gem_object *obj)
err:
huge_free_pages(obj, pages);
-
return -ENOMEM;
#undef GFP
}
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
index 688c49a24f32..2479395c1873 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
@@ -517,7 +517,7 @@ static int igt_mock_memory_region_huge_pages(void *arg)
i915_vma_unpin(vma);
i915_vma_close(vma);
- __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
+ __i915_gem_object_put_pages(obj);
i915_gem_object_put(obj);
}
}
@@ -650,7 +650,7 @@ static int igt_mock_ppgtt_misaligned_dma(void *arg)
i915_vma_close(vma);
i915_gem_object_unpin_pages(obj);
- __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
+ __i915_gem_object_put_pages(obj);
i915_gem_object_put(obj);
}
@@ -678,7 +678,7 @@ static void close_object_list(struct list_head *objects,
list_del(&obj->st_link);
i915_gem_object_unpin_pages(obj);
- __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
+ __i915_gem_object_put_pages(obj);
i915_gem_object_put(obj);
}
}
@@ -948,7 +948,7 @@ static int igt_mock_ppgtt_64K(void *arg)
i915_vma_close(vma);
i915_gem_object_unpin_pages(obj);
- __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
+ __i915_gem_object_put_pages(obj);
i915_gem_object_put(obj);
}
}
@@ -1110,8 +1110,7 @@ static int __igt_write_huge(struct intel_context *ce,
out_vma_unpin:
i915_vma_unpin(vma);
out_vma_close:
- i915_vma_destroy(vma);
-
+ __i915_vma_put(vma);
return err;
}
@@ -1301,7 +1300,7 @@ static int igt_ppgtt_exhaust_huge(void *arg)
}
i915_gem_object_unpin_pages(obj);
- __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
+ __i915_gem_object_put_pages(obj);
i915_gem_object_put(obj);
}
}
@@ -1420,7 +1419,7 @@ try_again:
err = i915_gem_object_pin_pages(obj);
if (err) {
- if (err == -ENXIO) {
+ if (err == -ENXIO || err == -E2BIG) {
i915_gem_object_put(obj);
size >>= 1;
goto try_again;
@@ -1442,7 +1441,7 @@ try_again:
}
out_unpin:
i915_gem_object_unpin_pages(obj);
- __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
+ __i915_gem_object_put_pages(obj);
out_put:
i915_gem_object_put(obj);
@@ -1530,7 +1529,7 @@ static int igt_ppgtt_sanity_check(void *arg)
err = igt_write_huge(ctx, obj);
i915_gem_object_unpin_pages(obj);
- __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
+ __i915_gem_object_put_pages(obj);
i915_gem_object_put(obj);
if (err) {
@@ -1912,9 +1911,9 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_ppgtt_smoke_huge),
SUBTEST(igt_ppgtt_sanity_check),
};
- struct drm_file *file;
struct i915_gem_context *ctx;
struct i915_address_space *vm;
+ struct file *file;
int err;
if (!HAS_PPGTT(i915)) {
@@ -1944,6 +1943,6 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *i915)
err = i915_subtests(tests, ctx);
out_file:
- mock_file_free(i915, file);
+ fput(file);
return err;
}
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
index da8edee4fe0a..b972be165e85 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
@@ -24,6 +24,7 @@ static int __igt_client_fill(struct intel_engine_cs *engine)
prandom_seed_state(&prng, i915_selftest.random_seed);
+ intel_engine_pm_get(engine);
do {
const u32 max_block_size = S16_MAX * PAGE_SIZE;
u32 sz = min_t(u64, ce->vm->total >> 4, prandom_u32_state(&prng));
@@ -99,6 +100,7 @@ err_put:
err_flush:
if (err == -ENOMEM)
err = 0;
+ intel_engine_pm_put(engine);
return err;
}
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
index 2b29f6b4e1dd..49edc51111d5 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
@@ -6,6 +6,7 @@
#include <linux/prime_numbers.h>
+#include "gt/intel_engine_pm.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_ring.h"
@@ -200,7 +201,7 @@ static int gpu_set(struct context *ctx, unsigned long offset, u32 v)
if (IS_ERR(vma))
return PTR_ERR(vma);
- rq = i915_request_create(ctx->engine->kernel_context);
+ rq = intel_engine_create_kernel_request(ctx->engine);
if (IS_ERR(rq)) {
i915_vma_unpin(vma);
return PTR_ERR(rq);
@@ -326,6 +327,7 @@ static int igt_gem_coherency(void *arg)
ctx.engine = random_engine(i915, &prng);
GEM_BUG_ON(!ctx.engine);
pr_info("%s: using %s\n", __func__, ctx.engine->name);
+ intel_engine_pm_get(ctx.engine);
for (over = igt_coherency_mode; over->name; over++) {
if (!over->set)
@@ -404,6 +406,7 @@ static int igt_gem_coherency(void *arg)
}
}
free:
+ intel_engine_pm_put(ctx.engine);
kfree(offsets);
return err;
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
index 62fabc023a83..7fc46861a54d 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
@@ -7,6 +7,7 @@
#include <linux/prime_numbers.h>
#include "gem/i915_gem_pm.h"
+#include "gt/intel_engine_pm.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_requests.h"
#include "gt/intel_reset.h"
@@ -26,6 +27,12 @@
#define DW_PER_PAGE (PAGE_SIZE / sizeof(u32))
+static inline struct i915_address_space *ctx_vm(struct i915_gem_context *ctx)
+{
+ /* single threaded, private ctx */
+ return rcu_dereference_protected(ctx->vm, true);
+}
+
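/*
 * Editor's note: rcu_dereference_protected(ptr, true) asserts that no
 * update can race -- valid here only because these selftests own the
 * context single-threadedly. A hypothetical variant for a path holding
 * the context mutex would name that guarantee instead:
 */
static inline struct i915_address_space *
ctx_vm_locked(struct i915_gem_context *ctx)
{
	return rcu_dereference_protected(ctx->vm,
					 lockdep_is_held(&ctx->mutex));
}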
static int live_nop_switch(void *arg)
{
const unsigned int nctx = 1024;
@@ -33,7 +40,7 @@ static int live_nop_switch(void *arg)
struct intel_engine_cs *engine;
struct i915_gem_context **ctx;
struct igt_live_test t;
- struct drm_file *file;
+ struct file *file;
unsigned long n;
int err = -ENODEV;
@@ -67,25 +74,34 @@ static int live_nop_switch(void *arg)
}
for_each_uabi_engine(engine, i915) {
- struct i915_request *rq;
+ struct i915_request *rq = NULL;
unsigned long end_time, prime;
ktime_t times[2] = {};
times[0] = ktime_get_raw();
for (n = 0; n < nctx; n++) {
- rq = igt_request_alloc(ctx[n], engine);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
+ struct i915_request *this;
+
+ this = igt_request_alloc(ctx[n], engine);
+ if (IS_ERR(this)) {
+ err = PTR_ERR(this);
goto out_file;
}
- i915_request_add(rq);
+ if (rq) {
+ i915_request_await_dma_fence(this, &rq->fence);
+ i915_request_put(rq);
+ }
+ rq = i915_request_get(this);
+ i915_request_add(this);
}
if (i915_request_wait(rq, 0, HZ / 5) < 0) {
pr_err("Failed to populated %d contexts\n", nctx);
intel_gt_set_wedged(&i915->gt);
+ i915_request_put(rq);
err = -EIO;
goto out_file;
}
+ i915_request_put(rq);
times[1] = ktime_get_raw();
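/*
 * Editor's note -- the request-chaining pattern above, distilled
 * (identifiers are the patch's own): each new request awaits the previous
 * one's fence to force submission order, and only the tail of the chain
 * needs an explicit wait.
 */
struct i915_request *rq = NULL, *this;

/* per iteration: */
this = igt_request_alloc(ctx, engine);
if (rq) {
	i915_request_await_dma_fence(this, &rq->fence);
	i915_request_put(rq);
}
rq = i915_request_get(this);
i915_request_add(this);
/* after the loop: i915_request_wait(rq, 0, HZ / 5); i915_request_put(rq); */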
@@ -100,13 +116,21 @@ static int live_nop_switch(void *arg)
for_each_prime_number_from(prime, 2, 8192) {
times[1] = ktime_get_raw();
+ rq = NULL;
for (n = 0; n < prime; n++) {
- rq = igt_request_alloc(ctx[n % nctx], engine);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
+ struct i915_request *this;
+
+ this = igt_request_alloc(ctx[n % nctx], engine);
+ if (IS_ERR(this)) {
+ err = PTR_ERR(this);
goto out_file;
}
+ if (rq) { /* Force submission order */
+ i915_request_await_dma_fence(this, &rq->fence);
+ i915_request_put(rq);
+ }
+
/*
* This space is left intentionally blank.
*
@@ -121,14 +145,18 @@ static int live_nop_switch(void *arg)
* for latency.
*/
- i915_request_add(rq);
+ rq = i915_request_get(this);
+ i915_request_add(this);
}
+ GEM_BUG_ON(!rq);
if (i915_request_wait(rq, 0, HZ / 5) < 0) {
pr_err("Switching between %ld contexts timed out\n",
prime);
intel_gt_set_wedged(&i915->gt);
+ i915_request_put(rq);
break;
}
+ i915_request_put(rq);
times[1] = ktime_sub(ktime_get_raw(), times[1]);
if (prime == 2)
@@ -149,7 +177,7 @@ static int live_nop_switch(void *arg)
}
out_file:
- mock_file_free(i915, file);
+ fput(file);
return err;
}
@@ -255,7 +283,7 @@ static int live_parallel_switch(void *arg)
int (* const *fn)(void *arg);
struct i915_gem_context *ctx;
struct intel_context *ce;
- struct drm_file *file;
+ struct file *file;
int n, m, count;
int err = 0;
@@ -309,7 +337,7 @@ static int live_parallel_switch(void *arg)
if (!data[m].ce[0])
continue;
- ce = intel_context_create(ctx, data[m].ce[0]->engine);
+ ce = intel_context_create(data[m].ce[0]->engine);
if (IS_ERR(ce))
goto out;
@@ -377,7 +405,7 @@ out:
}
kfree(data);
out_file:
- mock_file_free(i915, file);
+ fput(file);
return err;
}
@@ -502,17 +530,17 @@ out_unmap:
return err;
}
-static int file_add_object(struct drm_file *file,
- struct drm_i915_gem_object *obj)
+static int file_add_object(struct file *file, struct drm_i915_gem_object *obj)
{
int err;
GEM_BUG_ON(obj->base.handle_count);
/* tie the object to the drm_file for easy reaping */
- err = idr_alloc(&file->object_idr, &obj->base, 1, 0, GFP_KERNEL);
+ err = idr_alloc(&to_drm_file(file)->object_idr,
+ &obj->base, 1, 0, GFP_KERNEL);
if (err < 0)
- return err;
+ return err;
i915_gem_object_get(obj);
obj->base.handle_count++;
@@ -521,7 +549,7 @@ static int file_add_object(struct drm_file *file,
static struct drm_i915_gem_object *
create_test_object(struct i915_address_space *vm,
- struct drm_file *file,
+ struct file *file,
struct list_head *objects)
{
struct drm_i915_gem_object *obj;
@@ -621,9 +649,9 @@ static int igt_ctx_exec(void *arg)
unsigned long ncontexts, ndwords, dw;
struct i915_request *tq[5] = {};
struct igt_live_test t;
- struct drm_file *file;
IGT_TIMEOUT(end_time);
LIST_HEAD(objects);
+ struct file *file;
if (!intel_engine_can_store_dword(engine))
continue;
@@ -716,7 +744,7 @@ out_file:
if (igt_live_test_end(&t))
err = -EIO;
- mock_file_free(i915, file);
+ fput(file);
if (err)
return err;
@@ -733,7 +761,7 @@ static int igt_shared_ctx_exec(void *arg)
struct i915_gem_context *parent;
struct intel_engine_cs *engine;
struct igt_live_test t;
- struct drm_file *file;
+ struct file *file;
int err = 0;
/*
@@ -786,14 +814,15 @@ static int igt_shared_ctx_exec(void *arg)
}
mutex_lock(&ctx->mutex);
- __assign_ppgtt(ctx, parent->vm);
+ __assign_ppgtt(ctx, ctx_vm(parent));
mutex_unlock(&ctx->mutex);
ce = i915_gem_context_get_engine(ctx, engine->legacy_idx);
GEM_BUG_ON(IS_ERR(ce));
if (!obj) {
- obj = create_test_object(parent->vm, file, &objects);
+ obj = create_test_object(ctx_vm(parent),
+ file, &objects);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
intel_context_put(ce);
@@ -854,7 +883,7 @@ out_test:
if (igt_live_test_end(&t))
err = -EIO;
out_file:
- mock_file_free(i915, file);
+ fput(file);
return err;
}
@@ -1140,8 +1169,7 @@ out:
igt_spinner_end(spin);
if ((flags & TEST_IDLE) && ret == 0) {
- ret = intel_gt_wait_for_idle(ce->engine->gt,
- MAX_SCHEDULE_TIMEOUT);
+ ret = igt_flush_test(ce->engine->i915);
if (ret)
return ret;
@@ -1163,9 +1191,11 @@ __sseu_test(const char *name,
struct igt_spinner *spin = NULL;
int ret;
+ intel_engine_pm_get(ce->engine);
+
ret = __sseu_prepare(name, flags, ce, &spin);
if (ret)
- return ret;
+ goto out_pm;
ret = intel_context_reconfigure_sseu(ce, sseu);
if (ret)
@@ -1180,6 +1210,8 @@ out_spin:
igt_spinner_fini(spin);
kfree(spin);
}
+out_pm:
+ intel_engine_pm_put(ce->engine);
return ret;
}
@@ -1232,8 +1264,7 @@ __igt_ctx_sseu(struct drm_i915_private *i915,
hweight32(engine->sseu.slice_mask),
hweight32(pg_sseu.slice_mask));
- ce = intel_context_create(engine->kernel_context->gem_context,
- engine);
+ ce = intel_context_create(engine);
if (IS_ERR(ce)) {
ret = PTR_ERR(ce);
goto out_put;
@@ -1311,16 +1342,18 @@ static int igt_ctx_sseu(void *arg)
static int igt_ctx_readonly(void *arg)
{
struct drm_i915_private *i915 = arg;
+ unsigned long idx, ndwords, dw, num_engines;
struct drm_i915_gem_object *obj = NULL;
struct i915_request *tq[5] = {};
+ struct i915_gem_engines_iter it;
struct i915_address_space *vm;
struct i915_gem_context *ctx;
- unsigned long idx, ndwords, dw;
+ struct intel_context *ce;
struct igt_live_test t;
- struct drm_file *file;
I915_RND_STATE(prng);
IGT_TIMEOUT(end_time);
LIST_HEAD(objects);
+ struct file *file;
int err = -ENODEV;
/*
@@ -1343,21 +1376,21 @@ static int igt_ctx_readonly(void *arg)
goto out_file;
}
- rcu_read_lock();
- vm = rcu_dereference(ctx->vm) ?: &i915->ggtt.alias->vm;
+ vm = ctx_vm(ctx) ?: &i915->ggtt.alias->vm;
if (!vm || !vm->has_read_only) {
- rcu_read_unlock();
err = 0;
goto out_file;
}
- rcu_read_unlock();
+
+ num_engines = 0;
+ for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it)
+ if (intel_engine_can_store_dword(ce->engine))
+ num_engines++;
+ i915_gem_context_unlock_engines(ctx);
ndwords = 0;
dw = 0;
while (!time_after(jiffies, end_time)) {
- struct i915_gem_engines_iter it;
- struct intel_context *ce;
-
for_each_gem_engine(ce,
i915_gem_context_lock_engines(ctx), it) {
if (!intel_engine_can_store_dword(ce->engine))
@@ -1380,7 +1413,7 @@ static int igt_ctx_readonly(void *arg)
pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) [full-ppgtt? %s], err=%d\n",
ndwords, dw, max_dwords(obj),
ce->engine->name,
- yesno(!!rcu_access_pointer(ctx->vm)),
+ yesno(!!ctx_vm(ctx)),
err);
i915_gem_context_unlock_engines(ctx);
goto out_file;
@@ -1400,8 +1433,8 @@ static int igt_ctx_readonly(void *arg)
}
i915_gem_context_unlock_engines(ctx);
}
- pr_info("Submitted %lu dwords (across %u engines)\n",
- ndwords, RUNTIME_INFO(i915)->num_engines);
+ pr_info("Submitted %lu dwords (across %lu engines)\n",
+ ndwords, num_engines);
dw = 0;
idx = 0;
@@ -1426,7 +1459,7 @@ out_file:
if (igt_live_test_end(&t))
err = -EIO;
- mock_file_free(i915, file);
+ fput(file);
return err;
}
@@ -1466,7 +1499,7 @@ static int write_to_scratch(struct i915_gem_context *ctx,
cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
if (IS_ERR(cmd)) {
err = PTR_ERR(cmd);
- goto err;
+ goto out;
}
*cmd++ = MI_STORE_DWORD_IMM_GEN4;
@@ -1488,12 +1521,12 @@ static int write_to_scratch(struct i915_gem_context *ctx,
vma = i915_vma_instance(obj, vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
- goto err_vm;
+ goto out_vm;
}
err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED);
if (err)
- goto err_vm;
+ goto out_vm;
err = check_scratch(vm, offset);
if (err)
@@ -1517,22 +1550,20 @@ static int write_to_scratch(struct i915_gem_context *ctx,
if (err)
goto skip_request;
- i915_vma_unpin_and_release(&vma, 0);
+ i915_vma_unpin(vma);
i915_request_add(rq);
- i915_vm_put(vm);
- return 0;
-
+ goto out_vm;
skip_request:
i915_request_skip(rq, err);
err_request:
i915_request_add(rq);
err_unpin:
i915_vma_unpin(vma);
-err_vm:
+out_vm:
i915_vm_put(vm);
-err:
+out:
i915_gem_object_put(obj);
return err;
}
@@ -1560,7 +1591,7 @@ static int read_from_scratch(struct i915_gem_context *ctx,
cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
if (IS_ERR(cmd)) {
err = PTR_ERR(cmd);
- goto err;
+ goto out;
}
memset(cmd, POISON_INUSE, PAGE_SIZE);
@@ -1592,12 +1623,12 @@ static int read_from_scratch(struct i915_gem_context *ctx,
vma = i915_vma_instance(obj, vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
- goto err_vm;
+ goto out_vm;
}
err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED);
if (err)
- goto err_vm;
+ goto out_vm;
err = check_scratch(vm, offset);
if (err)
@@ -1630,29 +1661,27 @@ static int read_from_scratch(struct i915_gem_context *ctx,
err = i915_gem_object_set_to_cpu_domain(obj, false);
i915_gem_object_unlock(obj);
if (err)
- goto err_vm;
+ goto out_vm;
cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
if (IS_ERR(cmd)) {
err = PTR_ERR(cmd);
- goto err_vm;
+ goto out_vm;
}
*value = cmd[result / sizeof(*cmd)];
i915_gem_object_unpin_map(obj);
- i915_gem_object_put(obj);
-
- return 0;
+ goto out_vm;
skip_request:
i915_request_skip(rq, err);
err_request:
i915_request_add(rq);
err_unpin:
i915_vma_unpin(vma);
-err_vm:
+out_vm:
i915_vm_put(vm);
-err:
+out:
i915_gem_object_put(obj);
return err;
}
@@ -1661,11 +1690,11 @@ static int igt_vm_isolation(void *arg)
{
struct drm_i915_private *i915 = arg;
struct i915_gem_context *ctx_a, *ctx_b;
+ unsigned long num_engines, count;
struct intel_engine_cs *engine;
struct igt_live_test t;
- struct drm_file *file;
I915_RND_STATE(prng);
- unsigned long count;
+ struct file *file;
u64 vm_total;
int err;
@@ -1698,14 +1727,15 @@ static int igt_vm_isolation(void *arg)
}
/* We can only test vm isolation, if the vm are distinct */
- if (ctx_a->vm == ctx_b->vm)
+ if (ctx_vm(ctx_a) == ctx_vm(ctx_b))
goto out_file;
- vm_total = ctx_a->vm->total;
- GEM_BUG_ON(ctx_b->vm->total != vm_total);
+ vm_total = ctx_vm(ctx_a)->total;
+ GEM_BUG_ON(ctx_vm(ctx_b)->total != vm_total);
vm_total -= I915_GTT_PAGE_SIZE;
count = 0;
+ num_engines = 0;
for_each_uabi_engine(engine, i915) {
IGT_TIMEOUT(end_time);
unsigned long this = 0;
@@ -1743,14 +1773,15 @@ static int igt_vm_isolation(void *arg)
this++;
}
count += this;
+ num_engines++;
}
- pr_info("Checked %lu scratch offsets across %d engines\n",
- count, RUNTIME_INFO(i915)->num_engines);
+ pr_info("Checked %lu scratch offsets across %lu engines\n",
+ count, num_engines);
out_file:
if (igt_live_test_end(&t))
err = -EIO;
- mock_file_free(i915, file);
+ fput(file);
return err;
}
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index 29b2077b73d2..cbf796da64e3 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -6,12 +6,14 @@
#include <linux/prime_numbers.h>
+#include "gt/intel_engine_pm.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "huge_gem_object.h"
#include "i915_selftest.h"
#include "selftests/i915_random.h"
#include "selftests/igt_flush_test.h"
+#include "selftests/igt_mmap.h"
struct tile {
unsigned int width;
@@ -161,7 +163,7 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj,
kunmap(p);
out:
- i915_vma_destroy(vma);
+ __i915_vma_put(vma);
return err;
}
@@ -255,7 +257,7 @@ static int check_partial_mappings(struct drm_i915_gem_object *obj,
if (err)
return err;
- i915_vma_destroy(vma);
+ __i915_vma_put(vma);
if (igt_timeout(end_time,
"%s: timed out after tiling=%d stride=%d\n",
@@ -535,7 +537,7 @@ static int make_obj_busy(struct drm_i915_gem_object *obj)
if (err)
return err;
- rq = i915_request_create(engine->kernel_context);
+ rq = intel_engine_create_kernel_request(engine);
if (IS_ERR(rq)) {
i915_vma_unpin(vma);
return PTR_ERR(rq);
@@ -563,16 +565,16 @@ static bool assert_mmap_offset(struct drm_i915_private *i915,
int expected)
{
struct drm_i915_gem_object *obj;
- int err;
+ struct i915_mmap_offset *mmo;
obj = i915_gem_object_create_internal(i915, size);
if (IS_ERR(obj))
return PTR_ERR(obj);
- err = create_mmap_offset(obj);
+ mmo = mmap_offset_attach(obj, I915_MMAP_TYPE_GTT, NULL);
i915_gem_object_put(obj);
- return err == expected;
+ return PTR_ERR_OR_ZERO(mmo) == expected;
}
static void disable_retire_worker(struct drm_i915_private *i915)
@@ -606,28 +608,50 @@ static int igt_mmap_offset_exhaustion(void *arg)
struct drm_i915_private *i915 = arg;
struct drm_mm *mm = &i915->drm.vma_offset_manager->vm_addr_space_mm;
struct drm_i915_gem_object *obj;
- struct drm_mm_node resv, *hole;
- u64 hole_start, hole_end;
- int loop, err;
+ struct drm_mm_node *hole, *next;
+ struct i915_mmap_offset *mmo;
+ int loop, err = 0;
/* Disable background reaper */
disable_retire_worker(i915);
GEM_BUG_ON(!i915->gt.awake);
+ intel_gt_retire_requests(&i915->gt);
+ i915_gem_drain_freed_objects(i915);
/* Trim the device mmap space to only a page */
- memset(&resv, 0, sizeof(resv));
- drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
- resv.start = hole_start;
- resv.size = hole_end - hole_start - 1; /* PAGE_SIZE units */
- mmap_offset_lock(i915);
- err = drm_mm_reserve_node(mm, &resv);
- mmap_offset_unlock(i915);
+ mmap_offset_lock(i915);
+ loop = 1; /* PAGE_SIZE units */
+ list_for_each_entry_safe(hole, next, &mm->hole_stack, hole_stack) {
+ struct drm_mm_node *resv;
+
+ resv = kzalloc(sizeof(*resv), GFP_NOWAIT);
+ if (!resv) {
+ err = -ENOMEM;
+ goto out_park;
+ }
+
+ resv->start = drm_mm_hole_node_start(hole) + loop;
+ resv->size = hole->hole_size - loop;
+ resv->color = -1ul;
+ loop = 0;
+
+ if (!resv->size) {
+ kfree(resv);
+ continue;
+ }
+
+ pr_debug("Reserving hole [%llx + %llx]\n",
+ resv->start, resv->size);
+
+ err = drm_mm_reserve_node(mm, resv);
if (err) {
pr_err("Failed to trim VMA manager, err=%d\n", err);
+ kfree(resv);
goto out_park;
}
- break;
}
+ GEM_BUG_ON(!list_is_singular(&mm->hole_stack));
+ mmap_offset_unlock(i915);
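/*
 * Editor's note: resv->color = -1ul above is a sentinel, not a real cache
 * color -- the cleanup loop at out_park below frees exactly the nodes
 * carrying it, leaving genuine mmap offsets untouched.
 */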
/* Just fits! */
if (!assert_mmap_offset(i915, PAGE_SIZE, 0)) {
@@ -650,9 +674,10 @@ static int igt_mmap_offset_exhaustion(void *arg)
goto out;
}
- err = create_mmap_offset(obj);
- if (err) {
+ mmo = mmap_offset_attach(obj, I915_MMAP_TYPE_GTT, NULL);
+ if (IS_ERR(mmo)) {
pr_err("Unable to insert object into reclaimed hole\n");
+ err = PTR_ERR(mmo);
goto err_obj;
}
@@ -684,9 +709,15 @@ static int igt_mmap_offset_exhaustion(void *arg)
out:
mmap_offset_lock(i915);
- drm_mm_remove_node(&resv);
- mmap_offset_unlock(i915);
out_park:
+ drm_mm_for_each_node_safe(hole, next, mm) {
+ if (hole->color != -1ul)
+ continue;
+
+ drm_mm_remove_node(hole);
+ kfree(hole);
+ }
+ mmap_offset_unlock(i915);
restore_retire_worker(i915);
return err;
err_obj:
@@ -694,12 +725,258 @@ err_obj:
goto out;
}
+#define expand32(x) (((x) << 0) | ((x) << 8) | ((x) << 16) | ((x) << 24))
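/*
 * Editor's note: expand32() replicates one byte across all four lanes of a
 * u32, e.g. expand32(POISON_INUSE) == 0x5b5b5b5b (POISON_INUSE is 0x5b in
 * include/linux/poison.h), so each dword read back through the mmap can be
 * checked against the poison pattern in a single compare.
 */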
+static int igt_mmap(void *arg, enum i915_mmap_type type)
+{
+ struct drm_i915_private *i915 = arg;
+ struct drm_i915_gem_object *obj;
+ struct i915_mmap_offset *mmo;
+ struct vm_area_struct *area;
+ unsigned long addr;
+ void *vaddr;
+ int err = 0, i;
+
+ if (!i915_ggtt_has_aperture(&i915->ggtt))
+ return 0;
+
+ obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ if (IS_ERR(vaddr)) {
+ err = PTR_ERR(vaddr);
+ goto out;
+ }
+ memset(vaddr, POISON_INUSE, PAGE_SIZE);
+ i915_gem_object_flush_map(obj);
+ i915_gem_object_unpin_map(obj);
+
+ mmo = mmap_offset_attach(obj, type, NULL);
+ if (IS_ERR(mmo)) {
+ err = PTR_ERR(mmo);
+ goto out;
+ }
+
+ addr = igt_mmap_node(i915, &mmo->vma_node, 0, PROT_WRITE, MAP_SHARED);
+ if (IS_ERR_VALUE(addr)) {
+ err = addr;
+ goto out;
+ }
+
+ pr_debug("igt_mmap() @ %lx\n", addr);
+
+ area = find_vma(current->mm, addr);
+ if (!area) {
+ pr_err("Did not create a vm_area_struct for the mmap\n");
+ err = -EINVAL;
+ goto out_unmap;
+ }
+
+ if (area->vm_private_data != mmo) {
+ pr_err("vm_area_struct did not point back to our mmap_offset object!\n");
+ err = -EINVAL;
+ goto out_unmap;
+ }
+
+ for (i = 0; i < PAGE_SIZE / sizeof(u32); i++) {
+ u32 __user *ux = u64_to_user_ptr((u64)(addr + i * sizeof(*ux)));
+ u32 x;
+
+ if (get_user(x, ux)) {
+ pr_err("Unable to read from mmap, offset:%zd\n",
+ i * sizeof(x));
+ err = -EFAULT;
+ break;
+ }
+
+ if (x != expand32(POISON_INUSE)) {
+ pr_err("Read incorrect value from mmap, offset:%zd, found:%x, expected:%x\n",
+ i * sizeof(x), x, expand32(POISON_INUSE));
+ err = -EINVAL;
+ break;
+ }
+
+ x = expand32(POISON_FREE);
+ if (put_user(x, ux)) {
+ pr_err("Unable to write to mmap, offset:%zd\n",
+ i * sizeof(x));
+ err = -EFAULT;
+ break;
+ }
+ }
+
+out_unmap:
+ vm_munmap(addr, PAGE_SIZE);
+
+ vaddr = i915_gem_object_pin_map(obj, I915_MAP_FORCE_WC);
+ if (IS_ERR(vaddr)) {
+ err = PTR_ERR(vaddr);
+ goto out;
+ }
+ if (err == 0 && memchr_inv(vaddr, POISON_FREE, PAGE_SIZE)) {
+ pr_err("Write via mmap did not land in backing store\n");
+ err = -EINVAL;
+ }
+ i915_gem_object_unpin_map(obj);
+
+out:
+ i915_gem_object_put(obj);
+ return err;
+}
+
+static int igt_mmap_gtt(void *arg)
+{
+ return igt_mmap(arg, I915_MMAP_TYPE_GTT);
+}
+
+static int igt_mmap_cpu(void *arg)
+{
+ return igt_mmap(arg, I915_MMAP_TYPE_WC);
+}
+
+static int check_present_pte(pte_t *pte, unsigned long addr, void *data)
+{
+ if (!pte_present(*pte) || pte_none(*pte)) {
+ pr_err("missing PTE:%lx\n",
+ (addr - (unsigned long)data) >> PAGE_SHIFT);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int check_absent_pte(pte_t *pte, unsigned long addr, void *data)
+{
+ if (pte_present(*pte) && !pte_none(*pte)) {
+ pr_err("present PTE:%lx; expected to be revoked\n",
+ (addr - (unsigned long)data) >> PAGE_SHIFT);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int check_present(unsigned long addr, unsigned long len)
+{
+ return apply_to_page_range(current->mm, addr, len,
+ check_present_pte, (void *)addr);
+}
+
+static int check_absent(unsigned long addr, unsigned long len)
+{
+ return apply_to_page_range(current->mm, addr, len,
+ check_absent_pte, (void *)addr);
+}
+
+static int prefault_range(u64 start, u64 len)
+{
+ const char __user *addr, *end;
+ char __maybe_unused c;
+ int err;
+
+ addr = u64_to_user_ptr(start);
+ end = addr + len;
+
+ for (; addr < end; addr += PAGE_SIZE) {
+ err = __get_user(c, addr);
+ if (err)
+ return err;
+ }
+
+ return __get_user(c, end - 1);
+}
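/*
 * Editor's note -- how the helpers above fit together in the revoke test
 * below: prefault_range() reads one byte per page through the user mapping
 * (plus end - 1 to cover a partial tail page), forcing the fault handler to
 * populate PTEs; check_present()/check_absent() then use
 * apply_to_page_range() to verify every PTE before and after the mapping
 * is revoked.
 */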
+
+static int igt_mmap_revoke(void *arg, enum i915_mmap_type type)
+{
+ struct drm_i915_private *i915 = arg;
+ struct drm_i915_gem_object *obj;
+ struct i915_mmap_offset *mmo;
+ unsigned long addr;
+ int err;
+
+ if (!i915_ggtt_has_aperture(&i915->ggtt))
+ return 0;
+
+ obj = i915_gem_object_create_internal(i915, SZ_4M);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ mmo = mmap_offset_attach(obj, type, NULL);
+ if (IS_ERR(mmo)) {
+ err = PTR_ERR(mmo);
+ goto out;
+ }
+
+ addr = igt_mmap_node(i915, &mmo->vma_node, 0, PROT_WRITE, MAP_SHARED);
+ if (IS_ERR_VALUE(addr)) {
+ err = addr;
+ goto out;
+ }
+
+ err = prefault_range(addr, obj->base.size);
+ if (err)
+ goto out_unmap;
+
+ GEM_BUG_ON(mmo->mmap_type == I915_MMAP_TYPE_GTT &&
+ !atomic_read(&obj->bind_count));
+
+ err = check_present(addr, obj->base.size);
+ if (err)
+ goto out_unmap;
+
+ /*
+ * After unbinding the object from the GGTT, its address may be reused
+ * for other objects. Ergo we have to revoke the previous mmap PTE
+ * access as it no longer points to the same object.
+ */
+ err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
+ if (err) {
+ pr_err("Failed to unbind object!\n");
+ goto out_unmap;
+ }
+ GEM_BUG_ON(atomic_read(&obj->bind_count));
+
+ if (type != I915_MMAP_TYPE_GTT) {
+ __i915_gem_object_put_pages(obj);
+ if (i915_gem_object_has_pages(obj)) {
+ pr_err("Failed to put-pages object!\n");
+ err = -EINVAL;
+ goto out_unmap;
+ }
+ }
+
+ err = check_absent(addr, obj->base.size);
+ if (err)
+ goto out_unmap;
+
+out_unmap:
+ vm_munmap(addr, obj->base.size);
+out:
+ i915_gem_object_put(obj);
+ return err;
+}
+
+static int igt_mmap_gtt_revoke(void *arg)
+{
+ return igt_mmap_revoke(arg, I915_MMAP_TYPE_GTT);
+}
+
+static int igt_mmap_cpu_revoke(void *arg)
+{
+ return igt_mmap_revoke(arg, I915_MMAP_TYPE_WC);
+}
+
int i915_gem_mman_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(igt_partial_tiling),
SUBTEST(igt_smoke_tiling),
SUBTEST(igt_mmap_offset_exhaustion),
+ SUBTEST(igt_mmap_gtt),
+ SUBTEST(igt_mmap_cpu),
+ SUBTEST(igt_mmap_gtt_revoke),
+ SUBTEST(igt_mmap_cpu_revoke),
};
return i915_subtests(tests, i915);
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c
index e8132aca0bb6..62077fe46715 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c
@@ -41,6 +41,7 @@ static int __perf_fill_blt(struct drm_i915_gem_object *obj)
if (!engine)
return 0;
+ intel_engine_pm_get(engine);
for (pass = 0; pass < ARRAY_SIZE(t); pass++) {
struct intel_context *ce = engine->kernel_context;
ktime_t t0, t1;
@@ -49,17 +50,20 @@ static int __perf_fill_blt(struct drm_i915_gem_object *obj)
err = i915_gem_object_fill_blt(obj, ce, 0);
if (err)
- return err;
+ break;
err = i915_gem_object_wait(obj,
I915_WAIT_ALL,
MAX_SCHEDULE_TIMEOUT);
if (err)
- return err;
+ break;
t1 = ktime_get();
t[pass] = ktime_sub(t1, t0);
}
+ intel_engine_pm_put(engine);
+ if (err)
+ return err;
sort(t, ARRAY_SIZE(t), sizeof(*t), wrap_ktime_compare, NULL);
pr_info("%s: blt %zd KiB fill: %lld MiB/s\n",
@@ -109,6 +113,7 @@ static int __perf_copy_blt(struct drm_i915_gem_object *src,
struct intel_engine_cs *engine;
ktime_t t[5];
int pass;
+ int err = 0;
engine = intel_engine_lookup_user(i915,
I915_ENGINE_CLASS_COPY,
@@ -116,26 +121,29 @@ static int __perf_copy_blt(struct drm_i915_gem_object *src,
if (!engine)
return 0;
+ intel_engine_pm_get(engine);
for (pass = 0; pass < ARRAY_SIZE(t); pass++) {
struct intel_context *ce = engine->kernel_context;
ktime_t t0, t1;
- int err;
t0 = ktime_get();
err = i915_gem_object_copy_blt(src, dst, ce);
if (err)
- return err;
+ break;
err = i915_gem_object_wait(dst,
I915_WAIT_ALL,
MAX_SCHEDULE_TIMEOUT);
if (err)
- return err;
+ break;
t1 = ktime_get();
t[pass] = ktime_sub(t1, t0);
}
+ intel_engine_pm_put(engine);
+ if (err)
+ return err;
sort(t, ARRAY_SIZE(t), sizeof(*t), wrap_ktime_compare, NULL);
pr_info("%s: blt %zd KiB copy: %lld MiB/s\n",
@@ -186,6 +194,8 @@ err_src:
struct igt_thread_arg {
struct drm_i915_private *i915;
+ struct i915_gem_context *ctx;
+ struct file *file;
struct rnd_state prng;
unsigned int n_cpus;
};
@@ -198,24 +208,20 @@ static int igt_fill_blt_thread(void *arg)
struct drm_i915_gem_object *obj;
struct i915_gem_context *ctx;
struct intel_context *ce;
- struct drm_file *file;
unsigned int prio;
IGT_TIMEOUT(end);
int err;
- file = mock_file(i915);
- if (IS_ERR(file))
- return PTR_ERR(file);
+ ctx = thread->ctx;
+ if (!ctx) {
+ ctx = live_context(i915, thread->file);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
- ctx = live_context(i915, file);
- if (IS_ERR(ctx)) {
- err = PTR_ERR(ctx);
- goto out_file;
+ prio = i915_prandom_u32_max_state(I915_PRIORITY_MAX, prng);
+ ctx->sched.priority = I915_USER_PRIORITY(prio);
}
- prio = i915_prandom_u32_max_state(I915_PRIORITY_MAX, prng);
- ctx->sched.priority = I915_USER_PRIORITY(prio);
-
ce = i915_gem_context_get_engine(ctx, BCS0);
GEM_BUG_ON(IS_ERR(ce));
@@ -300,8 +306,6 @@ err_flush:
err = 0;
intel_context_put(ce);
-out_file:
- mock_file_free(i915, file);
return err;
}
@@ -313,24 +317,20 @@ static int igt_copy_blt_thread(void *arg)
struct drm_i915_gem_object *src, *dst;
struct i915_gem_context *ctx;
struct intel_context *ce;
- struct drm_file *file;
unsigned int prio;
IGT_TIMEOUT(end);
int err;
- file = mock_file(i915);
- if (IS_ERR(file))
- return PTR_ERR(file);
+ ctx = thread->ctx;
+ if (!ctx) {
+ ctx = live_context(i915, thread->file);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
- ctx = live_context(i915, file);
- if (IS_ERR(ctx)) {
- err = PTR_ERR(ctx);
- goto out_file;
+ prio = i915_prandom_u32_max_state(I915_PRIORITY_MAX, prng);
+ ctx->sched.priority = I915_USER_PRIORITY(prio);
}
- prio = i915_prandom_u32_max_state(I915_PRIORITY_MAX, prng);
- ctx->sched.priority = I915_USER_PRIORITY(prio);
-
ce = i915_gem_context_get_engine(ctx, BCS0);
GEM_BUG_ON(IS_ERR(ce));
@@ -431,19 +431,18 @@ err_flush:
err = 0;
intel_context_put(ce);
-out_file:
- mock_file_free(i915, file);
return err;
}
static int igt_threaded_blt(struct drm_i915_private *i915,
- int (*blt_fn)(void *arg))
+ int (*blt_fn)(void *arg),
+ unsigned int flags)
+#define SINGLE_CTX BIT(0)
{
struct igt_thread_arg *thread;
struct task_struct **tsk;
+ unsigned int n_cpus, i;
I915_RND_STATE(prng);
- unsigned int n_cpus;
- unsigned int i;
int err = 0;
n_cpus = num_online_cpus() + 1;
@@ -453,13 +452,27 @@ static int igt_threaded_blt(struct drm_i915_private *i915,
return 0;
thread = kcalloc(n_cpus, sizeof(struct igt_thread_arg), GFP_KERNEL);
- if (!thread) {
- kfree(tsk);
- return 0;
+ if (!thread)
+ goto out_tsk;
+
+ thread[0].file = mock_file(i915);
+ if (IS_ERR(thread[0].file)) {
+ err = PTR_ERR(thread[0].file);
+ goto out_thread;
+ }
+
+ if (flags & SINGLE_CTX) {
+ thread[0].ctx = live_context(i915, thread[0].file);
+ if (IS_ERR(thread[0].ctx)) {
+ err = PTR_ERR(thread[0].ctx);
+ goto out_file;
+ }
}
for (i = 0; i < n_cpus; ++i) {
thread[i].i915 = i915;
+ thread[i].file = thread[0].file;
+ thread[i].ctx = thread[0].ctx;
thread[i].n_cpus = n_cpus;
thread[i].prng =
I915_RND_STATE_INITIALIZER(prandom_u32_state(&prng));
@@ -488,29 +501,42 @@ static int igt_threaded_blt(struct drm_i915_private *i915,
put_task_struct(tsk[i]);
}
- kfree(tsk);
+out_file:
+ fput(thread[0].file);
+out_thread:
kfree(thread);
-
+out_tsk:
+ kfree(tsk);
return err;
}
static int igt_fill_blt(void *arg)
{
- return igt_threaded_blt(arg, igt_fill_blt_thread);
+ return igt_threaded_blt(arg, igt_fill_blt_thread, 0);
+}
+
+static int igt_fill_blt_ctx0(void *arg)
+{
+ return igt_threaded_blt(arg, igt_fill_blt_thread, SINGLE_CTX);
}
static int igt_copy_blt(void *arg)
{
- return igt_threaded_blt(arg, igt_copy_blt_thread);
+ return igt_threaded_blt(arg, igt_copy_blt_thread, 0);
+}
+
+static int igt_copy_blt_ctx0(void *arg)
+{
+ return igt_threaded_blt(arg, igt_copy_blt_thread, SINGLE_CTX);
}
int i915_gem_object_blt_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
- SUBTEST(perf_fill_blt),
- SUBTEST(perf_copy_blt),
SUBTEST(igt_fill_blt),
+ SUBTEST(igt_fill_blt_ctx0),
SUBTEST(igt_copy_blt),
+ SUBTEST(igt_copy_blt_ctx0),
};
if (intel_gt_is_wedged(&i915->gt))
@@ -521,3 +547,16 @@ int i915_gem_object_blt_live_selftests(struct drm_i915_private *i915)
return i915_live_subtests(tests, i915);
}
+
+int i915_gem_object_blt_perf_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(perf_fill_blt),
+ SUBTEST(perf_copy_blt),
+ };
+
+ if (intel_gt_is_wedged(&i915->gt))
+ return 0;
+
+ return i915_live_subtests(tests, i915);
+}
diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_context.c b/drivers/gpu/drm/i915/gem/selftests/mock_context.c
index 29b8984f0e47..7d7e13dc2fdf 100644
--- a/drivers/gpu/drm/i915/gem/selftests/mock_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/mock_context.c
@@ -5,6 +5,7 @@
*/
#include "mock_context.h"
+#include "selftests/mock_drm.h"
#include "selftests/mock_gtt.h"
struct i915_gem_context *
@@ -36,9 +37,7 @@ mock_context(struct drm_i915_private *i915,
if (name) {
struct i915_ppgtt *ppgtt;
- ctx->name = kstrdup(name, GFP_KERNEL);
- if (!ctx->name)
- goto err_put;
+ strncpy(ctx->name, name, sizeof(ctx->name));
ppgtt = mock_ppgtt(i915, name);
if (!ppgtt)
@@ -74,7 +73,7 @@ void mock_init_contexts(struct drm_i915_private *i915)
}
struct i915_gem_context *
-live_context(struct drm_i915_private *i915, struct drm_file *file)
+live_context(struct drm_i915_private *i915, struct file *file)
{
struct i915_gem_context *ctx;
int err;
@@ -83,7 +82,7 @@ live_context(struct drm_i915_private *i915, struct drm_file *file)
if (IS_ERR(ctx))
return ctx;
- err = gem_context_register(ctx, file->driver_priv);
+ err = gem_context_register(ctx, to_drm_file(file)->driver_priv);
if (err < 0)
goto err_ctx;
@@ -97,7 +96,16 @@ err_ctx:
struct i915_gem_context *
kernel_context(struct drm_i915_private *i915)
{
- return i915_gem_context_create_kernel(i915, I915_PRIORITY_NORMAL);
+ struct i915_gem_context *ctx;
+
+ ctx = i915_gem_create_context(i915, 0);
+ if (IS_ERR(ctx))
+ return ctx;
+
+ i915_gem_context_clear_bannable(ctx);
+ i915_gem_context_set_persistence(ctx);
+
+ return ctx;
}
void kernel_context_close(struct i915_gem_context *ctx)
diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_context.h b/drivers/gpu/drm/i915/gem/selftests/mock_context.h
index 0b926653914f..fb83d2f09212 100644
--- a/drivers/gpu/drm/i915/gem/selftests/mock_context.h
+++ b/drivers/gpu/drm/i915/gem/selftests/mock_context.h
@@ -7,6 +7,9 @@
#ifndef __MOCK_CONTEXT_H
#define __MOCK_CONTEXT_H
+struct file;
+struct drm_i915_private;
+
void mock_init_contexts(struct drm_i915_private *i915);
struct i915_gem_context *
@@ -16,7 +19,7 @@ mock_context(struct drm_i915_private *i915,
void mock_context_close(struct i915_gem_context *ctx);
struct i915_gem_context *
-live_context(struct drm_i915_private *i915, struct drm_file *file);
+live_context(struct drm_i915_private *i915, struct file *file);
struct i915_gem_context *kernel_context(struct drm_i915_private *i915);
void kernel_context_close(struct i915_gem_context *ctx);
diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.h b/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.h
index f0f8bbd82dfc..22818bbb139d 100644
--- a/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.h
+++ b/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.h
@@ -14,7 +14,7 @@ struct mock_dmabuf {
struct page *pages[];
};
-static struct mock_dmabuf *to_mock(struct dma_buf *buf)
+static inline struct mock_dmabuf *to_mock(struct dma_buf *buf)
{
return buf->priv;
}
diff --git a/drivers/gpu/drm/i915/gt/debugfs_engines.c b/drivers/gpu/drm/i915/gt/debugfs_engines.c
new file mode 100644
index 000000000000..6a5e9ab20b94
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/debugfs_engines.c
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: MIT
+
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <drm/drm_print.h>
+
+#include "debugfs_engines.h"
+#include "debugfs_gt.h"
+#include "i915_drv.h" /* for_each_engine! */
+#include "intel_engine.h"
+
+static int engines_show(struct seq_file *m, void *data)
+{
+ struct intel_gt *gt = m->private;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct drm_printer p;
+
+ p = drm_seq_file_printer(m);
+ for_each_engine(engine, gt, id)
+ intel_engine_dump(engine, &p, "%s\n", engine->name);
+
+ return 0;
+}
+DEFINE_GT_DEBUGFS_ATTRIBUTE(engines);
+
+void debugfs_engines_register(struct intel_gt *gt, struct dentry *root)
+{
+ static const struct debugfs_gt_file files[] = {
+ { "engines", &engines_fops },
+ };
+
+ debugfs_gt_register_files(gt, root, files, ARRAY_SIZE(files));
+}
diff --git a/drivers/gpu/drm/i915/gt/debugfs_engines.h b/drivers/gpu/drm/i915/gt/debugfs_engines.h
new file mode 100644
index 000000000000..f69257eaa1cc
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/debugfs_engines.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef DEBUGFS_ENGINES_H
+#define DEBUGFS_ENGINES_H
+
+struct intel_gt;
+struct dentry;
+
+void debugfs_engines_register(struct intel_gt *gt, struct dentry *root);
+
+#endif /* DEBUGFS_ENGINES_H */
diff --git a/drivers/gpu/drm/i915/gt/debugfs_gt.c b/drivers/gpu/drm/i915/gt/debugfs_gt.c
new file mode 100644
index 000000000000..75255aaacaed
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/debugfs_gt.c
@@ -0,0 +1,42 @@
+// SPDX-License-Identifier: MIT
+
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/debugfs.h>
+
+#include "debugfs_engines.h"
+#include "debugfs_gt.h"
+#include "debugfs_gt_pm.h"
+#include "i915_drv.h"
+
+void debugfs_gt_register(struct intel_gt *gt)
+{
+ struct dentry *root;
+
+ if (!gt->i915->drm.primary->debugfs_root)
+ return;
+
+ root = debugfs_create_dir("gt", gt->i915->drm.primary->debugfs_root);
+ if (IS_ERR(root))
+ return;
+
+ debugfs_engines_register(gt, root);
+ debugfs_gt_pm_register(gt, root);
+}
+
+void debugfs_gt_register_files(struct intel_gt *gt,
+ struct dentry *root,
+ const struct debugfs_gt_file *files,
+ unsigned long count)
+{
+ while (count--) {
+ if (!files->eval || files->eval(gt))
+ debugfs_create_file(files->name,
+ 0444, root, gt,
+ files->fops);
+
+ files++;
+ }
+}
diff --git a/drivers/gpu/drm/i915/gt/debugfs_gt.h b/drivers/gpu/drm/i915/gt/debugfs_gt.h
new file mode 100644
index 000000000000..4ea0f06cda8f
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/debugfs_gt.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef DEBUGFS_GT_H
+#define DEBUGFS_GT_H
+
+#include <linux/file.h>
+
+struct intel_gt;
+
+#define DEFINE_GT_DEBUGFS_ATTRIBUTE(__name) \
+ static int __name ## _open(struct inode *inode, struct file *file) \
+{ \
+ return single_open(file, __name ## _show, inode->i_private); \
+} \
+static const struct file_operations __name ## _fops = { \
+ .owner = THIS_MODULE, \
+ .open = __name ## _open, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+ .release = single_release, \
+}
+
+void debugfs_gt_register(struct intel_gt *gt);
+
+struct debugfs_gt_file {
+ const char *name;
+ const struct file_operations *fops;
+ bool (*eval)(const struct intel_gt *gt);
+};
+
+void debugfs_gt_register_files(struct intel_gt *gt,
+ struct dentry *root,
+ const struct debugfs_gt_file *files,
+ unsigned long count);
+
+#endif /* DEBUGFS_GT_H */
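
Note: a minimal sketch of how a new per-gt debugfs file would hang off this
interface; the "foo" name, foo_eval() predicate and foo_register() wrapper
are hypothetical, the rest follows the header above:

	static int foo_show(struct seq_file *m, void *data)
	{
		struct intel_gt *gt = m->private; /* stashed by the generated open() */

		seq_printf(m, "awake? %s\n", yesno(gt->awake));
		return 0;
	}
	DEFINE_GT_DEBUGFS_ATTRIBUTE(foo); /* generates foo_open() and foo_fops */

	static bool foo_eval(const struct intel_gt *gt)
	{
		return HAS_EXECLISTS(gt->i915); /* file only created when true */
	}

	static void foo_register(struct intel_gt *gt, struct dentry *root)
	{
		static const struct debugfs_gt_file files[] = {
			{ "foo", &foo_fops, foo_eval },
		};

		debugfs_gt_register_files(gt, root, files, ARRAY_SIZE(files));
	}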
diff --git a/drivers/gpu/drm/i915/gt/debugfs_gt_pm.c b/drivers/gpu/drm/i915/gt/debugfs_gt_pm.c
new file mode 100644
index 000000000000..059c9e5c002e
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/debugfs_gt_pm.c
@@ -0,0 +1,601 @@
+// SPDX-License-Identifier: MIT
+
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/seq_file.h>
+
+#include "debugfs_gt.h"
+#include "debugfs_gt_pm.h"
+#include "i915_drv.h"
+#include "intel_gt.h"
+#include "intel_llc.h"
+#include "intel_rc6.h"
+#include "intel_rps.h"
+#include "intel_runtime_pm.h"
+#include "intel_sideband.h"
+#include "intel_uncore.h"
+
+static int fw_domains_show(struct seq_file *m, void *data)
+{
+ struct intel_gt *gt = m->private;
+ struct intel_uncore *uncore = gt->uncore;
+ struct intel_uncore_forcewake_domain *fw_domain;
+ unsigned int tmp;
+
+ seq_printf(m, "user.bypass_count = %u\n",
+ uncore->user_forcewake_count);
+
+ for_each_fw_domain(fw_domain, uncore, tmp)
+ seq_printf(m, "%s.wake_count = %u\n",
+ intel_uncore_forcewake_domain_to_str(fw_domain->id),
+ READ_ONCE(fw_domain->wake_count));
+
+ return 0;
+}
+DEFINE_GT_DEBUGFS_ATTRIBUTE(fw_domains);
+
+static void print_rc6_res(struct seq_file *m,
+ const char *title,
+ const i915_reg_t reg)
+{
+ struct intel_gt *gt = m->private;
+ intel_wakeref_t wakeref;
+
+ with_intel_runtime_pm(gt->uncore->rpm, wakeref)
+ seq_printf(m, "%s %u (%llu us)\n", title,
+ intel_uncore_read(gt->uncore, reg),
+ intel_rc6_residency_us(&gt->rc6, reg));
+}
+
+static int vlv_drpc(struct seq_file *m)
+{
+ struct intel_gt *gt = m->private;
+ struct intel_uncore *uncore = gt->uncore;
+ u32 rcctl1, pw_status;
+
+ pw_status = intel_uncore_read(uncore, VLV_GTLC_PW_STATUS);
+ rcctl1 = intel_uncore_read(uncore, GEN6_RC_CONTROL);
+
+ seq_printf(m, "RC6 Enabled: %s\n",
+ yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
+ GEN6_RC_CTL_EI_MODE(1))));
+ seq_printf(m, "Render Power Well: %s\n",
+ (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
+ seq_printf(m, "Media Power Well: %s\n",
+ (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
+
+ print_rc6_res(m, "Render RC6 residency since boot:", VLV_GT_RENDER_RC6);
+ print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6);
+
+ return fw_domains_show(m, NULL);
+}
+
+static int gen6_drpc(struct seq_file *m)
+{
+ struct intel_gt *gt = m->private;
+ struct drm_i915_private *i915 = gt->i915;
+ struct intel_uncore *uncore = gt->uncore;
+ u32 gt_core_status, rcctl1, rc6vids = 0;
+ u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;
+
+ gt_core_status = intel_uncore_read_fw(uncore, GEN6_GT_CORE_STATUS);
+
+ rcctl1 = intel_uncore_read(uncore, GEN6_RC_CONTROL);
+ if (INTEL_GEN(i915) >= 9) {
+ gen9_powergate_enable =
+ intel_uncore_read(uncore, GEN9_PG_ENABLE);
+ gen9_powergate_status =
+ intel_uncore_read(uncore, GEN9_PWRGT_DOMAIN_STATUS);
+ }
+
+ if (INTEL_GEN(i915) <= 7)
+ sandybridge_pcode_read(i915, GEN6_PCODE_READ_RC6VIDS,
+ &rc6vids, NULL);
+
+ seq_printf(m, "RC1e Enabled: %s\n",
+ yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
+ seq_printf(m, "RC6 Enabled: %s\n",
+ yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
+ if (INTEL_GEN(i915) >= 9) {
+ seq_printf(m, "Render Well Gating Enabled: %s\n",
+ yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
+ seq_printf(m, "Media Well Gating Enabled: %s\n",
+ yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
+ }
+ seq_printf(m, "Deep RC6 Enabled: %s\n",
+ yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
+ seq_printf(m, "Deepest RC6 Enabled: %s\n",
+ yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
+ seq_puts(m, "Current RC state: ");
+ switch (gt_core_status & GEN6_RCn_MASK) {
+ case GEN6_RC0:
+ if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
+ seq_puts(m, "Core Power Down\n");
+ else
+ seq_puts(m, "on\n");
+ break;
+ case GEN6_RC3:
+ seq_puts(m, "RC3\n");
+ break;
+ case GEN6_RC6:
+ seq_puts(m, "RC6\n");
+ break;
+ case GEN6_RC7:
+ seq_puts(m, "RC7\n");
+ break;
+ default:
+ seq_puts(m, "Unknown\n");
+ break;
+ }
+
+ seq_printf(m, "Core Power Down: %s\n",
+ yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
+ if (INTEL_GEN(i915) >= 9) {
+ seq_printf(m, "Render Power Well: %s\n",
+ (gen9_powergate_status &
+ GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
+ seq_printf(m, "Media Power Well: %s\n",
+ (gen9_powergate_status &
+ GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
+ }
+
+ /* Not exactly sure what this is */
+ print_rc6_res(m, "RC6 \"Locked to RPn\" residency since boot:",
+ GEN6_GT_GFX_RC6_LOCKED);
+ print_rc6_res(m, "RC6 residency since boot:", GEN6_GT_GFX_RC6);
+ print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
+ print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);
+
+ if (INTEL_GEN(i915) <= 7) {
+ seq_printf(m, "RC6 voltage: %dmV\n",
+ GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
+ seq_printf(m, "RC6+ voltage: %dmV\n",
+ GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
+ seq_printf(m, "RC6++ voltage: %dmV\n",
+ GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
+ }
+
+ return fw_domains_show(m, NULL);
+}
+
+static int ilk_drpc(struct seq_file *m)
+{
+ struct intel_gt *gt = m->private;
+ struct intel_uncore *uncore = gt->uncore;
+ u32 rgvmodectl, rstdbyctl;
+ u16 crstandvid;
+
+ rgvmodectl = intel_uncore_read(uncore, MEMMODECTL);
+ rstdbyctl = intel_uncore_read(uncore, RSTDBYCTL);
+ crstandvid = intel_uncore_read16(uncore, CRSTANDVID);
+
+ seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
+ seq_printf(m, "Boost freq: %d\n",
+ (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
+ MEMMODE_BOOST_FREQ_SHIFT);
+ seq_printf(m, "HW control enabled: %s\n",
+ yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
+ seq_printf(m, "SW control enabled: %s\n",
+ yesno(rgvmodectl & MEMMODE_SWMODE_EN));
+ seq_printf(m, "Gated voltage change: %s\n",
+ yesno(rgvmodectl & MEMMODE_RCLK_GATE));
+ seq_printf(m, "Starting frequency: P%d\n",
+ (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
+ seq_printf(m, "Max P-state: P%d\n",
+ (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
+ seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
+ seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
+ seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
+ seq_printf(m, "Render standby enabled: %s\n",
+ yesno(!(rstdbyctl & RCX_SW_EXIT)));
+ seq_puts(m, "Current RS state: ");
+ switch (rstdbyctl & RSX_STATUS_MASK) {
+ case RSX_STATUS_ON:
+ seq_puts(m, "on\n");
+ break;
+ case RSX_STATUS_RC1:
+ seq_puts(m, "RC1\n");
+ break;
+ case RSX_STATUS_RC1E:
+ seq_puts(m, "RC1E\n");
+ break;
+ case RSX_STATUS_RS1:
+ seq_puts(m, "RS1\n");
+ break;
+ case RSX_STATUS_RS2:
+ seq_puts(m, "RS2 (RC6)\n");
+ break;
+ case RSX_STATUS_RS3:
+ seq_puts(m, "RC3 (RC6+)\n");
+ break;
+ default:
+ seq_puts(m, "unknown\n");
+ break;
+ }
+
+ return 0;
+}
+
+static int drpc_show(struct seq_file *m, void *unused)
+{
+ struct intel_gt *gt = m->private;
+ struct drm_i915_private *i915 = gt->i915;
+ intel_wakeref_t wakeref;
+ int err = -ENODEV;
+
+ with_intel_runtime_pm(gt->uncore->rpm, wakeref) {
+ if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
+ err = vlv_drpc(m);
+ else if (INTEL_GEN(i915) >= 6)
+ err = gen6_drpc(m);
+ else
+ err = ilk_drpc(m);
+ }
+
+ return err;
+}
+DEFINE_GT_DEBUGFS_ATTRIBUTE(drpc);
+
+static int frequency_show(struct seq_file *m, void *unused)
+{
+ struct intel_gt *gt = m->private;
+ struct drm_i915_private *i915 = gt->i915;
+ struct intel_uncore *uncore = gt->uncore;
+ struct intel_rps *rps = &gt->rps;
+ intel_wakeref_t wakeref;
+
+ wakeref = intel_runtime_pm_get(uncore->rpm);
+
+ if (IS_GEN(i915, 5)) {
+ u16 rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
+ u16 rgvstat = intel_uncore_read16(uncore, MEMSTAT_ILK);
+
+ seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
+ seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
+ seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
+ MEMSTAT_VID_SHIFT);
+ seq_printf(m, "Current P-state: %d\n",
+ (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
+ } else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
+ u32 rpmodectl, freq_sts;
+
+ rpmodectl = intel_uncore_read(uncore, GEN6_RP_CONTROL);
+ seq_printf(m, "Video Turbo Mode: %s\n",
+ yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
+ seq_printf(m, "HW control enabled: %s\n",
+ yesno(rpmodectl & GEN6_RP_ENABLE));
+ seq_printf(m, "SW control enabled: %s\n",
+ yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
+ GEN6_RP_MEDIA_SW_MODE));
+
+ vlv_punit_get(i915);
+ freq_sts = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
+ vlv_punit_put(i915);
+
+ seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
+ seq_printf(m, "DDR freq: %d MHz\n", i915->mem_freq);
+
+ seq_printf(m, "actual GPU freq: %d MHz\n",
+ intel_gpu_freq(rps, (freq_sts >> 8) & 0xff));
+
+ seq_printf(m, "current GPU freq: %d MHz\n",
+ intel_gpu_freq(rps, rps->cur_freq));
+
+ seq_printf(m, "max GPU freq: %d MHz\n",
+ intel_gpu_freq(rps, rps->max_freq));
+
+ seq_printf(m, "min GPU freq: %d MHz\n",
+ intel_gpu_freq(rps, rps->min_freq));
+
+ seq_printf(m, "idle GPU freq: %d MHz\n",
+ intel_gpu_freq(rps, rps->idle_freq));
+
+ seq_printf(m, "efficient (RPe) frequency: %d MHz\n",
+ intel_gpu_freq(rps, rps->efficient_freq));
+ } else if (INTEL_GEN(i915) >= 6) {
+ u32 rp_state_limits;
+ u32 gt_perf_status;
+ u32 rp_state_cap;
+ u32 rpmodectl, rpinclimit, rpdeclimit;
+ u32 rpstat, cagf, reqf;
+ u32 rpupei, rpcurup, rpprevup;
+ u32 rpdownei, rpcurdown, rpprevdown;
+ u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
+ int max_freq;
+
+ rp_state_limits = intel_uncore_read(uncore, GEN6_RP_STATE_LIMITS);
+ if (IS_GEN9_LP(i915)) {
+ rp_state_cap = intel_uncore_read(uncore, BXT_RP_STATE_CAP);
+ gt_perf_status = intel_uncore_read(uncore, BXT_GT_PERF_STATUS);
+ } else {
+ rp_state_cap = intel_uncore_read(uncore, GEN6_RP_STATE_CAP);
+ gt_perf_status = intel_uncore_read(uncore, GEN6_GT_PERF_STATUS);
+ }
+
+ /* RPSTAT1 is in the GT power well */
+ intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
+
+ reqf = intel_uncore_read(uncore, GEN6_RPNSWREQ);
+ if (INTEL_GEN(i915) >= 9) {
+ reqf >>= 23;
+ } else {
+ reqf &= ~GEN6_TURBO_DISABLE;
+ if (IS_HASWELL(i915) || IS_BROADWELL(i915))
+ reqf >>= 24;
+ else
+ reqf >>= 25;
+ }
+ reqf = intel_gpu_freq(rps, reqf);
+
+ rpmodectl = intel_uncore_read(uncore, GEN6_RP_CONTROL);
+ rpinclimit = intel_uncore_read(uncore, GEN6_RP_UP_THRESHOLD);
+ rpdeclimit = intel_uncore_read(uncore, GEN6_RP_DOWN_THRESHOLD);
+
+ rpstat = intel_uncore_read(uncore, GEN6_RPSTAT1);
+ rpupei = intel_uncore_read(uncore, GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
+ rpcurup = intel_uncore_read(uncore, GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
+ rpprevup = intel_uncore_read(uncore, GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
+ rpdownei = intel_uncore_read(uncore, GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
+ rpcurdown = intel_uncore_read(uncore, GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
+ rpprevdown = intel_uncore_read(uncore, GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
+ cagf = intel_rps_read_actual_frequency(rps);
+
+ intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
+
+ if (INTEL_GEN(i915) >= 11) {
+ pm_ier = intel_uncore_read(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE);
+ pm_imr = intel_uncore_read(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK);
+ /*
+ * The equivalent to the PM ISR & IIR cannot be read
+ * without affecting the current state of the system
+ */
+ pm_isr = 0;
+ pm_iir = 0;
+ } else if (INTEL_GEN(i915) >= 8) {
+ pm_ier = intel_uncore_read(uncore, GEN8_GT_IER(2));
+ pm_imr = intel_uncore_read(uncore, GEN8_GT_IMR(2));
+ pm_isr = intel_uncore_read(uncore, GEN8_GT_ISR(2));
+ pm_iir = intel_uncore_read(uncore, GEN8_GT_IIR(2));
+ } else {
+ pm_ier = intel_uncore_read(uncore, GEN6_PMIER);
+ pm_imr = intel_uncore_read(uncore, GEN6_PMIMR);
+ pm_isr = intel_uncore_read(uncore, GEN6_PMISR);
+ pm_iir = intel_uncore_read(uncore, GEN6_PMIIR);
+ }
+ pm_mask = intel_uncore_read(uncore, GEN6_PMINTRMSK);
+
+ seq_printf(m, "Video Turbo Mode: %s\n",
+ yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
+ seq_printf(m, "HW control enabled: %s\n",
+ yesno(rpmodectl & GEN6_RP_ENABLE));
+ seq_printf(m, "SW control enabled: %s\n",
+ yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
+ GEN6_RP_MEDIA_SW_MODE));
+
+ seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
+ pm_ier, pm_imr, pm_mask);
+ if (INTEL_GEN(i915) <= 10)
+ seq_printf(m, "PM ISR=0x%08x IIR=0x%08x\n",
+ pm_isr, pm_iir);
+ seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
+ rps->pm_intrmsk_mbz);
+ seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
+ seq_printf(m, "Render p-state ratio: %d\n",
+ (gt_perf_status & (INTEL_GEN(i915) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
+ seq_printf(m, "Render p-state VID: %d\n",
+ gt_perf_status & 0xff);
+ seq_printf(m, "Render p-state limit: %d\n",
+ rp_state_limits & 0xff);
+ seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
+ seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
+ seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
+ seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
+ seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
+ seq_printf(m, "CAGF: %dMHz\n", cagf);
+ seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
+ rpupei, GT_PM_INTERVAL_TO_US(i915, rpupei));
+ seq_printf(m, "RP CUR UP: %d (%dus)\n",
+ rpcurup, GT_PM_INTERVAL_TO_US(i915, rpcurup));
+ seq_printf(m, "RP PREV UP: %d (%dus)\n",
+ rpprevup, GT_PM_INTERVAL_TO_US(i915, rpprevup));
+ seq_printf(m, "Up threshold: %d%%\n",
+ rps->power.up_threshold);
+
+ seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
+ rpdownei, GT_PM_INTERVAL_TO_US(i915, rpdownei));
+ seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
+ rpcurdown, GT_PM_INTERVAL_TO_US(i915, rpcurdown));
+ seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
+ rpprevdown, GT_PM_INTERVAL_TO_US(i915, rpprevdown));
+ seq_printf(m, "Down threshold: %d%%\n",
+ rps->power.down_threshold);
+
+ max_freq = (IS_GEN9_LP(i915) ? rp_state_cap >> 0 :
+ rp_state_cap >> 16) & 0xff;
+ max_freq *= (IS_GEN9_BC(i915) ||
+ INTEL_GEN(i915) >= 10 ? GEN9_FREQ_SCALER : 1);
+ seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
+ intel_gpu_freq(rps, max_freq));
+
+ max_freq = (rp_state_cap & 0xff00) >> 8;
+ max_freq *= (IS_GEN9_BC(i915) ||
+ INTEL_GEN(i915) >= 10 ? GEN9_FREQ_SCALER : 1);
+ seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
+ intel_gpu_freq(rps, max_freq));
+
+ max_freq = (IS_GEN9_LP(i915) ? rp_state_cap >> 16 :
+ rp_state_cap >> 0) & 0xff;
+ max_freq *= (IS_GEN9_BC(i915) ||
+ INTEL_GEN(i915) >= 10 ? GEN9_FREQ_SCALER : 1);
+ seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
+ intel_gpu_freq(rps, max_freq));
+ seq_printf(m, "Max overclocked frequency: %dMHz\n",
+ intel_gpu_freq(rps, rps->max_freq));
+
+ seq_printf(m, "Current freq: %d MHz\n",
+ intel_gpu_freq(rps, rps->cur_freq));
+ seq_printf(m, "Actual freq: %d MHz\n", cagf);
+ seq_printf(m, "Idle freq: %d MHz\n",
+ intel_gpu_freq(rps, rps->idle_freq));
+ seq_printf(m, "Min freq: %d MHz\n",
+ intel_gpu_freq(rps, rps->min_freq));
+ seq_printf(m, "Boost freq: %d MHz\n",
+ intel_gpu_freq(rps, rps->boost_freq));
+ seq_printf(m, "Max freq: %d MHz\n",
+ intel_gpu_freq(rps, rps->max_freq));
+ seq_printf(m,
+ "efficient (RPe) frequency: %d MHz\n",
+ intel_gpu_freq(rps, rps->efficient_freq));
+ } else {
+ seq_puts(m, "no P-state info available\n");
+ }
+
+ seq_printf(m, "Current CD clock frequency: %d kHz\n", i915->cdclk.hw.cdclk);
+ seq_printf(m, "Max CD clock frequency: %d kHz\n", i915->max_cdclk_freq);
+ seq_printf(m, "Max pixel clock frequency: %d kHz\n", i915->max_dotclk_freq);
+
+ intel_runtime_pm_put(uncore->rpm, wakeref);
+
+ return 0;
+}
+DEFINE_GT_DEBUGFS_ATTRIBUTE(frequency);
+
+static int llc_show(struct seq_file *m, void *data)
+{
+ struct intel_gt *gt = m->private;
+ struct drm_i915_private *i915 = gt->i915;
+ const bool edram = INTEL_GEN(i915) > 8;
+ struct intel_rps *rps = &gt->rps;
+ unsigned int max_gpu_freq, min_gpu_freq;
+ intel_wakeref_t wakeref;
+ int gpu_freq, ia_freq;
+
+ seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(i915)));
+ seq_printf(m, "%s: %uMB\n", edram ? "eDRAM" : "eLLC",
+ i915->edram_size_mb);
+
+ min_gpu_freq = rps->min_freq;
+ max_gpu_freq = rps->max_freq;
+ if (IS_GEN9_BC(i915) || INTEL_GEN(i915) >= 10) {
+ /* Convert GT frequency to 50 MHz units */
+ min_gpu_freq /= GEN9_FREQ_SCALER;
+ max_gpu_freq /= GEN9_FREQ_SCALER;
+ }
+
+ seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
+
+ wakeref = intel_runtime_pm_get(gt->uncore->rpm);
+ for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
+ ia_freq = gpu_freq;
+ sandybridge_pcode_read(i915,
+ GEN6_PCODE_READ_MIN_FREQ_TABLE,
+ &ia_freq, NULL);
+ seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
+ intel_gpu_freq(rps,
+ (gpu_freq *
+ (IS_GEN9_BC(i915) ||
+ INTEL_GEN(i915) >= 10 ?
+ GEN9_FREQ_SCALER : 1))),
+ ((ia_freq >> 0) & 0xff) * 100,
+ ((ia_freq >> 8) & 0xff) * 100);
+ }
+ intel_runtime_pm_put(gt->uncore->rpm, wakeref);
+
+ return 0;
+}
+
+static bool llc_eval(const struct intel_gt *gt)
+{
+ return HAS_LLC(gt->i915);
+}
+
+DEFINE_GT_DEBUGFS_ATTRIBUTE(llc);
+
+static const char *rps_power_to_str(unsigned int power)
+{
+ static const char * const strings[] = {
+ [LOW_POWER] = "low power",
+ [BETWEEN] = "mixed",
+ [HIGH_POWER] = "high power",
+ };
+
+ if (power >= ARRAY_SIZE(strings) || !strings[power])
+ return "unknown";
+
+ return strings[power];
+}
+
+static int rps_boost_show(struct seq_file *m, void *data)
+{
+ struct intel_gt *gt = m->private;
+ struct drm_i915_private *i915 = gt->i915;
+ struct intel_rps *rps = &gt->rps;
+
+ seq_printf(m, "RPS enabled? %d\n", rps->enabled);
+ seq_printf(m, "GPU busy? %s\n", yesno(gt->awake));
+ seq_printf(m, "Boosts outstanding? %d\n",
+ atomic_read(&rps->num_waiters));
+ seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
+ seq_printf(m, "Frequency requested %d, actual %d\n",
+ intel_gpu_freq(rps, rps->cur_freq),
+ intel_rps_read_actual_frequency(rps));
+ seq_printf(m, " min hard:%d, soft:%d; max soft:%d, hard:%d\n",
+ intel_gpu_freq(rps, rps->min_freq),
+ intel_gpu_freq(rps, rps->min_freq_softlimit),
+ intel_gpu_freq(rps, rps->max_freq_softlimit),
+ intel_gpu_freq(rps, rps->max_freq));
+ seq_printf(m, " idle:%d, efficient:%d, boost:%d\n",
+ intel_gpu_freq(rps, rps->idle_freq),
+ intel_gpu_freq(rps, rps->efficient_freq),
+ intel_gpu_freq(rps, rps->boost_freq));
+
+ seq_printf(m, "Wait boosts: %d\n", atomic_read(&rps->boosts));
+
+ if (INTEL_GEN(i915) >= 6 && rps->enabled && gt->awake) {
+ struct intel_uncore *uncore = gt->uncore;
+ u32 rpup, rpupei;
+ u32 rpdown, rpdownei;
+
+ intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
+ rpup = intel_uncore_read_fw(uncore, GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
+ rpupei = intel_uncore_read_fw(uncore, GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
+ rpdown = intel_uncore_read_fw(uncore, GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
+ rpdownei = intel_uncore_read_fw(uncore, GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
+ intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
+
+ seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
+ rps_power_to_str(rps->power.mode));
+ seq_printf(m, " Avg. up: %d%% [above threshold? %d%%]\n",
+ rpup && rpupei ? 100 * rpup / rpupei : 0,
+ rps->power.up_threshold);
+ seq_printf(m, " Avg. down: %d%% [below threshold? %d%%]\n",
+ rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
+ rps->power.down_threshold);
+ } else {
+ seq_puts(m, "\nRPS Autotuning inactive\n");
+ }
+
+ return 0;
+}
+
+static bool rps_eval(const struct intel_gt *gt)
+{
+ return HAS_RPS(gt->i915);
+}
+
+DEFINE_GT_DEBUGFS_ATTRIBUTE(rps_boost);
+
+void debugfs_gt_pm_register(struct intel_gt *gt, struct dentry *root)
+{
+ static const struct debugfs_gt_file files[] = {
+ { "drpc", &drpc_fops, NULL },
+ { "frequency", &frequency_fops, NULL },
+ { "forcewake", &fw_domains_fops, NULL },
+ { "llc", &llc_fops, llc_eval },
+ { "rps_boost", &rps_boost_fops, rps_eval },
+ };
+
+ debugfs_gt_register_files(gt, root, files, ARRAY_SIZE(files));
+}
diff --git a/drivers/gpu/drm/i915/gt/debugfs_gt_pm.h b/drivers/gpu/drm/i915/gt/debugfs_gt_pm.h
new file mode 100644
index 000000000000..4cf5f5c9da7d
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/debugfs_gt_pm.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef DEBUGFS_GT_PM_H
+#define DEBUGFS_GT_PM_H
+
+struct intel_gt;
+struct dentry;
+
+void debugfs_gt_pm_register(struct intel_gt *gt, struct dentry *root);
+
+#endif /* DEBUGFS_GT_PM_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
index 55317081d48b..0ba524a414c6 100644
--- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
@@ -28,6 +28,8 @@
#include "i915_drv.h"
#include "i915_trace.h"
+#include "intel_gt_pm.h"
+#include "intel_gt_requests.h"
static void irq_enable(struct intel_engine_cs *engine)
{
@@ -53,15 +55,17 @@ static void irq_disable(struct intel_engine_cs *engine)
static void __intel_breadcrumbs_disarm_irq(struct intel_breadcrumbs *b)
{
+ struct intel_engine_cs *engine =
+ container_of(b, struct intel_engine_cs, breadcrumbs);
+
lockdep_assert_held(&b->irq_lock);
GEM_BUG_ON(!b->irq_enabled);
if (!--b->irq_enabled)
- irq_disable(container_of(b,
- struct intel_engine_cs,
- breadcrumbs));
+ irq_disable(engine);
b->irq_armed = false;
+ intel_gt_pm_put_async(engine->gt);
}
void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
@@ -127,16 +131,23 @@ __dma_fence_signal__notify(struct dma_fence *fence,
}
}
-void intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine)
+static void add_retire(struct intel_breadcrumbs *b, struct intel_timeline *tl)
{
- struct intel_breadcrumbs *b = &engine->breadcrumbs;
+ struct intel_engine_cs *engine =
+ container_of(b, struct intel_engine_cs, breadcrumbs);
+
+ intel_engine_add_retire(engine, tl);
+}
+
+static void signal_irq_work(struct irq_work *work)
+{
+ struct intel_breadcrumbs *b = container_of(work, typeof(*b), irq_work);
const ktime_t timestamp = ktime_get();
struct intel_context *ce, *cn;
struct list_head *pos, *next;
- unsigned long flags;
LIST_HEAD(signal);
- spin_lock_irqsave(&b->irq_lock, flags);
+ spin_lock(&b->irq_lock);
if (b->irq_armed && list_empty(&b->signalers))
__intel_breadcrumbs_disarm_irq(b);
@@ -177,44 +188,41 @@ void intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine)
if (!list_is_first(pos, &ce->signals)) {
/* Advance the list to the first incomplete request */
__list_del_many(&ce->signals, pos);
- if (&ce->signals == pos) /* now empty */
+ if (&ce->signals == pos) { /* now empty */
list_del_init(&ce->signal_link);
+ add_retire(b, ce->timeline);
+ }
}
}
- spin_unlock_irqrestore(&b->irq_lock, flags);
+ spin_unlock(&b->irq_lock);
list_for_each_safe(pos, next, &signal) {
struct i915_request *rq =
list_entry(pos, typeof(*rq), signal_link);
struct list_head cb_list;
- spin_lock_irqsave(&rq->lock, flags);
+ spin_lock(&rq->lock);
list_replace(&rq->fence.cb_list, &cb_list);
__dma_fence_signal__timestamp(&rq->fence, timestamp);
__dma_fence_signal__notify(&rq->fence, &cb_list);
- spin_unlock_irqrestore(&rq->lock, flags);
+ spin_unlock(&rq->lock);
i915_request_put(rq);
}
}
-static void signal_irq_work(struct irq_work *work)
-{
- struct intel_engine_cs *engine =
- container_of(work, typeof(*engine), breadcrumbs.irq_work);
-
- intel_engine_breadcrumbs_irq(engine);
-}
-
-static void __intel_breadcrumbs_arm_irq(struct intel_breadcrumbs *b)
+static bool __intel_breadcrumbs_arm_irq(struct intel_breadcrumbs *b)
{
struct intel_engine_cs *engine =
container_of(b, struct intel_engine_cs, breadcrumbs);
lockdep_assert_held(&b->irq_lock);
if (b->irq_armed)
- return;
+ return true;
+
+ if (!intel_gt_pm_get_if_awake(engine->gt))
+ return false;
/*
* The breadcrumb irq will be disarmed on the interrupt after the
@@ -234,6 +242,8 @@ static void __intel_breadcrumbs_arm_irq(struct intel_breadcrumbs *b)
if (!b->irq_enabled++)
irq_enable(engine);
+
+ return true;
}
void intel_engine_init_breadcrumbs(struct intel_engine_cs *engine)
@@ -271,19 +281,20 @@ bool i915_request_enable_breadcrumb(struct i915_request *rq)
if (test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags)) {
struct intel_breadcrumbs *b = &rq->engine->breadcrumbs;
- struct intel_context *ce = rq->hw_context;
+ struct intel_context *ce = rq->context;
struct list_head *pos;
spin_lock(&b->irq_lock);
GEM_BUG_ON(test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags));
- __intel_breadcrumbs_arm_irq(b);
+ if (!__intel_breadcrumbs_arm_irq(b))
+ goto unlock;
/*
* We keep the seqno in retirement order, so we can break
- * inside intel_engine_breadcrumbs_irq as soon as we've passed
- * the last completed request (or seen a request that hasn't
- * event started). We could iterate the timeline->requests list,
+ * inside intel_engine_signal_breadcrumbs as soon as we've
+ * passed the last completed request (or seen a request that
+ * hasn't even started). We could walk the timeline->requests,
* but keeping a separate signalers_list has the advantage of
* hopefully being much smaller than the full list and so
* provides faster iteration and detection when there are no
@@ -306,6 +317,7 @@ bool i915_request_enable_breadcrumb(struct i915_request *rq)
GEM_BUG_ON(!check_signal_order(ce, rq));
set_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
+unlock:
spin_unlock(&b->irq_lock);
}
@@ -326,7 +338,7 @@ void i915_request_cancel_breadcrumb(struct i915_request *rq)
*/
spin_lock(&b->irq_lock);
if (test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags)) {
- struct intel_context *ce = rq->hw_context;
+ struct intel_context *ce = rq->context;
list_del(&rq->signal_link);
if (list_empty(&ce->signals))
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index ef7bc41ffffa..fbaa9df6f436 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -31,8 +31,7 @@ void intel_context_free(struct intel_context *ce)
}
struct intel_context *
-intel_context_create(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine)
+intel_context_create(struct intel_engine_cs *engine)
{
struct intel_context *ce;
@@ -40,7 +39,7 @@ intel_context_create(struct i915_gem_context *ctx,
if (!ce)
return ERR_PTR(-ENOMEM);
- intel_context_init(ce, ctx, engine);
+ intel_context_init(ce, engine);
return ce;
}
@@ -68,11 +67,8 @@ int __intel_context_do_pin(struct intel_context *ce)
if (err)
goto err;
- GEM_TRACE("%s context:%llx pin ring:{head:%04x, tail:%04x}\n",
- ce->engine->name, ce->timeline->fence_context,
- ce->ring->head, ce->ring->tail);
-
- i915_gem_context_get(ce->gem_context); /* for ctx->ppgtt */
+ CE_TRACE(ce, "pin ring:{head:%04x, tail:%04x}\n",
+ ce->ring->head, ce->ring->tail);
smp_mb__before_atomic(); /* flush pin before it is visible */
}
@@ -98,12 +94,10 @@ void intel_context_unpin(struct intel_context *ce)
mutex_lock_nested(&ce->pin_mutex, SINGLE_DEPTH_NESTING);
if (likely(atomic_dec_and_test(&ce->pin_count))) {
- GEM_TRACE("%s context:%llx retire\n",
- ce->engine->name, ce->timeline->fence_context);
+ CE_TRACE(ce, "retire\n");
ce->ops->unpin(ce);
- i915_gem_context_put(ce->gem_context);
intel_context_active_release(ce);
}
@@ -113,13 +107,10 @@ void intel_context_unpin(struct intel_context *ce)
static int __context_pin_state(struct i915_vma *vma)
{
- u64 flags;
+ unsigned int bias = i915_ggtt_pin_bias(vma) | PIN_OFFSET_BIAS;
int err;
- flags = i915_ggtt_pin_bias(vma) | PIN_OFFSET_BIAS;
- flags |= PIN_HIGH | PIN_GLOBAL;
-
- err = i915_vma_pin(vma, 0, 0, flags);
+ err = i915_ggtt_pin(vma, 0, bias | PIN_HIGH);
if (err)
return err;
@@ -144,9 +135,9 @@ static void __intel_context_retire(struct i915_active *active)
{
struct intel_context *ce = container_of(active, typeof(*ce), active);
- GEM_TRACE("%s context:%llx retire\n",
- ce->engine->name, ce->timeline->fence_context);
+ CE_TRACE(ce, "retire\n");
+ set_bit(CONTEXT_VALID_BIT, &ce->flags);
if (ce->state)
__context_unpin_state(ce->state);
@@ -198,7 +189,7 @@ int intel_context_active_acquire(struct intel_context *ce)
return err;
/* Preallocate tracking nodes */
- if (!i915_gem_context_is_kernel(ce->gem_context)) {
+ if (!intel_context_is_barrier(ce)) {
err = i915_active_acquire_preallocate_barrier(&ce->active,
ce->engine);
if (err) {
@@ -219,30 +210,19 @@ void intel_context_active_release(struct intel_context *ce)
void
intel_context_init(struct intel_context *ce,
- struct i915_gem_context *ctx,
struct intel_engine_cs *engine)
{
- struct i915_address_space *vm;
-
GEM_BUG_ON(!engine->cops);
+ GEM_BUG_ON(!engine->gt->vm);
kref_init(&ce->ref);
- ce->gem_context = ctx;
- rcu_read_lock();
- vm = rcu_dereference(ctx->vm);
- if (vm)
- ce->vm = i915_vm_get(vm);
- else
- ce->vm = i915_vm_get(&engine->gt->ggtt->vm);
- rcu_read_unlock();
- if (ctx->timeline)
- ce->timeline = intel_timeline_get(ctx->timeline);
-
ce->engine = engine;
ce->ops = engine->cops;
ce->sseu = engine->sseu;
- ce->ring = __intel_context_ring_size(SZ_16K);
+ ce->ring = __intel_context_ring_size(SZ_4K);
+
+ ce->vm = i915_vm_get(engine->gt->vm);
INIT_LIST_HEAD(&ce->signal_link);
INIT_LIST_HEAD(&ce->signals);
@@ -307,30 +287,11 @@ int intel_context_prepare_remote_request(struct intel_context *ce,
int err;
/* Only suitable for use in remotely modifying this context */
- GEM_BUG_ON(rq->hw_context == ce);
+ GEM_BUG_ON(rq->context == ce);
if (rcu_access_pointer(rq->timeline) != tl) { /* timeline sharing! */
- /*
- * Ideally, we just want to insert our foreign fence as
- * a barrier into the remove context, such that this operation
- * occurs after all current operations in that context, and
- * all future operations must occur after this.
- *
- * Currently, the timeline->last_request tracking is guarded
- * by its mutex and so we must obtain that to atomically
- * insert our barrier. However, since we already hold our
- * timeline->mutex, we must be careful against potential
- * inversion if we are the kernel_context as the remote context
- * will itself poke at the kernel_context when it needs to
- * unpin. Ergo, if already locked, we drop both locks and
- * try again (through the magic of userspace repeating EAGAIN).
- */
- if (!mutex_trylock(&tl->mutex))
- return -EAGAIN;
-
/* Queue this switch after current activity by this context. */
err = i915_active_fence_set(&tl->last_request, rq);
- mutex_unlock(&tl->mutex);
if (err)
return err;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h
index 68b3d317d959..1d4a1b1357cf 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.h
+++ b/drivers/gpu/drm/i915/gt/intel_context.h
@@ -7,7 +7,9 @@
#ifndef __INTEL_CONTEXT_H__
#define __INTEL_CONTEXT_H__
+#include <linux/bitops.h>
#include <linux/lockdep.h>
+#include <linux/types.h>
#include "i915_active.h"
#include "intel_context_types.h"
@@ -15,14 +17,19 @@
#include "intel_ring_types.h"
#include "intel_timeline_types.h"
+#define CE_TRACE(ce, fmt, ...) do { \
+ const struct intel_context *ce__ = (ce); \
+ ENGINE_TRACE(ce__->engine, "context:%llx" fmt, \
+ ce__->timeline->fence_context, \
+ ##__VA_ARGS__); \
+} while (0)
+
void intel_context_init(struct intel_context *ce,
- struct i915_gem_context *ctx,
struct intel_engine_cs *engine);
void intel_context_fini(struct intel_context *ce);
struct intel_context *
-intel_context_create(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine);
+intel_context_create(struct intel_engine_cs *engine);
void intel_context_free(struct intel_context *ce);
@@ -153,4 +160,64 @@ static inline struct intel_ring *__intel_context_ring_size(u64 sz)
return u64_to_ptr(struct intel_ring, sz);
}
+static inline bool intel_context_is_barrier(const struct intel_context *ce)
+{
+ return test_bit(CONTEXT_BARRIER_BIT, &ce->flags);
+}
+
+static inline bool intel_context_use_semaphores(const struct intel_context *ce)
+{
+ return test_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
+}
+
+static inline void intel_context_set_use_semaphores(struct intel_context *ce)
+{
+ set_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
+}
+
+static inline void intel_context_clear_use_semaphores(struct intel_context *ce)
+{
+ clear_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
+}
+
+static inline bool intel_context_is_banned(const struct intel_context *ce)
+{
+ return test_bit(CONTEXT_BANNED, &ce->flags);
+}
+
+static inline bool intel_context_set_banned(struct intel_context *ce)
+{
+ return test_and_set_bit(CONTEXT_BANNED, &ce->flags);
+}
+
+static inline bool
+intel_context_force_single_submission(const struct intel_context *ce)
+{
+ return test_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ce->flags);
+}
+
+static inline void
+intel_context_set_single_submission(struct intel_context *ce)
+{
+ __set_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ce->flags);
+}
+
+static inline bool
+intel_context_nopreempt(const struct intel_context *ce)
+{
+ return test_bit(CONTEXT_NOPREEMPT, &ce->flags);
+}
+
+static inline void
+intel_context_set_nopreempt(struct intel_context *ce)
+{
+ set_bit(CONTEXT_NOPREEMPT, &ce->flags);
+}
+
+static inline void
+intel_context_clear_nopreempt(struct intel_context *ce)
+{
+ clear_bit(CONTEXT_NOPREEMPT, &ce->flags);
+}
+
#endif /* __INTEL_CONTEXT_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h
index 6959b05ae5f8..9527a659546c 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
@@ -44,7 +44,7 @@ struct intel_context {
#define intel_context_inflight_count(ce) ptr_unmask_bits((ce)->inflight, 2)
struct i915_address_space *vm;
- struct i915_gem_context *gem_context;
+ struct i915_gem_context __rcu *gem_context;
struct list_head signal_link;
struct list_head signals;
@@ -54,7 +54,13 @@ struct intel_context {
struct intel_timeline *timeline;
unsigned long flags;
-#define CONTEXT_ALLOC_BIT 0
+#define CONTEXT_BARRIER_BIT 0
+#define CONTEXT_ALLOC_BIT 1
+#define CONTEXT_VALID_BIT 2
+#define CONTEXT_USE_SEMAPHORES 3
+#define CONTEXT_BANNED 4
+#define CONTEXT_FORCE_SINGLE_SUBMISSION 5
+#define CONTEXT_NOPREEMPT 6
u32 *lrc_reg_state;
u64 lrc_desc;
diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
index 01765a7ec18f..79ecac5ac0ab 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine.h
@@ -29,6 +29,13 @@ struct intel_gt;
#define CACHELINE_BYTES 64
#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(u32))
+#define ENGINE_TRACE(e, fmt, ...) do { \
+ const struct intel_engine_cs *e__ __maybe_unused = (e); \
+ GEM_TRACE("%s %s: " fmt, \
+ dev_name(e__->i915->drm.dev), e__->name, \
+ ##__VA_ARGS__); \
+} while (0)
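
Note: ENGINE_TRACE() compiles down to GEM_TRACE(), so it is a no-op unless
GEM tracing is built in; a typical call site, assuming engine and stalled
are in scope:

	ENGINE_TRACE(engine, "reset (stalled? %s)\n", yesno(stalled));
	/* logs e.g. "0000:00:02.0 rcs0: reset (stalled? yes)" */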
+
/*
* The register defines to be used with the following macros need to accept a
* base param, e.g:
@@ -177,15 +184,15 @@ void intel_engine_stop(struct intel_engine_cs *engine);
void intel_engine_cleanup(struct intel_engine_cs *engine);
int intel_engines_init_mmio(struct intel_gt *gt);
-int intel_engines_setup(struct intel_gt *gt);
int intel_engines_init(struct intel_gt *gt);
-void intel_engines_cleanup(struct intel_gt *gt);
+
+void intel_engines_release(struct intel_gt *gt);
+void intel_engines_free(struct intel_gt *gt);
int intel_engine_init_common(struct intel_engine_cs *engine);
void intel_engine_cleanup_common(struct intel_engine_cs *engine);
int intel_ring_submission_setup(struct intel_engine_cs *engine);
-int intel_ring_submission_init(struct intel_engine_cs *engine);
int intel_engine_stop_cs(struct intel_engine_cs *engine);
void intel_engine_cancel_stop_cs(struct intel_engine_cs *engine);
@@ -206,13 +213,11 @@ void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine);
static inline void
-intel_engine_queue_breadcrumbs(struct intel_engine_cs *engine)
+intel_engine_signal_breadcrumbs(struct intel_engine_cs *engine)
{
irq_work_queue(&engine->breadcrumbs.irq_work);
}
-void intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine);
-
void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
@@ -270,14 +275,14 @@ gen8_emit_ggtt_write(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
static inline void __intel_engine_reset(struct intel_engine_cs *engine,
bool stalled)
{
- if (engine->reset.reset)
- engine->reset.reset(engine, stalled);
+ if (engine->reset.rewind)
+ engine->reset.rewind(engine, stalled);
engine->serial++; /* contexts lost */
}
bool intel_engines_are_idle(struct intel_gt *gt);
bool intel_engine_is_idle(struct intel_engine_cs *engine);
-void intel_engine_flush_submission(struct intel_engine_cs *engine);
+bool intel_engine_flush_submission(struct intel_engine_cs *engine);
void intel_engines_reset_default_submission(struct intel_gt *gt);
@@ -296,7 +301,7 @@ ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine);
struct i915_request *
intel_engine_find_active_request(struct intel_engine_cs *engine);
-u32 intel_engine_context_size(struct drm_i915_private *i915, u8 class);
+u32 intel_engine_context_size(struct intel_gt *gt, u8 class);
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 813bd3a610d2..ddf9543b1261 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -141,7 +141,7 @@ static const struct engine_info intel_engines[] = {
/**
* intel_engine_context_size() - return the size of the context for an engine
- * @dev_priv: i915 device private
+ * @gt: the gt
* @class: engine class
*
* Each engine class may require a different amount of space for a context
@@ -153,17 +153,18 @@ static const struct engine_info intel_engines[] = {
* in LRC mode, but does not include the "shared data page" used with
* GuC submission. The caller should account for this if using the GuC.
*/
-u32 intel_engine_context_size(struct drm_i915_private *dev_priv, u8 class)
+u32 intel_engine_context_size(struct intel_gt *gt, u8 class)
{
+ struct intel_uncore *uncore = gt->uncore;
u32 cxt_size;
BUILD_BUG_ON(I915_GTT_PAGE_SIZE != PAGE_SIZE);
switch (class) {
case RENDER_CLASS:
- switch (INTEL_GEN(dev_priv)) {
+ switch (INTEL_GEN(gt->i915)) {
default:
- MISSING_CASE(INTEL_GEN(dev_priv));
+ MISSING_CASE(INTEL_GEN(gt->i915));
return DEFAULT_LR_CONTEXT_RENDER_SIZE;
case 12:
case 11:
@@ -175,14 +176,14 @@ u32 intel_engine_context_size(struct drm_i915_private *dev_priv, u8 class)
case 8:
return GEN8_LR_CONTEXT_RENDER_SIZE;
case 7:
- if (IS_HASWELL(dev_priv))
+ if (IS_HASWELL(gt->i915))
return HSW_CXT_TOTAL_SIZE;
- cxt_size = I915_READ(GEN7_CXT_SIZE);
+ cxt_size = intel_uncore_read(uncore, GEN7_CXT_SIZE);
return round_up(GEN7_CXT_TOTAL_SIZE(cxt_size) * 64,
PAGE_SIZE);
case 6:
- cxt_size = I915_READ(CXT_SIZE);
+ cxt_size = intel_uncore_read(uncore, CXT_SIZE);
return round_up(GEN6_CXT_TOTAL_SIZE(cxt_size) * 64,
PAGE_SIZE);
case 5:
@@ -197,9 +198,9 @@ u32 intel_engine_context_size(struct drm_i915_private *dev_priv, u8 class)
* minimum allocation anyway so it should all come
* out in the wash.
*/
- cxt_size = I915_READ(CXT_SIZE) + 1;
+ cxt_size = intel_uncore_read(uncore, CXT_SIZE) + 1;
DRM_DEBUG_DRIVER("gen%d CXT_SIZE = %d bytes [0x%08x]\n",
- INTEL_GEN(dev_priv),
+ INTEL_GEN(gt->i915),
cxt_size * 64,
cxt_size - 1);
return round_up(cxt_size * 64, PAGE_SIZE);
@@ -216,7 +217,7 @@ u32 intel_engine_context_size(struct drm_i915_private *dev_priv, u8 class)
case VIDEO_DECODE_CLASS:
case VIDEO_ENHANCEMENT_CLASS:
case COPY_ENGINE_CLASS:
- if (INTEL_GEN(dev_priv) < 8)
+ if (INTEL_GEN(gt->i915) < 8)
return 0;
return GEN8_LR_CONTEXT_OTHER_SIZE;
}
@@ -318,14 +319,7 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
engine->props.timeslice_duration_ms =
CONFIG_DRM_I915_TIMESLICE_DURATION;
- /*
- * To be overridden by the backend on setup. However to facilitate
- * cleanup on error during setup, we always provide the destroy vfunc.
- */
- engine->destroy = (typeof(engine->destroy))kfree;
-
- engine->context_size = intel_engine_context_size(gt->i915,
- engine->class);
+ engine->context_size = intel_engine_context_size(gt, engine->class);
if (WARN_ON(engine->context_size > BIT(20)))
engine->context_size = 0;
if (engine->context_size)
@@ -334,6 +328,7 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
/* Nothing to do here, execute in order of dependencies */
engine->schedule = NULL;
+ ewma__engine_latency_init(&engine->latency);
seqlock_init(&engine->stats.lock);
ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier);
@@ -344,7 +339,6 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
gt->engine_class[info->class][info->instance] = engine;
gt->engine[id] = engine;
- intel_engine_add_user(engine);
gt->i915->engine[id] = engine;
return 0;
@@ -390,21 +384,39 @@ static void intel_setup_engine_capabilities(struct intel_gt *gt)
}
/**
- * intel_engines_cleanup() - free the resources allocated for Command Streamers
+ * intel_engines_release() - free the resources allocated for Command Streamers
* @gt: pointer to struct intel_gt
*/
-void intel_engines_cleanup(struct intel_gt *gt)
+void intel_engines_release(struct intel_gt *gt)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
+ /* Decouple the backend, but keep the layout for late GPU resets */
for_each_engine(engine, gt, id) {
- engine->destroy(engine);
- gt->engine[id] = NULL;
+ if (!engine->release)
+ continue;
+
+ engine->release(engine);
+ engine->release = NULL;
+
+ memset(&engine->reset, 0, sizeof(engine->reset));
+
gt->i915->engine[id] = NULL;
}
}
+void intel_engines_free(struct intel_gt *gt)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ for_each_engine(engine, gt, id) {
+ kfree(engine);
+ gt->engine[id] = NULL;
+ }
+}
+
/**
* intel_engines_init_mmio() - allocate and prepare the Engine Command Streamers
* @gt: pointer to struct intel_gt
@@ -455,38 +467,7 @@ int intel_engines_init_mmio(struct intel_gt *gt)
return 0;
cleanup:
- intel_engines_cleanup(gt);
- return err;
-}
-
-/**
- * intel_engines_init() - init the Engine Command Streamers
- * @gt: pointer to struct intel_gt
- *
- * Return: non-zero if the initialization failed.
- */
-int intel_engines_init(struct intel_gt *gt)
-{
- int (*init)(struct intel_engine_cs *engine);
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- int err;
-
- if (HAS_EXECLISTS(gt->i915))
- init = intel_execlists_submission_init;
- else
- init = intel_ring_submission_init;
-
- for_each_engine(engine, gt, id) {
- err = init(engine);
- if (err)
- goto cleanup;
- }
-
- return 0;
-
-cleanup:
- intel_engines_cleanup(gt);
+ intel_engines_free(gt);
return err;
}
@@ -601,7 +582,7 @@ err:
return ret;
}
-static int intel_engine_setup_common(struct intel_engine_cs *engine)
+static int engine_setup_common(struct intel_engine_cs *engine)
{
int err;
@@ -631,49 +612,6 @@ static int intel_engine_setup_common(struct intel_engine_cs *engine)
return 0;
}
-/**
- * intel_engines_setup- setup engine state not requiring hw access
- * @gt: pointer to struct intel_gt
- *
- * Initializes engine structure members shared between legacy and execlists
- * submission modes which do not require hardware access.
- *
- * Typically done early in the submission mode specific engine setup stage.
- */
-int intel_engines_setup(struct intel_gt *gt)
-{
- int (*setup)(struct intel_engine_cs *engine);
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- int err;
-
- if (HAS_EXECLISTS(gt->i915))
- setup = intel_execlists_submission_setup;
- else
- setup = intel_ring_submission_setup;
-
- for_each_engine(engine, gt, id) {
- err = intel_engine_setup_common(engine);
- if (err)
- goto cleanup;
-
- err = setup(engine);
- if (err)
- goto cleanup;
-
- /* We expect the backend to take control over its state */
- GEM_BUG_ON(engine->destroy == (typeof(engine->destroy))kfree);
-
- GEM_BUG_ON(!engine->cops);
- }
-
- return 0;
-
-cleanup:
- intel_engines_cleanup(gt);
- return err;
-}
-
struct measure_breadcrumb {
struct i915_request rq;
struct intel_timeline timeline;
@@ -757,13 +695,13 @@ create_kernel_context(struct intel_engine_cs *engine)
struct intel_context *ce;
int err;
- ce = intel_context_create(engine->i915->kernel_context, engine);
+ ce = intel_context_create(engine);
if (IS_ERR(ce))
return ce;
- ce->ring = __intel_context_ring_size(SZ_4K);
+ __set_bit(CONTEXT_BARRIER_BIT, &ce->flags);
- err = intel_context_pin(ce);
+ err = intel_context_pin(ce); /* perma-pin so it is always available */
if (err) {
intel_context_put(ce);
return ERR_PTR(err);
@@ -791,13 +729,19 @@ create_kernel_context(struct intel_engine_cs *engine)
*
* Returns zero on success or an error code on failure.
*/
-int intel_engine_init_common(struct intel_engine_cs *engine)
+static int engine_init_common(struct intel_engine_cs *engine)
{
struct intel_context *ce;
int ret;
engine->set_default_submission(engine);
+ ret = measure_breadcrumb_dw(engine);
+ if (ret < 0)
+ return ret;
+
+ engine->emit_fini_breadcrumb_dw = ret;
+
/*
* We may need to do things with the shrinker which
* require us to immediately switch back to the default
@@ -812,18 +756,38 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
engine->kernel_context = ce;
- ret = measure_breadcrumb_dw(engine);
- if (ret < 0)
- goto err_unpin;
+ return 0;
+}
- engine->emit_fini_breadcrumb_dw = ret;
+int intel_engines_init(struct intel_gt *gt)
+{
+ int (*setup)(struct intel_engine_cs *engine);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err;
- return 0;
+ if (HAS_EXECLISTS(gt->i915))
+ setup = intel_execlists_submission_setup;
+ else
+ setup = intel_ring_submission_setup;
-err_unpin:
- intel_context_unpin(ce);
- intel_context_put(ce);
- return ret;
+ for_each_engine(engine, gt, id) {
+ err = engine_setup_common(engine);
+ if (err)
+ return err;
+
+ err = setup(engine);
+ if (err)
+ return err;
+
+ err = engine_init_common(engine);
+ if (err)
+ return err;
+
+ intel_engine_add_user(engine);
+ }
+
+ return 0;
}
/**
@@ -836,6 +800,7 @@ err_unpin:
void intel_engine_cleanup_common(struct intel_engine_cs *engine)
{
GEM_BUG_ON(!list_empty(&engine->active.requests));
+ tasklet_kill(&engine->execlists.tasklet); /* flush the callback */
cleanup_status_page(engine);
@@ -911,7 +876,7 @@ int intel_engine_stop_cs(struct intel_engine_cs *engine)
if (INTEL_GEN(engine->i915) < 3)
return -ENODEV;
- GEM_TRACE("%s\n", engine->name);
+ ENGINE_TRACE(engine, "\n");
intel_uncore_write_fw(uncore, mode, _MASKED_BIT_ENABLE(STOP_RING));
@@ -920,7 +885,7 @@ int intel_engine_stop_cs(struct intel_engine_cs *engine)
mode, MODE_IDLE, MODE_IDLE,
1000, stop_timeout(engine),
NULL)) {
- GEM_TRACE("%s: timed out on STOP_RING -> IDLE\n", engine->name);
+ ENGINE_TRACE(engine, "timed out on STOP_RING -> IDLE\n");
err = -ETIMEDOUT;
}
@@ -932,7 +897,7 @@ int intel_engine_stop_cs(struct intel_engine_cs *engine)
void intel_engine_cancel_stop_cs(struct intel_engine_cs *engine)
{
- GEM_TRACE("%s\n", engine->name);
+ ENGINE_TRACE(engine, "\n");
ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
}
@@ -1082,9 +1047,10 @@ static bool ring_is_idle(struct intel_engine_cs *engine)
return idle;
}
-void intel_engine_flush_submission(struct intel_engine_cs *engine)
+bool intel_engine_flush_submission(struct intel_engine_cs *engine)
{
struct tasklet_struct *t = &engine->execlists.tasklet;
+ bool active = tasklet_is_locked(t);
if (__tasklet_is_scheduled(t)) {
local_bh_disable();
@@ -1095,10 +1061,13 @@ void intel_engine_flush_submission(struct intel_engine_cs *engine)
tasklet_unlock(t);
}
local_bh_enable();
+ active = true;
}
/* Otherwise flush the tasklet if it was running on another cpu */
tasklet_unlock_wait(t);
+
+ return active;
}
/**
@@ -1478,6 +1447,10 @@ void intel_engine_dump(struct intel_engine_cs *engine,
drm_printf(m, "*** WEDGED ***\n");
drm_printf(m, "\tAwake? %d\n", atomic_read(&engine->wakeref.count));
+ drm_printf(m, "\tBarriers?: %s\n",
+ yesno(!llist_empty(&engine->barrier_tasks)));
+ drm_printf(m, "\tLatency: %luus\n",
+ ewma__engine_latency_read(&engine->latency));
rcu_read_lock();
rq = READ_ONCE(engine->heartbeat.systole);
@@ -1517,9 +1490,9 @@ void intel_engine_dump(struct intel_engine_cs *engine,
print_request_ring(m, rq);
- if (rq->hw_context->lrc_reg_state) {
+ if (rq->context->lrc_reg_state) {
drm_printf(m, "Logical Ring Context:\n");
- hexdump(m, rq->hw_context->lrc_reg_state, PAGE_SIZE);
+ hexdump(m, rq->context->lrc_reg_state, PAGE_SIZE);
}
}
spin_unlock_irqrestore(&engine->active.lock, flags);
@@ -1580,7 +1553,7 @@ int intel_enable_engine_stats(struct intel_engine_cs *engine)
for (port = execlists->pending; (rq = *port); port++) {
/* Exclude any contexts already counted in active */
- if (!intel_context_inflight_count(rq->hw_context))
+ if (!intel_context_inflight_count(rq->context))
engine->stats.active++;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
index 06aa14c7aa8c..742628e40201 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
@@ -63,15 +63,15 @@ static void heartbeat(struct work_struct *wrk)
struct intel_context *ce = engine->kernel_context;
struct i915_request *rq;
- if (!intel_engine_pm_get_if_awake(engine))
- return;
-
rq = engine->heartbeat.systole;
if (rq && i915_request_completed(rq)) {
i915_request_put(rq);
engine->heartbeat.systole = NULL;
}
+ if (!intel_engine_pm_get_if_awake(engine))
+ return;
+
if (intel_gt_is_wedged(engine->gt))
goto out;
@@ -215,18 +215,26 @@ out_rpm:
int intel_engine_flush_barriers(struct intel_engine_cs *engine)
{
struct i915_request *rq;
+ int err = 0;
if (llist_empty(&engine->barrier_tasks))
return 0;
+ if (!intel_engine_pm_get_if_awake(engine))
+ return 0;
+
rq = i915_request_create(engine->kernel_context);
- if (IS_ERR(rq))
- return PTR_ERR(rq);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_rpm;
+ }
idle_pulse(engine, rq);
i915_request_add(rq);
- return 0;
+out_rpm:
+ intel_engine_pm_put(engine);
+ return err;
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
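
With the reordering above, intel_engine_flush_barriers() treats a parked engine as success since its barriers will be flushed on the next unpark. A hypothetical caller sketch (error handling illustrative):

	err = intel_engine_flush_barriers(engine);
	if (err) /* request allocation failed while the engine was awake */
		return err;
	/* err == 0 also covers the parked case: barriers are deferred */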
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
index c1dd0cd3efc7..010620b78202 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
@@ -6,6 +6,7 @@
#include "i915_drv.h"
+#include "intel_context.h"
#include "intel_engine.h"
#include "intel_engine_heartbeat.h"
#include "intel_engine_pm.h"
@@ -21,7 +22,7 @@ static int __engine_unpark(struct intel_wakeref *wf)
container_of(wf, typeof(*engine), wakeref);
void *map;
- GEM_TRACE("%s\n", engine->name);
+ ENGINE_TRACE(engine, "\n");
intel_gt_pm_get(engine->gt);
@@ -73,6 +74,15 @@ static inline void __timeline_mark_unlock(struct intel_context *ce,
#endif /* !IS_ENABLED(CONFIG_LOCKDEP) */
+static void duration(struct dma_fence *fence, struct dma_fence_cb *cb)
+{
+ struct i915_request *rq = to_request(fence);
+
+ ewma__engine_latency_add(&rq->engine->latency,
+ ktime_us_delta(rq->fence.timestamp,
+ rq->duration.emitted));
+}
+
static void
__queue_and_release_pm(struct i915_request *rq,
struct intel_timeline *tl,
@@ -80,7 +90,7 @@ __queue_and_release_pm(struct i915_request *rq,
{
struct intel_gt_timelines *timelines = &engine->gt->timelines;
- GEM_TRACE("%s\n", engine->name);
+ ENGINE_TRACE(engine, "\n");
/*
* We have to serialise all potential retirement paths with our
@@ -113,6 +123,8 @@ static bool switch_to_kernel_context(struct intel_engine_cs *engine)
unsigned long flags;
bool result = true;
+ GEM_BUG_ON(!intel_context_is_barrier(ce));
+
/* Already inside the kernel context, safe to power down. */
if (engine->wakeref_serial == engine->serial)
return true;
@@ -163,7 +175,18 @@ static bool switch_to_kernel_context(struct intel_engine_cs *engine)
/* Install ourselves as a preemption barrier */
rq->sched.attr.priority = I915_PRIORITY_BARRIER;
- __i915_request_commit(rq);
+ if (likely(!__i915_request_commit(rq))) { /* engine should be idle! */
+ /*
+ * Use an interrupt for precise measurement of duration,
+ * otherwise we rely on someone else retiring all the requests
+ * which may delay the signaling (i.e. we will likely wait
+ * for the background request retirement worker, which runs
+ * every second or two).
+ */
+ BUILD_BUG_ON(sizeof(rq->duration) > sizeof(rq->submitq));
+ dma_fence_add_callback(&rq->fence, &rq->duration.cb, duration);
+ rq->duration.emitted = ktime_get();
+ }
/* Expose ourselves to the world */
__queue_and_release_pm(rq, ce->timeline, engine);
@@ -183,7 +206,7 @@ static void call_idle_barriers(struct intel_engine_cs *engine)
container_of((struct list_head *)node,
typeof(*cb), node);
- cb->func(NULL, cb);
+ cb->func(ERR_PTR(-EAGAIN), cb);
}
}
@@ -204,7 +227,7 @@ static int __engine_park(struct intel_wakeref *wf)
if (!switch_to_kernel_context(engine))
return -EBUSY;
- GEM_TRACE("%s\n", engine->name);
+ ENGINE_TRACE(engine, "\n");
call_idle_barriers(engine); /* cleanup after wedging */
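
The BUILD_BUG_ON(sizeof(rq->duration) > sizeof(rq->submitq)) above works because rq->duration is assumed to alias per-submission storage in struct i915_request that is no longer needed once the request has been committed. A sketch of the assumed union (exact member types illustrative):

	union {
		wait_queue_entry_t submitq;	/* only used while queued */
		struct {
			struct dma_fence_cb cb;
			ktime_t emitted;
		} duration;			/* reusable after commit */
	};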
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.h b/drivers/gpu/drm/i915/gt/intel_engine_pm.h
index 24e20344dc22..e52c2b0cb245 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.h
@@ -7,6 +7,7 @@
#ifndef INTEL_ENGINE_PM_H
#define INTEL_ENGINE_PM_H
+#include "i915_request.h"
#include "intel_engine_types.h"
#include "intel_wakeref.h"
@@ -41,6 +42,26 @@ static inline void intel_engine_pm_flush(struct intel_engine_cs *engine)
intel_wakeref_unlock_wait(&engine->wakeref);
}
+static inline struct i915_request *
+intel_engine_create_kernel_request(struct intel_engine_cs *engine)
+{
+ struct i915_request *rq;
+
+ /*
+ * The engine->kernel_context is special as it is used inside
+ * the engine-pm barrier (see __engine_park()), circumventing
+ * the usual mutexes and relying on the engine-pm barrier
+ * instead. So whenever we use the engine->kernel_context
+ * outside of the barrier, we must manually handle the
+ * engine wakeref to serialise with the use inside.
+ */
+ intel_engine_pm_get(engine);
+ rq = i915_request_create(engine->kernel_context);
+ intel_engine_pm_put(engine);
+
+ return rq;
+}
+
void intel_engine_init__pm(struct intel_engine_cs *engine);
#endif /* INTEL_ENGINE_PM_H */
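
A usage sketch for the new helper, e.g. from a selftest-style caller (timeout value illustrative):

	struct i915_request *rq;
	long timeout;

	rq = intel_engine_create_kernel_request(engine);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	i915_request_get(rq);
	i915_request_add(rq);

	timeout = i915_request_wait(rq, 0, HZ);
	i915_request_put(rq);

	return timeout < 0 ? timeout : 0;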
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index 17f1f1441efc..00287515e7af 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -7,6 +7,7 @@
#ifndef __INTEL_ENGINE_TYPES__
#define __INTEL_ENGINE_TYPES__
+#include <linux/average.h>
#include <linux/hashtable.h>
#include <linux/irq_work.h>
#include <linux/kref.h>
@@ -119,6 +120,9 @@ enum intel_engine_id {
#define INVALID_ENGINE ((enum intel_engine_id)-1)
};
+/* A simple estimator for the round-trip latency of an engine */
+DECLARE_EWMA(_engine_latency, 6, 4)
+
struct st_preempt_hang {
struct completion completion;
unsigned int count;
@@ -316,6 +320,13 @@ struct intel_engine_cs {
struct intel_timeline *timeline;
} legacy;
+ /*
+ * We track the average duration of the idle pulse on parking the
+ * engine to keep an estimate of how fast the engine is
+ * under ideal conditions.
+ */
+ struct ewma__engine_latency latency;
+
/* Rather than have every client wait upon all user interrupts,
* with the herd waking after every interrupt and each doing the
* heavyweight seqno dance, we delegate the task (of being the
@@ -389,7 +400,10 @@ struct intel_engine_cs {
struct {
void (*prepare)(struct intel_engine_cs *engine);
- void (*reset)(struct intel_engine_cs *engine, bool stalled);
+
+ void (*rewind)(struct intel_engine_cs *engine, bool stalled);
+ void (*cancel)(struct intel_engine_cs *engine);
+
void (*finish)(struct intel_engine_cs *engine);
} reset;
@@ -439,15 +453,7 @@ struct intel_engine_cs {
void (*schedule)(struct i915_request *request,
const struct i915_sched_attr *attr);
- /*
- * Cancel all requests on the hardware, or queued for execution.
- * This should only cancel the ready requests that have been
- * submitted to the engine (via the engine->submit_request callback).
- * This is called when marking the device as wedged.
- */
- void (*cancel_requests)(struct intel_engine_cs *engine);
-
- void (*destroy)(struct intel_engine_cs *engine);
+ void (*release)(struct intel_engine_cs *engine);
struct intel_engine_execlists execlists;
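
For reference, DECLARE_EWMA(_engine_latency, 6, 4) from <linux/average.h> generates ewma__engine_latency_{init,add,read}() around, roughly, this fixed-point filter (a sketch of the generated arithmetic):

	/* state kept in 2^6 fixed point; weight_rcp == 4 */
	ewma__engine_latency_init(&avg);	/* internal = 0 */
	ewma__engine_latency_add(&avg, us);	/* internal = internal * 3/4
						 *          + (us << 6) / 4 */
	val = ewma__engine_latency_read(&avg);	/* internal >> 6 */

so each new idle pulse contributes a quarter of the running latency estimate.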
diff --git a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
index 4294f146f13c..51b8718513bc 100644
--- a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
+++ b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
@@ -7,6 +7,8 @@
#ifndef _INTEL_GPU_COMMANDS_H_
#define _INTEL_GPU_COMMANDS_H_
+#include <linux/bitops.h>
+
/*
* Target address alignments required for GPU access e.g.
* MI_STORE_DWORD_IMM.
@@ -319,4 +321,31 @@
#define COLOR_BLT ((0x2<<29)|(0x40<<22))
#define SRC_COPY_BLT ((0x2<<29)|(0x43<<22))
+/*
+ * Used to convert any address to canonical form.
+ * Starting from gen8, some commands (e.g. STATE_BASE_ADDRESS,
+ * MI_LOAD_REGISTER_MEM and others, see Broadwell PRM Vol2a) require the
+ * addresses to be in a canonical form:
+ * "GraphicsAddress[63:48] are ignored by the HW and assumed to be in correct
+ * canonical form [63:48] == [47]."
+ */
+#define GEN8_HIGH_ADDRESS_BIT 47
+static inline u64 gen8_canonical_addr(u64 address)
+{
+ return sign_extend64(address, GEN8_HIGH_ADDRESS_BIT);
+}
+
+static inline u64 gen8_noncanonical_addr(u64 address)
+{
+ return address & GENMASK_ULL(GEN8_HIGH_ADDRESS_BIT, 0);
+}
+
+static inline u32 *__gen6_emit_bb_start(u32 *cs, u32 addr, unsigned int flags)
+{
+ *cs++ = MI_BATCH_BUFFER_START | flags;
+ *cs++ = addr;
+
+ return cs;
+}
+
#endif /* _INTEL_GPU_COMMANDS_H_ */
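
A worked example for the canonical-address helpers above:

	/*
	 * gen8_canonical_addr(0x0000_8000_0000_0000)
	 *	-> 0xffff_8000_0000_0000  (bit 47 set, [63:48] become 1s)
	 * gen8_canonical_addr(0x0000_7fff_ffff_ffff)
	 *	-> 0x0000_7fff_ffff_ffff  (bit 47 clear, unchanged)
	 * gen8_noncanonical_addr() masks back to GENMASK_ULL(47, 0).
	 */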
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index 4c26daf7ee46..ec84b5e62fef 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -3,12 +3,15 @@
* Copyright © 2019 Intel Corporation
*/
+#include "debugfs_gt.h"
#include "i915_drv.h"
+#include "intel_context.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
#include "intel_gt_requests.h"
#include "intel_mocs.h"
#include "intel_rc6.h"
+#include "intel_renderstate.h"
#include "intel_rps.h"
#include "intel_uncore.h"
#include "intel_pm.h"
@@ -25,6 +28,7 @@ void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)
intel_gt_init_reset(gt);
intel_gt_init_requests(gt);
+ intel_gt_init_timelines(gt);
intel_gt_pm_init_early(gt);
intel_rps_init_early(&gt->rps);
@@ -73,7 +77,6 @@ int intel_gt_init_hw(struct intel_gt *gt)
struct intel_uncore *uncore = gt->uncore;
int ret;
- BUG_ON(!i915->kernel_context);
ret = intel_gt_terminally_wedged(gt);
if (ret)
return ret;
@@ -303,7 +306,7 @@ void intel_gt_flush_ggtt_writes(struct intel_gt *gt)
intel_gt_chipset_flush(gt);
- with_intel_runtime_pm(uncore->rpm, wakeref) {
+ with_intel_runtime_pm_if_in_use(uncore->rpm, wakeref) {
unsigned long flags;
spin_lock_irqsave(&uncore->lock, flags);
@@ -323,6 +326,8 @@ void intel_gt_chipset_flush(struct intel_gt *gt)
void intel_gt_driver_register(struct intel_gt *gt)
{
intel_rps_driver_register(&gt->rps);
+
+ debugfs_gt_register(gt);
}
static int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size)
@@ -364,22 +369,273 @@ static void intel_gt_fini_scratch(struct intel_gt *gt)
i915_vma_unpin_and_release(&gt->scratch, 0);
}
+static struct i915_address_space *kernel_vm(struct intel_gt *gt)
+{
+ if (INTEL_PPGTT(gt->i915) > INTEL_PPGTT_ALIASING)
+ return &i915_ppgtt_create(gt->i915)->vm;
+ else
+ return i915_vm_get(&gt->ggtt->vm);
+}
+
+static int __intel_context_flush_retire(struct intel_context *ce)
+{
+ struct intel_timeline *tl;
+
+ tl = intel_context_timeline_lock(ce);
+ if (IS_ERR(tl))
+ return PTR_ERR(tl);
+
+ intel_context_timeline_unlock(tl);
+ return 0;
+}
+
+static int __engines_record_defaults(struct intel_gt *gt)
+{
+ struct i915_request *requests[I915_NUM_ENGINES] = {};
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = 0;
+
+ /*
+ * As we reset the gpu during very early sanitisation, the current
+ * register state on the GPU should reflect its defaults values.
+ * We load a context onto the hw (with restore-inhibit), then switch
+ * over to a second context to save that default register state. We
+ * can then prime every new context with that state so they all start
+ * from the same default HW values.
+ */
+
+ for_each_engine(engine, gt, id) {
+ struct intel_renderstate so;
+ struct intel_context *ce;
+ struct i915_request *rq;
+
+ err = intel_renderstate_init(&so, engine);
+ if (err)
+ goto out;
+
+ /* We must be able to switch to something! */
+ GEM_BUG_ON(!engine->kernel_context);
+ engine->serial++; /* force the kernel context switch */
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto out;
+ }
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ intel_context_put(ce);
+ goto out;
+ }
+
+ err = intel_engine_emit_ctx_wa(rq);
+ if (err)
+ goto err_rq;
+
+ err = intel_renderstate_emit(&so, rq);
+ if (err)
+ goto err_rq;
+
+err_rq:
+ requests[id] = i915_request_get(rq);
+ i915_request_add(rq);
+ intel_renderstate_fini(&so);
+ if (err)
+ goto out;
+ }
+
+ /* Flush the default context image to memory, and enable powersaving. */
+ if (intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME) {
+ err = -EIO;
+ goto out;
+ }
+
+ for (id = 0; id < ARRAY_SIZE(requests); id++) {
+ struct i915_request *rq;
+ struct i915_vma *state;
+ void *vaddr;
+
+ rq = requests[id];
+ if (!rq)
+ continue;
+
+ GEM_BUG_ON(!test_bit(CONTEXT_ALLOC_BIT, &rq->context->flags));
+ state = rq->context->state;
+ if (!state)
+ continue;
+
+ /* Serialise with retirement on another CPU */
+ GEM_BUG_ON(!i915_request_completed(rq));
+ err = __intel_context_flush_retire(rq->context);
+ if (err)
+ goto out;
+
+ /* We want to be able to unbind the state from the GGTT */
+ GEM_BUG_ON(intel_context_is_pinned(rq->context));
+
+ /*
+ * As we will hold a reference to the logical state, it will
+ * not be torn down with the context, and importantly the
+ * object will hold onto its vma (making it possible for a
+ * stray GTT write to corrupt our defaults). Unmap the vma
+ * from the GTT to prevent such accidents and reclaim the
+ * space.
+ */
+ err = i915_vma_unbind(state);
+ if (err)
+ goto out;
+
+ i915_gem_object_lock(state->obj);
+ err = i915_gem_object_set_to_cpu_domain(state->obj, false);
+ i915_gem_object_unlock(state->obj);
+ if (err)
+ goto out;
+
+ i915_gem_object_set_cache_coherency(state->obj, I915_CACHE_LLC);
+
+ /* Check we can acquire the image of the context state */
+ vaddr = i915_gem_object_pin_map(state->obj, I915_MAP_FORCE_WB);
+ if (IS_ERR(vaddr)) {
+ err = PTR_ERR(vaddr);
+ goto out;
+ }
+
+ rq->engine->default_state = i915_gem_object_get(state->obj);
+ i915_gem_object_unpin_map(state->obj);
+ }
+
+out:
+ /*
+ * If we have to abandon now, we expect the engines to be idle
+ * and ready to be torn-down. The quickest way we can accomplish
+ * this is by declaring ourselves wedged.
+ */
+ if (err)
+ intel_gt_set_wedged(gt);
+
+ for (id = 0; id < ARRAY_SIZE(requests); id++) {
+ struct intel_context *ce;
+ struct i915_request *rq;
+
+ rq = requests[id];
+ if (!rq)
+ continue;
+
+ ce = rq->context;
+ i915_request_put(rq);
+ intel_context_put(ce);
+ }
+ return err;
+}
+
+static int __engines_verify_workarounds(struct intel_gt *gt)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = 0;
+
+ if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+ return 0;
+
+ for_each_engine(engine, gt, id) {
+ if (intel_engine_verify_workarounds(engine, "load"))
+ err = -EIO;
+ }
+
+ return err;
+}
+
+static void __intel_gt_disable(struct intel_gt *gt)
+{
+ intel_gt_set_wedged_on_init(gt);
+
+ intel_gt_suspend_prepare(gt);
+ intel_gt_suspend_late(gt);
+
+ GEM_BUG_ON(intel_gt_pm_is_awake(gt));
+}
+
int intel_gt_init(struct intel_gt *gt)
{
int err;
- err = intel_gt_init_scratch(gt, IS_GEN(gt->i915, 2) ? SZ_256K : SZ_4K);
+ err = i915_inject_probe_error(gt->i915, -ENODEV);
if (err)
return err;
+ /*
+ * This is just a security blanket to placate dragons.
+ * On some systems, we very sporadically observe that the first TLBs
+ * used by the CS may be stale, despite us poking the TLB reset. If
+ * we hold the forcewake during initialisation these problems
+ * just magically go away.
+ */
+ intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
+
+ err = intel_gt_init_scratch(gt, IS_GEN(gt->i915, 2) ? SZ_256K : SZ_4K);
+ if (err)
+ goto out_fw;
+
intel_gt_pm_init(gt);
- return 0;
+ gt->vm = kernel_vm(gt);
+ if (!gt->vm) {
+ err = -ENOMEM;
+ goto err_pm;
+ }
+
+ err = intel_engines_init(gt);
+ if (err)
+ goto err_engines;
+
+ intel_uc_init(&gt->uc);
+
+ err = intel_gt_resume(gt);
+ if (err)
+ goto err_uc_init;
+
+ err = __engines_record_defaults(gt);
+ if (err)
+ goto err_gt;
+
+ err = __engines_verify_workarounds(gt);
+ if (err)
+ goto err_gt;
+
+ err = i915_inject_probe_error(gt->i915, -EIO);
+ if (err)
+ goto err_gt;
+
+ goto out_fw;
+err_gt:
+ __intel_gt_disable(gt);
+ intel_uc_fini_hw(&gt->uc);
+err_uc_init:
+ intel_uc_fini(&gt->uc);
+err_engines:
+ intel_engines_release(gt);
+ i915_vm_put(fetch_and_zero(&gt->vm));
+err_pm:
+ intel_gt_pm_fini(gt);
+ intel_gt_fini_scratch(gt);
+out_fw:
+ if (err)
+ intel_gt_set_wedged_on_init(gt);
+ intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
+ return err;
}
void intel_gt_driver_remove(struct intel_gt *gt)
{
- GEM_BUG_ON(gt->awake);
+ __intel_gt_disable(gt);
+
+ intel_uc_fini_hw(&gt->uc);
+ intel_uc_fini(&gt->uc);
+
+ intel_engines_release(gt);
}
void intel_gt_driver_unregister(struct intel_gt *gt)
@@ -389,6 +645,12 @@ void intel_gt_driver_unregister(struct intel_gt *gt)
void intel_gt_driver_release(struct intel_gt *gt)
{
+ struct i915_address_space *vm;
+
+ vm = fetch_and_zero(&gt->vm);
+ if (vm) /* FIXME being called twice on error paths :( */
+ i915_vm_put(vm);
+
intel_gt_pm_fini(gt);
intel_gt_fini_scratch(gt);
}
@@ -396,5 +658,8 @@ void intel_gt_driver_release(struct intel_gt *gt)
void intel_gt_driver_late_release(struct intel_gt *gt)
{
intel_uc_driver_late_release(&gt->uc);
+ intel_gt_fini_requests(gt);
intel_gt_fini_reset(gt);
+ intel_gt_fini_timelines(gt);
+ intel_engines_free(gt);
}
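
fetch_and_zero(), used for gt->vm above, reads out a variable and clears it in one expression; a sketch of the helper as assumed from i915_utils.h (note it is not atomic, so it relies on teardown being single threaded):

	#define fetch_and_zero(ptr) ({					\
		typeof(*ptr) __T = *(ptr);				\
		*(ptr) = (typeof(*ptr))0;				\
		__T;							\
	})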
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h
index 5436f8c30708..2355cf129e9c 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt.h
@@ -12,6 +12,12 @@
struct drm_i915_private;
+#define GT_TRACE(gt, fmt, ...) do { \
+ const struct intel_gt *gt__ __maybe_unused = (gt); \
+ GEM_TRACE("%s " fmt, dev_name(gt__->i915->drm.dev), \
+ ##__VA_ARGS__); \
+} while (0)
+
static inline struct intel_gt *uc_to_gt(struct intel_uc *uc)
{
return container_of(uc, struct intel_gt, uc);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
index 973ee7eded64..f796bdf1ed30 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_irq.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
@@ -28,7 +28,7 @@ cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
tasklet = true;
if (iir & GT_RENDER_USER_INTERRUPT) {
- intel_engine_breadcrumbs_irq(engine);
+ intel_engine_signal_breadcrumbs(engine);
tasklet |= intel_engine_needs_breadcrumb_tasklet(engine);
}
@@ -245,9 +245,9 @@ void gen11_gt_irq_postinstall(struct intel_gt *gt)
void gen5_gt_irq_handler(struct intel_gt *gt, u32 gt_iir)
{
if (gt_iir & GT_RENDER_USER_INTERRUPT)
- intel_engine_breadcrumbs_irq(gt->engine_class[RENDER_CLASS][0]);
+ intel_engine_signal_breadcrumbs(gt->engine_class[RENDER_CLASS][0]);
if (gt_iir & ILK_BSD_USER_INTERRUPT)
- intel_engine_breadcrumbs_irq(gt->engine_class[VIDEO_DECODE_CLASS][0]);
+ intel_engine_signal_breadcrumbs(gt->engine_class[VIDEO_DECODE_CLASS][0]);
}
static void gen7_parity_error_irq_handler(struct intel_gt *gt, u32 iir)
@@ -271,11 +271,11 @@ static void gen7_parity_error_irq_handler(struct intel_gt *gt, u32 iir)
void gen6_gt_irq_handler(struct intel_gt *gt, u32 gt_iir)
{
if (gt_iir & GT_RENDER_USER_INTERRUPT)
- intel_engine_breadcrumbs_irq(gt->engine_class[RENDER_CLASS][0]);
+ intel_engine_signal_breadcrumbs(gt->engine_class[RENDER_CLASS][0]);
if (gt_iir & GT_BSD_USER_INTERRUPT)
- intel_engine_breadcrumbs_irq(gt->engine_class[VIDEO_DECODE_CLASS][0]);
+ intel_engine_signal_breadcrumbs(gt->engine_class[VIDEO_DECODE_CLASS][0]);
if (gt_iir & GT_BLT_USER_INTERRUPT)
- intel_engine_breadcrumbs_irq(gt->engine_class[COPY_ENGINE_CLASS][0]);
+ intel_engine_signal_breadcrumbs(gt->engine_class[COPY_ENGINE_CLASS][0]);
if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
GT_BSD_CS_ERROR_INTERRUPT |
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
index a459a42ad5c2..45b68a17da4d 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
@@ -43,7 +43,7 @@ static int __gt_unpark(struct intel_wakeref *wf)
struct intel_gt *gt = container_of(wf, typeof(*gt), wakeref);
struct drm_i915_private *i915 = gt->i915;
- GEM_TRACE("\n");
+ GT_TRACE(gt, "\n");
i915_globals_unpark();
@@ -61,9 +61,7 @@ static int __gt_unpark(struct intel_wakeref *wf)
gt->awake = intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ);
GEM_BUG_ON(!gt->awake);
- if (NEEDS_RC6_CTX_CORRUPTION_WA(i915))
- intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
-
+ intel_rc6_unpark(&gt->rc6);
intel_rps_unpark(&gt->rps);
i915_pmu_gt_unparked(i915);
@@ -78,24 +76,21 @@ static int __gt_park(struct intel_wakeref *wf)
intel_wakeref_t wakeref = fetch_and_zero(&gt->awake);
struct drm_i915_private *i915 = gt->i915;
- GEM_TRACE("\n");
+ GT_TRACE(gt, "\n");
intel_gt_park_requests(gt);
i915_vma_parked(gt);
i915_pmu_gt_parked(i915);
intel_rps_park(&gt->rps);
+ intel_rc6_park(&gt->rc6);
/* Everything switched off, flush any residual interrupt just in case */
intel_synchronize_irq(i915);
- if (NEEDS_RC6_CTX_CORRUPTION_WA(i915)) {
- intel_rc6_ctx_wa_check(&i915->gt.rc6);
- intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
- }
-
+ /* Defer dropping the display power well for 100ms, it's slow! */
GEM_BUG_ON(!wakeref);
- intel_display_power_put(i915, POWER_DOMAIN_GT_IRQ, wakeref);
+ intel_display_power_put_async(i915, POWER_DOMAIN_GT_IRQ, wakeref);
i915_globals_park();
@@ -147,7 +142,7 @@ void intel_gt_sanitize(struct intel_gt *gt, bool force)
enum intel_engine_id id;
intel_wakeref_t wakeref;
- GEM_TRACE("force:%s\n", yesno(force));
+ GT_TRACE(gt, "force:%s", yesno(force));
/* Use a raw wakeref to avoid calling intel_display_power_get early */
wakeref = intel_runtime_pm_get(gt->uncore->rpm);
@@ -192,9 +187,9 @@ int intel_gt_resume(struct intel_gt *gt)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
- int err = 0;
+ int err;
- GEM_TRACE("\n");
+ GT_TRACE(gt, "\n");
/*
* After resume, we may need to poke into the pinned kernel
@@ -207,6 +202,15 @@ int intel_gt_resume(struct intel_gt *gt)
intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
intel_rc6_sanitize(&gt->rc6);
+ /* Only when the HW is re-initialised can we replay the requests */
+ err = intel_gt_init_hw(gt);
+ if (err) {
+ dev_err(gt->i915->drm.dev,
+ "Failed to initialize GPU, declaring it wedged!\n");
+ intel_gt_set_wedged(gt);
+ goto err_fw;
+ }
+
intel_rps_enable(&gt->rps);
intel_llc_enable(&gt->llc);
@@ -239,6 +243,7 @@ int intel_gt_resume(struct intel_gt *gt)
user_forcewake(gt, false);
+err_fw:
intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
intel_gt_pm_put(gt);
@@ -256,6 +261,7 @@ static void wait_for_suspend(struct intel_gt *gt)
* the gpu quiet.
*/
intel_gt_set_wedged(gt);
+ intel_gt_retire_requests(gt);
}
intel_gt_pm_wait_for_idle(gt);
@@ -285,6 +291,11 @@ void intel_gt_suspend_late(struct intel_gt *gt)
/* We expect to be idle already; but also want to be independent */
wait_for_suspend(gt);
+ if (is_mock_gt(gt))
+ return;
+
+ GEM_BUG_ON(gt->awake);
+
/*
* On disabling the device, we want to turn off HW access to memory
* that we no longer own.
@@ -306,20 +317,19 @@ void intel_gt_suspend_late(struct intel_gt *gt)
intel_gt_sanitize(gt, false);
- GEM_TRACE("\n");
+ GT_TRACE(gt, "\n");
}
void intel_gt_runtime_suspend(struct intel_gt *gt)
{
intel_uc_runtime_suspend(&gt->uc);
- GEM_TRACE("\n");
+ GT_TRACE(gt, "\n");
}
int intel_gt_runtime_resume(struct intel_gt *gt)
{
- GEM_TRACE("\n");
-
+ GT_TRACE(gt, "\n");
intel_gt_init_swizzling(gt);
return intel_uc_runtime_resume(&gt->uc);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.h b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
index 990efc27a4e4..4a9e48c12bd4 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
@@ -22,6 +22,11 @@ static inline void intel_gt_pm_get(struct intel_gt *gt)
intel_wakeref_get(&gt->wakeref);
}
+static inline void __intel_gt_pm_get(struct intel_gt *gt)
+{
+ __intel_wakeref_get(&gt->wakeref);
+}
+
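
__intel_gt_pm_get() is the atomic-context variant: it assumes the GT wakeref is already held (as under the submission tasklet in __execlists_schedule_in() later in this patch) and therefore never sleeps to wake the device. A sketch of the assumed underlying helper:

	static inline void __intel_wakeref_get(struct intel_wakeref *wf)
	{
		INTEL_WAKEREF_BUG_ON(!atomic_read(&wf->count));
		atomic_inc(&wf->count); /* must already be awake */
	}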
static inline bool intel_gt_pm_get_if_awake(struct intel_gt *gt)
{
return intel_wakeref_get_if_active(&gt->wakeref);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_requests.c b/drivers/gpu/drm/i915/gt/intel_gt_requests.c
index 3dc13ecf41bf..b4f04614230e 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_requests.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_requests.c
@@ -8,6 +8,7 @@
#include "i915_drv.h" /* for_each_engine() */
#include "i915_request.h"
+#include "intel_engine_heartbeat.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
#include "intel_gt_requests.h"
@@ -22,13 +23,18 @@ static void retire_requests(struct intel_timeline *tl)
break;
}
-static void flush_submission(struct intel_gt *gt)
+static bool flush_submission(struct intel_gt *gt)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
+ bool active = false;
- for_each_engine(engine, gt, id)
- intel_engine_flush_submission(engine);
+ for_each_engine(engine, gt, id) {
+ active |= intel_engine_flush_submission(engine);
+ active |= flush_work(&engine->retire_work);
+ }
+
+ return active;
}
static void engine_retire(struct work_struct *work)
@@ -62,19 +68,16 @@ static void engine_retire(struct work_struct *work)
static bool add_retire(struct intel_engine_cs *engine,
struct intel_timeline *tl)
{
+#define STUB ((struct intel_timeline *)1)
struct intel_timeline *first;
/*
* We open-code a llist here to include the additional tag [BIT(0)]
* so that we know when the timeline is already on a
* retirement queue: either this engine or another.
- *
- * However, we rely on that a timeline can only be active on a single
- * engine at any one time and that add_retire() is called before the
- * engine releases the timeline and transferred to another to retire.
*/
- if (READ_ONCE(tl->retire)) /* already queued */
+ if (cmpxchg(&tl->retire, NULL, STUB)) /* already queued */
return false;
intel_timeline_get(tl);
@@ -109,7 +112,6 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
struct intel_gt_timelines *timelines = &gt->timelines;
struct intel_timeline *tl, *tn;
unsigned long active_count = 0;
- unsigned long flags;
bool interruptible;
LIST_HEAD(free);
@@ -119,7 +121,7 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
flush_submission(gt); /* kick the ksoftirqd tasklets */
- spin_lock_irqsave(&timelines->lock, flags);
+ spin_lock(&timelines->lock);
list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
if (!mutex_trylock(&tl->mutex)) {
active_count++; /* report busy to caller, try again? */
@@ -129,7 +131,7 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
intel_timeline_get(tl);
GEM_BUG_ON(!atomic_read(&tl->active_count));
atomic_inc(&tl->active_count); /* pin the list element */
- spin_unlock_irqrestore(&timelines->lock, flags);
+ spin_unlock(&timelines->lock);
if (timeout > 0) {
struct dma_fence *fence;
@@ -145,14 +147,14 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
retire_requests(tl);
- spin_lock_irqsave(&timelines->lock, flags);
+ spin_lock(&timelines->lock);
/* Resume iteration after dropping lock */
list_safe_reset_next(tl, tn, link);
if (atomic_dec_and_test(&tl->active_count))
list_del(&tl->link);
else
- active_count += !!rcu_access_pointer(tl->last_request.fence);
+ active_count += i915_active_fence_isset(&tl->last_request);
mutex_unlock(&tl->mutex);
@@ -162,11 +164,14 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
list_add(&tl->link, &free);
}
}
- spin_unlock_irqrestore(&timelines->lock, flags);
+ spin_unlock(&timelines->lock);
list_for_each_entry_safe(tl, tn, &free, link)
__intel_timeline_free(&tl->kref);
+ if (flush_submission(gt))
+ active_count++;
+
return active_count ? timeout : 0;
}
@@ -190,9 +195,9 @@ static void retire_work_handler(struct work_struct *work)
struct intel_gt *gt =
container_of(work, typeof(*gt), requests.retire_work.work);
- intel_gt_retire_requests(gt);
schedule_delayed_work(&gt->requests.retire_work,
round_jiffies_up_relative(HZ));
+ intel_gt_retire_requests(gt);
}
void intel_gt_init_requests(struct intel_gt *gt)
@@ -210,3 +215,9 @@ void intel_gt_unpark_requests(struct intel_gt *gt)
schedule_delayed_work(&gt->requests.retire_work,
round_jiffies_up_relative(HZ));
}
+
+void intel_gt_fini_requests(struct intel_gt *gt)
+{
+ /* Wait until the work is marked as finished before unloading! */
+ cancel_delayed_work_sync(&gt->requests.retire_work);
+}
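
The cmpxchg() in add_retire() above acts as a lock-free claim: only the caller that swings tl->retire away from NULL queues the timeline, so it lands on at most one engine's retirement list. The idiom in sketch form:

	if (cmpxchg(&tl->retire, NULL, STUB))	/* lost the race */
		return false;			/* already queued somewhere */

	intel_timeline_get(tl);	/* winner pins tl until engine_retire() */
	/* ...then replace STUB with the real llist linkage... */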
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_requests.h b/drivers/gpu/drm/i915/gt/intel_gt_requests.h
index d626fb115386..dbac53baf1cb 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_requests.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_requests.h
@@ -27,5 +27,6 @@ int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout);
void intel_gt_init_requests(struct intel_gt *gt);
void intel_gt_park_requests(struct intel_gt *gt);
void intel_gt_unpark_requests(struct intel_gt *gt);
+void intel_gt_fini_requests(struct intel_gt *gt);
#endif /* INTEL_GT_REQUESTS_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h
index d4e14dbd172e..96890dd12b5f 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h
@@ -90,6 +90,13 @@ struct intel_gt {
struct intel_engine_cs *engine[I915_NUM_ENGINES];
struct intel_engine_cs *engine_class[MAX_ENGINE_CLASS + 1]
[MAX_ENGINE_INSTANCE + 1];
+
+ /*
+ * Default address space (either GGTT or ppGTT depending on arch).
+ *
+ * Reserved for exclusive use by the kernel.
+ */
+ struct i915_address_space *vm;
};
enum intel_gt_scratch_field {
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 75dd0e0367b7..4fb70a7716e3 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -133,12 +133,11 @@
*/
#include <linux/interrupt.h>
-#include "gem/i915_gem_context.h"
-
#include "i915_drv.h"
#include "i915_perf.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
+#include "intel_context.h"
#include "intel_engine_pm.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
@@ -880,7 +879,7 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
list_move(&rq->sched.link, pl);
active = rq;
} else {
- struct intel_engine_cs *owner = rq->hw_context->engine;
+ struct intel_engine_cs *owner = rq->context->engine;
/*
* Decouple the virtual breadcrumb before moving it
@@ -983,6 +982,58 @@ static void intel_engine_context_out(struct intel_engine_cs *engine)
write_sequnlock_irqrestore(&engine->stats.lock, flags);
}
+static int lrc_ring_mi_mode(const struct intel_engine_cs *engine)
+{
+ if (INTEL_GEN(engine->i915) >= 12)
+ return 0x60;
+ else if (INTEL_GEN(engine->i915) >= 9)
+ return 0x54;
+ else if (engine->class == RENDER_CLASS)
+ return 0x58;
+ else
+ return -1;
+}
+
+static void
+execlists_check_context(const struct intel_context *ce,
+ const struct intel_engine_cs *engine)
+{
+ const struct intel_ring *ring = ce->ring;
+ u32 *regs = ce->lrc_reg_state;
+ bool valid = true;
+ int x;
+
+ if (regs[CTX_RING_START] != i915_ggtt_offset(ring->vma)) {
+ pr_err("%s: context submitted with incorrect RING_START [%08x], expected %08x\n",
+ engine->name,
+ regs[CTX_RING_START],
+ i915_ggtt_offset(ring->vma));
+ regs[CTX_RING_START] = i915_ggtt_offset(ring->vma);
+ valid = false;
+ }
+
+ if ((regs[CTX_RING_CTL] & ~(RING_WAIT | RING_WAIT_SEMAPHORE)) !=
+ (RING_CTL_SIZE(ring->size) | RING_VALID)) {
+ pr_err("%s: context submitted with incorrect RING_CTL [%08x], expected %08x\n",
+ engine->name,
+ regs[CTX_RING_CTL],
+ (u32)(RING_CTL_SIZE(ring->size) | RING_VALID));
+ regs[CTX_RING_CTL] = RING_CTL_SIZE(ring->size) | RING_VALID;
+ valid = false;
+ }
+
+ x = lrc_ring_mi_mode(engine);
+ if (x != -1 && regs[x + 1] & (regs[x + 1] >> 16) & STOP_RING) {
+ pr_err("%s: context submitted with STOP_RING [%08x] in RING_MI_MODE\n",
+ engine->name, regs[x + 1]);
+ regs[x + 1] &= ~STOP_RING;
+ regs[x + 1] |= STOP_RING << 16;
+ valid = false;
+ }
+
+ WARN_ONCE(!valid, "Invalid lrc state found before submission\n");
+}
+
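
The STOP_RING fixup in execlists_check_context() relies on the masked-bit convention of RING_MI_MODE-style registers, where the high 16 bits are a write-enable mask for the low 16. With the standard helpers (as assumed from i915_reg.h):

	#define _MASKED_BIT_ENABLE(a)	({ typeof(a) _a = (a); ((_a) << 16) | (_a); })
	#define _MASKED_BIT_DISABLE(a)	(((a) << 16))

"regs[x + 1] & (regs[x + 1] >> 16) & STOP_RING" is therefore true only when STOP_RING is set and its mask bit permits the write, and the repair stores the equivalent of _MASKED_BIT_DISABLE(STOP_RING) back into the saved image.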
static void restore_default_state(struct intel_context *ce,
struct intel_engine_cs *engine)
{
@@ -999,7 +1050,7 @@ static void restore_default_state(struct intel_context *ce,
static void reset_active(struct i915_request *rq,
struct intel_engine_cs *engine)
{
- struct intel_context * const ce = rq->hw_context;
+ struct intel_context * const ce = rq->context;
u32 head;
/*
@@ -1017,8 +1068,8 @@ static void reset_active(struct i915_request *rq,
* remain correctly ordered. And we defer to __i915_request_submit()
* so that all asynchronous waits are correctly handled.
*/
- GEM_TRACE("%s(%s): { rq=%llx:%lld }\n",
- __func__, engine->name, rq->fence.context, rq->fence.seqno);
+ ENGINE_TRACE(engine, "{ rq=%llx:%lld }\n",
+ rq->fence.context, rq->fence.seqno);
/* On resubmission of the active request, payload will be scrubbed */
if (i915_request_completed(rq))
@@ -1040,13 +1091,16 @@ static inline struct intel_engine_cs *
__execlists_schedule_in(struct i915_request *rq)
{
struct intel_engine_cs * const engine = rq->engine;
- struct intel_context * const ce = rq->hw_context;
+ struct intel_context * const ce = rq->context;
intel_context_get(ce);
- if (unlikely(i915_gem_context_is_banned(ce->gem_context)))
+ if (unlikely(intel_context_is_banned(ce)))
reset_active(rq, engine);
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+ execlists_check_context(ce, engine);
+
if (ce->tag) {
/* Use a fixed tag for OA and friends */
ce->lrc_desc |= (u64)ce->tag << 32;
@@ -1059,7 +1113,7 @@ __execlists_schedule_in(struct i915_request *rq)
BUILD_BUG_ON(NUM_CONTEXT_TAG > GEN12_MAX_CONTEXT_HW_ID);
}
- intel_gt_pm_get(engine->gt);
+ __intel_gt_pm_get(engine->gt);
execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN);
intel_engine_context_in(engine);
@@ -1069,7 +1123,7 @@ __execlists_schedule_in(struct i915_request *rq)
static inline struct i915_request *
execlists_schedule_in(struct i915_request *rq, int idx)
{
- struct intel_context * const ce = rq->hw_context;
+ struct intel_context * const ce = rq->context;
struct intel_engine_cs *old;
GEM_BUG_ON(!intel_engine_pm_is_awake(rq->engine));
@@ -1100,7 +1154,7 @@ static inline void
__execlists_schedule_out(struct i915_request *rq,
struct intel_engine_cs * const engine)
{
- struct intel_context * const ce = rq->hw_context;
+ struct intel_context * const ce = rq->context;
/*
* NB process_csb() is not under the engine->active.lock and hence
@@ -1138,7 +1192,7 @@ __execlists_schedule_out(struct i915_request *rq,
static inline void
execlists_schedule_out(struct i915_request *rq)
{
- struct intel_context * const ce = rq->hw_context;
+ struct intel_context * const ce = rq->context;
struct intel_engine_cs *cur, *old;
trace_i915_request_out(rq);
@@ -1155,7 +1209,7 @@ execlists_schedule_out(struct i915_request *rq)
static u64 execlists_update_context(struct i915_request *rq)
{
- struct intel_context *ce = rq->hw_context;
+ struct intel_context *ce = rq->context;
u64 desc = ce->lrc_desc;
u32 tail;
@@ -1186,13 +1240,8 @@ static u64 execlists_update_context(struct i915_request *rq)
* may not be visible to the HW prior to the completion of the UC
* register write and that we may begin execution from the context
* before its image is complete leading to invalid PD chasing.
- *
- * Furthermore, Braswell, at least, wants a full mb to be sure that
- * the writes are coherent in memory (visible to the GPU) prior to
- * execution, and not just visible to other CPUs (as is the result of
- * wmb).
*/
- mb();
+ wmb();
/* Wa_1607138340:tgl */
if (IS_TGL_REVID(rq->i915, TGL_REVID_A0, TGL_REVID_A0))
@@ -1224,15 +1273,14 @@ trace_ports(const struct intel_engine_execlists *execlists,
if (!ports[0])
return;
- GEM_TRACE("%s: %s { %llx:%lld%s, %llx:%lld }\n",
- engine->name, msg,
- ports[0]->fence.context,
- ports[0]->fence.seqno,
- i915_request_completed(ports[0]) ? "!" :
- i915_request_started(ports[0]) ? "*" :
- "",
- ports[1] ? ports[1]->fence.context : 0,
- ports[1] ? ports[1]->fence.seqno : 0);
+ ENGINE_TRACE(engine, "%s { %llx:%lld%s, %llx:%lld }\n", msg,
+ ports[0]->fence.context,
+ ports[0]->fence.seqno,
+ i915_request_completed(ports[0]) ? "!" :
+ i915_request_started(ports[0]) ? "*" :
+ "",
+ ports[1] ? ports[1]->fence.context : 0,
+ ports[1] ? ports[1]->fence.seqno : 0);
}
static __maybe_unused bool
@@ -1256,33 +1304,56 @@ assert_pending_valid(const struct intel_engine_execlists *execlists,
}
for (port = execlists->pending; (rq = *port); port++) {
- if (ce == rq->hw_context) {
- GEM_TRACE_ERR("Duplicate context in pending[%zd]\n",
+ unsigned long flags;
+ bool ok = true;
+
+ GEM_BUG_ON(!kref_read(&rq->fence.refcount));
+ GEM_BUG_ON(!i915_request_is_active(rq));
+
+ if (ce == rq->context) {
+ GEM_TRACE_ERR("Dup context:%llx in pending[%zd]\n",
+ ce->timeline->fence_context,
port - execlists->pending);
return false;
}
+ ce = rq->context;
- ce = rq->hw_context;
- if (i915_request_completed(rq))
+ /* Hold tightly onto the lock to prevent concurrent retires! */
+ if (!spin_trylock_irqsave(&rq->lock, flags))
continue;
- if (i915_active_is_idle(&ce->active)) {
- GEM_TRACE_ERR("Inactive context in pending[%zd]\n",
+ if (i915_request_completed(rq))
+ goto unlock;
+
+ if (i915_active_is_idle(&ce->active) &&
+ !intel_context_is_barrier(ce)) {
+ GEM_TRACE_ERR("Inactive context:%llx in pending[%zd]\n",
+ ce->timeline->fence_context,
port - execlists->pending);
- return false;
+ ok = false;
+ goto unlock;
}
if (!i915_vma_is_pinned(ce->state)) {
- GEM_TRACE_ERR("Unpinned context in pending[%zd]\n",
+ GEM_TRACE_ERR("Unpinned context:%llx in pending[%zd]\n",
+ ce->timeline->fence_context,
port - execlists->pending);
- return false;
+ ok = false;
+ goto unlock;
}
if (!i915_vma_is_pinned(ce->ring->vma)) {
- GEM_TRACE_ERR("Unpinned ringbuffer in pending[%zd]\n",
+ GEM_TRACE_ERR("Unpinned ring:%llx in pending[%zd]\n",
+ ce->timeline->fence_context,
port - execlists->pending);
- return false;
+ ok = false;
+ goto unlock;
}
+
+unlock:
+ spin_unlock_irqrestore(&rq->lock, flags);
+ if (!ok)
+ return false;
}
return ce;
@@ -1327,7 +1398,7 @@ static void execlists_submit_ports(struct intel_engine_cs *engine)
static bool ctx_single_port_submission(const struct intel_context *ce)
{
return (IS_ENABLED(CONFIG_DRM_I915_GVT) &&
- i915_gem_context_force_single_submission(ce->gem_context));
+ intel_context_force_single_submission(ce));
}
static bool can_merge_ctx(const struct intel_context *prev,
@@ -1363,7 +1434,7 @@ static bool can_merge_rq(const struct i915_request *prev,
(I915_REQUEST_NOPREEMPT | I915_REQUEST_SENTINEL)))
return false;
- if (!can_merge_ctx(prev->hw_context, next->hw_context))
+ if (!can_merge_ctx(prev->context, next->context))
return false;
return true;
@@ -1411,7 +1482,7 @@ static void virtual_xfer_breadcrumbs(struct virtual_engine *ve,
if (!list_empty(&ve->context.signal_link)) {
list_move_tail(&ve->context.signal_link,
&engine->breadcrumbs.signalers);
- intel_engine_queue_breadcrumbs(engine);
+ intel_engine_signal_breadcrumbs(engine);
}
spin_unlock(&old->breadcrumbs.irq_lock);
}
@@ -1550,7 +1621,7 @@ static unsigned long active_preempt_timeout(struct intel_engine_cs *engine)
return 0;
/* Force a fast reset for terminated contexts (ignoring sysfs!) */
- if (unlikely(i915_gem_context_is_banned(rq->gem_context)))
+ if (unlikely(intel_context_is_banned(rq->context)))
return 1;
return READ_ONCE(engine->props.preempt_timeout_ms);
@@ -1627,12 +1698,12 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
last = last_active(execlists);
if (last) {
if (need_preempt(engine, last, rb)) {
- GEM_TRACE("%s: preempting last=%llx:%lld, prio=%d, hint=%d\n",
- engine->name,
- last->fence.context,
- last->fence.seqno,
- last->sched.attr.priority,
- execlists->queue_priority_hint);
+ ENGINE_TRACE(engine,
+ "preempting last=%llx:%lld, prio=%d, hint=%d\n",
+ last->fence.context,
+ last->fence.seqno,
+ last->sched.attr.priority,
+ execlists->queue_priority_hint);
record_preemption(execlists);
/*
@@ -1658,16 +1729,16 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* tendency to ignore us rewinding the TAIL to the
* end of an earlier request.
*/
- last->hw_context->lrc_desc |= CTX_DESC_FORCE_RESTORE;
+ last->context->lrc_desc |= CTX_DESC_FORCE_RESTORE;
last = NULL;
} else if (need_timeslice(engine, last) &&
timer_expired(&engine->execlists.timer)) {
- GEM_TRACE("%s: expired last=%llx:%lld, prio=%d, hint=%d\n",
- engine->name,
- last->fence.context,
- last->fence.seqno,
- last->sched.attr.priority,
- execlists->queue_priority_hint);
+ ENGINE_TRACE(engine,
+ "expired last=%llx:%lld, prio=%d, hint=%d\n",
+ last->fence.context,
+ last->fence.seqno,
+ last->sched.attr.priority,
+ execlists->queue_priority_hint);
ring_set_paused(engine, 1);
defer_active(engine);
@@ -1730,7 +1801,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
GEM_BUG_ON(rq != ve->request);
GEM_BUG_ON(rq->engine != &ve->base);
- GEM_BUG_ON(rq->hw_context != &ve->context);
+ GEM_BUG_ON(rq->context != &ve->context);
if (rq_prio(rq) >= queue_prio(execlists)) {
if (!virtual_matches(ve, rq, engine)) {
@@ -1744,14 +1815,14 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
return; /* leave this for another */
}
- GEM_TRACE("%s: virtual rq=%llx:%lld%s, new engine? %s\n",
- engine->name,
- rq->fence.context,
- rq->fence.seqno,
- i915_request_completed(rq) ? "!" :
- i915_request_started(rq) ? "*" :
- "",
- yesno(engine != ve->siblings[0]));
+ ENGINE_TRACE(engine,
+ "virtual rq=%llx:%lld%s, new engine? %s\n",
+ rq->fence.context,
+ rq->fence.seqno,
+ i915_request_completed(rq) ? "!" :
+ i915_request_started(rq) ? "*" :
+ "",
+ yesno(engine != ve->siblings[0]));
ve->request = NULL;
ve->base.execlists.queue_priority_hint = INT_MIN;
@@ -1849,7 +1920,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* same LRCA, i.e. we must submit 2 different
* contexts if we submit 2 ELSP.
*/
- if (last->hw_context == rq->hw_context)
+ if (last->context == rq->context)
goto done;
if (i915_request_has_sentinel(last))
@@ -1862,8 +1933,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* the same context (even though a different
* request) to the second port.
*/
- if (ctx_single_port_submission(last->hw_context) ||
- ctx_single_port_submission(rq->hw_context))
+ if (ctx_single_port_submission(last->context) ||
+ ctx_single_port_submission(rq->context))
goto done;
merge = false;
@@ -1877,8 +1948,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
}
GEM_BUG_ON(last &&
- !can_merge_ctx(last->hw_context,
- rq->hw_context));
+ !can_merge_ctx(last->context,
+ rq->context));
submit = true;
last = rq;
@@ -1907,9 +1978,6 @@ done:
* interrupt for secondary ports).
*/
execlists->queue_priority_hint = queue_prio(execlists);
- GEM_TRACE("%s: queue_priority_hint:%d, submit:%s\n",
- engine->name, execlists->queue_priority_hint,
- yesno(submit));
if (submit) {
*port = execlists_schedule_in(last, port - execlists->pending);
@@ -2058,7 +2126,7 @@ static void process_csb(struct intel_engine_cs *engine)
*/
head = execlists->csb_head;
tail = READ_ONCE(*execlists->csb_write);
- GEM_TRACE("%s cs-irq head=%d, tail=%d\n", engine->name, head, tail);
+ ENGINE_TRACE(engine, "cs-irq head=%d, tail=%d\n", head, tail);
if (unlikely(head == tail))
return;
@@ -2096,9 +2164,8 @@ static void process_csb(struct intel_engine_cs *engine)
* status notifier.
*/
- GEM_TRACE("%s csb[%d]: status=0x%08x:0x%08x\n",
- engine->name, head,
- buf[2 * head + 0], buf[2 * head + 1]);
+ ENGINE_TRACE(engine, "csb[%d]: status=0x%08x:0x%08x\n",
+ head, buf[2 * head + 0], buf[2 * head + 1]);
if (INTEL_GEN(engine->i915) >= 12)
promote = gen12_csb_parse(execlists, buf + 2 * head);
@@ -2189,10 +2256,9 @@ static noinline void preempt_reset(struct intel_engine_cs *engine)
/* Mark this tasklet as disabled to avoid waiting for it to complete */
tasklet_disable_nosync(&engine->execlists.tasklet);
- GEM_TRACE("%s: preempt timeout %lu+%ums\n",
- engine->name,
- READ_ONCE(engine->props.preempt_timeout_ms),
- jiffies_to_msecs(jiffies - engine->execlists.preempt.expires));
+ ENGINE_TRACE(engine, "preempt timeout %lu+%ums\n",
+ READ_ONCE(engine->props.preempt_timeout_ms),
+ jiffies_to_msecs(jiffies - engine->execlists.preempt.expires));
intel_engine_reset(engine, "preemption time out");
tasklet_enable(&engine->execlists.tasklet);
@@ -2369,7 +2435,7 @@ __execlists_update_reg_state(const struct intel_context *ce,
GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head));
GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail));
- regs[CTX_RING_BUFFER_START] = i915_ggtt_offset(ring->vma);
+ regs[CTX_RING_START] = i915_ggtt_offset(ring->vma);
regs[CTX_RING_HEAD] = ring->head;
regs[CTX_RING_TAIL] = ring->tail;
@@ -2497,7 +2563,7 @@ static int execlists_request_alloc(struct i915_request *request)
{
int ret;
- GEM_BUG_ON(!intel_context_is_pinned(request->hw_context));
+ GEM_BUG_ON(!intel_context_is_pinned(request->context));
/*
* Flush enough space to reduce the likelihood of waiting after
@@ -2898,8 +2964,8 @@ static void execlists_reset_prepare(struct intel_engine_cs *engine)
struct intel_engine_execlists * const execlists = &engine->execlists;
unsigned long flags;
- GEM_TRACE("%s: depth<-%d\n", engine->name,
- atomic_read(&execlists->tasklet.count));
+ ENGINE_TRACE(engine, "depth<-%d\n",
+ atomic_read(&execlists->tasklet.count));
/*
* Prevent request submission to the hardware until we have
@@ -2952,22 +3018,18 @@ static void reset_csb_pointers(struct intel_engine_cs *engine)
WRITE_ONCE(*execlists->csb_write, reset_value);
wmb(); /* Make sure this is visible to HW (paranoia?) */
+ /*
+ * Sometimes Icelake forgets to reset its pointers on a GPU reset.
+ * Bludgeon them with a mmio update to be sure.
+ */
+ ENGINE_WRITE(engine, RING_CONTEXT_STATUS_PTR,
+ reset_value << 8 | reset_value);
+ ENGINE_POSTING_READ(engine, RING_CONTEXT_STATUS_PTR);
+
invalidate_csb_entries(&execlists->csb_status[0],
&execlists->csb_status[reset_value]);
}
-static int lrc_ring_mi_mode(const struct intel_engine_cs *engine)
-{
- if (INTEL_GEN(engine->i915) >= 12)
- return 0x60;
- else if (INTEL_GEN(engine->i915) >= 9)
- return 0x54;
- else if (engine->class == RENDER_CLASS)
- return 0x58;
- else
- return -1;
-}
-
static void __execlists_reset_reg_state(const struct intel_context *ce,
const struct intel_engine_cs *engine)
{
@@ -3008,7 +3070,7 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
/* We still have requests in-flight; the engine should be active */
GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
- ce = rq->hw_context;
+ ce = rq->context;
GEM_BUG_ON(!i915_vma_is_pinned(ce->state));
if (i915_request_completed(rq)) {
@@ -3065,8 +3127,8 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
restore_default_state(ce, engine);
out_replay:
- GEM_TRACE("%s replay {head:%04x, tail:%04x}\n",
- engine->name, ce->ring->head, ce->ring->tail);
+ ENGINE_TRACE(engine, "replay {head:%04x, tail:%04x}\n",
+ ce->ring->head, ce->ring->tail);
intel_ring_update_space(ce->ring);
__execlists_reset_reg_state(ce, engine);
__execlists_update_reg_state(ce, engine);
@@ -3078,11 +3140,11 @@ unwind:
__unwind_incomplete_requests(engine);
}
-static void execlists_reset(struct intel_engine_cs *engine, bool stalled)
+static void execlists_reset_rewind(struct intel_engine_cs *engine, bool stalled)
{
unsigned long flags;
- GEM_TRACE("%s\n", engine->name);
+ ENGINE_TRACE(engine, "\n");
spin_lock_irqsave(&engine->active.lock, flags);
@@ -3096,14 +3158,14 @@ static void nop_submission_tasklet(unsigned long data)
/* The driver is wedged; don't process any more events. */
}
-static void execlists_cancel_requests(struct intel_engine_cs *engine)
+static void execlists_reset_cancel(struct intel_engine_cs *engine)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
struct i915_request *rq, *rn;
struct rb_node *rb;
unsigned long flags;
- GEM_TRACE("%s\n", engine->name);
+ ENGINE_TRACE(engine, "\n");
/*
* Before we call engine->cancel_requests(), we should have exclusive
@@ -3190,13 +3252,13 @@ static void execlists_reset_finish(struct intel_engine_cs *engine)
if (__tasklet_enable(&execlists->tasklet))
/* And kick in case we missed a new request submission. */
tasklet_hi_schedule(&execlists->tasklet);
- GEM_TRACE("%s: depth->%d\n", engine->name,
- atomic_read(&execlists->tasklet.count));
+ ENGINE_TRACE(engine, "depth->%d\n",
+ atomic_read(&execlists->tasklet.count));
}
-static int gen8_emit_bb_start(struct i915_request *rq,
- u64 offset, u32 len,
- const unsigned int flags)
+static int gen8_emit_bb_start_noarb(struct i915_request *rq,
+ u64 offset, u32 len,
+ const unsigned int flags)
{
u32 *cs;
@@ -3230,7 +3292,7 @@ static int gen8_emit_bb_start(struct i915_request *rq,
return 0;
}
-static int gen9_emit_bb_start(struct i915_request *rq,
+static int gen8_emit_bb_start(struct i915_request *rq,
u64 offset, u32 len,
const unsigned int flags)
{
@@ -3685,12 +3747,12 @@ static void execlists_park(struct intel_engine_cs *engine)
void intel_execlists_set_default_submission(struct intel_engine_cs *engine)
{
engine->submit_request = execlists_submit_request;
- engine->cancel_requests = execlists_cancel_requests;
engine->schedule = i915_schedule;
engine->execlists.tasklet.func = execlists_submission_tasklet;
engine->reset.prepare = execlists_reset_prepare;
- engine->reset.reset = execlists_reset;
+ engine->reset.rewind = execlists_reset_rewind;
+ engine->reset.cancel = execlists_reset_cancel;
engine->reset.finish = execlists_reset_finish;
engine->park = execlists_park;
@@ -3705,13 +3767,27 @@ void intel_execlists_set_default_submission(struct intel_engine_cs *engine)
if (INTEL_GEN(engine->i915) >= 12)
engine->flags |= I915_ENGINE_HAS_RELATIVE_MMIO;
+
+ if (intel_engine_has_preemption(engine))
+ engine->emit_bb_start = gen8_emit_bb_start;
+ else
+ engine->emit_bb_start = gen8_emit_bb_start_noarb;
+}
+
+static void execlists_shutdown(struct intel_engine_cs *engine)
+{
+ /* Synchronise with residual timers and any softirq they raise */
+ del_timer_sync(&engine->execlists.timer);
+ del_timer_sync(&engine->execlists.preempt);
+ tasklet_kill(&engine->execlists.tasklet);
}
-static void execlists_destroy(struct intel_engine_cs *engine)
+static void execlists_release(struct intel_engine_cs *engine)
{
+ execlists_shutdown(engine);
+
intel_engine_cleanup_common(engine);
lrc_destroy_wa_ctx(engine);
- kfree(engine);
}
static void
@@ -3719,13 +3795,9 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
{
/* Default vfuncs which can be overriden by each engine. */
- engine->destroy = execlists_destroy;
+ engine->release = execlists_release;
engine->resume = execlists_resume;
- engine->reset.prepare = execlists_reset_prepare;
- engine->reset.reset = execlists_reset;
- engine->reset.finish = execlists_reset_finish;
-
engine->cops = &execlists_context_ops;
engine->request_alloc = execlists_request_alloc;
@@ -3748,10 +3820,6 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
* until a more refined solution exists.
*/
}
- if (IS_GEN(engine->i915, 8))
- engine->emit_bb_start = gen8_emit_bb_start;
- else
- engine->emit_bb_start = gen9_emit_bb_start;
}
static inline void
@@ -3795,6 +3863,11 @@ static void rcs_submission_override(struct intel_engine_cs *engine)
int intel_execlists_submission_setup(struct intel_engine_cs *engine)
{
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+ struct drm_i915_private *i915 = engine->i915;
+ struct intel_uncore *uncore = engine->uncore;
+ u32 base = engine->mmio_base;
+
tasklet_init(&engine->execlists.tasklet,
execlists_submission_tasklet, (unsigned long)engine);
timer_setup(&engine->execlists.timer, execlists_timeslice, 0);
@@ -3806,21 +3879,6 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine)
if (engine->class == RENDER_CLASS)
rcs_submission_override(engine);
- return 0;
-}
-
-int intel_execlists_submission_init(struct intel_engine_cs *engine)
-{
- struct intel_engine_execlists * const execlists = &engine->execlists;
- struct drm_i915_private *i915 = engine->i915;
- struct intel_uncore *uncore = engine->uncore;
- u32 base = engine->mmio_base;
- int ret;
-
- ret = intel_engine_init_common(engine);
- if (ret)
- return ret;
-
if (intel_init_workaround_bb(engine))
/*
* We continue even if we fail to initialize WA batch
@@ -3901,7 +3959,7 @@ static void init_common_reg_state(u32 * const regs,
_MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT |
CTX_CTRL_RS_CTX_ENABLE);
- regs[CTX_RING_BUFFER_CONTROL] = RING_CTL_SIZE(ring->size) | RING_VALID;
+ regs[CTX_RING_CTL] = RING_CTL_SIZE(ring->size) | RING_VALID;
regs[CTX_BB_STATE] = RING_BB_PPGTT;
}
@@ -4019,6 +4077,7 @@ populate_lr_context(struct intel_context *ce,
memcpy(vaddr, defaults, engine->context_size);
i915_gem_object_unpin_map(engine->default_state);
+ __set_bit(CONTEXT_VALID_BIT, &ce->flags);
inhibit = false;
}
@@ -4166,6 +4225,13 @@ static void virtual_engine_initial_hint(struct virtual_engine *ve)
ve->siblings[0]);
}
+static int virtual_context_alloc(struct intel_context *ce)
+{
+ struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
+
+ return __execlists_context_alloc(ce, ve->siblings[0]);
+}
+
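
Note: allocation of the virtual context's backing state moves out of intel_execlists_create_virtual() (see the removal of the eager __execlists_context_alloc() call below) and into the context ops' .alloc hook. A minimal sketch of the deferred-allocation pattern this relies on — the guard is assumed to live in the common intel_context pin path, not in this file:

    if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
            int err = ce->ops->alloc(ce);

            if (err)
                    return err;
            __set_bit(CONTEXT_ALLOC_BIT, &ce->flags);
    }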
static int virtual_context_pin(struct intel_context *ce)
{
struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
@@ -4203,6 +4269,8 @@ static void virtual_context_exit(struct intel_context *ce)
}
static const struct intel_context_ops virtual_context_ops = {
+ .alloc = virtual_context_alloc,
+
.pin = virtual_context_pin,
.unpin = execlists_context_unpin,
@@ -4229,10 +4297,9 @@ static intel_engine_mask_t virtual_submission_mask(struct virtual_engine *ve)
mask = ve->siblings[0]->mask;
}
- GEM_TRACE("%s: rq=%llx:%lld, mask=%x, prio=%d\n",
- ve->base.name,
- rq->fence.context, rq->fence.seqno,
- mask, ve->base.execlists.queue_priority_hint);
+ ENGINE_TRACE(&ve->base, "rq=%llx:%lld, mask=%x, prio=%d\n",
+ rq->fence.context, rq->fence.seqno,
+ mask, ve->base.execlists.queue_priority_hint);
return mask;
}
@@ -4323,10 +4390,9 @@ static void virtual_submit_request(struct i915_request *rq)
struct i915_request *old;
unsigned long flags;
- GEM_TRACE("%s: rq=%llx:%lld\n",
- ve->base.name,
- rq->fence.context,
- rq->fence.seqno);
+ ENGINE_TRACE(&ve->base, "rq=%llx:%lld\n",
+ rq->fence.context,
+ rq->fence.seqno);
GEM_BUG_ON(ve->base.submit_request != virtual_submit_request);
@@ -4394,8 +4460,7 @@ virtual_bond_execute(struct i915_request *rq, struct dma_fence *signal)
}
struct intel_context *
-intel_execlists_create_virtual(struct i915_gem_context *ctx,
- struct intel_engine_cs **siblings,
+intel_execlists_create_virtual(struct intel_engine_cs **siblings,
unsigned int count)
{
struct virtual_engine *ve;
@@ -4406,13 +4471,13 @@ intel_execlists_create_virtual(struct i915_gem_context *ctx,
return ERR_PTR(-EINVAL);
if (count == 1)
- return intel_context_create(ctx, siblings[0]);
+ return intel_context_create(siblings[0]);
ve = kzalloc(struct_size(ve, siblings, count), GFP_KERNEL);
if (!ve)
return ERR_PTR(-ENOMEM);
- ve->base.i915 = ctx->i915;
+ ve->base.i915 = siblings[0]->i915;
ve->base.gt = siblings[0]->gt;
ve->base.uncore = siblings[0]->uncore;
ve->base.id = -1;
@@ -4439,7 +4504,6 @@ intel_execlists_create_virtual(struct i915_gem_context *ctx,
intel_engine_init_active(&ve->base, ENGINE_VIRTUAL);
intel_engine_init_breadcrumbs(&ve->base);
-
intel_engine_init_execlists(&ve->base);
ve->base.cops = &virtual_context_ops;
@@ -4455,7 +4519,7 @@ intel_execlists_create_virtual(struct i915_gem_context *ctx,
virtual_submission_tasklet,
(unsigned long)ve);
- intel_context_init(&ve->context, ctx, &ve->base);
+ intel_context_init(&ve->context, &ve->base);
for (n = 0; n < count; n++) {
struct intel_engine_cs *sibling = siblings[n];
@@ -4522,12 +4586,6 @@ intel_execlists_create_virtual(struct i915_gem_context *ctx,
ve->base.flags |= I915_ENGINE_IS_VIRTUAL;
- err = __execlists_context_alloc(&ve->context, siblings[0]);
- if (err)
- goto err_put;
-
- __set_bit(CONTEXT_ALLOC_BIT, &ve->context.flags);
-
return &ve->context;
err_put:
@@ -4536,14 +4594,12 @@ err_put:
}
struct intel_context *
-intel_execlists_clone_virtual(struct i915_gem_context *ctx,
- struct intel_engine_cs *src)
+intel_execlists_clone_virtual(struct intel_engine_cs *src)
{
struct virtual_engine *se = to_virtual_engine(src);
struct intel_context *dst;
- dst = intel_execlists_create_virtual(ctx,
- se->siblings,
+ dst = intel_execlists_create_virtual(se->siblings,
se->num_siblings);
if (IS_ERR(dst))
return dst;
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.h b/drivers/gpu/drm/i915/gt/intel_lrc.h
index 04511d8ebdc1..dfbc214e14f5 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.h
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.h
@@ -83,7 +83,6 @@ enum {
void intel_logical_ring_cleanup(struct intel_engine_cs *engine);
int intel_execlists_submission_setup(struct intel_engine_cs *engine);
-int intel_execlists_submission_init(struct intel_engine_cs *engine);
/* Logical Ring Contexts */
/* At the start of the context image is its per-process HWS page */
@@ -111,13 +110,11 @@ void intel_execlists_show_requests(struct intel_engine_cs *engine,
unsigned int max);
struct intel_context *
-intel_execlists_create_virtual(struct i915_gem_context *ctx,
- struct intel_engine_cs **siblings,
+intel_execlists_create_virtual(struct intel_engine_cs **siblings,
unsigned int count);
struct intel_context *
-intel_execlists_clone_virtual(struct i915_gem_context *ctx,
- struct intel_engine_cs *src);
+intel_execlists_clone_virtual(struct intel_engine_cs *src);
int intel_virtual_engine_attach_bond(struct intel_engine_cs *engine,
const struct intel_engine_cs *master,
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc_reg.h b/drivers/gpu/drm/i915/gt/intel_lrc_reg.h
index 06ab0276e10e..08a3be65f700 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc_reg.h
+++ b/drivers/gpu/drm/i915/gt/intel_lrc_reg.h
@@ -13,8 +13,8 @@
#define CTX_CONTEXT_CONTROL (0x02 + 1)
#define CTX_RING_HEAD (0x04 + 1)
#define CTX_RING_TAIL (0x06 + 1)
-#define CTX_RING_BUFFER_START (0x08 + 1)
-#define CTX_RING_BUFFER_CONTROL (0x0a + 1)
+#define CTX_RING_START (0x08 + 1)
+#define CTX_RING_CTL (0x0a + 1)
#define CTX_BB_STATE (0x10 + 1)
#define CTX_BB_PER_CTX_PTR (0x18 + 1)
#define CTX_PDP3_UDW (0x24 + 1)
diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c
index 2b977991b785..893249ea48d4 100644
--- a/drivers/gpu/drm/i915/gt/intel_mocs.c
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.c
@@ -283,65 +283,42 @@ static const struct drm_i915_mocs_entry icelake_mocs_table[] = {
static bool get_mocs_settings(const struct drm_i915_private *i915,
struct drm_i915_mocs_table *table)
{
- bool result = false;
-
if (INTEL_GEN(i915) >= 12) {
table->size = ARRAY_SIZE(tigerlake_mocs_table);
table->table = tigerlake_mocs_table;
table->n_entries = GEN11_NUM_MOCS_ENTRIES;
- result = true;
} else if (IS_GEN(i915, 11)) {
table->size = ARRAY_SIZE(icelake_mocs_table);
table->table = icelake_mocs_table;
table->n_entries = GEN11_NUM_MOCS_ENTRIES;
- result = true;
} else if (IS_GEN9_BC(i915) || IS_CANNONLAKE(i915)) {
table->size = ARRAY_SIZE(skylake_mocs_table);
table->n_entries = GEN9_NUM_MOCS_ENTRIES;
table->table = skylake_mocs_table;
- result = true;
} else if (IS_GEN9_LP(i915)) {
table->size = ARRAY_SIZE(broxton_mocs_table);
table->n_entries = GEN9_NUM_MOCS_ENTRIES;
table->table = broxton_mocs_table;
- result = true;
} else {
WARN_ONCE(INTEL_GEN(i915) >= 9,
"Platform that should have a MOCS table does not.\n");
+ return false;
}
+ if (GEM_DEBUG_WARN_ON(table->size > table->n_entries))
+ return false;
+
/* WaDisableSkipCaching:skl,bxt,kbl,glk */
if (IS_GEN(i915, 9)) {
int i;
for (i = 0; i < table->size; i++)
- if (WARN_ON(table->table[i].l3cc_value &
- (L3_ESC(1) | L3_SCC(0x7))))
+ if (GEM_DEBUG_WARN_ON(table->table[i].l3cc_value &
+ (L3_ESC(1) | L3_SCC(0x7))))
return false;
}
- return result;
-}
-
-static i915_reg_t mocs_register(const struct intel_engine_cs *engine, int index)
-{
- switch (engine->id) {
- case RCS0:
- return GEN9_GFX_MOCS(index);
- case VCS0:
- return GEN9_MFX0_MOCS(index);
- case BCS0:
- return GEN9_BLT_MOCS(index);
- case VECS0:
- return GEN9_VEBOX_MOCS(index);
- case VCS1:
- return GEN9_MFX1_MOCS(index);
- case VCS2:
- return GEN11_MFX2_MOCS(index);
- default:
- MISSING_CASE(engine->id);
- return INVALID_MMIO_REG;
- }
+ return true;
}
/*
@@ -351,29 +328,47 @@ static i915_reg_t mocs_register(const struct intel_engine_cs *engine, int index)
static u32 get_entry_control(const struct drm_i915_mocs_table *table,
unsigned int index)
{
- if (table->table[index].used)
+ if (index < table->size && table->table[index].used)
return table->table[index].control_value;
return table->table[I915_MOCS_PTE].control_value;
}
-static void init_mocs_table(struct intel_engine_cs *engine,
- const struct drm_i915_mocs_table *table)
+#define for_each_mocs(mocs, t, i) \
+ for (i = 0; \
+ i < (t)->n_entries ? (mocs = get_entry_control((t), i)), 1 : 0;\
+ i++)
+
+static void __init_mocs_table(struct intel_uncore *uncore,
+ const struct drm_i915_mocs_table *table,
+ u32 addr)
{
- struct intel_uncore *uncore = engine->uncore;
- u32 unused_value = table->table[I915_MOCS_PTE].control_value;
unsigned int i;
+ u32 mocs;
+
+ for_each_mocs(mocs, table, i)
+ intel_uncore_write_fw(uncore, _MMIO(addr + i * 4), mocs);
+}
- for (i = 0; i < table->size; i++)
- intel_uncore_write_fw(uncore,
- mocs_register(engine, i),
- get_entry_control(table, i));
+static u32 mocs_offset(const struct intel_engine_cs *engine)
+{
+ static const u32 offset[] = {
+ [RCS0] = __GEN9_RCS0_MOCS0,
+ [VCS0] = __GEN9_VCS0_MOCS0,
+ [VCS1] = __GEN9_VCS1_MOCS0,
+ [VECS0] = __GEN9_VECS0_MOCS0,
+ [BCS0] = __GEN9_BCS0_MOCS0,
+ [VCS2] = __GEN11_VCS2_MOCS0,
+ };
+
+ GEM_BUG_ON(engine->id >= ARRAY_SIZE(offset));
+ return offset[engine->id];
+}
- /* All remaining entries are unused */
- for (; i < table->n_entries; i++)
- intel_uncore_write_fw(uncore,
- mocs_register(engine, i),
- unused_value);
+static void init_mocs_table(struct intel_engine_cs *engine,
+ const struct drm_i915_mocs_table *table)
+{
+ __init_mocs_table(engine->uncore, table, mocs_offset(engine));
}
/*
@@ -383,51 +378,34 @@ static void init_mocs_table(struct intel_engine_cs *engine,
static u16 get_entry_l3cc(const struct drm_i915_mocs_table *table,
unsigned int index)
{
- if (table->table[index].used)
+ if (index < table->size && table->table[index].used)
return table->table[index].l3cc_value;
return table->table[I915_MOCS_PTE].l3cc_value;
}
-static inline u32 l3cc_combine(const struct drm_i915_mocs_table *table,
- u16 low,
- u16 high)
+static inline u32 l3cc_combine(u16 low, u16 high)
{
return low | (u32)high << 16;
}
+#define for_each_l3cc(l3cc, t, i) \
+ for (i = 0; \
+ i < ((t)->n_entries + 1) / 2 ? \
+ (l3cc = l3cc_combine(get_entry_l3cc((t), 2 * i), \
+ get_entry_l3cc((t), 2 * i + 1))), 1 : \
+ 0; \
+ i++)
+
static void init_l3cc_table(struct intel_engine_cs *engine,
const struct drm_i915_mocs_table *table)
{
struct intel_uncore *uncore = engine->uncore;
- u16 unused_value = table->table[I915_MOCS_PTE].l3cc_value;
unsigned int i;
+ u32 l3cc;
- for (i = 0; i < table->size / 2; i++) {
- u16 low = get_entry_l3cc(table, 2 * i);
- u16 high = get_entry_l3cc(table, 2 * i + 1);
-
- intel_uncore_write(uncore,
- GEN9_LNCFCMOCS(i),
- l3cc_combine(table, low, high));
- }
-
- /* Odd table size - 1 left over */
- if (table->size & 1) {
- u16 low = get_entry_l3cc(table, 2 * i);
-
- intel_uncore_write(uncore,
- GEN9_LNCFCMOCS(i),
- l3cc_combine(table, low, unused_value));
- i++;
- }
-
- /* All remaining entries are also unused */
- for (; i < table->n_entries / 2; i++)
- intel_uncore_write(uncore,
- GEN9_LNCFCMOCS(i),
- l3cc_combine(table, unused_value,
- unused_value));
+ for_each_l3cc(l3cc, table, i)
+ intel_uncore_write_fw(uncore, GEN9_LNCFCMOCS(i), l3cc);
}
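
Note: each GEN9_LNCFCMOCS register carries two 16-bit l3cc entries — the even-indexed entry in bits 15:0, the odd one in bits 31:16 — so for_each_l3cc() walks (n_entries + 1) / 2 register-sized pairs, the rounding-up covering the old "odd table size" special case. A quick standalone check of the packing:

    #include <assert.h>
    #include <stdint.h>

    static inline uint32_t l3cc_combine(uint16_t low, uint16_t high)
    {
            return low | (uint32_t)high << 16;
    }

    int main(void)
    {
            /* entry 2*i in the low half, entry 2*i + 1 in the high half */
            assert(l3cc_combine(0x0011, 0x0022) == 0x00220011);
            /* odd n_entries still rounds up to a full register */
            assert((5 + 1) / 2 == 3);
            return 0;
    }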
void intel_mocs_init_engine(struct intel_engine_cs *engine)
@@ -448,11 +426,14 @@ void intel_mocs_init_engine(struct intel_engine_cs *engine)
init_l3cc_table(engine, &table);
}
-static void intel_mocs_init_global(struct intel_gt *gt)
+static u32 global_mocs_offset(void)
+{
+ return i915_mmio_reg_offset(GEN12_GLOBAL_MOCS(0));
+}
+
+static void init_global_mocs(struct intel_gt *gt)
{
- struct intel_uncore *uncore = gt->uncore;
struct drm_i915_mocs_table table;
- unsigned int index;
/*
* LLC and eDRAM control values are not applicable to dgfx
@@ -460,32 +441,18 @@ static void intel_mocs_init_global(struct intel_gt *gt)
if (IS_DGFX(gt->i915))
return;
- GEM_BUG_ON(!HAS_GLOBAL_MOCS_REGISTERS(gt->i915));
-
if (!get_mocs_settings(gt->i915, &table))
return;
- if (GEM_DEBUG_WARN_ON(table.size > table.n_entries))
- return;
-
- for (index = 0; index < table.size; index++)
- intel_uncore_write(uncore,
- GEN12_GLOBAL_MOCS(index),
- table.table[index].control_value);
-
- /*
- * Ok, now set the unused entries to the invalid entry (index 0). These
- * entries are officially undefined and no contract for the contents and
- * settings is given for these entries.
- */
- for (; index < table.n_entries; index++)
- intel_uncore_write(uncore,
- GEN12_GLOBAL_MOCS(index),
- table.table[0].control_value);
+ __init_mocs_table(gt->uncore, &table, global_mocs_offset());
}
void intel_mocs_init(struct intel_gt *gt)
{
if (HAS_GLOBAL_MOCS_REGISTERS(gt->i915))
- intel_mocs_init_global(gt);
+ init_global_mocs(gt);
}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftest_mocs.c"
+#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c
index 700104b90163..9e303c29d6e3 100644
--- a/drivers/gpu/drm/i915/gt/intel_rc6.c
+++ b/drivers/gpu/drm/i915/gt/intel_rc6.c
@@ -88,21 +88,18 @@ static void gen11_rc6_enable(struct intel_rc6 *rc6)
* do not want the enable hysteresis to be less than the wakeup latency.
*
* igt/gem_exec_nop/sequential provides a rough estimate for the
- * service latency, and puts it around 10us for Broadwell (and other
- * big core) and around 40us for Broxton (and other low power cores).
- * [Note that for legacy ringbuffer submission, this is less than 1us!]
- * However, the wakeup latency on Broxton is closer to 100us. To be
- * conservative, we have to factor in a context switch on top (due
- * to ksoftirqd).
+ * service latency, and puts it under 10us for Icelake, similar to
+ * Broadwell+. To be conservative, we want to factor in a context
+ * switch on top (due to ksoftirqd).
*/
- set(uncore, GEN9_MEDIA_PG_IDLE_HYSTERESIS, 250);
- set(uncore, GEN9_RENDER_PG_IDLE_HYSTERESIS, 250);
+ set(uncore, GEN9_MEDIA_PG_IDLE_HYSTERESIS, 60);
+ set(uncore, GEN9_RENDER_PG_IDLE_HYSTERESIS, 60);
/* 3a: Enable RC6 */
- set(uncore, GEN6_RC_CONTROL,
- GEN6_RC_CTL_HW_ENABLE |
- GEN6_RC_CTL_RC6_ENABLE |
- GEN6_RC_CTL_EI_MODE(1));
+ rc6->ctl_enable =
+ GEN6_RC_CTL_HW_ENABLE |
+ GEN6_RC_CTL_RC6_ENABLE |
+ GEN6_RC_CTL_EI_MODE(1);
set(uncore, GEN9_PG_ENABLE,
GEN9_RENDER_PG_ENABLE |
@@ -173,10 +170,10 @@ static void gen9_rc6_enable(struct intel_rc6 *rc6)
else
rc6_mode = GEN6_RC_CTL_EI_MODE(1);
- set(uncore, GEN6_RC_CONTROL,
- GEN6_RC_CTL_HW_ENABLE |
- GEN6_RC_CTL_RC6_ENABLE |
- rc6_mode);
+ rc6->ctl_enable =
+ GEN6_RC_CTL_HW_ENABLE |
+ GEN6_RC_CTL_RC6_ENABLE |
+ rc6_mode;
/*
* WaRsDisableCoarsePowerGating:skl,cnl
@@ -203,10 +200,10 @@ static void gen8_rc6_enable(struct intel_rc6 *rc6)
set(uncore, GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
/* 3: Enable RC6 */
- set(uncore, GEN6_RC_CONTROL,
+ rc6->ctl_enable =
GEN6_RC_CTL_HW_ENABLE |
GEN7_RC_CTL_TO_MODE |
- GEN6_RC_CTL_RC6_ENABLE);
+ GEN6_RC_CTL_RC6_ENABLE;
}
static void gen6_rc6_enable(struct intel_rc6 *rc6)
@@ -242,10 +239,10 @@ static void gen6_rc6_enable(struct intel_rc6 *rc6)
rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
if (HAS_RC6pp(i915))
rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
- set(uncore, GEN6_RC_CONTROL,
+ rc6->ctl_enable =
rc6_mask |
GEN6_RC_CTL_EI_MODE(1) |
- GEN6_RC_CTL_HW_ENABLE);
+ GEN6_RC_CTL_HW_ENABLE;
rc6vids = 0;
ret = sandybridge_pcode_read(i915, GEN6_PCODE_READ_RC6VIDS,
@@ -363,7 +360,7 @@ static void chv_rc6_enable(struct intel_rc6 *rc6)
VLV_RENDER_RC6_COUNT_EN));
/* 3: Enable RC6 */
- set(uncore, GEN6_RC_CONTROL, GEN7_RC_CTL_TO_MODE);
+ rc6->ctl_enable = GEN7_RC_CTL_TO_MODE;
}
static void vlv_rc6_enable(struct intel_rc6 *rc6)
@@ -389,8 +386,8 @@ static void vlv_rc6_enable(struct intel_rc6 *rc6)
VLV_MEDIA_RC6_COUNT_EN |
VLV_RENDER_RC6_COUNT_EN));
- set(uncore, GEN6_RC_CONTROL,
- GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL);
+ rc6->ctl_enable =
+ GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;
}
static bool bxt_check_bios_rc6_setup(struct intel_rc6 *rc6)
@@ -491,64 +488,19 @@ static void rpm_put(struct intel_rc6 *rc6)
rc6->wakeref = false;
}
-static bool intel_rc6_ctx_corrupted(struct intel_rc6 *rc6)
-{
- return !intel_uncore_read(rc6_to_uncore(rc6), GEN8_RC6_CTX_INFO);
-}
-
-static void intel_rc6_ctx_wa_init(struct intel_rc6 *rc6)
+static bool pctx_corrupted(struct intel_rc6 *rc6)
{
struct drm_i915_private *i915 = rc6_to_i915(rc6);
if (!NEEDS_RC6_CTX_CORRUPTION_WA(i915))
- return;
-
- if (intel_rc6_ctx_corrupted(rc6)) {
- DRM_INFO("RC6 context corrupted, disabling runtime power management\n");
- rc6->ctx_corrupted = true;
- }
-}
-
-/**
- * intel_rc6_ctx_wa_resume - system resume sequence for the RC6 CTX WA
- * @rc6: rc6 state
- *
- * Perform any steps needed to re-init the RC6 CTX WA after system resume.
- */
-void intel_rc6_ctx_wa_resume(struct intel_rc6 *rc6)
-{
- if (rc6->ctx_corrupted && !intel_rc6_ctx_corrupted(rc6)) {
- DRM_INFO("RC6 context restored, re-enabling runtime power management\n");
- rc6->ctx_corrupted = false;
- }
-}
-
-/**
- * intel_rc6_ctx_wa_check - check for a new RC6 CTX corruption
- * @rc6: rc6 state
- *
- * Check if an RC6 CTX corruption has happened since the last check and if so
- * disable RC6 and runtime power management.
-*/
-void intel_rc6_ctx_wa_check(struct intel_rc6 *rc6)
-{
- struct drm_i915_private *i915 = rc6_to_i915(rc6);
-
- if (!NEEDS_RC6_CTX_CORRUPTION_WA(i915))
- return;
-
- if (rc6->ctx_corrupted)
- return;
-
- if (!intel_rc6_ctx_corrupted(rc6))
- return;
-
- DRM_NOTE("RC6 context corruption, disabling runtime power management\n");
+ return false;
- intel_rc6_disable(rc6);
- rc6->ctx_corrupted = true;
+ if (intel_uncore_read(rc6_to_uncore(rc6), GEN8_RC6_CTX_INFO))
+ return false;
- return;
+ dev_notice(i915->drm.dev,
+ "RC6 context corruption, disabling runtime power management\n");
+ return true;
}
static void __intel_rc6_disable(struct intel_rc6 *rc6)
@@ -575,8 +527,6 @@ void intel_rc6_init(struct intel_rc6 *rc6)
if (!rc6_supported(rc6))
return;
- intel_rc6_ctx_wa_init(rc6);
-
if (IS_CHERRYVIEW(i915))
err = chv_rc6_init(rc6);
else if (IS_VALLEYVIEW(i915))
@@ -611,9 +561,6 @@ void intel_rc6_enable(struct intel_rc6 *rc6)
GEM_BUG_ON(rc6->enabled);
- if (rc6->ctx_corrupted)
- return;
-
intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
if (IS_CHERRYVIEW(i915))
@@ -629,13 +576,51 @@ void intel_rc6_enable(struct intel_rc6 *rc6)
else if (INTEL_GEN(i915) >= 6)
gen6_rc6_enable(rc6);
+ rc6->manual = rc6->ctl_enable & GEN6_RC_CTL_RC6_ENABLE;
+ if (NEEDS_RC6_CTX_CORRUPTION_WA(i915))
+ rc6->ctl_enable = 0;
+
intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
+ if (unlikely(pctx_corrupted(rc6)))
+ return;
+
/* rc6 is ready, runtime-pm is go! */
rpm_put(rc6);
rc6->enabled = true;
}
+void intel_rc6_unpark(struct intel_rc6 *rc6)
+{
+ struct intel_uncore *uncore = rc6_to_uncore(rc6);
+
+ if (!rc6->enabled)
+ return;
+
+ /* Restore HW timers for automatic RC6 entry while busy */
+ set(uncore, GEN6_RC_CONTROL, rc6->ctl_enable);
+}
+
+void intel_rc6_park(struct intel_rc6 *rc6)
+{
+ struct intel_uncore *uncore = rc6_to_uncore(rc6);
+
+ if (!rc6->enabled)
+ return;
+
+ if (unlikely(pctx_corrupted(rc6))) {
+ intel_rc6_disable(rc6);
+ return;
+ }
+
+ if (!rc6->manual)
+ return;
+
+ /* Turn off the HW timers and go directly to rc6 */
+ set(uncore, GEN6_RC_CONTROL, GEN6_RC_CTL_RC6_ENABLE);
+ set(uncore, GEN6_RC_STATE, 0x4 << RC_SW_TARGET_STATE_SHIFT);
+}
+
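
Note: with the enable paths now caching their GEN6_RC_CONTROL value in rc6->ctl_enable instead of writing it immediately, RC6 entry becomes a parking decision. A sketch of the intended call order; the park/unpark call sites are assumed to be the GT power-management hooks elsewhere in this series, not shown in this diff:

    intel_rc6_enable(rc6);   /* records ctl_enable and manual; RC_CONTROL write deferred */
    intel_rc6_unpark(rc6);   /* engine busy: hand RC6 entry to the HW timers */
    /* ... GPU work executes and retires ... */
    intel_rc6_park(rc6);     /* idle: with rc6->manual set, enter RC6 at once */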
void intel_rc6_disable(struct intel_rc6 *rc6)
{
if (!rc6->enabled)
@@ -785,3 +770,7 @@ u64 intel_rc6_residency_us(struct intel_rc6 *rc6, i915_reg_t reg)
{
return DIV_ROUND_UP_ULL(intel_rc6_residency_ns(rc6, reg), 1000);
}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftest_rc6.c"
+#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.h b/drivers/gpu/drm/i915/gt/intel_rc6.h
index 1370f6834a4c..9f0f23fca8af 100644
--- a/drivers/gpu/drm/i915/gt/intel_rc6.h
+++ b/drivers/gpu/drm/i915/gt/intel_rc6.h
@@ -15,6 +15,9 @@ struct intel_rc6;
void intel_rc6_init(struct intel_rc6 *rc6);
void intel_rc6_fini(struct intel_rc6 *rc6);
+void intel_rc6_unpark(struct intel_rc6 *rc6);
+void intel_rc6_park(struct intel_rc6 *rc6);
+
void intel_rc6_sanitize(struct intel_rc6 *rc6);
void intel_rc6_enable(struct intel_rc6 *rc6);
void intel_rc6_disable(struct intel_rc6 *rc6);
@@ -22,7 +25,4 @@ void intel_rc6_disable(struct intel_rc6 *rc6);
u64 intel_rc6_residency_ns(struct intel_rc6 *rc6, i915_reg_t reg);
u64 intel_rc6_residency_us(struct intel_rc6 *rc6, i915_reg_t reg);
-void intel_rc6_ctx_wa_check(struct intel_rc6 *rc6);
-void intel_rc6_ctx_wa_resume(struct intel_rc6 *rc6);
-
#endif /* INTEL_RC6_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_rc6_types.h b/drivers/gpu/drm/i915/gt/intel_rc6_types.h
index 89ad5697a8d4..bfbb623f7a4f 100644
--- a/drivers/gpu/drm/i915/gt/intel_rc6_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_rc6_types.h
@@ -18,12 +18,14 @@ struct intel_rc6 {
u64 prev_hw_residency[4];
u64 cur_residency[4];
+ u32 ctl_enable;
+
struct drm_i915_gem_object *pctx;
bool supported : 1;
bool enabled : 1;
+ bool manual : 1;
bool wakeref : 1;
- bool ctx_corrupted : 1;
};
#endif /* INTEL_RC6_TYPES_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_renderstate.c b/drivers/gpu/drm/i915/gt/intel_renderstate.c
index c4edc35e7d89..5954ecc3207f 100644
--- a/drivers/gpu/drm/i915/gt/intel_renderstate.c
+++ b/drivers/gpu/drm/i915/gt/intel_renderstate.c
@@ -29,16 +29,6 @@
#include "intel_renderstate.h"
#include "intel_ring.h"
-struct intel_renderstate {
- const struct intel_renderstate_rodata *rodata;
- struct drm_i915_gem_object *obj;
- struct i915_vma *vma;
- u32 batch_offset;
- u32 batch_size;
- u32 aux_offset;
- u32 aux_size;
-};
-
static const struct intel_renderstate_rodata *
render_state_get_rodata(const struct intel_engine_cs *engine)
{
@@ -84,11 +74,11 @@ static int render_state_setup(struct intel_renderstate *so,
u32 *d;
int ret;
- ret = i915_gem_object_prepare_write(so->obj, &needs_clflush);
+ ret = i915_gem_object_prepare_write(so->vma->obj, &needs_clflush);
if (ret)
return ret;
- d = kmap_atomic(i915_gem_object_get_dirty_page(so->obj, 0));
+ d = kmap_atomic(i915_gem_object_get_dirty_page(so->vma->obj, 0));
while (i < rodata->batch_items) {
u32 s = rodata->batch[i];
@@ -166,7 +156,7 @@ static int render_state_setup(struct intel_renderstate *so,
ret = 0;
out:
- i915_gem_object_finish_access(so->obj);
+ i915_gem_object_finish_access(so->vma->obj);
return ret;
err:
@@ -177,61 +167,84 @@ err:
#undef OUT_BATCH
-int intel_renderstate_emit(struct i915_request *rq)
+int intel_renderstate_init(struct intel_renderstate *so,
+ struct intel_engine_cs *engine)
{
- struct intel_engine_cs *engine = rq->engine;
- struct intel_renderstate so = {}; /* keep the compiler happy */
+ struct drm_i915_gem_object *obj;
int err;
- so.rodata = render_state_get_rodata(engine);
- if (!so.rodata)
+ memset(so, 0, sizeof(*so));
+
+ so->rodata = render_state_get_rodata(engine);
+ if (!so->rodata)
return 0;
- if (so.rodata->batch_items * 4 > PAGE_SIZE)
+ if (so->rodata->batch_items * 4 > PAGE_SIZE)
return -EINVAL;
- so.obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
- if (IS_ERR(so.obj))
- return PTR_ERR(so.obj);
+ obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
- so.vma = i915_vma_instance(so.obj, &engine->gt->ggtt->vm, NULL);
- if (IS_ERR(so.vma)) {
- err = PTR_ERR(so.vma);
+ so->vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
+ if (IS_ERR(so->vma)) {
+ err = PTR_ERR(so->vma);
goto err_obj;
}
- err = i915_vma_pin(so.vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
+ err = i915_vma_pin(so->vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
if (err)
goto err_vma;
- err = render_state_setup(&so, rq->i915);
+ err = render_state_setup(so, engine->i915);
if (err)
goto err_unpin;
+ return 0;
+
+err_unpin:
+ i915_vma_unpin(so->vma);
+err_vma:
+ i915_vma_close(so->vma);
+err_obj:
+ i915_gem_object_put(obj);
+ so->vma = NULL;
+ return err;
+}
+
+int intel_renderstate_emit(struct intel_renderstate *so,
+ struct i915_request *rq)
+{
+ struct intel_engine_cs *engine = rq->engine;
+ int err;
+
+ if (!so->vma)
+ return 0;
+
err = engine->emit_bb_start(rq,
- so.batch_offset, so.batch_size,
+ so->batch_offset, so->batch_size,
I915_DISPATCH_SECURE);
if (err)
- goto err_unpin;
+ return err;
- if (so.aux_size > 8) {
+ if (so->aux_size > 8) {
err = engine->emit_bb_start(rq,
- so.aux_offset, so.aux_size,
+ so->aux_offset, so->aux_size,
I915_DISPATCH_SECURE);
if (err)
- goto err_unpin;
+ return err;
}
- i915_vma_lock(so.vma);
- err = i915_request_await_object(rq, so.vma->obj, false);
+ i915_vma_lock(so->vma);
+ err = i915_request_await_object(rq, so->vma->obj, false);
if (err == 0)
- err = i915_vma_move_to_active(so.vma, rq, 0);
- i915_vma_unlock(so.vma);
-err_unpin:
- i915_vma_unpin(so.vma);
-err_vma:
- i915_vma_close(so.vma);
-err_obj:
- i915_gem_object_put(so.obj);
+ err = i915_vma_move_to_active(so->vma, rq, 0);
+ i915_vma_unlock(so->vma);
+
return err;
}
+
+void intel_renderstate_fini(struct intel_renderstate *so)
+{
+ i915_vma_unpin_and_release(&so->vma, 0);
+}
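
Note: intel_renderstate_emit() no longer owns the object's lifetime — setup and teardown move into intel_renderstate_init()/intel_renderstate_fini(), so the caller holds the state across the request. A minimal usage sketch based on the signatures added to intel_renderstate.h below; the wrapper function itself is illustrative:

    static int emit_null_state(struct intel_engine_cs *engine,
                               struct i915_request *rq)
    {
            struct intel_renderstate so;
            int err;

            err = intel_renderstate_init(&so, engine);
            if (err)
                    return err;

            err = intel_renderstate_emit(&so, rq);

            intel_renderstate_fini(&so);
            return err;
    }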
diff --git a/drivers/gpu/drm/i915/gt/intel_renderstate.h b/drivers/gpu/drm/i915/gt/intel_renderstate.h
index 8d5079145054..5700be69a05a 100644
--- a/drivers/gpu/drm/i915/gt/intel_renderstate.h
+++ b/drivers/gpu/drm/i915/gt/intel_renderstate.h
@@ -27,6 +27,8 @@
#include <linux/types.h>
struct i915_request;
+struct intel_engine_cs;
+struct i915_vma;
struct intel_renderstate_rodata {
const u32 *reloc;
@@ -46,6 +48,19 @@ extern const struct intel_renderstate_rodata gen7_null_state;
extern const struct intel_renderstate_rodata gen8_null_state;
extern const struct intel_renderstate_rodata gen9_null_state;
-int intel_renderstate_emit(struct i915_request *rq);
+struct intel_renderstate {
+ const struct intel_renderstate_rodata *rodata;
+ struct i915_vma *vma;
+ u32 batch_offset;
+ u32 batch_size;
+ u32 aux_offset;
+ u32 aux_size;
+};
+
+int intel_renderstate_init(struct intel_renderstate *so,
+ struct intel_engine_cs *engine);
+int intel_renderstate_emit(struct intel_renderstate *so,
+ struct i915_request *rq);
+void intel_renderstate_fini(struct intel_renderstate *so);
#endif /* _INTEL_RENDERSTATE_H_ */
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index c97423a76642..1c51296646e0 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -21,6 +21,7 @@
#include "intel_reset.h"
#include "uc/intel_guc.h"
+#include "uc/intel_guc_submission.h"
#define RESET_MAX_RETRIES 3
@@ -40,27 +41,29 @@ static void rmw_clear_fw(struct intel_uncore *uncore, i915_reg_t reg, u32 clr)
static void engine_skip_context(struct i915_request *rq)
{
struct intel_engine_cs *engine = rq->engine;
- struct i915_gem_context *hung_ctx = rq->gem_context;
+ struct intel_context *hung_ctx = rq->context;
if (!i915_request_is_active(rq))
return;
lockdep_assert_held(&engine->active.lock);
list_for_each_entry_continue(rq, &engine->active.requests, sched.link)
- if (rq->gem_context == hung_ctx)
+ if (rq->context == hung_ctx)
i915_request_skip(rq, -EIO);
}
-static void client_mark_guilty(struct drm_i915_file_private *file_priv,
- const struct i915_gem_context *ctx)
+static void client_mark_guilty(struct i915_gem_context *ctx, bool banned)
{
- unsigned int score;
+ struct drm_i915_file_private *file_priv = ctx->file_priv;
unsigned long prev_hang;
+ unsigned int score;
+
+ if (IS_ERR_OR_NULL(file_priv))
+ return;
- if (i915_gem_context_is_banned(ctx))
+ score = 0;
+ if (banned)
score = I915_CLIENT_SCORE_CONTEXT_BAN;
- else
- score = 0;
prev_hang = xchg(&file_priv->hang_timestamp, jiffies);
if (time_before(jiffies, prev_hang + I915_CLIENT_FAST_HANG_JIFFIES))
@@ -75,17 +78,38 @@ static void client_mark_guilty(struct drm_i915_file_private *file_priv,
}
}
-static bool context_mark_guilty(struct i915_gem_context *ctx)
+static bool mark_guilty(struct i915_request *rq)
{
+ struct i915_gem_context *ctx;
unsigned long prev_hang;
bool banned;
int i;
+ rcu_read_lock();
+ ctx = rcu_dereference(rq->context->gem_context);
+ if (ctx && !kref_get_unless_zero(&ctx->ref))
+ ctx = NULL;
+ rcu_read_unlock();
+ if (!ctx)
+ return false;
+
+ if (i915_gem_context_is_closed(ctx)) {
+ intel_context_set_banned(rq->context);
+ banned = true;
+ goto out;
+ }
+
atomic_inc(&ctx->guilty_count);
/* Cool contexts are too cool to be banned! (Used for reset testing.) */
- if (!i915_gem_context_is_bannable(ctx))
- return false;
+ if (!i915_gem_context_is_bannable(ctx)) {
+ banned = false;
+ goto out;
+ }
+
+ dev_notice(ctx->i915->drm.dev,
+ "%s context reset due to GPU hang\n",
+ ctx->name);
/* Record the timestamp for the last N hangs */
prev_hang = ctx->hang_timestamp[0];
@@ -100,18 +124,25 @@ static bool context_mark_guilty(struct i915_gem_context *ctx)
if (banned) {
DRM_DEBUG_DRIVER("context %s: guilty %d, banned\n",
ctx->name, atomic_read(&ctx->guilty_count));
- i915_gem_context_set_banned(ctx);
+ intel_context_set_banned(rq->context);
}
- if (!IS_ERR_OR_NULL(ctx->file_priv))
- client_mark_guilty(ctx->file_priv, ctx);
+ client_mark_guilty(ctx, banned);
+out:
+ i915_gem_context_put(ctx);
return banned;
}
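
Note: mark_guilty() now has to chase rq->context->gem_context, an RCU-protected pointer that may be dropped concurrently. RCU keeps the memory valid inside the read section, but the refcount can already be zero, so the reference must be taken conditionally. The general form of the idiom, with obj and head as placeholder names:

    rcu_read_lock();
    obj = rcu_dereference(head);
    if (obj && !kref_get_unless_zero(&obj->ref))
            obj = NULL;     /* lost the race against the final kref_put() */
    rcu_read_unlock();
    if (!obj)
            return;         /* already being freed: no one left to blame */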
-static void context_mark_innocent(struct i915_gem_context *ctx)
+static void mark_innocent(struct i915_request *rq)
{
- atomic_inc(&ctx->active_count);
+ struct i915_gem_context *ctx;
+
+ rcu_read_lock();
+ ctx = rcu_dereference(rq->context->gem_context);
+ if (ctx)
+ atomic_inc(&ctx->active_count);
+ rcu_read_unlock();
}
void __i915_request_reset(struct i915_request *rq, bool guilty)
@@ -124,14 +155,16 @@ void __i915_request_reset(struct i915_request *rq, bool guilty)
GEM_BUG_ON(i915_request_completed(rq));
+ rcu_read_lock(); /* protect the GEM context */
if (guilty) {
i915_request_skip(rq, -EIO);
- if (context_mark_guilty(rq->gem_context))
+ if (mark_guilty(rq))
engine_skip_context(rq);
} else {
dma_fence_set_error(&rq->fence, -EAGAIN);
- context_mark_innocent(rq->gem_context);
+ mark_innocent(rq);
}
+ rcu_read_unlock();
}
static bool i915_in_reset(struct pci_dev *pdev)
@@ -647,7 +680,8 @@ static void reset_prepare_engine(struct intel_engine_cs *engine)
* GPU state upon resume, i.e. fail to restart after a reset.
*/
intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL);
- engine->reset.prepare(engine);
+ if (engine->reset.prepare)
+ engine->reset.prepare(engine);
}
static void revoke_mmaps(struct intel_gt *gt)
@@ -667,8 +701,13 @@ static void revoke_mmaps(struct intel_gt *gt)
continue;
GEM_BUG_ON(vma->fence != &gt->ggtt->fence_regs[i]);
- node = &vma->obj->base.vma_node;
+
+ if (!vma->mmo)
+ continue;
+
+ node = &vma->mmo->vma_node;
vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
+
unmap_mapping_range(gt->i915->drm.anon_inode->i_mapping,
drm_vma_node_offset_addr(node) + vma_offset,
vma->size,
@@ -722,10 +761,11 @@ static int gt_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask)
static void reset_finish_engine(struct intel_engine_cs *engine)
{
- engine->reset.finish(engine);
+ if (engine->reset.finish)
+ engine->reset.finish(engine);
intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL);
- intel_engine_breadcrumbs_irq(engine);
+ intel_engine_signal_breadcrumbs(engine);
}
static void reset_finish(struct intel_gt *gt, intel_engine_mask_t awake)
@@ -754,7 +794,7 @@ static void nop_submit_request(struct i915_request *request)
i915_request_mark_complete(request);
spin_unlock_irqrestore(&engine->active.lock, flags);
- intel_engine_queue_breadcrumbs(engine);
+ intel_engine_signal_breadcrumbs(engine);
}
static void __intel_gt_set_wedged(struct intel_gt *gt)
@@ -799,7 +839,8 @@ static void __intel_gt_set_wedged(struct intel_gt *gt)
/* Mark all executing requests as skipped */
for_each_engine(engine, gt, id)
- engine->cancel_requests(engine);
+ if (engine->reset.cancel)
+ engine->reset.cancel(engine);
reset_finish(gt, awake);
@@ -820,7 +861,6 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt)
{
struct intel_gt_timelines *timelines = &gt->timelines;
struct intel_timeline *tl;
- unsigned long flags;
bool ok;
if (!test_bit(I915_WEDGED, &gt->reset.flags))
@@ -842,7 +882,7 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt)
*
* No more can be submitted until we reset the wedged bit.
*/
- spin_lock_irqsave(&timelines->lock, flags);
+ spin_lock(&timelines->lock);
list_for_each_entry(tl, &timelines->active_list, link) {
struct dma_fence *fence;
@@ -850,7 +890,7 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt)
if (!fence)
continue;
- spin_unlock_irqrestore(&timelines->lock, flags);
+ spin_unlock(&timelines->lock);
/*
* All internal dependencies (i915_requests) will have
@@ -863,10 +903,10 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt)
dma_fence_put(fence);
/* Restart iteration after dropping lock */
- spin_lock_irqsave(&timelines->lock, flags);
+ spin_lock(&timelines->lock);
tl = list_entry(&timelines->active_list, typeof(*tl), link);
}
- spin_unlock_irqrestore(&timelines->lock, flags);
+ spin_unlock(&timelines->lock);
/* We must reset pending GPU events before restoring our submission */
ok = !HAS_EXECLISTS(gt->i915); /* XXX better agnosticism desired */
@@ -1070,9 +1110,10 @@ static inline int intel_gt_reset_engine(struct intel_engine_cs *engine)
int intel_engine_reset(struct intel_engine_cs *engine, const char *msg)
{
struct intel_gt *gt = engine->gt;
+ bool uses_guc = intel_engine_in_guc_submission_mode(engine);
int ret;
- GEM_TRACE("%s flags=%lx\n", engine->name, gt->reset.flags);
+ ENGINE_TRACE(engine, "flags=%lx\n", gt->reset.flags);
GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &gt->reset.flags));
if (!intel_engine_pm_get_if_awake(engine))
@@ -1085,14 +1126,14 @@ int intel_engine_reset(struct intel_engine_cs *engine, const char *msg)
"Resetting %s for %s\n", engine->name, msg);
atomic_inc(&engine->i915->gpu_error.reset_engine_count[engine->uabi_class]);
- if (!engine->gt->uc.guc.execbuf_client)
+ if (!uses_guc)
ret = intel_gt_reset_engine(engine);
else
ret = intel_guc_reset_engine(&engine->gt->uc.guc, engine);
if (ret) {
/* If we fail here, we expect to fallback to a global reset */
DRM_DEBUG_DRIVER("%sFailed to reset %s, ret=%d\n",
- engine->gt->uc.guc.execbuf_client ? "GuC " : "",
+ uses_guc ? "GuC " : "",
engine->name, ret);
goto out;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index a47d5a7c32c9..81f872f9ef03 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -362,6 +362,12 @@ gen7_render_ring_flush(struct i915_request *rq, u32 mode)
*/
flags |= PIPE_CONTROL_CS_STALL;
+ /*
+ * CS_STALL suggests at least a post-sync write.
+ */
+ flags |= PIPE_CONTROL_QW_WRITE;
+ flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
+
/* Just flush everything. Experiments have shown that reducing the
* number of bits based on the write domains has little performance
* impact.
@@ -380,13 +386,6 @@ gen7_render_ring_flush(struct i915_request *rq, u32 mode)
flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_MEDIA_STATE_CLEAR;
- /*
- * TLB invalidate requires a post-sync write.
- */
- flags |= PIPE_CONTROL_QW_WRITE;
- flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
-
- flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD;
/* Workaround: we must issue a pipe_control with CS-stall bit
* set before a pipe_control command that has the state cache
@@ -454,7 +453,8 @@ static u32 *gen7_xcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma);
GEM_BUG_ON(offset_in_page(i915_request_active_timeline(rq)->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
- *cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
+ *cs++ = MI_FLUSH_DW | MI_INVALIDATE_TLB |
+ MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
*cs++ = I915_GEM_HWS_SEQNO_ADDR | MI_FLUSH_DW_USE_GTT;
*cs++ = rq->fence.seqno;
@@ -496,14 +496,13 @@ static void set_hwstam(struct intel_engine_cs *engine, u32 mask)
static void set_hws_pga(struct intel_engine_cs *engine, phys_addr_t phys)
{
- struct drm_i915_private *dev_priv = engine->i915;
u32 addr;
addr = lower_32_bits(phys);
- if (INTEL_GEN(dev_priv) >= 4)
+ if (INTEL_GEN(engine->i915) >= 4)
addr |= (phys >> 28) & 0xf0;
- I915_WRITE(HWS_PGA, addr);
+ intel_uncore_write(engine->uncore, HWS_PGA, addr);
}
static struct page *status_page(struct intel_engine_cs *engine)
@@ -522,14 +521,13 @@ static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
static void set_hwsp(struct intel_engine_cs *engine, u32 offset)
{
- struct drm_i915_private *dev_priv = engine->i915;
i915_reg_t hwsp;
/*
* The ring status page addresses are no longer next to the rest of
* the ring registers as of gen7.
*/
- if (IS_GEN(dev_priv, 7)) {
+ if (IS_GEN(engine->i915, 7)) {
switch (engine->id) {
/*
* No more rings exist on Gen7. Default case is only to shut up
@@ -551,14 +549,14 @@ static void set_hwsp(struct intel_engine_cs *engine, u32 offset)
hwsp = VEBOX_HWS_PGA_GEN7;
break;
}
- } else if (IS_GEN(dev_priv, 6)) {
+ } else if (IS_GEN(engine->i915, 6)) {
hwsp = RING_HWS_PGA_GEN6(engine->mmio_base);
} else {
hwsp = RING_HWS_PGA(engine->mmio_base);
}
- I915_WRITE(hwsp, offset);
- POSTING_READ(hwsp);
+ intel_uncore_write(engine->uncore, hwsp, offset);
+ intel_uncore_posting_read(engine->uncore, hwsp);
}
static void flush_cs_tlb(struct intel_engine_cs *engine)
@@ -633,8 +631,8 @@ static int xcs_resume(struct intel_engine_cs *engine)
struct intel_ring *ring = engine->legacy.ring;
int ret = 0;
- GEM_TRACE("%s: ring:{HEAD:%04x, TAIL:%04x}\n",
- engine->name, ring->head, ring->tail);
+ ENGINE_TRACE(engine, "ring:{HEAD:%04x, TAIL:%04x}\n",
+ ring->head, ring->tail);
intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL);
@@ -721,7 +719,7 @@ static int xcs_resume(struct intel_engine_cs *engine)
}
/* Papering over lost _interrupts_ immediately following the restart */
- intel_engine_queue_breadcrumbs(engine);
+ intel_engine_signal_breadcrumbs(engine);
out:
intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL);
@@ -747,10 +745,10 @@ static void reset_prepare(struct intel_engine_cs *engine)
*
* FIXME: Wa for more modern gens needs to be validated
*/
- GEM_TRACE("%s\n", engine->name);
+ ENGINE_TRACE(engine, "\n");
if (intel_engine_stop_cs(engine))
- GEM_TRACE("%s: timed out on STOP_RING\n", engine->name);
+ ENGINE_TRACE(engine, "timed out on STOP_RING\n");
intel_uncore_write_fw(uncore,
RING_HEAD(base),
@@ -766,12 +764,11 @@ static void reset_prepare(struct intel_engine_cs *engine)
/* Check acts as a post */
if (intel_uncore_read_fw(uncore, RING_HEAD(base)))
- GEM_TRACE("%s: ring head [%x] not parked\n",
- engine->name,
- intel_uncore_read_fw(uncore, RING_HEAD(base)));
+ ENGINE_TRACE(engine, "ring head [%x] not parked\n",
+ intel_uncore_read_fw(uncore, RING_HEAD(base)));
}
-static void reset_ring(struct intel_engine_cs *engine, bool stalled)
+static void reset_rewind(struct intel_engine_cs *engine, bool stalled)
{
struct i915_request *pos, *rq;
unsigned long flags;
@@ -842,7 +839,8 @@ static void reset_finish(struct intel_engine_cs *engine)
static int rcs_resume(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
+ struct drm_i915_private *i915 = engine->i915;
+ struct intel_uncore *uncore = engine->uncore;
/*
* Disable CONSTANT_BUFFER before it is loaded from the context
@@ -854,13 +852,14 @@ static int rcs_resume(struct intel_engine_cs *engine)
* they are already accustomed to from before contexts were
* enabled.
*/
- if (IS_GEN(dev_priv, 4))
- I915_WRITE(ECOSKPD,
+ if (IS_GEN(i915, 4))
+ intel_uncore_write(uncore, ECOSKPD,
_MASKED_BIT_ENABLE(ECO_CONSTANT_BUFFER_SR_DISABLE));
/* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
- if (IS_GEN_RANGE(dev_priv, 4, 6))
- I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
+ if (IS_GEN_RANGE(i915, 4, 6))
+ intel_uncore_write(uncore, MI_MODE,
+ _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
/* We need to disable the AsyncFlip performance optimisations in order
* to use MI_WAIT_FOR_EVENT within the CS. It should already be
@@ -868,38 +867,40 @@ static int rcs_resume(struct intel_engine_cs *engine)
*
* WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
*/
- if (IS_GEN_RANGE(dev_priv, 6, 7))
- I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
+ if (IS_GEN_RANGE(i915, 6, 7))
+ intel_uncore_write(uncore, MI_MODE,
+ _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
/* Required for the hardware to program scanline values for waiting */
/* WaEnableFlushTlbInvalidationMode:snb */
- if (IS_GEN(dev_priv, 6))
- I915_WRITE(GFX_MODE,
+ if (IS_GEN(i915, 6))
+ intel_uncore_write(uncore, GFX_MODE,
_MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT));
/* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
- if (IS_GEN(dev_priv, 7))
- I915_WRITE(GFX_MODE_GEN7,
+ if (IS_GEN(i915, 7))
+ intel_uncore_write(uncore, GFX_MODE_GEN7,
_MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) |
_MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
- if (IS_GEN(dev_priv, 6)) {
+ if (IS_GEN(i915, 6)) {
/* From the Sandybridge PRM, volume 1 part 3, page 24:
* "If this bit is set, STCunit will have LRA as replacement
* policy. [...] This bit must be reset. LRA replacement
* policy is not supported."
*/
- I915_WRITE(CACHE_MODE_0,
+ intel_uncore_write(uncore, CACHE_MODE_0,
_MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
}
- if (IS_GEN_RANGE(dev_priv, 6, 7))
- I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
+ if (IS_GEN_RANGE(i915, 6, 7))
+ intel_uncore_write(uncore, INSTPM,
+ _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
return xcs_resume(engine);
}
-static void cancel_requests(struct intel_engine_cs *engine)
+static void reset_cancel(struct intel_engine_cs *engine)
{
struct i915_request *request;
unsigned long flags;
@@ -1318,6 +1319,8 @@ static int ring_context_alloc(struct intel_context *ce)
return PTR_ERR(vma);
ce->state = vma;
+ if (engine->default_state)
+ __set_bit(CONTEXT_VALID_BIT, &ce->flags);
}
return 0;
@@ -1360,45 +1363,37 @@ static const struct intel_context_ops ring_context_ops = {
.destroy = ring_context_destroy,
};
-static int load_pd_dir(struct i915_request *rq, const struct i915_ppgtt *ppgtt)
+static int load_pd_dir(struct i915_request *rq,
+ const struct i915_ppgtt *ppgtt,
+ u32 valid)
{
const struct intel_engine_cs * const engine = rq->engine;
u32 *cs;
- cs = intel_ring_begin(rq, 6);
+ cs = intel_ring_begin(rq, 12);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = MI_LOAD_REGISTER_IMM(1);
*cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine->mmio_base));
- *cs++ = PP_DIR_DCLV_2G;
+ *cs++ = valid;
*cs++ = MI_LOAD_REGISTER_IMM(1);
*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine->mmio_base));
*cs++ = px_base(ppgtt->pd)->ggtt_offset << 10;
- intel_ring_advance(rq, cs);
-
- return 0;
-}
-
-static int flush_pd_dir(struct i915_request *rq)
-{
- const struct intel_engine_cs * const engine = rq->engine;
- u32 *cs;
-
- cs = intel_ring_begin(rq, 4);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- /* Stall until the page table load is complete */
+ /* Stall until the page table load is complete? */
*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine->mmio_base));
- *cs++ = intel_gt_scratch_offset(rq->engine->gt,
+ *cs++ = intel_gt_scratch_offset(engine->gt,
INTEL_GT_SCRATCH_FIELD_DEFAULT);
- *cs++ = MI_NOOP;
+
+ *cs++ = MI_LOAD_REGISTER_IMM(1);
+ *cs++ = i915_mmio_reg_offset(RING_INSTPM(engine->mmio_base));
+ *cs++ = _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE);
intel_ring_advance(rq, cs);
+
return 0;
}
@@ -1485,7 +1480,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
*cs++ = MI_NOOP;
*cs++ = MI_SET_CONTEXT;
- *cs++ = i915_ggtt_offset(rq->hw_context->state) | flags;
+ *cs++ = i915_ggtt_offset(rq->context->state) | flags;
/*
* w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
* WaMiSetContext_Hang:snb,ivb,vlv
@@ -1555,10 +1550,10 @@ static int remap_l3_slice(struct i915_request *rq, int slice)
static int remap_l3(struct i915_request *rq)
{
- struct i915_gem_context *ctx = rq->gem_context;
+ struct i915_gem_context *ctx = i915_request_gem_context(rq);
int i, err;
- if (!ctx->remap_slice)
+ if (!ctx || !ctx->remap_slice)
return 0;
for (i = 0; i < MAX_L3_SLICES; i++) {
@@ -1574,34 +1569,50 @@ static int remap_l3(struct i915_request *rq)
return 0;
}
+static int switch_mm(struct i915_request *rq, struct i915_address_space *vm)
+{
+ int ret;
+
+ if (!vm)
+ return 0;
+
+ ret = rq->engine->emit_flush(rq, EMIT_FLUSH);
+ if (ret)
+ return ret;
+
+ /*
+ * Not only do we need a full barrier (post-sync write) after
+ * invalidating the TLBs, but we need to wait a little bit
+ * longer. Whether this is merely delaying us, or the
+ * subsequent flush is a key part of serialising with the
+ * post-sync op, this extra pass appears vital before a
+ * mm switch!
+ */
+ ret = load_pd_dir(rq, i915_vm_to_ppgtt(vm), PP_DIR_DCLV_2G);
+ if (ret)
+ return ret;
+
+ return rq->engine->emit_flush(rq, EMIT_FLUSH);
+}
+
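
Note: switch_mm() replaces the old load_pd_dir() + flush_pd_dir() pair with a single flush / load / flush sandwich: load_pd_dir() itself now emits the serialising read-back and an INSTPM-based TLB invalidate, and the trailing EMIT_FLUSH provides the extra post-sync pass the comment above calls out. The rough shape of the resulting command stream, per the hunks above (layout approximate, register names as used in load_pd_dir()):

    /*
     *   engine->emit_flush(rq, EMIT_FLUSH)
     *   MI_LOAD_REGISTER_IMM   RING_PP_DIR_DCLV  = valid
     *   MI_LOAD_REGISTER_IMM   RING_PP_DIR_BASE  = pd ggtt_offset << 10
     *   MI_STORE_REGISTER_MEM  RING_PP_DIR_BASE -> GT scratch  (stall)
     *   MI_LOAD_REGISTER_IMM   RING_INSTPM       = INSTPM_TLB_INVALIDATE
     *   engine->emit_flush(rq, EMIT_FLUSH)
     */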
static int switch_context(struct i915_request *rq)
{
- struct intel_context *ce = rq->hw_context;
- struct i915_address_space *vm = vm_alias(ce);
+ struct intel_context *ce = rq->context;
int ret;
GEM_BUG_ON(HAS_EXECLISTS(rq->i915));
- if (vm) {
- ret = load_pd_dir(rq, i915_vm_to_ppgtt(vm));
- if (ret)
- return ret;
- }
+ ret = switch_mm(rq, vm_alias(ce));
+ if (ret)
+ return ret;
if (ce->state) {
u32 hw_flags;
GEM_BUG_ON(rq->engine->id != RCS0);
- /*
- * The kernel context(s) is treated as pure scratch and is not
- * expected to retain any state (as we sacrifice it during
- * suspend and on resume it may be corrupted). This is ok,
- * as nothing actually executes using the kernel context; it
- * is purely used for flushing user contexts.
- */
hw_flags = 0;
- if (i915_gem_context_is_kernel(rq->gem_context))
+ if (!test_bit(CONTEXT_VALID_BIT, &ce->flags))
hw_flags = MI_RESTORE_INHIBIT;
ret = mi_set_context(rq, hw_flags);
@@ -1609,34 +1620,6 @@ static int switch_context(struct i915_request *rq)
return ret;
}
- if (vm) {
- struct intel_engine_cs *engine = rq->engine;
-
- ret = engine->emit_flush(rq, EMIT_INVALIDATE);
- if (ret)
- return ret;
-
- ret = flush_pd_dir(rq);
- if (ret)
- return ret;
-
- /*
- * Not only do we need a full barrier (post-sync write) after
- * invalidating the TLBs, but we need to wait a little bit
- * longer. Whether this is merely delaying us, or the
- * subsequent flush is a key part of serialising with the
- * post-sync op, this extra pass appears vital before a
- * mm switch!
- */
- ret = engine->emit_flush(rq, EMIT_INVALIDATE);
- if (ret)
- return ret;
-
- ret = engine->emit_flush(rq, EMIT_FLUSH);
- if (ret)
- return ret;
- }
-
ret = remap_l3(rq);
if (ret)
return ret;
@@ -1648,7 +1631,7 @@ static int ring_request_alloc(struct i915_request *request)
{
int ret;
- GEM_BUG_ON(!intel_context_is_pinned(request->hw_context));
+ GEM_BUG_ON(!intel_context_is_pinned(request->context));
GEM_BUG_ON(i915_request_timeline(request)->has_initial_breadcrumb);
/*
@@ -1804,7 +1787,6 @@ static int gen6_ring_flush(struct i915_request *rq, u32 mode)
static void i9xx_set_default_submission(struct intel_engine_cs *engine)
{
engine->submit_request = i9xx_submit_request;
- engine->cancel_requests = cancel_requests;
engine->park = NULL;
engine->unpark = NULL;
@@ -1816,7 +1798,7 @@ static void gen6_bsd_set_default_submission(struct intel_engine_cs *engine)
engine->submit_request = gen6_bsd_submit_request;
}
-static void ring_destroy(struct intel_engine_cs *engine)
+static void ring_release(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
@@ -1830,8 +1812,6 @@ static void ring_destroy(struct intel_engine_cs *engine)
intel_timeline_unpin(engine->legacy.timeline);
intel_timeline_put(engine->legacy.timeline);
-
- kfree(engine);
}
static void setup_irq(struct intel_engine_cs *engine)
@@ -1862,11 +1842,12 @@ static void setup_common(struct intel_engine_cs *engine)
setup_irq(engine);
- engine->destroy = ring_destroy;
+ engine->release = ring_release;
engine->resume = xcs_resume;
engine->reset.prepare = reset_prepare;
- engine->reset.reset = reset_ring;
+ engine->reset.rewind = reset_rewind;
+ engine->reset.cancel = reset_cancel;
engine->reset.finish = reset_finish;
engine->cops = &ring_context_ops;
@@ -1977,6 +1958,10 @@ static void setup_vecs(struct intel_engine_cs *engine)
int intel_ring_submission_setup(struct intel_engine_cs *engine)
{
+ struct intel_timeline *timeline;
+ struct intel_ring *ring;
+ int err;
+
setup_common(engine);
switch (engine->class) {
@@ -1997,15 +1982,6 @@ int intel_ring_submission_setup(struct intel_engine_cs *engine)
return -ENODEV;
}
- return 0;
-}
-
-int intel_ring_submission_init(struct intel_engine_cs *engine)
-{
- struct intel_timeline *timeline;
- struct intel_ring *ring;
- int err;
-
timeline = intel_timeline_create(engine->gt, engine->status_page.vma);
if (IS_ERR(timeline)) {
err = PTR_ERR(timeline);
@@ -2031,16 +2007,10 @@ int intel_ring_submission_init(struct intel_engine_cs *engine)
engine->legacy.ring = ring;
engine->legacy.timeline = timeline;
- err = intel_engine_init_common(engine);
- if (err)
- goto err_ring_unpin;
-
GEM_BUG_ON(timeline->hwsp_ggtt != engine->status_page.vma);
return 0;
-err_ring_unpin:
- intel_ring_unpin(ring);
err_ring:
intel_ring_put(ring);
err_timeline_unpin:
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
index 20d6ee148afc..f232036c3c7a 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.c
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -37,6 +37,11 @@ static u32 rps_pm_sanitize_mask(struct intel_rps *rps, u32 mask)
return mask & ~rps->pm_intrmsk_mbz;
}
+static inline void set(struct intel_uncore *uncore, i915_reg_t reg, u32 val)
+{
+ intel_uncore_write_fw(uncore, reg, val);
+}
+
static u32 rps_pm_mask(struct intel_rps *rps, u8 val)
{
u32 mask = 0;
@@ -78,8 +83,7 @@ static void rps_enable_interrupts(struct intel_rps *rps)
gen6_gt_pm_enable_irq(gt, rps->pm_events);
spin_unlock_irq(&gt->irq_lock);
- intel_uncore_write(gt->uncore, GEN6_PMINTRMSK,
- rps_pm_mask(rps, rps->cur_freq));
+ set(gt->uncore, GEN6_PMINTRMSK, rps_pm_mask(rps, rps->cur_freq));
}
static void gen6_rps_reset_interrupts(struct intel_rps *rps)
@@ -113,8 +117,7 @@ static void rps_disable_interrupts(struct intel_rps *rps)
rps->pm_events = 0;
- intel_uncore_write(gt->uncore, GEN6_PMINTRMSK,
- rps_pm_sanitize_mask(rps, ~0u));
+ set(gt->uncore, GEN6_PMINTRMSK, rps_pm_sanitize_mask(rps, ~0u));
spin_lock_irq(&gt->irq_lock);
gen6_gt_pm_disable_irq(gt, GEN6_PM_RPS_EVENTS);
@@ -573,25 +576,21 @@ static void rps_set_power(struct intel_rps *rps, int new_power)
if (IS_VALLEYVIEW(i915))
goto skip_hw_write;
- intel_uncore_write(uncore, GEN6_RP_UP_EI,
- GT_INTERVAL_FROM_US(i915, ei_up));
- intel_uncore_write(uncore, GEN6_RP_UP_THRESHOLD,
- GT_INTERVAL_FROM_US(i915,
- ei_up * threshold_up / 100));
-
- intel_uncore_write(uncore, GEN6_RP_DOWN_EI,
- GT_INTERVAL_FROM_US(i915, ei_down));
- intel_uncore_write(uncore, GEN6_RP_DOWN_THRESHOLD,
- GT_INTERVAL_FROM_US(i915,
- ei_down * threshold_down / 100));
-
- intel_uncore_write(uncore, GEN6_RP_CONTROL,
- (INTEL_GEN(i915) > 9 ? 0 : GEN6_RP_MEDIA_TURBO) |
- GEN6_RP_MEDIA_HW_NORMAL_MODE |
- GEN6_RP_MEDIA_IS_GFX |
- GEN6_RP_ENABLE |
- GEN6_RP_UP_BUSY_AVG |
- GEN6_RP_DOWN_IDLE_AVG);
+ set(uncore, GEN6_RP_UP_EI, GT_INTERVAL_FROM_US(i915, ei_up));
+ set(uncore, GEN6_RP_UP_THRESHOLD,
+ GT_INTERVAL_FROM_US(i915, ei_up * threshold_up / 100));
+
+ set(uncore, GEN6_RP_DOWN_EI, GT_INTERVAL_FROM_US(i915, ei_down));
+ set(uncore, GEN6_RP_DOWN_THRESHOLD,
+ GT_INTERVAL_FROM_US(i915, ei_down * threshold_down / 100));
+
+ set(uncore, GEN6_RP_CONTROL,
+ (INTEL_GEN(i915) > 9 ? 0 : GEN6_RP_MEDIA_TURBO) |
+ GEN6_RP_MEDIA_HW_NORMAL_MODE |
+ GEN6_RP_MEDIA_IS_GFX |
+ GEN6_RP_ENABLE |
+ GEN6_RP_UP_BUSY_AVG |
+ GEN6_RP_DOWN_IDLE_AVG);
skip_hw_write:
rps->power.mode = new_power;
@@ -666,7 +665,7 @@ static int gen6_rps_set(struct intel_rps *rps, u8 val)
swreq = (GEN6_FREQUENCY(val) |
GEN6_OFFSET(0) |
GEN6_AGGRESSIVE_TURBO);
- intel_uncore_write(uncore, GEN6_RPNSWREQ, swreq);
+ set(uncore, GEN6_RPNSWREQ, swreq);
return 0;
}
@@ -683,7 +682,7 @@ static int vlv_rps_set(struct intel_rps *rps, u8 val)
return err;
}
-static int rps_set(struct intel_rps *rps, u8 val)
+static int rps_set(struct intel_rps *rps, u8 val, bool update)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
int err;
@@ -701,7 +700,8 @@ static int rps_set(struct intel_rps *rps, u8 val)
if (err)
return err;
- gen6_rps_set_thresholds(rps, val);
+ if (update)
+ gen6_rps_set_thresholds(rps, val);
rps->last_freq = val;
return 0;
@@ -761,7 +761,7 @@ void intel_rps_park(struct intel_rps *rps)
* power than the render powerwell.
*/
intel_uncore_forcewake_get(rps_to_uncore(rps), FORCEWAKE_MEDIA);
- rps_set(rps, rps->idle_freq);
+ rps_set(rps, rps->idle_freq, false);
intel_uncore_forcewake_put(rps_to_uncore(rps), FORCEWAKE_MEDIA);
}
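
Note: rps_set() grows an update flag so that parking can drop to the idle frequency without retuning the up/down interrupt thresholds; the previously tuned operating point is kept for the next unpark. The two call patterns, as used above:

    rps_set(rps, rps->idle_freq, false);  /* park: write the frequency only */
    rps_set(rps, val, true);              /* active: also retune thresholds */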
@@ -790,14 +790,16 @@ void intel_rps_boost(struct i915_request *rq)
int intel_rps_set(struct intel_rps *rps, u8 val)
{
- int err = 0;
+ int err;
lockdep_assert_held(&rps->lock);
GEM_BUG_ON(val > rps->max_freq);
GEM_BUG_ON(val < rps->min_freq);
if (rps->active) {
- err = rps_set(rps, val);
+ err = rps_set(rps, val, true);
+ if (err)
+ return err;
/*
* Make sure we continue to get interrupts
@@ -806,18 +808,15 @@ int intel_rps_set(struct intel_rps *rps, u8 val)
if (INTEL_GEN(rps_to_i915(rps)) >= 6) {
struct intel_uncore *uncore = rps_to_uncore(rps);
- intel_uncore_write(uncore, GEN6_RP_INTERRUPT_LIMITS,
- rps_limits(rps, val));
+ set(uncore,
+ GEN6_RP_INTERRUPT_LIMITS, rps_limits(rps, val));
- intel_uncore_write(uncore, GEN6_PMINTRMSK,
- rps_pm_mask(rps, val));
+ set(uncore, GEN6_PMINTRMSK, rps_pm_mask(rps, val));
}
}
- if (err == 0)
- rps->cur_freq = val;
-
- return err;
+ rps->cur_freq = val;
+ return 0;
}
static void gen6_rps_init(struct intel_rps *rps)
@@ -878,7 +877,7 @@ static bool rps_reset(struct intel_rps *rps)
rps->power.mode = -1;
rps->last_freq = -1;
- if (rps_set(rps, rps->min_freq)) {
+ if (rps_set(rps, rps->min_freq, true)) {
DRM_ERROR("Failed to reset RPS to initial values\n");
return false;
}
@@ -1201,7 +1200,7 @@ void intel_rps_enable(struct intel_rps *rps)
static void gen6_rps_disable(struct intel_rps *rps)
{
- intel_uncore_write(rps_to_uncore(rps), GEN6_RP_CONTROL, 0);
+ set(rps_to_uncore(rps), GEN6_RP_CONTROL, 0);
}
void intel_rps_disable(struct intel_rps *rps)
@@ -1566,7 +1565,7 @@ void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
return;
if (pm_iir & PM_VEBOX_USER_INTERRUPT)
- intel_engine_breadcrumbs_irq(gt->engine[VECS0]);
+ intel_engine_signal_breadcrumbs(gt->engine[VECS0]);
if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
@@ -1663,23 +1662,53 @@ void intel_rps_init(struct intel_rps *rps)
if (INTEL_GEN(i915) <= 7)
rps->pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;
- if (INTEL_GEN(i915) >= 8)
+ if (INTEL_GEN(i915) >= 8 && INTEL_GEN(i915) < 11)
rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
}
-u32 intel_get_cagf(struct intel_rps *rps, u32 rpstat)
+u32 intel_rps_get_cagf(struct intel_rps *rps, u32 rpstat)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
u32 cagf;
- if (INTEL_GEN(i915) >= 9)
+ if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
+ cagf = (rpstat >> 8) & 0xff;
+ else if (INTEL_GEN(i915) >= 9)
cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
else if (IS_HASWELL(i915) || IS_BROADWELL(i915))
cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
else
cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
- return cagf;
+ return cagf;
+}
+
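+/*
+ * Sample the current actual GPU frequency field: Valleyview/Cherryview
+ * report it via a Punit register, everything else via GEN6_RPSTAT1, and
+ * intel_rps_get_cagf() decodes the platform-specific CAGF bits.
+ */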
+static u32 read_cagf(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ u32 freq;
+
+ if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
+ vlv_punit_get(i915);
+ freq = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
+ vlv_punit_put(i915);
+ } else {
+ freq = intel_uncore_read(rps_to_gt(rps)->uncore, GEN6_RPSTAT1);
+ }
+
+ return intel_rps_get_cagf(rps, freq);
+}
+
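+/*
+ * Report the current hardware frequency, converted via intel_gpu_freq().
+ * Returns 0 while the device is runtime suspended, as the registers are
+ * only sampled if a wakeref is already held.
+ */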
+u32 intel_rps_read_actual_frequency(struct intel_rps *rps)
+{
+ struct intel_runtime_pm *rpm = rps_to_gt(rps)->uncore->rpm;
+ intel_wakeref_t wakeref;
+ u32 freq = 0;
+
+ with_intel_runtime_pm_if_in_use(rpm, wakeref)
+ freq = intel_gpu_freq(rps, read_cagf(rps));
+
+ return freq;
}
/* External interface for intel_ips.ko */
@@ -1715,6 +1744,7 @@ void intel_rps_driver_register(struct intel_rps *rps)
* set up, to avoid intel-ips sneaking in and reading bogus values.
*/
if (IS_GEN(gt->i915, 5)) {
+ GEM_BUG_ON(ips_mchdev);
rcu_assign_pointer(ips_mchdev, gt->i915);
ips_ping_for_i915_load();
}
@@ -1722,7 +1752,8 @@ void intel_rps_driver_register(struct intel_rps *rps)
void intel_rps_driver_unregister(struct intel_rps *rps)
{
- rcu_assign_pointer(ips_mchdev, NULL);
+ if (rcu_access_pointer(ips_mchdev) == rps_to_i915(rps))
+ rcu_assign_pointer(ips_mchdev, NULL);
}
static struct drm_i915_private *mchdev_get(void)
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.h b/drivers/gpu/drm/i915/gt/intel_rps.h
index 9518c66c9792..dfa98194f3b2 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.h
+++ b/drivers/gpu/drm/i915/gt/intel_rps.h
@@ -29,7 +29,8 @@ void intel_rps_mark_interactive(struct intel_rps *rps, bool interactive);
int intel_gpu_freq(struct intel_rps *rps, int val);
int intel_freq_opcode(struct intel_rps *rps, int val);
-u32 intel_get_cagf(struct intel_rps *rps, u32 rpstat1);
+u32 intel_rps_get_cagf(struct intel_rps *rps, u32 rpstat1);
+u32 intel_rps_read_actual_frequency(struct intel_rps *rps);
void gen5_rps_irq_handler(struct intel_rps *rps);
void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir);
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c
index 649798c184fb..ee5dc4fbdeb9 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline.c
+++ b/drivers/gpu/drm/i915/gt/intel_timeline.c
@@ -15,6 +15,9 @@
#define ptr_set_bit(ptr, bit) ((typeof(ptr))((unsigned long)(ptr) | BIT(bit)))
#define ptr_test_bit(ptr, bit) ((unsigned long)(ptr) & BIT(bit))
+#define CACHELINE_BITS 6
+#define CACHELINE_FREE CACHELINE_BITS
+
struct intel_timeline_hwsp {
struct intel_gt *gt;
struct intel_gt_timelines *gt_timelines;
@@ -23,14 +26,6 @@ struct intel_timeline_hwsp {
u64 free_bitmap;
};
-struct intel_timeline_cacheline {
- struct i915_active active;
- struct intel_timeline_hwsp *hwsp;
- void *vaddr;
-#define CACHELINE_BITS 6
-#define CACHELINE_FREE CACHELINE_BITS
-};
-
static struct i915_vma *__hwsp_alloc(struct intel_gt *gt)
{
struct drm_i915_private *i915 = gt->i915;
@@ -133,7 +128,7 @@ static void __idle_cacheline_free(struct intel_timeline_cacheline *cl)
__idle_hwsp_free(cl->hwsp, ptr_unmask_bits(cl->vaddr, CACHELINE_BITS));
i915_active_fini(&cl->active);
- kfree(cl);
+ kfree_rcu(cl, rcu);
}
__i915_active_call
@@ -254,7 +249,7 @@ int intel_timeline_init(struct intel_timeline *timeline,
mutex_init(&timeline->mutex);
- INIT_ACTIVE_FENCE(&timeline->last_request, &timeline->mutex);
+ INIT_ACTIVE_FENCE(&timeline->last_request);
INIT_LIST_HEAD(&timeline->requests);
i915_syncmap_init(&timeline->sync);
@@ -262,7 +257,7 @@ int intel_timeline_init(struct intel_timeline *timeline,
return 0;
}
-static void timelines_init(struct intel_gt *gt)
+void intel_gt_init_timelines(struct intel_gt *gt)
{
struct intel_gt_timelines *timelines = &gt->timelines;
@@ -273,11 +268,6 @@ static void timelines_init(struct intel_gt *gt)
INIT_LIST_HEAD(&timelines->hwsp_free_list);
}
-void intel_timelines_init(struct drm_i915_private *i915)
-{
- timelines_init(&i915->gt);
-}
-
void intel_timeline_fini(struct intel_timeline *timeline)
{
GEM_BUG_ON(atomic_read(&timeline->pin_count));
@@ -338,7 +328,6 @@ int intel_timeline_pin(struct intel_timeline *tl)
void intel_timeline_enter(struct intel_timeline *tl)
{
struct intel_gt_timelines *timelines = &tl->gt->timelines;
- unsigned long flags;
/*
* Pretend we are serialised by the timeline->mutex.
@@ -364,16 +353,15 @@ void intel_timeline_enter(struct intel_timeline *tl)
if (atomic_add_unless(&tl->active_count, 1, 0))
return;
- spin_lock_irqsave(&timelines->lock, flags);
+ spin_lock(&timelines->lock);
if (!atomic_fetch_inc(&tl->active_count))
list_add_tail(&tl->link, &timelines->active_list);
- spin_unlock_irqrestore(&timelines->lock, flags);
+ spin_unlock(&timelines->lock);
}
void intel_timeline_exit(struct intel_timeline *tl)
{
struct intel_gt_timelines *timelines = &tl->gt->timelines;
- unsigned long flags;
/* See intel_timeline_enter() */
lockdep_assert_held(&tl->mutex);
@@ -382,10 +370,10 @@ void intel_timeline_exit(struct intel_timeline *tl)
if (atomic_add_unless(&tl->active_count, -1, 1))
return;
- spin_lock_irqsave(&timelines->lock, flags);
+ spin_lock(&timelines->lock);
if (atomic_dec_and_test(&tl->active_count))
list_del(&tl->link);
- spin_unlock_irqrestore(&timelines->lock, flags);
+ spin_unlock(&timelines->lock);
/*
* Since this timeline is idle, all barriers upon which we were waiting
@@ -521,46 +509,35 @@ int intel_timeline_read_hwsp(struct i915_request *from,
struct i915_request *to,
u32 *hwsp)
{
- struct intel_timeline *tl;
+ struct intel_timeline_cacheline *cl;
int err;
+ GEM_BUG_ON(!rcu_access_pointer(from->hwsp_cacheline));
+
rcu_read_lock();
- tl = rcu_dereference(from->timeline);
- if (i915_request_completed(from) || !kref_get_unless_zero(&tl->kref))
- tl = NULL;
+ cl = rcu_dereference(from->hwsp_cacheline);
+ if (unlikely(!i915_active_acquire_if_busy(&cl->active)))
+ goto unlock; /* seqno wrapped and completed! */
+ if (unlikely(i915_request_completed(from)))
+ goto release;
rcu_read_unlock();
- if (!tl) /* already completed */
- return 1;
-
- GEM_BUG_ON(rcu_access_pointer(to->timeline) == tl);
- err = -EBUSY;
- if (mutex_trylock(&tl->mutex)) {
- struct intel_timeline_cacheline *cl = from->hwsp_cacheline;
-
- if (i915_request_completed(from)) {
- err = 1;
- goto unlock;
- }
+ err = cacheline_ref(cl, to);
+ if (err)
+ goto out;
- err = cacheline_ref(cl, to);
- if (err)
- goto unlock;
+ *hwsp = i915_ggtt_offset(cl->hwsp->vma) +
+ ptr_unmask_bits(cl->vaddr, CACHELINE_BITS) * CACHELINE_BYTES;
- if (likely(cl == tl->hwsp_cacheline)) {
- *hwsp = tl->hwsp_offset;
- } else { /* across a seqno wrap, recover the original offset */
- *hwsp = i915_ggtt_offset(cl->hwsp->vma) +
- ptr_unmask_bits(cl->vaddr, CACHELINE_BITS) *
- CACHELINE_BYTES;
- }
+out:
+ i915_active_release(&cl->active);
+ return err;
+release:
+ i915_active_release(&cl->active);
unlock:
- mutex_unlock(&tl->mutex);
- }
- intel_timeline_put(tl);
-
- return err;
+ rcu_read_unlock();
+ return 1;
}
void intel_timeline_unpin(struct intel_timeline *tl)
@@ -583,7 +560,7 @@ void __intel_timeline_free(struct kref *kref)
kfree_rcu(timeline, rcu);
}
-static void timelines_fini(struct intel_gt *gt)
+void intel_gt_fini_timelines(struct intel_gt *gt)
{
struct intel_gt_timelines *timelines = &gt->timelines;
@@ -591,11 +568,6 @@ static void timelines_fini(struct intel_gt *gt)
GEM_BUG_ON(!list_empty(&timelines->hwsp_free_list));
}
-void intel_timelines_fini(struct drm_i915_private *i915)
-{
- timelines_fini(&i915->gt);
-}
-
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "gt/selftests/mock_timeline.c"
#include "gt/selftest_timeline.c"
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.h b/drivers/gpu/drm/i915/gt/intel_timeline.h
index f583af1ba18d..f5b7eade3809 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline.h
+++ b/drivers/gpu/drm/i915/gt/intel_timeline.h
@@ -88,7 +88,7 @@ int intel_timeline_read_hwsp(struct i915_request *from,
struct i915_request *until,
u32 *hwsp_offset);
-void intel_timelines_init(struct drm_i915_private *i915);
-void intel_timelines_fini(struct drm_i915_private *i915);
+void intel_gt_init_timelines(struct intel_gt *gt);
+void intel_gt_fini_timelines(struct intel_gt *gt);
#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline_types.h b/drivers/gpu/drm/i915/gt/intel_timeline_types.h
index aaf15cbe1ce1..02181c5020db 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_timeline_types.h
@@ -10,14 +10,15 @@
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/mutex.h>
+#include <linux/rcupdate.h>
#include <linux/types.h>
#include "i915_active_types.h"
-struct drm_i915_private;
struct i915_vma;
-struct intel_timeline_cacheline;
struct i915_syncmap;
+struct intel_gt;
+struct intel_timeline_hwsp;
struct intel_timeline {
u64 fence_context;
@@ -87,4 +88,13 @@ struct intel_timeline {
struct rcu_head rcu;
};
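+/*
+ * Cachelines are freed via kfree_rcu() so that the lockless lookup in
+ * intel_timeline_read_hwsp() may dereference them under rcu_read_lock().
+ */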
+struct intel_timeline_cacheline {
+ struct i915_active active;
+
+ struct intel_timeline_hwsp *hwsp;
+ void *vaddr;
+
+ struct rcu_head rcu;
+};
+
#endif /* __I915_TIMELINE_TYPES_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index e4bccc14602f..195ccf7db272 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -6,6 +6,7 @@
#include "i915_drv.h"
#include "intel_context.h"
+#include "intel_engine_pm.h"
#include "intel_gt.h"
#include "intel_ring.h"
#include "intel_workarounds.h"
@@ -146,21 +147,27 @@ static void _wa_add(struct i915_wa_list *wal, const struct i915_wa *wa)
}
}
-static void
-wa_write_masked_or(struct i915_wa_list *wal, i915_reg_t reg, u32 mask,
- u32 val)
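+/*
+ * read_mask selects which bits are verified when the workaround list is
+ * checked on readback: pass the full mask for the common verified case
+ * (wa_write_masked_or() below), or 0 to skip verification for registers
+ * that cannot be read back on some steppings (see the FF_MODE2 WA).
+ */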
+static void wa_add(struct i915_wa_list *wal, i915_reg_t reg, u32 mask,
+ u32 val, u32 read_mask)
{
struct i915_wa wa = {
.reg = reg,
.mask = mask,
.val = val,
- .read = mask,
+ .read = read_mask,
};
_wa_add(wal, &wa);
}
static void
+wa_write_masked_or(struct i915_wa_list *wal, i915_reg_t reg, u32 mask,
+ u32 val)
+{
+ wa_add(wal, reg, mask, val, mask);
+}
+
+static void
wa_masked_en(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
{
wa_write_masked_or(wal, reg, val, _MASKED_BIT_ENABLE(val));
@@ -568,9 +575,24 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
static void tgl_ctx_workarounds_init(struct intel_engine_cs *engine,
struct i915_wa_list *wal)
{
+ u32 val;
+
/* Wa_1409142259:tgl */
WA_SET_BIT_MASKED(GEN11_COMMON_SLICE_CHICKEN3,
GEN12_DISABLE_CPS_AWARE_COLOR_PIPE);
+
+ /* Wa_1604555607:tgl */
+ val = intel_uncore_read(engine->uncore, FF_MODE2);
+ val &= ~FF_MODE2_TDS_TIMER_MASK;
+ val |= FF_MODE2_TDS_TIMER_128;
+ /*
+ * FIXME: the FF_MODE2 register is not readable until TGL B0, so we
+ * can only enable readback verification of this WA on later
+ * steppings, where FF_MODE2 becomes readable.
+ */
+ wa_add(wal, FF_MODE2, FF_MODE2_TDS_TIMER_MASK, val,
+ IS_TGL_REVID(engine->i915, TGL_REVID_A0, TGL_REVID_A0) ? 0 :
+ FF_MODE2_TDS_TIMER_MASK);
}
static void
@@ -1315,6 +1337,14 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
GEN6_RC_SLEEP_PSMI_CONTROL,
GEN12_WAIT_FOR_EVENT_POWER_DOWN_DISABLE |
GEN8_RC_SEMA_IDLE_MSG_DISABLE);
+
+ /*
+ * Wa_1606679103:tgl
+ * (see also Wa_1606682166:icl)
+ */
+ wa_write_or(wal,
+ GEN7_SARCHKMD,
+ GEN7_DISABLE_SAMPLER_PREFETCH);
}
if (IS_GEN(i915, 11)) {
@@ -1574,7 +1604,9 @@ static int engine_wa_list_verify(struct intel_context *ce,
if (IS_ERR(vma))
return PTR_ERR(vma);
+ intel_engine_pm_get(ce->engine);
rq = intel_context_create_request(ce);
+ intel_engine_pm_put(ce->engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_vma;
@@ -1584,16 +1616,17 @@ static int engine_wa_list_verify(struct intel_context *ce,
if (err)
goto err_vma;
+ i915_request_get(rq);
i915_request_add(rq);
if (i915_request_wait(rq, 0, HZ / 5) < 0) {
err = -ETIME;
- goto err_vma;
+ goto err_rq;
}
results = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
if (IS_ERR(results)) {
err = PTR_ERR(results);
- goto err_vma;
+ goto err_rq;
}
err = 0;
@@ -1607,6 +1640,8 @@ static int engine_wa_list_verify(struct intel_context *ce,
i915_gem_object_unpin_map(vma->obj);
+err_rq:
+ i915_request_put(rq);
err_vma:
i915_vma_unpin(vma);
i915_vma_put(vma);
diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c
index 83f549d203a0..4e1eafa94be9 100644
--- a/drivers/gpu/drm/i915/gt/mock_engine.c
+++ b/drivers/gpu/drm/i915/gt/mock_engine.c
@@ -77,7 +77,7 @@ static void advance(struct i915_request *request)
i915_request_mark_complete(request);
GEM_BUG_ON(!i915_request_completed(request));
- intel_engine_queue_breadcrumbs(request->engine);
+ intel_engine_signal_breadcrumbs(request->engine);
}
static void hw_delay_complete(struct timer_list *t)
@@ -207,16 +207,12 @@ static void mock_reset_prepare(struct intel_engine_cs *engine)
{
}
-static void mock_reset(struct intel_engine_cs *engine, bool stalled)
+static void mock_reset_rewind(struct intel_engine_cs *engine, bool stalled)
{
GEM_BUG_ON(stalled);
}
-static void mock_reset_finish(struct intel_engine_cs *engine)
-{
-}
-
-static void mock_cancel_requests(struct intel_engine_cs *engine)
+static void mock_reset_cancel(struct intel_engine_cs *engine)
{
struct i915_request *request;
unsigned long flags;
@@ -234,6 +230,24 @@ static void mock_cancel_requests(struct intel_engine_cs *engine)
spin_unlock_irqrestore(&engine->active.lock, flags);
}
+static void mock_reset_finish(struct intel_engine_cs *engine)
+{
+}
+
+static void mock_engine_release(struct intel_engine_cs *engine)
+{
+ struct mock_engine *mock =
+ container_of(engine, typeof(*mock), base);
+
+ GEM_BUG_ON(timer_pending(&mock->hw_delay));
+
+ intel_context_unpin(engine->kernel_context);
+ intel_context_put(engine->kernel_context);
+
+ intel_engine_fini_retire(engine);
+ intel_engine_fini_breadcrumbs(engine);
+}
+
struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
const char *name,
int id)
@@ -265,9 +279,11 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
engine->base.submit_request = mock_submit_request;
engine->base.reset.prepare = mock_reset_prepare;
- engine->base.reset.reset = mock_reset;
+ engine->base.reset.rewind = mock_reset_rewind;
+ engine->base.reset.cancel = mock_reset_cancel;
engine->base.reset.finish = mock_reset_finish;
- engine->base.cancel_requests = mock_cancel_requests;
+
+ engine->base.release = mock_engine_release;
i915->gt.engine[id] = &engine->base;
i915->gt.engine_class[0][id] = &engine->base;
@@ -290,6 +306,7 @@ int mock_engine_init(struct intel_engine_cs *engine)
intel_engine_init_breadcrumbs(engine);
intel_engine_init_execlists(engine);
intel_engine_init__pm(engine);
+ intel_engine_init_retire(engine);
intel_engine_pool_init(&engine->pool);
ce = create_kernel_context(engine);
@@ -321,18 +338,3 @@ void mock_engine_flush(struct intel_engine_cs *engine)
void mock_engine_reset(struct intel_engine_cs *engine)
{
}
-
-void mock_engine_free(struct intel_engine_cs *engine)
-{
- struct mock_engine *mock =
- container_of(engine, typeof(*mock), base);
-
- GEM_BUG_ON(timer_pending(&mock->hw_delay));
-
- intel_context_unpin(engine->kernel_context);
- intel_context_put(engine->kernel_context);
-
- intel_engine_fini_breadcrumbs(engine);
-
- kfree(engine);
-}
diff --git a/drivers/gpu/drm/i915/gt/selftest_context.c b/drivers/gpu/drm/i915/gt/selftest_context.c
index bc720defc6b8..e874dfaa5316 100644
--- a/drivers/gpu/drm/i915/gt/selftest_context.c
+++ b/drivers/gpu/drm/i915/gt/selftest_context.c
@@ -5,6 +5,7 @@
*/
#include "i915_selftest.h"
+#include "intel_engine_heartbeat.h"
#include "intel_engine_pm.h"
#include "intel_gt.h"
@@ -47,35 +48,36 @@ static int context_sync(struct intel_context *ce)
mutex_lock(&tl->mutex);
do {
- struct dma_fence *fence;
+ struct i915_request *rq;
long timeout;
- fence = i915_active_fence_get(&tl->last_request);
- if (!fence)
+ if (list_empty(&tl->requests))
break;
- timeout = dma_fence_wait_timeout(fence, false, HZ / 10);
+ rq = list_last_entry(&tl->requests, typeof(*rq), link);
+ i915_request_get(rq);
+
+ timeout = i915_request_wait(rq, 0, HZ / 10);
if (timeout < 0)
err = timeout;
else
- i915_request_retire_upto(to_request(fence));
+ i915_request_retire_upto(rq);
- dma_fence_put(fence);
+ i915_request_put(rq);
} while (!err);
mutex_unlock(&tl->mutex);
return err;
}
-static int __live_context_size(struct intel_engine_cs *engine,
- struct i915_gem_context *fixme)
+static int __live_context_size(struct intel_engine_cs *engine)
{
struct intel_context *ce;
struct i915_request *rq;
void *vaddr;
int err;
- ce = intel_context_create(fixme, engine);
+ ce = intel_context_create(engine);
if (IS_ERR(ce))
return PTR_ERR(ce);
@@ -118,7 +120,7 @@ static int __live_context_size(struct intel_engine_cs *engine,
goto err_unpin;
/* Force the context switch */
- rq = i915_request_create(engine->kernel_context);
+ rq = intel_engine_create_kernel_request(engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_unpin;
@@ -143,7 +145,6 @@ static int live_context_size(void *arg)
{
struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
- struct i915_gem_context *fixme;
enum intel_engine_id id;
int err = 0;
@@ -152,10 +153,6 @@ static int live_context_size(void *arg)
* HW tries to write past the end of one.
*/
- fixme = kernel_context(gt->i915);
- if (IS_ERR(fixme))
- return PTR_ERR(fixme);
-
for_each_engine(engine, gt, id) {
struct {
struct drm_i915_gem_object *state;
@@ -180,7 +177,7 @@ static int live_context_size(void *arg)
/* Overlaps with the execlists redzone */
engine->context_size += I915_GTT_PAGE_SIZE;
- err = __live_context_size(engine, fixme);
+ err = __live_context_size(engine);
engine->context_size -= I915_GTT_PAGE_SIZE;
@@ -193,13 +190,12 @@ static int live_context_size(void *arg)
break;
}
- kernel_context_close(fixme);
return err;
}
-static int __live_active_context(struct intel_engine_cs *engine,
- struct i915_gem_context *fixme)
+static int __live_active_context(struct intel_engine_cs *engine)
{
+ unsigned long saved_heartbeat;
struct intel_context *ce;
int pass;
int err;
@@ -223,40 +219,55 @@ static int __live_active_context(struct intel_engine_cs *engine,
return -EINVAL;
}
- ce = intel_context_create(fixme, engine);
+ ce = intel_context_create(engine);
if (IS_ERR(ce))
return PTR_ERR(ce);
+ saved_heartbeat = engine->props.heartbeat_interval_ms;
+ engine->props.heartbeat_interval_ms = 0;
+
for (pass = 0; pass <= 2; pass++) {
struct i915_request *rq;
+ intel_engine_pm_get(engine);
+
rq = intel_context_create_request(ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
- goto err;
+ goto out_engine;
}
err = request_sync(rq);
if (err)
- goto err;
+ goto out_engine;
/* Context will be kept active until after an idle-barrier. */
if (i915_active_is_idle(&ce->active)) {
pr_err("context is not active; expected idle-barrier (%s pass %d)\n",
engine->name, pass);
err = -EINVAL;
- goto err;
+ goto out_engine;
}
if (!intel_engine_pm_is_awake(engine)) {
pr_err("%s is asleep before idle-barrier\n",
engine->name);
err = -EINVAL;
- goto err;
+ goto out_engine;
}
+
+out_engine:
+ intel_engine_pm_put(engine);
+ if (err)
+ goto err;
}
/* Now make sure our idle-barriers are flushed */
+ err = intel_engine_flush_barriers(engine);
+ if (err)
+ goto err;
+
+ /* Wait for the barrier and, in the process, for the engine to park */
err = context_sync(engine->kernel_context);
if (err)
goto err;
@@ -266,12 +277,15 @@ static int __live_active_context(struct intel_engine_cs *engine,
err = -EINVAL;
}
+ intel_engine_pm_flush(engine);
+
if (intel_engine_pm_is_awake(engine)) {
struct drm_printer p = drm_debug_printer(__func__);
intel_engine_dump(engine, &p,
- "%s is still awake after idle-barriers\n",
- engine->name);
+ "%s is still awake:%d after idle-barriers\n",
+ engine->name,
+ atomic_read(&engine->wakeref.count));
GEM_TRACE_DUMP();
err = -EINVAL;
@@ -279,6 +293,7 @@ static int __live_active_context(struct intel_engine_cs *engine,
}
err:
+ engine->props.heartbeat_interval_ms = saved_heartbeat;
intel_context_put(ce);
return err;
}
@@ -287,23 +302,11 @@ static int live_active_context(void *arg)
{
struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
- struct i915_gem_context *fixme;
enum intel_engine_id id;
- struct drm_file *file;
int err = 0;
- file = mock_file(gt->i915);
- if (IS_ERR(file))
- return PTR_ERR(file);
-
- fixme = live_context(gt->i915, file);
- if (IS_ERR(fixme)) {
- err = PTR_ERR(fixme);
- goto out_file;
- }
-
for_each_engine(engine, gt, id) {
- err = __live_active_context(engine, fixme);
+ err = __live_active_context(engine);
if (err)
break;
@@ -312,8 +315,6 @@ static int live_active_context(void *arg)
break;
}
-out_file:
- mock_file_free(gt->i915, file);
return err;
}
@@ -345,10 +346,10 @@ unpin:
return err;
}
-static int __live_remote_context(struct intel_engine_cs *engine,
- struct i915_gem_context *fixme)
+static int __live_remote_context(struct intel_engine_cs *engine)
{
struct intel_context *local, *remote;
+ unsigned long saved_heartbeat;
int pass;
int err;
@@ -360,16 +361,26 @@ static int __live_remote_context(struct intel_engine_cs *engine,
* clobber the idle-barrier.
*/
- remote = intel_context_create(fixme, engine);
+ if (intel_engine_pm_is_awake(engine)) {
+ pr_err("%s is awake before starting %s!\n",
+ engine->name, __func__);
+ return -EINVAL;
+ }
+
+ remote = intel_context_create(engine);
if (IS_ERR(remote))
return PTR_ERR(remote);
- local = intel_context_create(fixme, engine);
+ local = intel_context_create(engine);
if (IS_ERR(local)) {
err = PTR_ERR(local);
goto err_remote;
}
+ saved_heartbeat = engine->props.heartbeat_interval_ms;
+ engine->props.heartbeat_interval_ms = 0;
+ intel_engine_pm_get(engine);
+
for (pass = 0; pass <= 2; pass++) {
err = __remote_sync(local, remote);
if (err)
@@ -387,6 +398,9 @@ static int __live_remote_context(struct intel_engine_cs *engine,
}
}
+ intel_engine_pm_put(engine);
+ engine->props.heartbeat_interval_ms = saved_heartbeat;
+
intel_context_put(local);
err_remote:
intel_context_put(remote);
@@ -397,23 +411,11 @@ static int live_remote_context(void *arg)
{
struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
- struct i915_gem_context *fixme;
enum intel_engine_id id;
- struct drm_file *file;
int err = 0;
- file = mock_file(gt->i915);
- if (IS_ERR(file))
- return PTR_ERR(file);
-
- fixme = live_context(gt->i915, file);
- if (IS_ERR(fixme)) {
- err = PTR_ERR(fixme);
- goto out_file;
- }
-
for_each_engine(engine, gt, id) {
- err = __live_remote_context(engine, fixme);
+ err = __live_remote_context(engine);
if (err)
break;
@@ -422,8 +424,6 @@ static int live_remote_context(void *arg)
break;
}
-out_file:
- mock_file_free(gt->i915, file);
return err;
}
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_cs.c b/drivers/gpu/drm/i915/gt/selftest_engine_cs.c
index 3880f07c29b8..f88e445a1cae 100644
--- a/drivers/gpu/drm/i915/gt/selftest_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_cs.c
@@ -4,7 +4,365 @@
* Copyright © 2018 Intel Corporation
*/
-#include "../i915_selftest.h"
+#include <linux/sort.h>
+
+#include "intel_gt_pm.h"
+#include "intel_rps.h"
+
+#include "i915_selftest.h"
+#include "selftests/igt_flush_test.h"
+
+#define COUNT 5
+
+static int cmp_u32(const void *A, const void *B)
+{
+ const u32 *a = A, *b = B;
+
+ return *a - *b;
+}
+
+static void perf_begin(struct intel_gt *gt)
+{
+ intel_gt_pm_get(gt);
+
+ /* Boost gpufreq to max [waitboost] and keep it fixed */
+ atomic_inc(&gt->rps.num_waiters);
+ schedule_work(&gt->rps.work);
+ flush_work(&gt->rps.work);
+}
+
+static int perf_end(struct intel_gt *gt)
+{
+ atomic_dec(&gt->rps.num_waiters);
+ intel_gt_pm_put(gt);
+
+ return igt_flush_test(gt->i915);
+}
+
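+/*
+ * Emit an MI_STORE_REGISTER_MEM copying the engine's CS timestamp
+ * register into the given dword slot of the request's HWSP; on gen8+
+ * the command carries a 64b address, hence the extra length dword
+ * (cmd++).
+ */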
+static int write_timestamp(struct i915_request *rq, int slot)
+{
+ u32 cmd;
+ u32 *cs;
+
+ cs = intel_ring_begin(rq, 4);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ cmd = MI_STORE_REGISTER_MEM | MI_USE_GGTT;
+ if (INTEL_GEN(rq->i915) >= 8)
+ cmd++;
+ *cs++ = cmd;
+ *cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(rq->engine->mmio_base));
+ *cs++ = i915_request_timeline(rq)->hwsp_offset + slot * sizeof(u32);
+ *cs++ = 0;
+
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+static struct i915_vma *create_empty_batch(struct intel_context *ce)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ u32 *cs;
+ int err;
+
+ obj = i915_gem_object_create_internal(ce->engine->i915, PAGE_SIZE);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ cs = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto err_put;
+ }
+
+ cs[0] = MI_BATCH_BUFFER_END;
+
+ i915_gem_object_flush_map(obj);
+
+ vma = i915_vma_instance(obj, ce->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_unpin;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ goto err_unpin;
+
+ i915_gem_object_unpin_map(obj);
+ return vma;
+
+err_unpin:
+ i915_gem_object_unpin_map(obj);
+err_put:
+ i915_gem_object_put(obj);
+ return ERR_PTR(err);
+}
+
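+/*
+ * Reduce the COUNT (5) samples to one result: sort ascending, drop the
+ * two extremes and return the weighted mean (a[1] + 2*a[2] + a[3]) / 4.
+ */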
+static u32 trifilter(u32 *a)
+{
+ u64 sum;
+
+ sort(a, COUNT, sizeof(*a), cmp_u32, NULL);
+
+ sum = mul_u32_u32(a[2], 2);
+ sum += a[1];
+ sum += a[3];
+
+ return sum >> 2;
+}
+
+static int perf_mi_bb_start(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = 0;
+
+ if (INTEL_GEN(gt->i915) < 7) /* for per-engine CS_TIMESTAMP */
+ return 0;
+
+ perf_begin(gt);
+ for_each_engine(engine, gt, id) {
+ struct intel_context *ce = engine->kernel_context;
+ struct i915_vma *batch;
+ u32 cycles[COUNT];
+ int i;
+
+ intel_engine_pm_get(engine);
+
+ batch = create_empty_batch(ce);
+ if (IS_ERR(batch)) {
+ err = PTR_ERR(batch);
+ intel_engine_pm_put(engine);
+ break;
+ }
+
+ err = i915_vma_sync(batch);
+ if (err) {
+ intel_engine_pm_put(engine);
+ i915_vma_put(batch);
+ break;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(cycles); i++) {
+ struct i915_request *rq;
+
+ rq = i915_request_create(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ break;
+ }
+
+ err = write_timestamp(rq, 2);
+ if (err)
+ goto out;
+
+ err = rq->engine->emit_bb_start(rq,
+ batch->node.start, 8,
+ 0);
+ if (err)
+ goto out;
+
+ err = write_timestamp(rq, 3);
+ if (err)
+ goto out;
+
+out:
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ if (i915_request_wait(rq, 0, HZ / 5) < 0)
+ err = -EIO;
+ i915_request_put(rq);
+ if (err)
+ break;
+
+ cycles[i] = rq->hwsp_seqno[3] - rq->hwsp_seqno[2];
+ }
+ i915_vma_put(batch);
+ intel_engine_pm_put(engine);
+ if (err)
+ break;
+
+ pr_info("%s: MI_BB_START cycles: %u\n",
+ engine->name, trifilter(cycles));
+ }
+ if (perf_end(gt))
+ err = -EIO;
+
+ return err;
+}
+
+static struct i915_vma *create_nop_batch(struct intel_context *ce)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ u32 *cs;
+ int err;
+
+ obj = i915_gem_object_create_internal(ce->engine->i915, SZ_64K);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ cs = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto err_put;
+ }
+
+ memset(cs, 0, SZ_64K);
+ cs[SZ_64K / sizeof(*cs) - 1] = MI_BATCH_BUFFER_END;
+
+ i915_gem_object_flush_map(obj);
+
+ vma = i915_vma_instance(obj, ce->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_unpin;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ goto err_unpin;
+
+ i915_gem_object_unpin_map(obj);
+ return vma;
+
+err_unpin:
+ i915_gem_object_unpin_map(obj);
+err_put:
+ i915_gem_object_put(obj);
+ return ERR_PTR(err);
+}
+
+static int perf_mi_noop(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = 0;
+
+ if (INTEL_GEN(gt->i915) < 7) /* for per-engine CS_TIMESTAMP */
+ return 0;
+
+ perf_begin(gt);
+ for_each_engine(engine, gt, id) {
+ struct intel_context *ce = engine->kernel_context;
+ struct i915_vma *base, *nop;
+ u32 cycles[COUNT];
+ int i;
+
+ intel_engine_pm_get(engine);
+
+ base = create_empty_batch(ce);
+ if (IS_ERR(base)) {
+ err = PTR_ERR(base);
+ intel_engine_pm_put(engine);
+ break;
+ }
+
+ err = i915_vma_sync(base);
+ if (err) {
+ i915_vma_put(base);
+ intel_engine_pm_put(engine);
+ break;
+ }
+
+ nop = create_nop_batch(ce);
+ if (IS_ERR(nop)) {
+ err = PTR_ERR(nop);
+ i915_vma_put(base);
+ intel_engine_pm_put(engine);
+ break;
+ }
+
+ err = i915_vma_sync(nop);
+ if (err) {
+ i915_vma_put(nop);
+ i915_vma_put(base);
+ intel_engine_pm_put(engine);
+ break;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(cycles); i++) {
+ struct i915_request *rq;
+
+ rq = i915_request_create(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ break;
+ }
+
+ err = write_timestamp(rq, 2);
+ if (err)
+ goto out;
+
+ err = rq->engine->emit_bb_start(rq,
+ base->node.start, 8,
+ 0);
+ if (err)
+ goto out;
+
+ err = write_timestamp(rq, 3);
+ if (err)
+ goto out;
+
+ err = rq->engine->emit_bb_start(rq,
+ nop->node.start,
+ nop->node.size,
+ 0);
+ if (err)
+ goto out;
+
+ err = write_timestamp(rq, 4);
+ if (err)
+ goto out;
+
+out:
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ if (i915_request_wait(rq, 0, HZ / 5) < 0)
+ err = -EIO;
+ i915_request_put(rq);
+ if (err)
+ break;
+
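+ /*
+ * Isolate the cost of the MI_NOOP batch alone: subtract the
+ * empty-batch interval [2,3] from the nop-batch interval
+ * [3,4], cancelling the shared MI_BB_START overhead.
+ */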
+ cycles[i] =
+ (rq->hwsp_seqno[4] - rq->hwsp_seqno[3]) -
+ (rq->hwsp_seqno[3] - rq->hwsp_seqno[2]);
+ }
+ i915_vma_put(nop);
+ i915_vma_put(base);
+ intel_engine_pm_put(engine);
+ if (err)
+ break;
+
+ pr_info("%s: 16K MI_NOOP cycles: %u\n",
+ engine->name, trifilter(cycles));
+ }
+ if (perf_end(gt))
+ err = -EIO;
+
+ return err;
+}
+
+int intel_engine_cs_perf_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(perf_mi_bb_start),
+ SUBTEST(perf_mi_noop),
+ };
+
+ if (intel_gt_is_wedged(&i915->gt))
+ return 0;
+
+ return intel_gt_live_subtests(tests, &i915->gt);
+}
static int intel_mmio_bases_check(void *arg)
{
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
index e864406bd2d9..43d4d589749f 100644
--- a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
@@ -11,6 +11,28 @@
#include "intel_gt_requests.h"
#include "i915_selftest.h"
+static int timeline_sync(struct intel_timeline *tl)
+{
+ struct dma_fence *fence;
+ long timeout;
+
+ fence = i915_active_fence_get(&tl->last_request);
+ if (!fence)
+ return 0;
+
+ timeout = dma_fence_wait_timeout(fence, true, HZ / 2);
+ dma_fence_put(fence);
+ if (timeout < 0)
+ return timeout;
+
+ return 0;
+}
+
+static int engine_sync_barrier(struct intel_engine_cs *engine)
+{
+ return timeline_sync(engine->kernel_context->timeline);
+}
+
struct pulse {
struct i915_active active;
struct kref kref;
@@ -53,9 +75,7 @@ static struct pulse *pulse_create(void)
static void pulse_unlock_wait(struct pulse *p)
{
- mutex_lock(&p->active.mutex);
- mutex_unlock(&p->active.mutex);
- flush_work(&p->active.work);
+ i915_active_unlock_wait(&p->active);
}
static int __live_idle_pulse(struct intel_engine_cs *engine,
@@ -92,7 +112,12 @@ static int __live_idle_pulse(struct intel_engine_cs *engine,
GEM_BUG_ON(!llist_empty(&engine->barrier_tasks));
- if (intel_gt_retire_requests_timeout(engine->gt, HZ / 5)) {
+ if (engine_sync_barrier(engine)) {
+ struct drm_printer m = drm_err_printer("pulse");
+
+ pr_err("%s: no heartbeat pulse?\n", engine->name);
+ intel_engine_dump(engine, &m, "%s", engine->name);
+
err = -ETIME;
goto out;
}
@@ -175,8 +200,7 @@ static int __live_heartbeat_fast(struct intel_engine_cs *engine)
int err;
int i;
- ce = intel_context_create(engine->kernel_context->gem_context,
- engine);
+ ce = intel_context_create(engine);
if (IS_ERR(ce))
return PTR_ERR(ce);
diff --git a/drivers/gpu/drm/i915/gt/selftest_gt_pm.c b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
index d1752f15702a..09ff8e4f88af 100644
--- a/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
@@ -6,6 +6,7 @@
*/
#include "selftest_llc.h"
+#include "selftest_rc6.h"
static int live_gt_resume(void *arg)
{
@@ -50,6 +51,7 @@ static int live_gt_resume(void *arg)
int intel_gt_pm_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
+ SUBTEST(live_rc6_manual),
SUBTEST(live_gt_resume),
};
@@ -58,3 +60,20 @@ int intel_gt_pm_live_selftests(struct drm_i915_private *i915)
return intel_gt_live_subtests(tests, &i915->gt);
}
+
+int intel_gt_pm_late_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ /*
+ * These tests may leave the system in an undesirable state.
+ * They are intended to be run last in CI and the system
+ * rebooted afterwards.
+ */
+ SUBTEST(live_rc6_ctx_wa),
+ };
+
+ if (intel_gt_is_wedged(&i915->gt))
+ return 0;
+
+ return intel_gt_live_subtests(tests, &i915->gt);
+}
diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
index 85e9ccf5c304..5dbda2a74272 100644
--- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
@@ -25,7 +25,9 @@
#include <linux/kthread.h>
#include "gem/i915_gem_context.h"
-#include "gt/intel_gt.h"
+
+#include "intel_gt.h"
+#include "intel_engine_heartbeat.h"
#include "intel_engine_pm.h"
#include "i915_selftest.h"
@@ -308,6 +310,24 @@ static bool wait_until_running(struct hang *h, struct i915_request *rq)
1000));
}
+static void engine_heartbeat_disable(struct intel_engine_cs *engine,
+ unsigned long *saved)
+{
+ *saved = engine->props.heartbeat_interval_ms;
+ engine->props.heartbeat_interval_ms = 0;
+
+ intel_engine_pm_get(engine);
+ intel_engine_park_heartbeat(engine);
+}
+
+static void engine_heartbeat_enable(struct intel_engine_cs *engine,
+ unsigned long saved)
+{
+ intel_engine_pm_put(engine);
+
+ engine->props.heartbeat_interval_ms = saved;
+}
+
static int igt_hang_sanitycheck(void *arg)
{
struct intel_gt *gt = arg;
@@ -377,36 +397,30 @@ static int igt_reset_nop(void *arg)
struct intel_gt *gt = arg;
struct i915_gpu_error *global = &gt->i915->gpu_error;
struct intel_engine_cs *engine;
- struct i915_gem_context *ctx;
unsigned int reset_count, count;
enum intel_engine_id id;
- struct drm_file *file;
IGT_TIMEOUT(end_time);
int err = 0;
/* Check that we can reset during non-user portions of requests */
- file = mock_file(gt->i915);
- if (IS_ERR(file))
- return PTR_ERR(file);
-
- ctx = live_context(gt->i915, file);
- if (IS_ERR(ctx)) {
- err = PTR_ERR(ctx);
- goto out;
- }
-
- i915_gem_context_clear_bannable(ctx);
reset_count = i915_reset_count(global);
count = 0;
do {
for_each_engine(engine, gt, id) {
+ struct intel_context *ce;
int i;
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ break;
+ }
+
for (i = 0; i < 16; i++) {
struct i915_request *rq;
- rq = igt_request_alloc(ctx, engine);
+ rq = intel_context_create_request(ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
break;
@@ -414,6 +428,8 @@ static int igt_reset_nop(void *arg)
i915_request_add(rq);
}
+
+ intel_context_put(ce);
}
igt_global_reset_lock(gt);
@@ -437,10 +453,7 @@ static int igt_reset_nop(void *arg)
} while (time_before(jiffies, end_time));
pr_info("%s: %d resets\n", __func__, count);
- err = igt_flush_test(gt->i915);
-out:
- mock_file_free(gt->i915, file);
- if (intel_gt_is_wedged(gt))
+ if (igt_flush_test(gt->i915))
err = -EIO;
return err;
}
@@ -450,36 +463,29 @@ static int igt_reset_nop_engine(void *arg)
struct intel_gt *gt = arg;
struct i915_gpu_error *global = &gt->i915->gpu_error;
struct intel_engine_cs *engine;
- struct i915_gem_context *ctx;
enum intel_engine_id id;
- struct drm_file *file;
- int err = 0;
/* Check that we can engine-reset during non-user portions */
if (!intel_has_reset_engine(gt))
return 0;
- file = mock_file(gt->i915);
- if (IS_ERR(file))
- return PTR_ERR(file);
-
- ctx = live_context(gt->i915, file);
- if (IS_ERR(ctx)) {
- err = PTR_ERR(ctx);
- goto out;
- }
-
- i915_gem_context_clear_bannable(ctx);
for_each_engine(engine, gt, id) {
- unsigned int reset_count, reset_engine_count;
- unsigned int count;
+ unsigned int reset_count, reset_engine_count, count;
+ struct intel_context *ce;
+ unsigned long heartbeat;
IGT_TIMEOUT(end_time);
+ int err;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
reset_count = i915_reset_count(global);
reset_engine_count = i915_reset_engine_count(global, engine);
count = 0;
+ engine_heartbeat_disable(engine, &heartbeat);
set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
do {
int i;
@@ -494,7 +500,7 @@ static int igt_reset_nop_engine(void *arg)
for (i = 0; i < 16; i++) {
struct i915_request *rq;
- rq = igt_request_alloc(ctx, engine);
+ rq = intel_context_create_request(ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
break;
@@ -523,22 +529,18 @@ static int igt_reset_nop_engine(void *arg)
}
} while (time_before(jiffies, end_time));
clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
- pr_info("%s(%s): %d resets\n", __func__, engine->name, count);
+ engine_heartbeat_enable(engine, heartbeat);
- if (err)
- break;
+ pr_info("%s(%s): %d resets\n", __func__, engine->name, count);
- err = igt_flush_test(gt->i915);
+ intel_context_put(ce);
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
if (err)
- break;
+ return err;
}
- err = igt_flush_test(gt->i915);
-out:
- mock_file_free(gt->i915, file);
- if (intel_gt_is_wedged(gt))
- err = -EIO;
- return err;
+ return 0;
}
static int __igt_reset_engine(struct intel_gt *gt, bool active)
@@ -562,6 +564,7 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active)
for_each_engine(engine, gt, id) {
unsigned int reset_count, reset_engine_count;
+ unsigned long heartbeat;
IGT_TIMEOUT(end_time);
if (active && !intel_engine_can_store_dword(engine))
@@ -577,7 +580,7 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active)
reset_count = i915_reset_count(global);
reset_engine_count = i915_reset_engine_count(global, engine);
- intel_engine_pm_get(engine);
+ engine_heartbeat_disable(engine, &heartbeat);
set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
do {
if (active) {
@@ -629,7 +632,7 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active)
}
} while (time_before(jiffies, end_time));
clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
- intel_engine_pm_put(engine);
+ engine_heartbeat_enable(engine, heartbeat);
if (err)
break;
@@ -699,43 +702,43 @@ static int active_engine(void *data)
struct active_engine *arg = data;
struct intel_engine_cs *engine = arg->engine;
struct i915_request *rq[8] = {};
- struct i915_gem_context *ctx[ARRAY_SIZE(rq)];
- struct drm_file *file;
- unsigned long count = 0;
+ struct intel_context *ce[ARRAY_SIZE(rq)];
+ unsigned long count;
int err = 0;
- file = mock_file(engine->i915);
- if (IS_ERR(file))
- return PTR_ERR(file);
-
- for (count = 0; count < ARRAY_SIZE(ctx); count++) {
- ctx[count] = live_context(engine->i915, file);
- if (IS_ERR(ctx[count])) {
- err = PTR_ERR(ctx[count]);
+ for (count = 0; count < ARRAY_SIZE(ce); count++) {
+ ce[count] = intel_context_create(engine);
+ if (IS_ERR(ce[count])) {
+ err = PTR_ERR(ce[count]);
while (--count)
- i915_gem_context_put(ctx[count]);
- goto err_file;
+ intel_context_put(ce[count]);
+ return err;
}
}
+ count = 0;
while (!kthread_should_stop()) {
unsigned int idx = count++ & (ARRAY_SIZE(rq) - 1);
struct i915_request *old = rq[idx];
struct i915_request *new;
- new = igt_request_alloc(ctx[idx], engine);
+ new = intel_context_create_request(ce[idx]);
if (IS_ERR(new)) {
err = PTR_ERR(new);
break;
}
- if (arg->flags & TEST_PRIORITY)
- ctx[idx]->sched.priority =
- i915_prandom_u32_max_state(512, &prng);
-
rq[idx] = i915_request_get(new);
i915_request_add(new);
+ if (engine->schedule && arg->flags & TEST_PRIORITY) {
+ struct i915_sched_attr attr = {
+ .priority =
+ i915_prandom_u32_max_state(512, &prng),
+ };
+ engine->schedule(rq[idx], &attr);
+ }
+
err = active_request_put(old);
if (err)
break;
@@ -749,10 +752,10 @@ static int active_engine(void *data)
/* Keep the first error */
if (!err)
err = err__;
+
+ intel_context_put(ce[count]);
}
-err_file:
- mock_file_free(engine->i915, file);
return err;
}
@@ -786,6 +789,7 @@ static int __igt_reset_engines(struct intel_gt *gt,
struct active_engine threads[I915_NUM_ENGINES] = {};
unsigned long device = i915_reset_count(global);
unsigned long count = 0, reported;
+ unsigned long heartbeat;
IGT_TIMEOUT(end_time);
if (flags & TEST_ACTIVE &&
@@ -828,7 +832,7 @@ static int __igt_reset_engines(struct intel_gt *gt,
yield(); /* start all threads before we begin */
- intel_engine_pm_get(engine);
+ engine_heartbeat_disable(engine, &heartbeat);
set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
do {
struct i915_request *rq = NULL;
@@ -902,7 +906,8 @@ static int __igt_reset_engines(struct intel_gt *gt,
}
} while (time_before(jiffies, end_time));
clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
- intel_engine_pm_put(engine);
+ engine_heartbeat_enable(engine, heartbeat);
+
pr_info("i915_reset_engine(%s:%s): %lu resets\n",
engine->name, test_name, count);
@@ -1300,32 +1305,21 @@ static int igt_reset_evict_ggtt(void *arg)
static int igt_reset_evict_ppgtt(void *arg)
{
struct intel_gt *gt = arg;
- struct i915_gem_context *ctx;
- struct i915_address_space *vm;
- struct drm_file *file;
+ struct i915_ppgtt *ppgtt;
int err;
- file = mock_file(gt->i915);
- if (IS_ERR(file))
- return PTR_ERR(file);
+ /* aliasing == global gtt locking, covered above */
+ if (INTEL_PPGTT(gt->i915) < INTEL_PPGTT_FULL)
+ return 0;
- ctx = live_context(gt->i915, file);
- if (IS_ERR(ctx)) {
- err = PTR_ERR(ctx);
- goto out;
- }
+ ppgtt = i915_ppgtt_create(gt->i915);
+ if (IS_ERR(ppgtt))
+ return PTR_ERR(ppgtt);
- err = 0;
- vm = i915_gem_context_get_vm_rcu(ctx);
- if (!i915_is_ggtt(vm)) {
- /* aliasing == global gtt locking, covered above */
- err = __igt_reset_evict_vma(gt, vm,
- evict_vma, EXEC_OBJECT_WRITE);
- }
- i915_vm_put(vm);
+ err = __igt_reset_evict_vma(gt, &ppgtt->vm,
+ evict_vma, EXEC_OBJECT_WRITE);
+ i915_vm_put(&ppgtt->vm);
-out:
- mock_file_free(gt->i915, file);
return err;
}
diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
index eb71ac2f992c..9ec9833c9c7b 100644
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -50,14 +50,31 @@ static struct i915_vma *create_scratch(struct intel_gt *gt)
return vma;
}
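+/*
+ * Same helpers as in selftest_hangcheck.c: park the heartbeat so that
+ * background kernel requests do not perturb the timeslicing and
+ * preemption tests.
+ */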
+static void engine_heartbeat_disable(struct intel_engine_cs *engine,
+ unsigned long *saved)
+{
+ *saved = engine->props.heartbeat_interval_ms;
+ engine->props.heartbeat_interval_ms = 0;
+
+ intel_engine_pm_get(engine);
+ intel_engine_park_heartbeat(engine);
+}
+
+static void engine_heartbeat_enable(struct intel_engine_cs *engine,
+ unsigned long saved)
+{
+ intel_engine_pm_put(engine);
+
+ engine->props.heartbeat_interval_ms = saved;
+}
+
static int live_sanitycheck(void *arg)
{
struct intel_gt *gt = arg;
- struct i915_gem_engines_iter it;
- struct i915_gem_context *ctx;
- struct intel_context *ce;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
struct igt_spinner spin;
- int err = -ENOMEM;
+ int err = 0;
if (!HAS_LOGICAL_RING_CONTEXTS(gt->i915))
return 0;
@@ -65,17 +82,20 @@ static int live_sanitycheck(void *arg)
if (igt_spinner_init(&spin, gt))
return -ENOMEM;
- ctx = kernel_context(gt->i915);
- if (!ctx)
- goto err_spin;
-
- for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+ for_each_engine(engine, gt, id) {
+ struct intel_context *ce;
struct i915_request *rq;
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ break;
+ }
+
rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
- goto err_ctx;
+ goto out_ctx;
}
i915_request_add(rq);
@@ -84,21 +104,21 @@ static int live_sanitycheck(void *arg)
GEM_TRACE_DUMP();
intel_gt_set_wedged(gt);
err = -EIO;
- goto err_ctx;
+ goto out_ctx;
}
igt_spinner_end(&spin);
if (igt_flush_test(gt->i915)) {
err = -EIO;
- goto err_ctx;
+ goto out_ctx;
}
+
+out_ctx:
+ intel_context_put(ce);
+ if (err)
+ break;
}
- err = 0;
-err_ctx:
- i915_gem_context_unlock_engines(ctx);
- kernel_context_close(ctx);
-err_spin:
igt_spinner_fini(&spin);
return err;
}
@@ -106,7 +126,6 @@ err_spin:
static int live_unlite_restore(struct intel_gt *gt, int prio)
{
struct intel_engine_cs *engine;
- struct i915_gem_context *ctx;
enum intel_engine_id id;
struct igt_spinner spin;
int err = -ENOMEM;
@@ -119,15 +138,12 @@ static int live_unlite_restore(struct intel_gt *gt, int prio)
if (igt_spinner_init(&spin, gt))
return err;
- ctx = kernel_context(gt->i915);
- if (!ctx)
- goto err_spin;
-
err = 0;
for_each_engine(engine, gt, id) {
struct intel_context *ce[2] = {};
struct i915_request *rq[2];
struct igt_live_test t;
+ unsigned long saved;
int n;
if (prio && !intel_engine_has_preemption(engine))
@@ -140,11 +156,12 @@ static int live_unlite_restore(struct intel_gt *gt, int prio)
err = -EIO;
break;
}
+ engine_heartbeat_disable(engine, &saved);
for (n = 0; n < ARRAY_SIZE(ce); n++) {
struct intel_context *tmp;
- tmp = intel_context_create(ctx, engine);
+ tmp = intel_context_create(engine);
if (IS_ERR(tmp)) {
err = PTR_ERR(tmp);
goto err_ce;
@@ -247,14 +264,13 @@ err_ce:
intel_context_put(ce[n]);
}
+ engine_heartbeat_enable(engine, saved);
if (igt_live_test_end(&t))
err = -EIO;
if (err)
break;
}
- kernel_context_close(ctx);
-err_spin:
igt_spinner_fini(&spin);
return err;
}
@@ -309,17 +325,17 @@ emit_semaphore_chain(struct i915_request *rq, struct i915_vma *vma, int idx)
static struct i915_request *
semaphore_queue(struct intel_engine_cs *engine, struct i915_vma *vma, int idx)
{
- struct i915_gem_context *ctx;
+ struct intel_context *ce;
struct i915_request *rq;
int err;
- ctx = kernel_context(engine->i915);
- if (!ctx)
- return ERR_PTR(-ENOMEM);
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return ERR_CAST(ce);
- rq = igt_request_alloc(ctx, engine);
+ rq = intel_context_create_request(ce);
if (IS_ERR(rq))
- goto out_ctx;
+ goto out_ce;
err = 0;
if (rq->engine->emit_init_breadcrumb)
@@ -332,8 +348,8 @@ semaphore_queue(struct intel_engine_cs *engine, struct i915_vma *vma, int idx)
if (err)
rq = ERR_PTR(err);
-out_ctx:
- kernel_context_close(ctx);
+out_ce:
+ intel_context_put(ce);
return rq;
}
@@ -348,7 +364,7 @@ release_queue(struct intel_engine_cs *engine,
struct i915_request *rq;
u32 *cs;
- rq = i915_request_create(engine->kernel_context);
+ rq = intel_engine_create_kernel_request(engine);
if (IS_ERR(rq))
return PTR_ERR(rq);
@@ -468,12 +484,16 @@ static int live_timeslice_preempt(void *arg)
enum intel_engine_id id;
for_each_engine(engine, gt, id) {
+ unsigned long saved;
+
if (!intel_engine_has_preemption(engine))
continue;
memset(vaddr, 0, PAGE_SIZE);
+ engine_heartbeat_disable(engine, &saved);
err = slice_semaphore_queue(engine, vma, count);
+ engine_heartbeat_enable(engine, saved);
if (err)
goto err_pin;
@@ -497,7 +517,7 @@ static struct i915_request *nop_request(struct intel_engine_cs *engine)
{
struct i915_request *rq;
- rq = i915_request_create(engine->kernel_context);
+ rq = intel_engine_create_kernel_request(engine);
if (IS_ERR(rq))
return rq;
@@ -566,17 +586,19 @@ static int live_timeslice_queue(void *arg)
.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX),
};
struct i915_request *rq, *nop;
+ unsigned long saved;
if (!intel_engine_has_preemption(engine))
continue;
+ engine_heartbeat_disable(engine, &saved);
memset(vaddr, 0, PAGE_SIZE);
/* ELSP[0]: semaphore wait */
rq = semaphore_queue(engine, vma, 0);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
- goto err_pin;
+ goto err_heartbeat;
}
engine->schedule(rq, &attr);
wait_for_submit(engine, rq);
@@ -585,8 +607,7 @@ static int live_timeslice_queue(void *arg)
nop = nop_request(engine);
if (IS_ERR(nop)) {
err = PTR_ERR(nop);
- i915_request_put(rq);
- goto err_pin;
+ goto err_rq;
}
wait_for_submit(engine, nop);
i915_request_put(nop);
@@ -596,10 +617,8 @@ static int live_timeslice_queue(void *arg)
/* Queue: semaphore signal, matching priority as semaphore */
err = release_queue(engine, vma, 1, effective_prio(rq));
- if (err) {
- i915_request_put(rq);
- goto err_pin;
- }
+ if (err)
+ goto err_rq;
intel_engine_flush_submission(engine);
if (!READ_ONCE(engine->execlists.timer.expires) &&
@@ -630,12 +649,14 @@ static int live_timeslice_queue(void *arg)
memset(vaddr, 0xff, PAGE_SIZE);
err = -EIO;
}
+err_rq:
i915_request_put(rq);
+err_heartbeat:
+ engine_heartbeat_enable(engine, saved);
if (err)
break;
}
-err_pin:
i915_vma_unpin(vma);
err_map:
i915_gem_object_unpin_map(obj);
@@ -748,15 +769,19 @@ static int live_busywait_preempt(void *arg)
*cs++ = 0;
intel_ring_advance(lo, cs);
+
+ i915_request_get(lo);
i915_request_add(lo);
if (wait_for(READ_ONCE(*map), 10)) {
+ i915_request_put(lo);
err = -ETIMEDOUT;
goto err_vma;
}
/* Low priority request should be busywaiting now */
if (i915_request_wait(lo, 0, 1) != -ETIME) {
+ i915_request_put(lo);
pr_err("%s: Busywaiting request did not!\n",
engine->name);
err = -EIO;
@@ -766,6 +791,7 @@ static int live_busywait_preempt(void *arg)
hi = igt_request_alloc(ctx_hi, engine);
if (IS_ERR(hi)) {
err = PTR_ERR(hi);
+ i915_request_put(lo);
goto err_vma;
}
@@ -773,6 +799,7 @@ static int live_busywait_preempt(void *arg)
if (IS_ERR(cs)) {
err = PTR_ERR(cs);
i915_request_add(hi);
+ i915_request_put(lo);
goto err_vma;
}
@@ -793,11 +820,13 @@ static int live_busywait_preempt(void *arg)
intel_engine_dump(engine, &p, "%s\n", engine->name);
GEM_TRACE_DUMP();
+ i915_request_put(lo);
intel_gt_set_wedged(gt);
err = -EIO;
goto err_vma;
}
GEM_BUG_ON(READ_ONCE(*map));
+ i915_request_put(lo);
if (igt_live_test_end(&t)) {
err = -EIO;
@@ -1187,13 +1216,13 @@ static int __cancel_active0(struct live_preempt_cancel *arg)
__func__, arg->engine->name))
return -EIO;
- clear_bit(CONTEXT_BANNED, &arg->a.ctx->flags);
rq = spinner_create_request(&arg->a.spin,
arg->a.ctx, arg->engine,
MI_ARB_CHECK);
if (IS_ERR(rq))
return PTR_ERR(rq);
+ clear_bit(CONTEXT_BANNED, &rq->context->flags);
i915_request_get(rq);
i915_request_add(rq);
if (!igt_wait_for_spinner(&arg->a.spin, rq)) {
@@ -1201,7 +1230,7 @@ static int __cancel_active0(struct live_preempt_cancel *arg)
goto out;
}
- i915_gem_context_set_banned(arg->a.ctx);
+ intel_context_set_banned(rq->context);
err = intel_engine_pulse(arg->engine);
if (err)
goto out;
@@ -1236,13 +1265,13 @@ static int __cancel_active1(struct live_preempt_cancel *arg)
__func__, arg->engine->name))
return -EIO;
- clear_bit(CONTEXT_BANNED, &arg->a.ctx->flags);
rq[0] = spinner_create_request(&arg->a.spin,
arg->a.ctx, arg->engine,
MI_NOOP); /* no preemption */
if (IS_ERR(rq[0]))
return PTR_ERR(rq[0]);
+ clear_bit(CONTEXT_BANNED, &rq[0]->context->flags);
i915_request_get(rq[0]);
i915_request_add(rq[0]);
if (!igt_wait_for_spinner(&arg->a.spin, rq[0])) {
@@ -1250,7 +1279,6 @@ static int __cancel_active1(struct live_preempt_cancel *arg)
goto out;
}
- clear_bit(CONTEXT_BANNED, &arg->b.ctx->flags);
rq[1] = spinner_create_request(&arg->b.spin,
arg->b.ctx, arg->engine,
MI_ARB_CHECK);
@@ -1259,13 +1287,14 @@ static int __cancel_active1(struct live_preempt_cancel *arg)
goto out;
}
+ clear_bit(CONTEXT_BANNED, &rq[1]->context->flags);
i915_request_get(rq[1]);
err = i915_request_await_dma_fence(rq[1], &rq[0]->fence);
i915_request_add(rq[1]);
if (err)
goto out;
- i915_gem_context_set_banned(arg->b.ctx);
+ intel_context_set_banned(rq[1]->context);
err = intel_engine_pulse(arg->engine);
if (err)
goto out;
@@ -1308,13 +1337,13 @@ static int __cancel_queued(struct live_preempt_cancel *arg)
__func__, arg->engine->name))
return -EIO;
- clear_bit(CONTEXT_BANNED, &arg->a.ctx->flags);
rq[0] = spinner_create_request(&arg->a.spin,
arg->a.ctx, arg->engine,
MI_ARB_CHECK);
if (IS_ERR(rq[0]))
return PTR_ERR(rq[0]);
+ clear_bit(CONTEXT_BANNED, &rq[0]->context->flags);
i915_request_get(rq[0]);
i915_request_add(rq[0]);
if (!igt_wait_for_spinner(&arg->a.spin, rq[0])) {
@@ -1322,13 +1351,13 @@ static int __cancel_queued(struct live_preempt_cancel *arg)
goto out;
}
- clear_bit(CONTEXT_BANNED, &arg->b.ctx->flags);
rq[1] = igt_request_alloc(arg->b.ctx, arg->engine);
if (IS_ERR(rq[1])) {
err = PTR_ERR(rq[1]);
goto out;
}
+ clear_bit(CONTEXT_BANNED, &rq[1]->context->flags);
i915_request_get(rq[1]);
err = i915_request_await_dma_fence(rq[1], &rq[0]->fence);
i915_request_add(rq[1]);
@@ -1349,7 +1378,7 @@ static int __cancel_queued(struct live_preempt_cancel *arg)
if (err)
goto out;
- i915_gem_context_set_banned(arg->a.ctx);
+ intel_context_set_banned(rq[2]->context);
err = intel_engine_pulse(arg->engine);
if (err)
goto out;
@@ -1396,13 +1425,13 @@ static int __cancel_hostile(struct live_preempt_cancel *arg)
return 0;
GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
- clear_bit(CONTEXT_BANNED, &arg->a.ctx->flags);
rq = spinner_create_request(&arg->a.spin,
arg->a.ctx, arg->engine,
MI_NOOP); /* preemption disabled */
if (IS_ERR(rq))
return PTR_ERR(rq);
+ clear_bit(CONTEXT_BANNED, &rq->context->flags);
i915_request_get(rq);
i915_request_add(rq);
if (!igt_wait_for_spinner(&arg->a.spin, rq)) {
@@ -1410,7 +1439,7 @@ static int __cancel_hostile(struct live_preempt_cancel *arg)
goto out;
}
- i915_gem_context_set_banned(arg->a.ctx);
+ intel_context_set_banned(rq->context);
err = intel_engine_pulse(arg->engine); /* force reset */
if (err)
goto out;
@@ -1665,6 +1694,7 @@ static int live_suppress_wait_preempt(void *arg)
{
struct intel_gt *gt = arg;
struct preempt_client client[4];
+ struct i915_request *rq[ARRAY_SIZE(client)] = {};
struct intel_engine_cs *engine;
enum intel_engine_id id;
int err = -ENOMEM;
@@ -1698,7 +1728,6 @@ static int live_suppress_wait_preempt(void *arg)
continue;
for (depth = 0; depth < ARRAY_SIZE(client); depth++) {
- struct i915_request *rq[ARRAY_SIZE(client)];
struct i915_request *dummy;
engine->execlists.preempt_hang.count = 0;
@@ -1708,18 +1737,22 @@ static int live_suppress_wait_preempt(void *arg)
goto err_client_3;
for (i = 0; i < ARRAY_SIZE(client); i++) {
- rq[i] = spinner_create_request(&client[i].spin,
- client[i].ctx, engine,
- MI_NOOP);
- if (IS_ERR(rq[i])) {
- err = PTR_ERR(rq[i]);
+ struct i915_request *this;
+
+ this = spinner_create_request(&client[i].spin,
+ client[i].ctx, engine,
+ MI_NOOP);
+ if (IS_ERR(this)) {
+ err = PTR_ERR(this);
goto err_wedged;
}
/* Disable NEWCLIENT promotion */
- __i915_active_fence_set(&i915_request_timeline(rq[i])->last_request,
+ __i915_active_fence_set(&i915_request_timeline(this)->last_request,
&dummy->fence);
- i915_request_add(rq[i]);
+
+ rq[i] = i915_request_get(this);
+ i915_request_add(this);
}
dummy_request_free(dummy);
@@ -1740,8 +1773,11 @@ static int live_suppress_wait_preempt(void *arg)
goto err_wedged;
}
- for (i = 0; i < ARRAY_SIZE(client); i++)
+ for (i = 0; i < ARRAY_SIZE(client); i++) {
igt_spinner_end(&client[i].spin);
+ i915_request_put(rq[i]);
+ rq[i] = NULL;
+ }
if (igt_flush_test(gt->i915))
goto err_wedged;
@@ -1769,8 +1805,10 @@ err_client_0:
return err;
err_wedged:
- for (i = 0; i < ARRAY_SIZE(client); i++)
+ for (i = 0; i < ARRAY_SIZE(client); i++) {
igt_spinner_end(&client[i].spin);
+ i915_request_put(rq[i]);
+ }
intel_gt_set_wedged(gt);
err = -EIO;
goto err_client_3;
@@ -1815,6 +1853,8 @@ static int live_chain_preempt(void *arg)
MI_ARB_CHECK);
if (IS_ERR(rq))
goto err_wedged;
+
+ i915_request_get(rq);
i915_request_add(rq);
ring_size = rq->wa_tail - rq->head;
@@ -1827,8 +1867,10 @@ static int live_chain_preempt(void *arg)
igt_spinner_end(&lo.spin);
if (i915_request_wait(rq, 0, HZ / 2) < 0) {
pr_err("Timed out waiting to flush %s\n", engine->name);
+ i915_request_put(rq);
goto err_wedged;
}
+ i915_request_put(rq);
if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
err = -EIO;
@@ -1862,6 +1904,8 @@ static int live_chain_preempt(void *arg)
rq = igt_request_alloc(hi.ctx, engine);
if (IS_ERR(rq))
goto err_wedged;
+
+ i915_request_get(rq);
i915_request_add(rq);
engine->schedule(rq, &attr);
@@ -1874,14 +1918,19 @@ static int live_chain_preempt(void *arg)
count);
intel_engine_dump(engine, &p,
"%s\n", engine->name);
+ i915_request_put(rq);
goto err_wedged;
}
igt_spinner_end(&lo.spin);
+ i915_request_put(rq);
rq = igt_request_alloc(lo.ctx, engine);
if (IS_ERR(rq))
goto err_wedged;
+
+ i915_request_get(rq);
i915_request_add(rq);
+
if (i915_request_wait(rq, 0, HZ / 5) < 0) {
struct drm_printer p =
drm_info_printer(gt->i915->drm.dev);
@@ -1890,8 +1939,11 @@ static int live_chain_preempt(void *arg)
count);
intel_engine_dump(engine, &p,
"%s\n", engine->name);
+
+ i915_request_put(rq);
goto err_wedged;
}
+ i915_request_put(rq);
}
if (igt_live_test_end(&t)) {
@@ -1915,6 +1967,201 @@ err_wedged:
goto err_client_lo;
}
+static int create_gang(struct intel_engine_cs *engine,
+ struct i915_request **prev)
+{
+ struct drm_i915_gem_object *obj;
+ struct intel_context *ce;
+ struct i915_request *rq;
+ struct i915_vma *vma;
+ u32 *cs;
+ int err;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ obj = i915_gem_object_create_internal(engine->i915, 4096);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto err_ce;
+ }
+
+ vma = i915_vma_instance(obj, ce->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_obj;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ goto err_obj;
+
+ cs = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto err_obj;
+ }
+
+ /* Semaphore target: spin until zero */
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+
+ *cs++ = MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_EQ_SDD;
+ *cs++ = 0;
+ *cs++ = lower_32_bits(vma->node.start);
+ *cs++ = upper_32_bits(vma->node.start);
+
+ if (*prev) {
+ u64 offset = (*prev)->batch->node.start;
+
+ /* Terminate the spinner in the next lower priority batch. */
+ *cs++ = MI_STORE_DWORD_IMM_GEN4;
+ *cs++ = lower_32_bits(offset);
+ *cs++ = upper_32_bits(offset);
+ *cs++ = 0;
+ }
+
+ *cs++ = MI_BATCH_BUFFER_END;
+ i915_gem_object_flush_map(obj);
+ i915_gem_object_unpin_map(obj);
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_obj;
+ }
+
+ rq->batch = vma;
+ i915_request_get(rq);
+
+ i915_vma_lock(vma);
+ err = i915_request_await_object(rq, vma->obj, false);
+ if (!err)
+ err = i915_vma_move_to_active(vma, rq, 0);
+ if (!err)
+ err = rq->engine->emit_bb_start(rq,
+ vma->node.start,
+ PAGE_SIZE, 0);
+ i915_vma_unlock(vma);
+ i915_request_add(rq);
+ if (err)
+ goto err_rq;
+
+ i915_gem_object_put(obj);
+ intel_context_put(ce);
+
+ rq->client_link.next = &(*prev)->client_link;
+ *prev = rq;
+ return 0;
+
+err_rq:
+ i915_request_put(rq);
+err_obj:
+ i915_gem_object_put(obj);
+err_ce:
+ intel_context_put(ce);
+ return err;
+}
+
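For orientation, a sketch of the dword layout each gang batch ends up with, derived from the emission code above:

	/*
	 * dw0: MI_ARB_ON_OFF | MI_ARB_ENABLE     <- doubles as the semaphore
	 *                                           target; non-zero until the
	 *                                           batch is released
	 * dw1: MI_SEMAPHORE_WAIT | MI_SEMAPHORE_POLL | MI_SEMAPHORE_SAD_EQ_SDD
	 * dw2: 0                                  (poll until *batch == 0)
	 * dw3: lower_32_bits(vma->node.start)
	 * dw4: upper_32_bits(vma->node.start)
	 * -- only when a lower priority batch precedes this one --
	 * dw5: MI_STORE_DWORD_IMM_GEN4            (store 0 over dw0 of the
	 * dw6: lower_32_bits(prev batch)           previous batch, ending its
	 * dw7: upper_32_bits(prev batch)           semaphore wait)
	 * dw8: 0
	 * dwN: MI_BATCH_BUFFER_END
	 */
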
+static int live_preempt_gang(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
+ return 0;
+
+ /*
+ * Build as long a chain of preempters as we can, with each
+ * request higher priority than the last. Once we are ready, we release
+ * the last batch which then percolates down the chain, each releasing
+ * the next oldest in turn. The intent is to simply push as hard as we
+ * can with the number of preemptions, trying to exceed narrow HW
+ * limits. At a minimum, we insist that we can sort all the user
+ * high priority levels into execution order.
+ */
+
+ for_each_engine(engine, gt, id) {
+ struct i915_request *rq = NULL;
+ struct igt_live_test t;
+ IGT_TIMEOUT(end_time);
+ int prio = 0;
+ int err = 0;
+ u32 *cs;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ if (igt_live_test_begin(&t, gt->i915, __func__, engine->name))
+ return -EIO;
+
+ do {
+ struct i915_sched_attr attr = {
+ .priority = I915_USER_PRIORITY(prio++),
+ };
+
+ err = create_gang(engine, &rq);
+ if (err)
+ break;
+
+ /* Submit each spinner at increasing priority */
+ engine->schedule(rq, &attr);
+
+ if (prio <= I915_PRIORITY_MAX)
+ continue;
+
+ if (prio > (INT_MAX >> I915_USER_PRIORITY_SHIFT))
+ break;
+
+ if (__igt_timeout(end_time, NULL))
+ break;
+ } while (1);
+ pr_debug("%s: Preempt chain of %d requests\n",
+ engine->name, prio);
+
+ /*
+ * The last spinner was submitted at the highest priority and
+ * should execute first. When that spinner completes,
+ * it will terminate the next lowest spinner until there
+ * are no more spinners and the gang is complete.
+ */
+ cs = i915_gem_object_pin_map(rq->batch->obj, I915_MAP_WC);
+ if (!IS_ERR(cs)) {
+ *cs = 0;
+ i915_gem_object_unpin_map(rq->batch->obj);
+ } else {
+ err = PTR_ERR(cs);
+ intel_gt_set_wedged(gt);
+ }
+
+ while (rq) { /* wait for each rq from highest to lowest prio */
+ struct i915_request *n =
+ list_next_entry(rq, client_link);
+
+ if (err == 0 && i915_request_wait(rq, 0, HZ / 5) < 0) {
+ struct drm_printer p =
+ drm_info_printer(engine->i915->drm.dev);
+
+ pr_err("Failed to flush chain of %d requests, at %d\n",
+ prio, rq_prio(rq) >> I915_USER_PRIORITY_SHIFT);
+ intel_engine_dump(engine, &p,
+ "%s\n", engine->name);
+
+ err = -ETIME;
+ }
+
+ i915_request_put(rq);
+ rq = n;
+ }
+
+ if (igt_live_test_end(&t))
+ err = -EIO;
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
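A minimal host-side model of the percolating release that live_preempt_gang() relies on -- purely illustrative C with invented names, not driver code:

	#include <stdio.h>

	#define GANG 8

	int main(void)
	{
		int spinning[GANG];
		int i;

		for (i = 0; i < GANG; i++)
			spinning[i] = 1;	/* every batch armed */

		spinning[GANG - 1] = 0;		/* release the newest, highest priority batch */

		/*
		 * Each released batch stores 0 into the semaphore of the
		 * next older batch, mirroring the MI_STORE_DWORD_IMM that
		 * create_gang() emits.
		 */
		for (i = GANG - 1; i > 0; i--)
			if (!spinning[i])
				spinning[i - 1] = 0;

		for (i = 0; i < GANG; i++)
			printf("batch %d: %s\n", i, spinning[i] ? "spinning" : "done");

		return 0;
	}
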
static int live_preempt_hang(void *arg)
{
struct intel_gt *gt = arg;
@@ -2391,28 +2638,18 @@ static int nop_virtual_engine(struct intel_gt *gt,
#define CHAIN BIT(0)
{
IGT_TIMEOUT(end_time);
- struct i915_request *request[16];
- struct i915_gem_context *ctx[16];
+ struct i915_request *request[16] = {};
struct intel_context *ve[16];
unsigned long n, prime, nc;
struct igt_live_test t;
ktime_t times[2] = {};
int err;
- GEM_BUG_ON(!nctx || nctx > ARRAY_SIZE(ctx));
+ GEM_BUG_ON(!nctx || nctx > ARRAY_SIZE(ve));
for (n = 0; n < nctx; n++) {
- ctx[n] = kernel_context(gt->i915);
- if (!ctx[n]) {
- err = -ENOMEM;
- nctx = n;
- goto out;
- }
-
- ve[n] = intel_execlists_create_virtual(ctx[n],
- siblings, nsibling);
+ ve[n] = intel_execlists_create_virtual(siblings, nsibling);
if (IS_ERR(ve[n])) {
- kernel_context_close(ctx[n]);
err = PTR_ERR(ve[n]);
nctx = n;
goto out;
@@ -2421,7 +2658,6 @@ static int nop_virtual_engine(struct intel_gt *gt,
err = intel_context_pin(ve[n]);
if (err) {
intel_context_put(ve[n]);
- kernel_context_close(ctx[n]);
nctx = n;
goto out;
}
@@ -2437,27 +2673,35 @@ static int nop_virtual_engine(struct intel_gt *gt,
if (flags & CHAIN) {
for (nc = 0; nc < nctx; nc++) {
for (n = 0; n < prime; n++) {
- request[nc] =
- i915_request_create(ve[nc]);
- if (IS_ERR(request[nc])) {
- err = PTR_ERR(request[nc]);
+ struct i915_request *rq;
+
+ rq = i915_request_create(ve[nc]);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
goto out;
}
- i915_request_add(request[nc]);
+ if (request[nc])
+ i915_request_put(request[nc]);
+ request[nc] = i915_request_get(rq);
+ i915_request_add(rq);
}
}
} else {
for (n = 0; n < prime; n++) {
for (nc = 0; nc < nctx; nc++) {
- request[nc] =
- i915_request_create(ve[nc]);
- if (IS_ERR(request[nc])) {
- err = PTR_ERR(request[nc]);
+ struct i915_request *rq;
+
+ rq = i915_request_create(ve[nc]);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
goto out;
}
- i915_request_add(request[nc]);
+ if (request[nc])
+ i915_request_put(request[nc]);
+ request[nc] = i915_request_get(rq);
+ i915_request_add(rq);
}
}
}
@@ -2483,6 +2727,11 @@ static int nop_virtual_engine(struct intel_gt *gt,
if (prime == 1)
times[0] = times[1];
+ for (nc = 0; nc < nctx; nc++) {
+ i915_request_put(request[nc]);
+ request[nc] = NULL;
+ }
+
if (__igt_timeout(end_time, NULL))
break;
}
@@ -2500,9 +2749,9 @@ out:
err = -EIO;
for (nc = 0; nc < nctx; nc++) {
+ i915_request_put(request[nc]);
intel_context_unpin(ve[nc]);
intel_context_put(ve[nc]);
- kernel_context_close(ctx[nc]);
}
return err;
}
@@ -2561,7 +2810,6 @@ static int mask_virtual_engine(struct intel_gt *gt,
unsigned int nsibling)
{
struct i915_request *request[MAX_ENGINE_INSTANCE + 1];
- struct i915_gem_context *ctx;
struct intel_context *ve;
struct igt_live_test t;
unsigned int n;
@@ -2572,11 +2820,7 @@ static int mask_virtual_engine(struct intel_gt *gt,
* restrict it to our desired engine within the virtual engine.
*/
- ctx = kernel_context(gt->i915);
- if (!ctx)
- return -ENOMEM;
-
- ve = intel_execlists_create_virtual(ctx, siblings, nsibling);
+ ve = intel_execlists_create_virtual(siblings, nsibling);
if (IS_ERR(ve)) {
err = PTR_ERR(ve);
goto out_close;
@@ -2644,7 +2888,6 @@ out_unpin:
out_put:
intel_context_put(ve);
out_close:
- kernel_context_close(ctx);
return err;
}
@@ -2684,7 +2927,6 @@ static int preserved_virtual_engine(struct intel_gt *gt,
unsigned int nsibling)
{
struct i915_request *last = NULL;
- struct i915_gem_context *ctx;
struct intel_context *ve;
struct i915_vma *scratch;
struct igt_live_test t;
@@ -2692,17 +2934,11 @@ static int preserved_virtual_engine(struct intel_gt *gt,
int err = 0;
u32 *cs;
- ctx = kernel_context(gt->i915);
- if (!ctx)
- return -ENOMEM;
-
scratch = create_scratch(siblings[0]->gt);
- if (IS_ERR(scratch)) {
- err = PTR_ERR(scratch);
- goto out_close;
- }
+ if (IS_ERR(scratch))
+ return PTR_ERR(scratch);
- ve = intel_execlists_create_virtual(ctx, siblings, nsibling);
+ ve = intel_execlists_create_virtual(siblings, nsibling);
if (IS_ERR(ve)) {
err = PTR_ERR(ve);
goto out_scratch;
@@ -2785,8 +3021,6 @@ out_put:
intel_context_put(ve);
out_scratch:
i915_vma_unpin_and_release(&scratch, 0);
-out_close:
- kernel_context_close(ctx);
return err;
}
@@ -2838,16 +3072,54 @@ static int bond_virtual_engine(struct intel_gt *gt,
#define BOND_SCHEDULE BIT(0)
{
struct intel_engine_cs *master;
- struct i915_gem_context *ctx;
struct i915_request *rq[16];
enum intel_engine_id id;
+ struct igt_spinner spin;
unsigned long n;
int err;
+ /*
+ * A set of bonded requests is intended to be run concurrently
+ * across a number of engines. We use one request per-engine
+ * and a magic fence to schedule each of the bonded requests
+ * at the same time. A consequence of our current scheduler is that
+ * we only move requests to the HW ready queue when the request
+ * becomes ready, that is when all of its prerequisite fences have
+ * been signaled. As one of those fences is the master submit fence,
+ * there is a delay on all secondary fences as the HW may be
+ * currently busy. Equally, as all the requests are independent,
+ * they may have other fences that delay individual request
+ * submission to HW. Ergo, we do not guarantee that all requests are
+ * immediately submitted to HW at the same time, just that if the
+ * rules are abided by, they are ready at the same time as the
+ * first is submitted. Userspace can embed semaphores in its batch
+ * to ensure parallel execution of its phases as it requires.
+ * Though naturally it has been suggested that perhaps the scheduler should
+ * take care of parallel execution, even across preemption events on
+ * different HW. (The proper answer is of course "lalalala".)
+ *
+ * With the submit-fence, we have identified three possible phases
+ * of synchronisation depending on the master fence: queued (not
+ * ready), executing, and signaled. The first two are quite simple
+ * and checked below. However, the signaled master fence handling is
+ * contentious. Currently we do not distinguish between a signaled
+ * fence and an expired fence, as once signaled it does not convey
+ * any information about the previous execution. It may even be freed
+ * and hence, by the time we check, it may not exist at all. Ergo we currently
+ * do not apply the bonding constraint for an already signaled fence,
+ * as our expectation is that it should not constrain the secondaries
+ * and is outside of the scope of the bonded request API (i.e. all
+ * userspace requests are meant to be running in parallel). As
+ * it imposes no constraint, and is effectively a no-op, we do not
+ * check below as normal execution flows are checked extensively above.
+ *
+ * XXX Is the degenerate handling of signaled submit fences the
+ * expected behaviour for userspace?
+ */
+
GEM_BUG_ON(nsibling >= ARRAY_SIZE(rq) - 1);
- ctx = kernel_context(gt->i915);
- if (!ctx)
+ if (igt_spinner_init(&spin, gt))
return -ENOMEM;
err = 0;
@@ -2860,7 +3132,9 @@ static int bond_virtual_engine(struct intel_gt *gt,
memset_p((void *)rq, ERR_PTR(-EINVAL), ARRAY_SIZE(rq));
- rq[0] = igt_request_alloc(ctx, master);
+ rq[0] = igt_spinner_create_request(&spin,
+ master->kernel_context,
+ MI_NOOP);
if (IS_ERR(rq[0])) {
err = PTR_ERR(rq[0]);
goto out;
@@ -2873,16 +3147,21 @@ static int bond_virtual_engine(struct intel_gt *gt,
&fence,
GFP_KERNEL);
}
+
i915_request_add(rq[0]);
if (err < 0)
goto out;
+ if (!(flags & BOND_SCHEDULE) &&
+ !igt_wait_for_spinner(&spin, rq[0])) {
+ err = -EIO;
+ goto out;
+ }
+
for (n = 0; n < nsibling; n++) {
struct intel_context *ve;
- ve = intel_execlists_create_virtual(ctx,
- siblings,
- nsibling);
+ ve = intel_execlists_create_virtual(siblings, nsibling);
if (IS_ERR(ve)) {
err = PTR_ERR(ve);
onstack_fence_fini(&fence);
@@ -2924,6 +3203,8 @@ static int bond_virtual_engine(struct intel_gt *gt,
}
}
onstack_fence_fini(&fence);
+ intel_engine_flush_submission(master);
+ igt_spinner_end(&spin);
if (i915_request_wait(rq[0], 0, HZ / 10) < 0) {
pr_err("Master request did not execute (on %s)!\n",
@@ -2960,7 +3241,7 @@ out:
if (igt_flush_test(gt->i915))
err = -EIO;
- kernel_context_close(ctx);
+ igt_spinner_fini(&spin);
return err;
}
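As a toy model of the three master-fence phases discussed in the comment above -- names invented for the sketch; the driver gates secondaries with submit fences, not a helper like this:

	#include <stdbool.h>
	#include <stdio.h>

	enum master_phase { MASTER_QUEUED, MASTER_EXECUTING, MASTER_SIGNALED };

	/*
	 * A bonded secondary becomes ready once the master reaches the HW;
	 * an already signaled master imposes no constraint at all, the
	 * degenerate case the test deliberately leaves unchecked.
	 */
	static bool secondary_ready(enum master_phase phase)
	{
		return phase != MASTER_QUEUED;
	}

	int main(void)
	{
		printf("queued=%d executing=%d signaled=%d\n",
		       secondary_ready(MASTER_QUEUED),
		       secondary_ready(MASTER_EXECUTING),
		       secondary_ready(MASTER_SIGNALED));
		return 0;
	}
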
@@ -3028,6 +3309,7 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_suppress_self_preempt),
SUBTEST(live_suppress_wait_preempt),
SUBTEST(live_chain_preempt),
+ SUBTEST(live_preempt_gang),
SUBTEST(live_preempt_hang),
SUBTEST(live_preempt_timeout),
SUBTEST(live_preempt_smoke),
@@ -3207,12 +3489,12 @@ static int live_lrc_fixed(void *arg)
} tbl[] = {
{
i915_mmio_reg_offset(RING_START(engine->mmio_base)),
- CTX_RING_BUFFER_START - 1,
+ CTX_RING_START - 1,
"RING_START"
},
{
i915_mmio_reg_offset(RING_CTL(engine->mmio_base)),
- CTX_RING_BUFFER_CONTROL - 1,
+ CTX_RING_CTL - 1,
"RING_CTL"
},
{
@@ -3231,7 +3513,7 @@ static int live_lrc_fixed(void *arg)
"RING_MI_MODE"
},
{
- engine->mmio_base + 0x110,
+ i915_mmio_reg_offset(RING_BBSTATE(engine->mmio_base)),
CTX_BB_STATE - 1,
"BB_STATE"
},
@@ -3270,8 +3552,7 @@ static int live_lrc_fixed(void *arg)
return err;
}
-static int __live_lrc_state(struct i915_gem_context *fixme,
- struct intel_engine_cs *engine,
+static int __live_lrc_state(struct intel_engine_cs *engine,
struct i915_vma *scratch)
{
struct intel_context *ce;
@@ -3286,7 +3567,7 @@ static int __live_lrc_state(struct i915_gem_context *fixme,
int err;
int n;
- ce = intel_context_create(fixme, engine);
+ ce = intel_context_create(engine);
if (IS_ERR(ce))
return PTR_ERR(ce);
@@ -3360,7 +3641,6 @@ static int live_lrc_state(void *arg)
{
struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
- struct i915_gem_context *fixme;
struct i915_vma *scratch;
enum intel_engine_id id;
int err = 0;
@@ -3370,18 +3650,12 @@ static int live_lrc_state(void *arg)
* intel_context.
*/
- fixme = kernel_context(gt->i915);
- if (!fixme)
- return -ENOMEM;
-
scratch = create_scratch(gt);
- if (IS_ERR(scratch)) {
- err = PTR_ERR(scratch);
- goto out_close;
- }
+ if (IS_ERR(scratch))
+ return PTR_ERR(scratch);
for_each_engine(engine, gt, id) {
- err = __live_lrc_state(fixme, engine, scratch);
+ err = __live_lrc_state(engine, scratch);
if (err)
break;
}
@@ -3390,8 +3664,6 @@ static int live_lrc_state(void *arg)
err = -EIO;
i915_vma_unpin_and_release(&scratch, 0);
-out_close:
- kernel_context_close(fixme);
return err;
}
@@ -3401,7 +3673,7 @@ static int gpr_make_dirty(struct intel_engine_cs *engine)
u32 *cs;
int n;
- rq = i915_request_create(engine->kernel_context);
+ rq = intel_engine_create_kernel_request(engine);
if (IS_ERR(rq))
return PTR_ERR(rq);
@@ -3424,8 +3696,7 @@ static int gpr_make_dirty(struct intel_engine_cs *engine)
return 0;
}
-static int __live_gpr_clear(struct i915_gem_context *fixme,
- struct intel_engine_cs *engine,
+static int __live_gpr_clear(struct intel_engine_cs *engine,
struct i915_vma *scratch)
{
struct intel_context *ce;
@@ -3441,7 +3712,7 @@ static int __live_gpr_clear(struct i915_gem_context *fixme,
if (err)
return err;
- ce = intel_context_create(fixme, engine);
+ ce = intel_context_create(engine);
if (IS_ERR(ce))
return PTR_ERR(ce);
@@ -3503,7 +3774,6 @@ static int live_gpr_clear(void *arg)
{
struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
- struct i915_gem_context *fixme;
struct i915_vma *scratch;
enum intel_engine_id id;
int err = 0;
@@ -3513,18 +3783,12 @@ static int live_gpr_clear(void *arg)
* to avoid leaking any information from previous contexts.
*/
- fixme = kernel_context(gt->i915);
- if (!fixme)
- return -ENOMEM;
-
scratch = create_scratch(gt);
- if (IS_ERR(scratch)) {
- err = PTR_ERR(scratch);
- goto out_close;
- }
+ if (IS_ERR(scratch))
+ return PTR_ERR(scratch);
for_each_engine(engine, gt, id) {
- err = __live_gpr_clear(fixme, engine, scratch);
+ err = __live_gpr_clear(engine, scratch);
if (err)
break;
}
@@ -3533,8 +3797,6 @@ static int live_gpr_clear(void *arg)
err = -EIO;
i915_vma_unpin_and_release(&scratch, 0);
-out_close:
- kernel_context_close(fixme);
return err;
}
diff --git a/drivers/gpu/drm/i915/gt/selftest_mocs.c b/drivers/gpu/drm/i915/gt/selftest_mocs.c
new file mode 100644
index 000000000000..de1f83100fb6
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_mocs.c
@@ -0,0 +1,419 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "gt/intel_engine_pm.h"
+#include "i915_selftest.h"
+
+#include "gem/selftests/mock_context.h"
+#include "selftests/igt_reset.h"
+#include "selftests/igt_spinner.h"
+
+struct live_mocs {
+ struct drm_i915_mocs_table table;
+ struct i915_vma *scratch;
+ void *vaddr;
+};
+
+static int request_add_sync(struct i915_request *rq, int err)
+{
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (i915_request_wait(rq, 0, HZ / 5) < 0)
+ err = -ETIME;
+ i915_request_put(rq);
+
+ return err;
+}
+
+static int request_add_spin(struct i915_request *rq, struct igt_spinner *spin)
+{
+ int err = 0;
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (spin && !igt_wait_for_spinner(spin, rq))
+ err = -ETIME;
+ i915_request_put(rq);
+
+ return err;
+}
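Usage of the two helpers, as the tests below employ them (a sketch: ce and spin stand in for an already initialised context and spinner):

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq))
		return PTR_ERR(rq);
	/* ... emit commands into rq ... */
	err = request_add_sync(rq, err);	/* add, wait for completion, drop the ref */

	rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
	if (IS_ERR(rq))
		return PTR_ERR(rq);
	err = request_add_spin(rq, &spin);	/* add, wait for the spinner to start */
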
+
+static struct i915_vma *create_scratch(struct intel_gt *gt)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ int err;
+
+ obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ i915_gem_object_set_cache_coherency(obj, I915_CACHING_CACHED);
+
+ vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
+ if (IS_ERR(vma)) {
+ i915_gem_object_put(obj);
+ return vma;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
+ if (err) {
+ i915_gem_object_put(obj);
+ return ERR_PTR(err);
+ }
+
+ return vma;
+}
+
+static int live_mocs_init(struct live_mocs *arg, struct intel_gt *gt)
+{
+ int err;
+
+ if (!get_mocs_settings(gt->i915, &arg->table))
+ return -EINVAL;
+
+ arg->scratch = create_scratch(gt);
+ if (IS_ERR(arg->scratch))
+ return PTR_ERR(arg->scratch);
+
+ arg->vaddr = i915_gem_object_pin_map(arg->scratch->obj, I915_MAP_WB);
+ if (IS_ERR(arg->vaddr)) {
+ err = PTR_ERR(arg->vaddr);
+ goto err_scratch;
+ }
+
+ return 0;
+
+err_scratch:
+ i915_vma_unpin_and_release(&arg->scratch, 0);
+ return err;
+}
+
+static void live_mocs_fini(struct live_mocs *arg)
+{
+ i915_vma_unpin_and_release(&arg->scratch, I915_VMA_RELEASE_MAP);
+}
+
+static int read_regs(struct i915_request *rq,
+ u32 addr, unsigned int count,
+ u32 *offset)
+{
+ unsigned int i;
+ u32 *cs;
+
+ GEM_BUG_ON(!IS_ALIGNED(*offset, sizeof(u32)));
+
+ cs = intel_ring_begin(rq, 4 * count);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ for (i = 0; i < count; i++) {
+ *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
+ *cs++ = addr; /* source register */
+ *cs++ = *offset; /* destination, lower 32 bits of the GGTT address */
+ *cs++ = 0; /* destination, upper 32 bits */
+
+ addr += sizeof(u32);
+ *offset += sizeof(u32);
+ }
+
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+static int read_mocs_table(struct i915_request *rq,
+ const struct drm_i915_mocs_table *table,
+ u32 *offset)
+{
+ u32 addr;
+
+ if (HAS_GLOBAL_MOCS_REGISTERS(rq->i915))
+ addr = global_mocs_offset();
+ else
+ addr = mocs_offset(rq->engine);
+
+ return read_regs(rq, addr, table->n_entries, offset);
+}
+
+static int read_l3cc_table(struct i915_request *rq,
+ const struct drm_i915_mocs_table *table,
+ u32 *offset)
+{
+ u32 addr = i915_mmio_reg_offset(GEN9_LNCFCMOCS(0));
+
+ return read_regs(rq, addr, (table->n_entries + 1) / 2, offset);
+}
+
+static int check_mocs_table(struct intel_engine_cs *engine,
+ const struct drm_i915_mocs_table *table,
+ u32 **vaddr)
+{
+ unsigned int i;
+ u32 expect;
+
+ for_each_mocs(expect, table, i) {
+ if (**vaddr != expect) {
+ pr_err("%s: Invalid MOCS[%d] entry, found %08x, expected %08x\n",
+ engine->name, i, **vaddr, expect);
+ return -EINVAL;
+ }
+ ++*vaddr;
+ }
+
+ return 0;
+}
+
+static bool mcr_range(struct drm_i915_private *i915, u32 offset)
+{
+ /*
+ * Registers in this range are affected by the MCR selector
+ * which only controls CPU initiated MMIO. Routing does not
+ * work for CS access so we cannot verify them on this path.
+ */
+ return INTEL_GEN(i915) >= 8 && offset >= 0xb000 && offset <= 0xb4ff;
+}
+
+static int check_l3cc_table(struct intel_engine_cs *engine,
+ const struct drm_i915_mocs_table *table,
+ u32 **vaddr)
+{
+ /* Can we read the MCR range 0xb000 directly? See intel_workarounds! */
+ u32 reg = i915_mmio_reg_offset(GEN9_LNCFCMOCS(0));
+ unsigned int i;
+ u32 expect;
+
+ for_each_l3cc(expect, table, i) {
+ if (!mcr_range(engine->i915, reg) && **vaddr != expect) {
+ pr_err("%s: Invalid L3CC[%d] entry, found %08x, expected %08x\n",
+ engine->name, i, **vaddr, expect);
+ return -EINVAL;
+ }
+ ++*vaddr;
+ reg += 4;
+ }
+
+ return 0;
+}
+
+static int check_mocs_engine(struct live_mocs *arg,
+ struct intel_context *ce)
+{
+ struct i915_vma *vma = arg->scratch;
+ struct i915_request *rq;
+ u32 offset;
+ u32 *vaddr;
+ int err;
+
+ memset32(arg->vaddr, STACK_MAGIC, PAGE_SIZE / sizeof(u32));
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ i915_vma_lock(vma);
+ err = i915_request_await_object(rq, vma->obj, true);
+ if (!err)
+ err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+ i915_vma_unlock(vma);
+
+ /* Read the mocs tables back using SRM */
+ offset = i915_ggtt_offset(vma);
+ if (!err)
+ err = read_mocs_table(rq, &arg->table, &offset);
+ if (!err && ce->engine->class == RENDER_CLASS)
+ err = read_l3cc_table(rq, &arg->table, &offset);
+ offset -= i915_ggtt_offset(vma);
+ GEM_BUG_ON(offset > PAGE_SIZE);
+
+ err = request_add_sync(rq, err);
+ if (err)
+ return err;
+
+ /* Compare the results against the expected tables */
+ vaddr = arg->vaddr;
+ if (!err)
+ err = check_mocs_table(ce->engine, &arg->table, &vaddr);
+ if (!err && ce->engine->class == RENDER_CLASS)
+ err = check_l3cc_table(ce->engine, &arg->table, &vaddr);
+ if (err)
+ return err;
+
+ GEM_BUG_ON(arg->vaddr + offset != vaddr);
+ return 0;
+}
+
+static int live_mocs_kernel(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct live_mocs mocs;
+ int err;
+
+ /* Basic check that the system is configured with the expected mocs table */
+
+ err = live_mocs_init(&mocs, gt);
+ if (err)
+ return err;
+
+ for_each_engine(engine, gt, id) {
+ intel_engine_pm_get(engine);
+ err = check_mocs_engine(&mocs, engine->kernel_context);
+ intel_engine_pm_put(engine);
+ if (err)
+ break;
+ }
+
+ live_mocs_fini(&mocs);
+ return err;
+}
+
+static int live_mocs_clean(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct live_mocs mocs;
+ int err;
+
+ /* Every new context should see the same mocs table */
+
+ err = live_mocs_init(&mocs, gt);
+ if (err)
+ return err;
+
+ for_each_engine(engine, gt, id) {
+ struct intel_context *ce;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ break;
+ }
+
+ err = check_mocs_engine(&mocs, ce);
+ intel_context_put(ce);
+ if (err)
+ break;
+ }
+
+ live_mocs_fini(&mocs);
+ return err;
+}
+
+static int active_engine_reset(struct intel_context *ce,
+ const char *reason)
+{
+ struct igt_spinner spin;
+ struct i915_request *rq;
+ int err;
+
+ err = igt_spinner_init(&spin, ce->engine->gt);
+ if (err)
+ return err;
+
+ rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
+ if (IS_ERR(rq)) {
+ igt_spinner_fini(&spin);
+ return PTR_ERR(rq);
+ }
+
+ err = request_add_spin(rq, &spin);
+ if (err == 0)
+ err = intel_engine_reset(ce->engine, reason);
+
+ igt_spinner_end(&spin);
+ igt_spinner_fini(&spin);
+
+ return err;
+}
+
+static int __live_mocs_reset(struct live_mocs *mocs,
+ struct intel_context *ce)
+{
+ int err;
+
+ err = intel_engine_reset(ce->engine, "mocs");
+ if (err)
+ return err;
+
+ err = check_mocs_engine(mocs, ce);
+ if (err)
+ return err;
+
+ err = active_engine_reset(ce, "mocs");
+ if (err)
+ return err;
+
+ err = check_mocs_engine(mocs, ce);
+ if (err)
+ return err;
+
+ intel_gt_reset(ce->engine->gt, ce->engine->mask, "mocs");
+
+ err = check_mocs_engine(mocs, ce);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int live_mocs_reset(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct live_mocs mocs;
+ int err = 0;
+
+ /* Check the mocs setup is retained over per-engine and global resets */
+
+ if (!intel_has_reset_engine(gt))
+ return 0;
+
+ err = live_mocs_init(&mocs, gt);
+ if (err)
+ return err;
+
+ igt_global_reset_lock(gt);
+ for_each_engine(engine, gt, id) {
+ struct intel_context *ce;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ break;
+ }
+
+ intel_engine_pm_get(engine);
+ err = __live_mocs_reset(&mocs, ce);
+ intel_engine_pm_put(engine);
+
+ intel_context_put(ce);
+ if (err)
+ break;
+ }
+ igt_global_reset_unlock(gt);
+
+ live_mocs_fini(&mocs);
+ return err;
+}
+
+int intel_mocs_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(live_mocs_kernel),
+ SUBTEST(live_mocs_clean),
+ SUBTEST(live_mocs_reset),
+ };
+ struct drm_i915_mocs_table table;
+
+ if (!get_mocs_settings(i915, &table))
+ return 0;
+
+ return intel_gt_live_subtests(tests, &i915->gt);
+}
diff --git a/drivers/gpu/drm/i915/gt/selftest_rc6.c b/drivers/gpu/drm/i915/gt/selftest_rc6.c
new file mode 100644
index 000000000000..8cc55a0e9e06
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_rc6.c
@@ -0,0 +1,203 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "intel_context.h"
+#include "intel_engine_pm.h"
+#include "intel_gt_requests.h"
+#include "intel_ring.h"
+#include "selftest_rc6.h"
+
+#include "selftests/i915_random.h"
+
+int live_rc6_manual(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_rc6 *rc6 = &gt->rc6;
+ intel_wakeref_t wakeref;
+ u64 res[2];
+ int err = 0;
+
+ /*
+ * Our claim is that we can "encourage" the GPU to enter rc6 at will.
+ * Let's try it!
+ */
+
+ if (!rc6->enabled)
+ return 0;
+
+ /* bsw/byt use a PCU and decouple RC6 from our manual control */
+ if (IS_VALLEYVIEW(gt->i915) || IS_CHERRYVIEW(gt->i915))
+ return 0;
+
+ wakeref = intel_runtime_pm_get(gt->uncore->rpm);
+
+ /* Force RC6 off for starters */
+ __intel_rc6_disable(rc6);
+ msleep(1); /* wakeup is not immediate, takes about 100us on icl */
+
+ res[0] = intel_rc6_residency_ns(rc6, GEN6_GT_GFX_RC6);
+ msleep(250);
+ res[1] = intel_rc6_residency_ns(rc6, GEN6_GT_GFX_RC6);
+ if ((res[1] - res[0]) >> 10) {
+ pr_err("RC6 residency increased by %lldus while disabled for 250ms!\n",
+ (res[1] - res[0]) >> 10);
+ err = -EINVAL;
+ goto out_unlock;
+ }
+
+ /* Manually enter RC6 */
+ intel_rc6_park(rc6);
+
+ res[0] = intel_rc6_residency_ns(rc6, GEN6_GT_GFX_RC6);
+ msleep(100);
+ res[1] = intel_rc6_residency_ns(rc6, GEN6_GT_GFX_RC6);
+
+ if (res[1] == res[0]) {
+ pr_err("Did not enter RC6! RC6_STATE=%08x, RC6_CONTROL=%08x\n",
+ intel_uncore_read_fw(gt->uncore, GEN6_RC_STATE),
+ intel_uncore_read_fw(gt->uncore, GEN6_RC_CONTROL));
+ err = -EINVAL;
+ }
+
+ /* Restore what should have been the original state! */
+ intel_rc6_unpark(rc6);
+
+out_unlock:
+ intel_runtime_pm_put(gt->uncore->rpm, wakeref);
+ return err;
+}
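The ">> 10" used in live_rc6_manual() above is a cheap nanoseconds-to-microseconds conversion, dividing by 1024 instead of 1000 (roughly 2% low, ample for a selftest):

	u64 delta_ns = res[1] - res[0];
	u64 approx_us = delta_ns >> 10;	/* ns / 1024 ~= us */
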
+
+static const u32 *__live_rc6_ctx(struct intel_context *ce)
+{
+ struct i915_request *rq;
+ const u32 *result;
+ u32 cmd;
+ u32 *cs;
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq))
+ return ERR_CAST(rq);
+
+ cs = intel_ring_begin(rq, 4);
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ return cs;
+ }
+
+ cmd = MI_STORE_REGISTER_MEM | MI_USE_GGTT;
+ if (INTEL_GEN(rq->i915) >= 8)
+ cmd++; /* gen8+ takes a 64b address, one extra dword */
+
+ *cs++ = cmd;
+ *cs++ = i915_mmio_reg_offset(GEN8_RC6_CTX_INFO);
+ *cs++ = ce->timeline->hwsp_offset + 8; /* 2 dwords past the seqno */
+ *cs++ = 0;
+ intel_ring_advance(rq, cs);
+
+ result = rq->hwsp_seqno + 2; /* CPU view of the same dword */
+ i915_request_add(rq);
+
+ return result;
+}
+
+static struct intel_engine_cs **
+randomised_engines(struct intel_gt *gt,
+ struct rnd_state *prng,
+ unsigned int *count)
+{
+ struct intel_engine_cs *engine, **engines;
+ enum intel_engine_id id;
+ int n;
+
+ n = 0;
+ for_each_engine(engine, gt, id)
+ n++;
+ if (!n)
+ return NULL;
+
+ engines = kmalloc_array(n, sizeof(*engines), GFP_KERNEL);
+ if (!engines)
+ return NULL;
+
+ n = 0;
+ for_each_engine(engine, gt, id)
+ engines[n++] = engine;
+
+ i915_prandom_shuffle(engines, sizeof(*engines), n, prng);
+
+ *count = n;
+ return engines;
+}
+
+int live_rc6_ctx_wa(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs **engines;
+ unsigned int n, count;
+ I915_RND_STATE(prng);
+ int err = 0;
+
+ /* A read of CTX_INFO upsets rc6. Poke the bear! */
+ if (INTEL_GEN(gt->i915) < 8)
+ return 0;
+
+ engines = randomised_engines(gt, &prng, &count);
+ if (!engines)
+ return 0;
+
+ for (n = 0; n < count; n++) {
+ struct intel_engine_cs *engine = engines[n];
+ int pass;
+
+ for (pass = 0; pass < 2; pass++) {
+ struct intel_context *ce;
+ unsigned int resets =
+ i915_reset_engine_count(&gt->i915->gpu_error,
+ engine);
+ const u32 *res;
+
+ /* Use a sacrificial context */
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto out;
+ }
+
+ intel_engine_pm_get(engine);
+ res = __live_rc6_ctx(ce);
+ intel_engine_pm_put(engine);
+ intel_context_put(ce);
+ if (IS_ERR(res)) {
+ err = PTR_ERR(res);
+ goto out;
+ }
+
+ if (intel_gt_wait_for_idle(gt, HZ / 5) == -ETIME) {
+ intel_gt_set_wedged(gt);
+ err = -ETIME;
+ goto out;
+ }
+
+ intel_gt_pm_wait_for_idle(gt);
+ pr_debug("%s: CTX_INFO=%0x\n",
+ engine->name, READ_ONCE(*res));
+
+ if (resets !=
+ i915_reset_engine_count(&gt->i915->gpu_error,
+ engine)) {
+ pr_err("%s: GPU reset required\n",
+ engine->name);
+ add_taint_for_CI(TAINT_WARN);
+ err = -EIO;
+ goto out;
+ }
+ }
+ }
+
+out:
+ kfree(engines);
+ return err;
+}
diff --git a/drivers/gpu/drm/i915/gt/selftest_rc6.h b/drivers/gpu/drm/i915/gt/selftest_rc6.h
new file mode 100644
index 000000000000..762fd442d7b2
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_rc6.h
@@ -0,0 +1,13 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef SELFTEST_RC6_H
+#define SELFTEST_RC6_H
+
+int live_rc6_ctx_wa(void *arg);
+int live_rc6_manual(void *arg);
+
+#endif /* SELFTEST_RC6_H */
diff --git a/drivers/gpu/drm/i915/gt/selftest_timeline.c b/drivers/gpu/drm/i915/gt/selftest_timeline.c
index f04a59fe5d2c..e2d78cc22fb4 100644
--- a/drivers/gpu/drm/i915/gt/selftest_timeline.c
+++ b/drivers/gpu/drm/i915/gt/selftest_timeline.c
@@ -458,7 +458,7 @@ tl_write(struct intel_timeline *tl, struct intel_engine_cs *engine, u32 value)
goto out;
}
- rq = i915_request_create(engine->kernel_context);
+ rq = intel_engine_create_kernel_request(engine);
if (IS_ERR(rq))
goto out_unpin;
@@ -675,9 +675,7 @@ static int live_hwsp_wrap(void *arg)
if (!intel_engine_can_store_dword(engine))
continue;
- intel_engine_pm_get(engine);
- rq = i915_request_create(engine->kernel_context);
- intel_engine_pm_put(engine);
+ rq = intel_engine_create_kernel_request(engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto out;
diff --git a/drivers/gpu/drm/i915/gt/selftest_workarounds.c b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
index abce6e4ec9c0..ac1921854cbf 100644
--- a/drivers/gpu/drm/i915/gt/selftest_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
@@ -264,22 +264,15 @@ static int
switch_to_scratch_context(struct intel_engine_cs *engine,
struct igt_spinner *spin)
{
- struct i915_gem_context *ctx;
struct intel_context *ce;
struct i915_request *rq;
int err = 0;
- ctx = kernel_context(engine->i915);
- if (IS_ERR(ctx))
- return PTR_ERR(ctx);
-
- GEM_BUG_ON(i915_gem_context_is_bannable(ctx));
-
- ce = i915_gem_context_get_engine(ctx, engine->legacy_idx);
- GEM_BUG_ON(IS_ERR(ce));
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
rq = igt_spinner_create_request(spin, ce, MI_NOOP);
-
intel_context_put(ce);
if (IS_ERR(rq)) {
@@ -293,7 +286,6 @@ err:
if (err && spin)
igt_spinner_end(spin);
- kernel_context_close(ctx);
return err;
}
@@ -367,20 +359,17 @@ out_ctx:
return err;
}
-static struct i915_vma *create_batch(struct i915_gem_context *ctx)
+static struct i915_vma *create_batch(struct i915_address_space *vm)
{
struct drm_i915_gem_object *obj;
- struct i915_address_space *vm;
struct i915_vma *vma;
int err;
- obj = i915_gem_object_create_internal(ctx->i915, 16 * PAGE_SIZE);
+ obj = i915_gem_object_create_internal(vm->i915, 16 * PAGE_SIZE);
if (IS_ERR(obj))
return ERR_CAST(obj);
- vm = i915_gem_context_get_vm_rcu(ctx);
vma = i915_vma_instance(obj, vm, NULL);
- i915_vm_put(vm);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err_obj;
@@ -452,8 +441,7 @@ static int whitelist_writable_count(struct intel_engine_cs *engine)
return count;
}
-static int check_dirty_whitelist(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine)
+static int check_dirty_whitelist(struct intel_context *ce)
{
const u32 values[] = {
0x00000000,
@@ -481,19 +469,17 @@ static int check_dirty_whitelist(struct i915_gem_context *ctx,
0xffff00ff,
0xffffffff,
};
- struct i915_address_space *vm;
+ struct intel_engine_cs *engine = ce->engine;
struct i915_vma *scratch;
struct i915_vma *batch;
int err = 0, i, v;
u32 *cs, *results;
- vm = i915_gem_context_get_vm_rcu(ctx);
- scratch = create_scratch(vm, 2 * ARRAY_SIZE(values) + 1);
- i915_vm_put(vm);
+ scratch = create_scratch(ce->vm, 2 * ARRAY_SIZE(values) + 1);
if (IS_ERR(scratch))
return PTR_ERR(scratch);
- batch = create_batch(ctx);
+ batch = create_batch(ce->vm);
if (IS_ERR(batch)) {
err = PTR_ERR(batch);
goto out_scratch;
@@ -518,7 +504,7 @@ static int check_dirty_whitelist(struct i915_gem_context *ctx,
srm = MI_STORE_REGISTER_MEM;
lrm = MI_LOAD_REGISTER_MEM;
- if (INTEL_GEN(ctx->i915) >= 8)
+ if (INTEL_GEN(engine->i915) >= 8)
lrm++, srm++;
pr_debug("%s: Writing garbage to %x\n",
@@ -577,7 +563,7 @@ static int check_dirty_whitelist(struct i915_gem_context *ctx,
i915_gem_object_unpin_map(batch->obj);
intel_gt_chipset_flush(engine->gt);
- rq = igt_request_alloc(ctx, engine);
+ rq = intel_context_create_request(ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto out_batch;
@@ -696,7 +682,7 @@ out_unpin:
break;
}
- if (igt_flush_test(ctx->i915))
+ if (igt_flush_test(engine->i915))
err = -EIO;
out_batch:
i915_vma_unpin_and_release(&batch, 0);
@@ -709,38 +695,31 @@ static int live_dirty_whitelist(void *arg)
{
struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
- struct i915_gem_context *ctx;
enum intel_engine_id id;
- struct drm_file *file;
- int err = 0;
/* Can the user write to the whitelisted registers? */
if (INTEL_GEN(gt->i915) < 7) /* minimum requirement for LRI, SRM, LRM */
return 0;
- file = mock_file(gt->i915);
- if (IS_ERR(file))
- return PTR_ERR(file);
-
- ctx = live_context(gt->i915, file);
- if (IS_ERR(ctx)) {
- err = PTR_ERR(ctx);
- goto out_file;
- }
-
for_each_engine(engine, gt, id) {
+ struct intel_context *ce;
+ int err;
+
if (engine->whitelist.count == 0)
continue;
- err = check_dirty_whitelist(ctx, engine);
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ err = check_dirty_whitelist(ce);
+ intel_context_put(ce);
if (err)
- goto out_file;
+ return err;
}
-out_file:
- mock_file_free(gt->i915, file);
- return err;
+ return 0;
}
static int live_reset_whitelist(void *arg)
@@ -830,12 +809,15 @@ err_req:
static int scrub_whitelisted_registers(struct i915_gem_context *ctx,
struct intel_engine_cs *engine)
{
+ struct i915_address_space *vm;
struct i915_request *rq;
struct i915_vma *batch;
int i, err = 0;
u32 *cs;
- batch = create_batch(ctx);
+ vm = i915_gem_context_get_vm_rcu(ctx);
+ batch = create_batch(vm);
+ i915_vm_put(vm);
if (IS_ERR(batch))
return PTR_ERR(batch);
diff --git a/drivers/gpu/drm/i915/gt/selftests/mock_timeline.c b/drivers/gpu/drm/i915/gt/selftests/mock_timeline.c
index 2a77c051f36a..aeb1d1f616e8 100644
--- a/drivers/gpu/drm/i915/gt/selftests/mock_timeline.c
+++ b/drivers/gpu/drm/i915/gt/selftests/mock_timeline.c
@@ -15,7 +15,7 @@ void mock_timeline_init(struct intel_timeline *timeline, u64 context)
mutex_init(&timeline->mutex);
- INIT_ACTIVE_FENCE(&timeline->last_request, &timeline->mutex);
+ INIT_ACTIVE_FENCE(&timeline->last_request);
INIT_LIST_HEAD(&timeline->requests);
i915_syncmap_init(&timeline->sync);
diff --git a/drivers/gpu/drm/i915/gt/selftests/mock_timeline.h b/drivers/gpu/drm/i915/gt/selftests/mock_timeline.h
index 689efc66c908..d2bcc3df6183 100644
--- a/drivers/gpu/drm/i915/gt/selftests/mock_timeline.h
+++ b/drivers/gpu/drm/i915/gt/selftests/mock_timeline.h
@@ -7,6 +7,8 @@
#ifndef __MOCK_TIMELINE__
#define __MOCK_TIMELINE__
+#include <linux/types.h>
+
struct intel_timeline;
void mock_timeline_init(struct intel_timeline *timeline, u64 context);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
index 3ee4a4e7689d..5d00a3b2d914 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
@@ -32,18 +32,17 @@
* just the HuC, but more are expected to land in the future).
*/
-static void gen8_guc_raise_irq(struct intel_guc *guc)
+void intel_guc_notify(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
- intel_uncore_write(gt->uncore, GUC_SEND_INTERRUPT, GUC_SEND_TRIGGER);
-}
-
-static void gen11_guc_raise_irq(struct intel_guc *guc)
-{
- struct intel_gt *gt = guc_to_gt(guc);
-
- intel_uncore_write(gt->uncore, GEN11_GUC_HOST_INTERRUPT, 0);
+ /*
+ * On Gen11+, the value written to the register is passed as a payload
+ * to the FW. However, the FW currently treats all values the same way
+ * (H2G interrupt), so we can just write the value that the HW expects
+ * on older gens.
+ */
+ intel_uncore_write(gt->uncore, guc->notify_reg, GUC_SEND_TRIGGER);
}
static inline i915_reg_t guc_send_reg(struct intel_guc *guc, u32 i)
@@ -177,15 +176,13 @@ void intel_guc_init_early(struct intel_guc *guc)
mutex_init(&guc->send_mutex);
spin_lock_init(&guc->irq_lock);
- guc->send = intel_guc_send_nop;
- guc->handler = intel_guc_to_host_event_handler_nop;
if (INTEL_GEN(i915) >= 11) {
- guc->notify = gen11_guc_raise_irq;
+ guc->notify_reg = GEN11_GUC_HOST_INTERRUPT;
guc->interrupts.reset = gen11_reset_guc_interrupts;
guc->interrupts.enable = gen11_enable_guc_interrupts;
guc->interrupts.disable = gen11_disable_guc_interrupts;
} else {
- guc->notify = gen8_guc_raise_irq;
+ guc->notify_reg = GUC_SEND_INTERRUPT;
guc->interrupts.reset = gen9_reset_guc_interrupts;
guc->interrupts.enable = gen9_enable_guc_interrupts;
guc->interrupts.disable = gen9_disable_guc_interrupts;
@@ -401,18 +398,8 @@ void intel_guc_fini(struct intel_guc *guc)
intel_guc_log_destroy(&guc->log);
intel_uc_fw_fini(&guc->fw);
intel_uc_fw_cleanup_fetch(&guc->fw);
-}
-int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len,
- u32 *response_buf, u32 response_buf_size)
-{
- WARN(1, "Unexpected send: action=%#x\n", *action);
- return -ENODEV;
-}
-
-void intel_guc_to_host_event_handler_nop(struct intel_guc *guc)
-{
- WARN(1, "Unexpected event: no suitable handler\n");
+ intel_uc_fw_change_status(&guc->fw, INTEL_UC_FIRMWARE_DISABLED);
}
/*
@@ -704,3 +691,37 @@ err:
i915_gem_object_put(obj);
return vma;
}
+
+/**
+ * intel_guc_allocate_and_map_vma() - Allocate and map VMA for GuC usage
+ * @guc: the guc
+ * @size: size of area to allocate (both virtual space and memory)
+ * @out_vma: return variable for the allocated vma pointer
+ * @out_vaddr: return variable for the obj mapping
+ *
+ * This wrapper calls intel_guc_allocate_vma() and then maps the allocated
+ * object with I915_MAP_WB.
+ *
+ * Return: 0 if successful, a negative errno code otherwise.
+ */
+int intel_guc_allocate_and_map_vma(struct intel_guc *guc, u32 size,
+ struct i915_vma **out_vma, void **out_vaddr)
+{
+ struct i915_vma *vma;
+ void *vaddr;
+
+ vma = intel_guc_allocate_vma(guc, size);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
+ if (IS_ERR(vaddr)) {
+ i915_vma_unpin_and_release(&vma, 0);
+ return PTR_ERR(vaddr);
+ }
+
+ *out_vma = vma;
+ *out_vaddr = vaddr;
+
+ return 0;
+}
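A usage sketch of the new wrapper, matching how intel_guc_ads_create() and intel_guc_ct_init() consume it; teardown passes I915_VMA_RELEASE_MAP so the pin_map is dropped together with the vma:

	struct i915_vma *vma;
	void *vaddr;
	int err;

	err = intel_guc_allocate_and_map_vma(guc, PAGE_SIZE, &vma, &vaddr);
	if (err)
		return err;

	/* ... fill vaddr, hand the vma's GGTT offset to the GuC ... */

	i915_vma_unpin_and_release(&vma, I915_VMA_RELEASE_MAP);
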
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
index e6400204a2bd..910d49590068 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
@@ -20,8 +20,8 @@ struct __guc_ads_blob;
/*
* Top level structure of GuC. It handles firmware loading and manages client
- * pool and doorbells. intel_guc owns a intel_guc_client to replace the legacy
- * ExecList submission.
+ * pool. intel_guc owns an intel_guc_client to replace the legacy ExecList
+ * submission.
*/
struct intel_guc {
struct intel_uc_fw fw;
@@ -46,13 +46,13 @@ struct intel_guc {
struct i915_vma *stage_desc_pool;
void *stage_desc_pool_vaddr;
- struct ida stage_ids;
- struct intel_guc_client *execbuf_client;
+ struct i915_vma *workqueue;
+ void *workqueue_vaddr;
+ spinlock_t wq_lock;
- DECLARE_BITMAP(doorbell_bitmap, GUC_NUM_DOORBELLS);
- /* Cyclic counter mod pagesize */
- u32 db_cacheline;
+ struct i915_vma *proc_desc;
+ void *proc_desc_vaddr;
/* Control params for fw initialization */
u32 params[GUC_CTL_MAX_DWORDS];
@@ -64,44 +64,33 @@ struct intel_guc {
enum forcewake_domains fw_domains;
} send_regs;
+ /* register used to send interrupts to the GuC FW */
+ i915_reg_t notify_reg;
+
/* Store msg (e.g. log flush) that we see while CTBs are disabled */
u32 mmio_msg;
/* To serialize the intel_guc_send actions */
struct mutex send_mutex;
-
- /* GuC's FW specific send function */
- int (*send)(struct intel_guc *guc, const u32 *data, u32 len,
- u32 *response_buf, u32 response_buf_size);
-
- /* GuC's FW specific event handler function */
- void (*handler)(struct intel_guc *guc);
-
- /* GuC's FW specific notify function */
- void (*notify)(struct intel_guc *guc);
};
static
inline int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len)
{
- return guc->send(guc, action, len, NULL, 0);
+ return intel_guc_ct_send(&guc->ct, action, len, NULL, 0);
}
static inline int
intel_guc_send_and_receive(struct intel_guc *guc, const u32 *action, u32 len,
u32 *response_buf, u32 response_buf_size)
{
- return guc->send(guc, action, len, response_buf, response_buf_size);
-}
-
-static inline void intel_guc_notify(struct intel_guc *guc)
-{
- guc->notify(guc);
+ return intel_guc_ct_send(&guc->ct, action, len,
+ response_buf, response_buf_size);
}
static inline void intel_guc_to_host_event_handler(struct intel_guc *guc)
{
- guc->handler(guc);
+ intel_guc_ct_event_handler(&guc->ct);
}
/* GuC addresses above GUC_GGTT_TOP also don't map through the GTT */
@@ -136,12 +125,9 @@ void intel_guc_init_send_regs(struct intel_guc *guc);
void intel_guc_write_params(struct intel_guc *guc);
int intel_guc_init(struct intel_guc *guc);
void intel_guc_fini(struct intel_guc *guc);
-int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len,
- u32 *response_buf, u32 response_buf_size);
+void intel_guc_notify(struct intel_guc *guc);
int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
u32 *response_buf, u32 response_buf_size);
-void intel_guc_to_host_event_handler(struct intel_guc *guc);
-void intel_guc_to_host_event_handler_nop(struct intel_guc *guc);
int intel_guc_to_host_process_recv_msg(struct intel_guc *guc,
const u32 *payload, u32 len);
int intel_guc_sample_forcewake(struct intel_guc *guc);
@@ -149,6 +135,8 @@ int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset);
int intel_guc_suspend(struct intel_guc *guc);
int intel_guc_resume(struct intel_guc *guc);
struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size);
+int intel_guc_allocate_and_map_vma(struct intel_guc *guc, u32 size,
+ struct i915_vma **out_vma, void **out_vaddr);
static inline bool intel_guc_is_supported(struct intel_guc *guc)
{
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
index ca6674b8e00c..101728006ae9 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
@@ -93,7 +93,8 @@ static void __guc_ads_init(struct intel_guc *guc)
*/
blob->ads.golden_context_lrca[engine_class] = 0;
blob->ads.eng_state_size[engine_class] =
- intel_engine_context_size(dev_priv, engine_class) -
+ intel_engine_context_size(guc_to_gt(guc),
+ engine_class) -
skipped_size;
}
@@ -135,32 +136,19 @@ static void __guc_ads_init(struct intel_guc *guc)
int intel_guc_ads_create(struct intel_guc *guc)
{
const u32 size = PAGE_ALIGN(sizeof(struct __guc_ads_blob));
- struct i915_vma *vma;
- void *blob;
int ret;
GEM_BUG_ON(guc->ads_vma);
- vma = intel_guc_allocate_vma(guc, size);
- if (IS_ERR(vma))
- return PTR_ERR(vma);
+ ret = intel_guc_allocate_and_map_vma(guc, size, &guc->ads_vma,
+ (void **)&guc->ads_blob);
- blob = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
- if (IS_ERR(blob)) {
- ret = PTR_ERR(blob);
- goto err_vma;
- }
-
- guc->ads_vma = vma;
- guc->ads_blob = blob;
+ if (ret)
+ return ret;
__guc_ads_init(guc);
return 0;
-
-err_vma:
- i915_vma_unpin_and_release(&guc->ads_vma, 0);
- return ret;
}
void intel_guc_ads_destroy(struct intel_guc *guc)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
index b49115517510..c6f971a049f9 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
@@ -37,13 +37,10 @@ static void ct_incoming_request_worker_func(struct work_struct *w);
*/
void intel_guc_ct_init_early(struct intel_guc_ct *ct)
{
- /* we're using static channel owners */
- ct->host_channel.owner = CTB_OWNER_HOST;
-
- spin_lock_init(&ct->lock);
- INIT_LIST_HEAD(&ct->pending_requests);
- INIT_LIST_HEAD(&ct->incoming_requests);
- INIT_WORK(&ct->worker, ct_incoming_request_worker_func);
+ spin_lock_init(&ct->requests.lock);
+ INIT_LIST_HEAD(&ct->requests.pending);
+ INIT_LIST_HEAD(&ct->requests.incoming);
+ INIT_WORK(&ct->requests.worker, ct_incoming_request_worker_func);
}
static inline struct intel_guc *ct_to_guc(struct intel_guc_ct *ct)
@@ -64,14 +61,13 @@ static inline const char *guc_ct_buffer_type_to_str(u32 type)
}
static void guc_ct_buffer_desc_init(struct guc_ct_buffer_desc *desc,
- u32 cmds_addr, u32 size, u32 owner)
+ u32 cmds_addr, u32 size)
{
- CT_DEBUG_DRIVER("CT: desc %p init addr=%#x size=%u owner=%u\n",
- desc, cmds_addr, size, owner);
+ CT_DEBUG_DRIVER("CT: init addr=%#x size=%u\n", cmds_addr, size);
memset(desc, 0, sizeof(*desc));
desc->addr = cmds_addr;
desc->size = size;
- desc->owner = owner;
+ desc->owner = CTB_OWNER_HOST;
}
static void guc_ct_buffer_desc_reset(struct guc_ct_buffer_desc *desc)
@@ -104,12 +100,11 @@ static int guc_action_register_ct_buffer(struct intel_guc *guc,
}
static int guc_action_deregister_ct_buffer(struct intel_guc *guc,
- u32 owner,
u32 type)
{
u32 action[] = {
INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER,
- owner,
+ CTB_OWNER_HOST,
type
};
int err;
@@ -117,20 +112,27 @@ static int guc_action_deregister_ct_buffer(struct intel_guc *guc,
/* Can't use generic send(), CT deregistration must go over MMIO */
err = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);
if (err)
- DRM_ERROR("CT: deregister %s buffer failed; owner=%d err=%d\n",
- guc_ct_buffer_type_to_str(type), owner, err);
+ DRM_ERROR("CT: deregister %s buffer failed; err=%d\n",
+ guc_ct_buffer_type_to_str(type), err);
return err;
}
-static int ctch_init(struct intel_guc *guc,
- struct intel_guc_ct_channel *ctch)
+/**
+ * intel_guc_ct_init - Init buffer-based communication
+ * @ct: pointer to CT struct
+ *
+ * Allocate memory required for buffer-based communication.
+ *
+ * Return: 0 on success, a negative errno code on failure.
+ */
+int intel_guc_ct_init(struct intel_guc_ct *ct)
{
- struct i915_vma *vma;
+ struct intel_guc *guc = ct_to_guc(ct);
void *blob;
int err;
int i;
- GEM_BUG_ON(ctch->vma);
+ GEM_BUG_ON(ct->vma);
/* We allocate 1 page to hold both descriptors and both buffers.
* ___________.....................
@@ -154,71 +156,65 @@ static int ctch_init(struct intel_guc *guc,
* other code will need updating as well.
*/
- /* allocate vma */
- vma = intel_guc_allocate_vma(guc, PAGE_SIZE);
- if (IS_ERR(vma)) {
- err = PTR_ERR(vma);
- goto err_out;
+ err = intel_guc_allocate_and_map_vma(guc, PAGE_SIZE, &ct->vma, &blob);
+ if (err) {
+ DRM_ERROR("CT: channel allocation failed; err=%d\n", err);
+ return err;
}
- ctch->vma = vma;
- /* map first page */
- blob = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
- if (IS_ERR(blob)) {
- err = PTR_ERR(blob);
- goto err_vma;
- }
CT_DEBUG_DRIVER("CT: vma base=%#x\n",
- intel_guc_ggtt_offset(guc, ctch->vma));
+ intel_guc_ggtt_offset(guc, ct->vma));
/* store pointers to desc and cmds */
- for (i = 0; i < ARRAY_SIZE(ctch->ctbs); i++) {
- GEM_BUG_ON((i != CTB_SEND) && (i != CTB_RECV));
- ctch->ctbs[i].desc = blob + PAGE_SIZE/4 * i;
- ctch->ctbs[i].cmds = blob + PAGE_SIZE/4 * i + PAGE_SIZE/2;
+ for (i = 0; i < ARRAY_SIZE(ct->ctbs); i++) {
+ GEM_BUG_ON((i != CTB_SEND) && (i != CTB_RECV));
+ ct->ctbs[i].desc = blob + PAGE_SIZE/4 * i;
+ ct->ctbs[i].cmds = blob + PAGE_SIZE/4 * i + PAGE_SIZE/2;
}
return 0;
-
-err_vma:
- i915_vma_unpin_and_release(&ctch->vma, 0);
-err_out:
- CT_DEBUG_DRIVER("CT: channel %d initialization failed; err=%d\n",
- ctch->owner, err);
- return err;
}
-static void ctch_fini(struct intel_guc *guc,
- struct intel_guc_ct_channel *ctch)
+/**
+ * intel_guc_ct_fini - Fini buffer-based communication
+ * @ct: pointer to CT struct
+ *
+ * Deallocate memory required for buffer-based communication.
+ */
+void intel_guc_ct_fini(struct intel_guc_ct *ct)
{
- GEM_BUG_ON(ctch->enabled);
+ GEM_BUG_ON(ct->enabled);
- i915_vma_unpin_and_release(&ctch->vma, I915_VMA_RELEASE_MAP);
+ i915_vma_unpin_and_release(&ct->vma, I915_VMA_RELEASE_MAP);
}
-static int ctch_enable(struct intel_guc *guc,
- struct intel_guc_ct_channel *ctch)
+/**
+ * intel_guc_ct_enable - Enable buffer based command transport.
+ * @ct: pointer to CT struct
+ *
+ * Return: 0 on success, a negative errno code on failure.
+ */
+int intel_guc_ct_enable(struct intel_guc_ct *ct)
{
+ struct intel_guc *guc = ct_to_guc(ct);
u32 base;
int err;
int i;
- GEM_BUG_ON(!ctch->vma);
-
- GEM_BUG_ON(ctch->enabled);
+ GEM_BUG_ON(ct->enabled);
/* vma should be already allocated and mapped */
- base = intel_guc_ggtt_offset(guc, ctch->vma);
+ GEM_BUG_ON(!ct->vma);
+ base = intel_guc_ggtt_offset(guc, ct->vma);
/* (re)initialize descriptors
* cmds buffers are in the second half of the blob page
*/
- for (i = 0; i < ARRAY_SIZE(ctch->ctbs); i++) {
+ for (i = 0; i < ARRAY_SIZE(ct->ctbs); i++) {
GEM_BUG_ON((i != CTB_SEND) && (i != CTB_RECV));
- guc_ct_buffer_desc_init(ctch->ctbs[i].desc,
+ guc_ct_buffer_desc_init(ct->ctbs[i].desc,
base + PAGE_SIZE/4 * i + PAGE_SIZE/2,
- PAGE_SIZE/4,
- ctch->owner);
+ PAGE_SIZE/4);
}
/* register buffers, starting with RECV buffer
@@ -236,38 +232,42 @@ static int ctch_enable(struct intel_guc *guc,
if (unlikely(err))
goto err_deregister;
- ctch->enabled = true;
+ ct->enabled = true;
return 0;
err_deregister:
guc_action_deregister_ct_buffer(guc,
- ctch->owner,
INTEL_GUC_CT_BUFFER_TYPE_RECV);
err_out:
- DRM_ERROR("CT: can't open channel %d; err=%d\n", ctch->owner, err);
+ DRM_ERROR("CT: can't open channel; err=%d\n", err);
return err;
}
-static void ctch_disable(struct intel_guc *guc,
- struct intel_guc_ct_channel *ctch)
+/**
+ * intel_guc_ct_disable - Disable buffer based command transport.
+ * @ct: pointer to CT struct
+ */
+void intel_guc_ct_disable(struct intel_guc_ct *ct)
{
- GEM_BUG_ON(!ctch->enabled);
+ struct intel_guc *guc = ct_to_guc(ct);
- ctch->enabled = false;
+ GEM_BUG_ON(!ct->enabled);
- guc_action_deregister_ct_buffer(guc,
- ctch->owner,
- INTEL_GUC_CT_BUFFER_TYPE_SEND);
- guc_action_deregister_ct_buffer(guc,
- ctch->owner,
- INTEL_GUC_CT_BUFFER_TYPE_RECV);
+ ct->enabled = false;
+
+ if (intel_guc_is_running(guc)) {
+ guc_action_deregister_ct_buffer(guc,
+ INTEL_GUC_CT_BUFFER_TYPE_SEND);
+ guc_action_deregister_ct_buffer(guc,
+ INTEL_GUC_CT_BUFFER_TYPE_RECV);
+ }
}
-static u32 ctch_get_next_fence(struct intel_guc_ct_channel *ctch)
+static u32 ct_get_next_fence(struct intel_guc_ct *ct)
{
/* For now it's trivial */
- return ++ctch->next_fence;
+ return ++ct->requests.next_fence;
}
/**
@@ -440,35 +440,34 @@ static int wait_for_ct_request_update(struct ct_request *req, u32 *status)
return err;
}
-static int ctch_send(struct intel_guc_ct *ct,
- struct intel_guc_ct_channel *ctch,
- const u32 *action,
- u32 len,
- u32 *response_buf,
- u32 response_buf_size,
- u32 *status)
+static int ct_send(struct intel_guc_ct *ct,
+ const u32 *action,
+ u32 len,
+ u32 *response_buf,
+ u32 response_buf_size,
+ u32 *status)
{
- struct intel_guc_ct_buffer *ctb = &ctch->ctbs[CTB_SEND];
+ struct intel_guc_ct_buffer *ctb = &ct->ctbs[CTB_SEND];
struct guc_ct_buffer_desc *desc = ctb->desc;
struct ct_request request;
unsigned long flags;
u32 fence;
int err;
- GEM_BUG_ON(!ctch->enabled);
+ GEM_BUG_ON(!ct->enabled);
GEM_BUG_ON(!len);
GEM_BUG_ON(len & ~GUC_CT_MSG_LEN_MASK);
GEM_BUG_ON(!response_buf && response_buf_size);
- fence = ctch_get_next_fence(ctch);
+ fence = ct_get_next_fence(ct);
request.fence = fence;
request.status = 0;
request.response_len = response_buf_size;
request.response_buf = response_buf;
- spin_lock_irqsave(&ct->lock, flags);
- list_add_tail(&request.link, &ct->pending_requests);
- spin_unlock_irqrestore(&ct->lock, flags);
+ spin_lock_irqsave(&ct->requests.lock, flags);
+ list_add_tail(&request.link, &ct->requests.pending);
+ spin_unlock_irqrestore(&ct->requests.lock, flags);
err = ctb_write(ctb, action, len, fence, !!response_buf);
if (unlikely(err))
@@ -501,9 +500,9 @@ static int ctch_send(struct intel_guc_ct *ct,
}
unlink:
- spin_lock_irqsave(&ct->lock, flags);
+ spin_lock_irqsave(&ct->requests.lock, flags);
list_del(&request.link);
- spin_unlock_irqrestore(&ct->lock, flags);
+ spin_unlock_irqrestore(&ct->requests.lock, flags);
return err;
}
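In outline, ct_send() pairs every outgoing message with a fence and parks the request on ct->requests.pending so that ct_handle_response() can match the reply -- a compressed sketch assembled from the hunks above:

	fence = ct_get_next_fence(ct);
	request.fence = fence;

	/* publish before writing, so the response cannot miss us */
	spin_lock_irqsave(&ct->requests.lock, flags);
	list_add_tail(&request.link, &ct->requests.pending);
	spin_unlock_irqrestore(&ct->requests.lock, flags);

	err = ctb_write(ctb, action, len, fence, !!response_buf);
	if (!err)
		err = wait_for_ct_request_update(&request, status);

	/* unlink on both success and failure */
	spin_lock_irqsave(&ct->requests.lock, flags);
	list_del(&request.link);
	spin_unlock_irqrestore(&ct->requests.lock, flags);
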
@@ -511,18 +510,21 @@ unlink:
/*
* Command Transport (CT) buffer based GuC send function.
*/
-int intel_guc_send_ct(struct intel_guc *guc, const u32 *action, u32 len,
+int intel_guc_ct_send(struct intel_guc_ct *ct, const u32 *action, u32 len,
u32 *response_buf, u32 response_buf_size)
{
- struct intel_guc_ct *ct = &guc->ct;
- struct intel_guc_ct_channel *ctch = &ct->host_channel;
+ struct intel_guc *guc = ct_to_guc(ct);
u32 status = ~0; /* undefined */
int ret;
+ if (unlikely(!ct->enabled)) {
+ WARN(1, "Unexpected send: action=%#x\n", *action);
+ return -ENODEV;
+ }
+
mutex_lock(&guc->send_mutex);
- ret = ctch_send(ct, ctch, action, len, response_buf, response_buf_size,
- &status);
+ ret = ct_send(ct, action, len, response_buf, response_buf_size, &status);
if (unlikely(ret < 0)) {
DRM_ERROR("CT: send action %#X failed; err=%d status=%#X\n",
action[0], ret, status);
@@ -653,8 +655,8 @@ static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg)
CT_DEBUG_DRIVER("CT: response fence %u status %#x\n", fence, status);
- spin_lock(&ct->lock);
- list_for_each_entry(req, &ct->pending_requests, link) {
+ spin_lock(&ct->requests.lock);
+ list_for_each_entry(req, &ct->requests.pending, link) {
if (unlikely(fence != req->fence)) {
CT_DEBUG_DRIVER("CT: request %u awaits response\n",
req->fence);
@@ -672,7 +674,7 @@ static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg)
found = true;
break;
}
- spin_unlock(&ct->lock);
+ spin_unlock(&ct->requests.lock);
if (!found)
DRM_ERROR("CT: unsolicited response %*ph\n", 4 * msglen, msg);
@@ -710,13 +712,13 @@ static bool ct_process_incoming_requests(struct intel_guc_ct *ct)
u32 *payload;
bool done;
- spin_lock_irqsave(&ct->lock, flags);
- request = list_first_entry_or_null(&ct->incoming_requests,
+ spin_lock_irqsave(&ct->requests.lock, flags);
+ request = list_first_entry_or_null(&ct->requests.incoming,
struct ct_incoming_request, link);
if (request)
list_del(&request->link);
- done = !!list_empty(&ct->incoming_requests);
- spin_unlock_irqrestore(&ct->lock, flags);
+ done = !!list_empty(&ct->requests.incoming);
+ spin_unlock_irqrestore(&ct->requests.lock, flags);
if (!request)
return true;
@@ -734,12 +736,13 @@ static bool ct_process_incoming_requests(struct intel_guc_ct *ct)
static void ct_incoming_request_worker_func(struct work_struct *w)
{
- struct intel_guc_ct *ct = container_of(w, struct intel_guc_ct, worker);
+ struct intel_guc_ct *ct =
+ container_of(w, struct intel_guc_ct, requests.worker);
bool done;
done = ct_process_incoming_requests(ct);
if (!done)
- queue_work(system_unbound_wq, &ct->worker);
+ queue_work(system_unbound_wq, &ct->requests.worker);
}
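The worker above intentionally handles one incoming request per invocation and requeues itself while more remain, so each pass on the unbound workqueue stays short. The shape of that pattern, sketched with placeholder names (struct dispatcher and process_one() are illustrative):

/* Sketch: drain one item per pass, requeue while the list is non-empty. */
static void drain_worker(struct work_struct *w)
{
	struct dispatcher *d = container_of(w, struct dispatcher, worker);

	/* process_one() returns true once the list has been emptied */
	if (!process_one(d))
		queue_work(system_unbound_wq, &d->worker);
}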
/**
@@ -777,23 +780,28 @@ static int ct_handle_request(struct intel_guc_ct *ct, const u32 *msg)
}
memcpy(request->msg, msg, 4 * msglen);
- spin_lock_irqsave(&ct->lock, flags);
- list_add_tail(&request->link, &ct->incoming_requests);
- spin_unlock_irqrestore(&ct->lock, flags);
+ spin_lock_irqsave(&ct->requests.lock, flags);
+ list_add_tail(&request->link, &ct->requests.incoming);
+ spin_unlock_irqrestore(&ct->requests.lock, flags);
- queue_work(system_unbound_wq, &ct->worker);
+ queue_work(system_unbound_wq, &ct->requests.worker);
return 0;
}
-static void ct_process_host_channel(struct intel_guc_ct *ct)
+/*
+ * When we're communicating with the GuC over CT, GuC uses events
+ * to notify us about new messages being posted on the RECV buffer.
+ */
+void intel_guc_ct_event_handler(struct intel_guc_ct *ct)
{
- struct intel_guc_ct_channel *ctch = &ct->host_channel;
- struct intel_guc_ct_buffer *ctb = &ctch->ctbs[CTB_RECV];
+ struct intel_guc_ct_buffer *ctb = &ct->ctbs[CTB_RECV];
u32 msg[GUC_CT_MSG_LEN_MASK + 1]; /* one extra dw for the header */
int err = 0;
- if (!ctch->enabled)
+ if (unlikely(!ct->enabled)) {
+ WARN(1, "Unexpected GuC event received while CT disabled!\n");
return;
+ }
do {
err = ctb_read(ctb, msg);
@@ -812,86 +820,3 @@ static void ct_process_host_channel(struct intel_guc_ct *ct)
}
}
-/*
- * When we're communicating with the GuC over CT, GuC uses events
- * to notify us about new messages being posted on the RECV buffer.
- */
-void intel_guc_to_host_event_handler_ct(struct intel_guc *guc)
-{
- struct intel_guc_ct *ct = &guc->ct;
-
- ct_process_host_channel(ct);
-}
-
-/**
- * intel_guc_ct_init - Init CT communication
- * @ct: pointer to CT struct
- *
- * Allocate memory required for communication via
- * the CT channel.
- *
- * Return: 0 on success, a negative errno code on failure.
- */
-int intel_guc_ct_init(struct intel_guc_ct *ct)
-{
- struct intel_guc *guc = ct_to_guc(ct);
- struct intel_guc_ct_channel *ctch = &ct->host_channel;
- int err;
-
- err = ctch_init(guc, ctch);
- if (unlikely(err)) {
- DRM_ERROR("CT: can't open channel %d; err=%d\n",
- ctch->owner, err);
- return err;
- }
-
- GEM_BUG_ON(!ctch->vma);
- return 0;
-}
-
-/**
- * intel_guc_ct_fini - Fini CT communication
- * @ct: pointer to CT struct
- *
- * Deallocate memory required for communication via
- * the CT channel.
- */
-void intel_guc_ct_fini(struct intel_guc_ct *ct)
-{
- struct intel_guc *guc = ct_to_guc(ct);
- struct intel_guc_ct_channel *ctch = &ct->host_channel;
-
- ctch_fini(guc, ctch);
-}
-
-/**
- * intel_guc_ct_enable - Enable buffer based command transport.
- * @ct: pointer to CT struct
- *
- * Return: 0 on success, a negative errno code on failure.
- */
-int intel_guc_ct_enable(struct intel_guc_ct *ct)
-{
- struct intel_guc *guc = ct_to_guc(ct);
- struct intel_guc_ct_channel *ctch = &ct->host_channel;
-
- if (ctch->enabled)
- return 0;
-
- return ctch_enable(guc, ctch);
-}
-
-/**
- * intel_guc_ct_disable - Disable buffer based command transport.
- * @ct: pointer to CT struct
- */
-void intel_guc_ct_disable(struct intel_guc_ct *ct)
-{
- struct intel_guc *guc = ct_to_guc(ct);
- struct intel_guc_ct_channel *ctch = &ct->host_channel;
-
- if (!ctch->enabled)
- return;
-
- ctch_disable(guc, ctch);
-}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h
index 7c24d83f5c24..3e7fe237cfa5 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h
@@ -35,44 +35,28 @@ struct intel_guc_ct_buffer {
u32 *cmds;
};
-/** Represents pair of command transport buffers.
- *
- * Buffers go in pairs to allow bi-directional communication.
- * To simplify the code we place both of them in the same vma.
- * Buffers from the same pair must share unique owner id.
- *
- * @vma: pointer to the vma with pair of CT buffers
- * @ctbs: buffers for sending(0) and receiving(1) commands
- * @owner: unique identifier
- * @next_fence: fence to be used with next send command
- */
-struct intel_guc_ct_channel {
- struct i915_vma *vma;
- struct intel_guc_ct_buffer ctbs[2];
- u32 owner;
- u32 next_fence;
- bool enabled;
-};
-/** Holds all command transport channels.
+/** Top-level structure for Command Transport related data
*
- * @host_channel: main channel used by the host
+ * Includes a pair of CT buffers for bi-directional communication and tracking
+ * for the H2G and G2H requests sent and received through the buffers.
*/
struct intel_guc_ct {
- struct intel_guc_ct_channel host_channel;
- /* other channels are tbd */
+ struct i915_vma *vma;
+ bool enabled;
- /** @lock: protects pending requests list */
- spinlock_t lock;
+ /* buffers for sending(0) and receiving(1) commands */
+ struct intel_guc_ct_buffer ctbs[2];
- /** @pending_requests: list of requests waiting for response */
- struct list_head pending_requests;
+ struct {
+ u32 next_fence; /* fence to be used with next request to send */
- /** @incoming_requests: list of incoming requests */
- struct list_head incoming_requests;
+ spinlock_t lock; /* protects pending requests list */
+ struct list_head pending; /* requests waiting for response */
- /** @worker: worker for handling incoming requests */
- struct work_struct worker;
+ struct list_head incoming; /* incoming requests */
+ struct work_struct worker; /* handler for incoming requests */
+ } requests;
};
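Grouping the request-tracking state into the requests sub-struct suggests a single early-init site for it; a plausible sketch of what intel_guc_ct_init_early() now does (an assumption: its body is not shown in this hunk):

void intel_guc_ct_init_early(struct intel_guc_ct *ct)
{
	spin_lock_init(&ct->requests.lock);
	INIT_LIST_HEAD(&ct->requests.pending);
	INIT_LIST_HEAD(&ct->requests.incoming);
	INIT_WORK(&ct->requests.worker, ct_incoming_request_worker_func);
}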
void intel_guc_ct_init_early(struct intel_guc_ct *ct);
@@ -81,13 +65,13 @@ void intel_guc_ct_fini(struct intel_guc_ct *ct);
int intel_guc_ct_enable(struct intel_guc_ct *ct);
void intel_guc_ct_disable(struct intel_guc_ct *ct);
-static inline void intel_guc_ct_stop(struct intel_guc_ct *ct)
+static inline bool intel_guc_ct_enabled(struct intel_guc_ct *ct)
{
- ct->host_channel.enabled = false;
+ return ct->enabled;
}
-int intel_guc_send_ct(struct intel_guc *guc, const u32 *action, u32 len,
+int intel_guc_ct_send(struct intel_guc_ct *ct, const u32 *action, u32 len,
u32 *response_buf, u32 response_buf_size);
-void intel_guc_to_host_event_handler_ct(struct intel_guc *guc);
+void intel_guc_ct_event_handler(struct intel_guc_ct *ct);
#endif /* _INTEL_GUC_CT_H_ */
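For callers, the rename moves the entry point from the GuC object to the CT object itself. An illustrative forwarding helper, not part of this patch:

/* old: intel_guc_send_ct(guc, action, len, NULL, 0);
 * new: the CT object is addressed directly.
 */
static int guc_send_via_ct(struct intel_guc *guc, const u32 *action, u32 len)
{
	return intel_guc_ct_send(&guc->ct, action, len, NULL, 0);
}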
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
index 5528224448f6..3a1c47d600ea 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
@@ -149,7 +149,7 @@ int intel_guc_fw_upload(struct intel_guc *guc)
* Current uCode expects the code to be loaded at 8k; locations below
* this are used for the stack.
*/
- ret = intel_uc_fw_upload(&guc->fw, gt, 0x2000, UOS_MOVE);
+ ret = intel_uc_fw_upload(&guc->fw, 0x2000, UOS_MOVE);
if (ret)
goto out;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
index a26a85d50209..a6b733c146c9 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
@@ -31,7 +31,6 @@
#define GUC_DOORBELL_INVALID 256
-#define GUC_DB_SIZE (PAGE_SIZE)
#define GUC_WQ_SIZE (PAGE_SIZE * 2)
/* Work queue item header definitions */
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 2498c55e0ea5..9e42324fdecd 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -18,15 +18,6 @@
#include "i915_drv.h"
#include "i915_trace.h"
-enum {
- GUC_PREEMPT_NONE = 0,
- GUC_PREEMPT_INPROGRESS,
- GUC_PREEMPT_FINISHED,
-};
-#define GUC_PREEMPT_BREADCRUMB_DWORDS 0x8
-#define GUC_PREEMPT_BREADCRUMB_BYTES \
- (sizeof(u32) * GUC_PREEMPT_BREADCRUMB_DWORDS)
-
/**
* DOC: GuC-based command submission
*
@@ -36,25 +27,14 @@ enum {
* code) matches the old submission model and will be updated as part of the
* upgrade to the new flow.
*
- * GuC client:
- * A intel_guc_client refers to a submission path through GuC. Currently, there
- * is only one client, which is charged with all submissions to the GuC. This
- * struct is the owner of a doorbell, a process descriptor and a workqueue (all
- * of them inside a single gem object that contains all required pages for these
- * elements).
- *
* GuC stage descriptor:
* During initialization, the driver allocates a static pool of 1024 such
- * descriptors, and shares them with the GuC.
- * Currently, there exists a 1:1 mapping between a intel_guc_client and a
- * guc_stage_desc (via the client's stage_id), so effectively only one
- * gets used. This stage descriptor lets the GuC know about the doorbell,
- * workqueue and process descriptor. Theoretically, it also lets the GuC
- * know about our HW contexts (context ID, etc...), but we actually
- * employ a kind of submission where the GuC uses the LRCA sent via the work
- * item instead (the single guc_stage_desc associated to execbuf client
- * contains information about the default kernel context only, but this is
- * essentially unused). This is called a "proxy" submission.
+ * descriptors, and shares them with the GuC. Currently, we only use one
+ * descriptor. This stage descriptor lets the GuC know about the workqueue and
+ * process descriptor. Theoretically, it also lets the GuC know about our HW
+ * contexts (context ID, etc...), but we actually employ a kind of submission
+ * where the GuC uses the LRCA sent via the work item instead. This is called
+ * a "proxy" submission.
*
* The Scratch registers:
* There are 16 MMIO-based registers starting from 0xC180. The kernel driver writes
@@ -63,11 +43,6 @@ enum {
* Firmware writes a success/fail code back to the action register after
* processing the request. The kernel driver polls waiting for this update and
* then proceeds.
- * See intel_guc_send()
- *
- * Doorbells:
- * Doorbells are interrupts to uKernel. A doorbell is a single cache line (QW)
- * mapped into process space.
*
* Work Items:
* There are several types of work items that the host may place into a
@@ -84,213 +59,45 @@ static inline struct i915_priolist *to_priolist(struct rb_node *rb)
return rb_entry(rb, struct i915_priolist, node);
}
-static inline bool is_high_priority(struct intel_guc_client *client)
+static struct guc_stage_desc *__get_stage_desc(struct intel_guc *guc, u32 id)
{
- return (client->priority == GUC_CLIENT_PRIORITY_KMD_HIGH ||
- client->priority == GUC_CLIENT_PRIORITY_HIGH);
-}
-
-static int reserve_doorbell(struct intel_guc_client *client)
-{
- unsigned long offset;
- unsigned long end;
- u16 id;
-
- GEM_BUG_ON(client->doorbell_id != GUC_DOORBELL_INVALID);
+ struct guc_stage_desc *base = guc->stage_desc_pool_vaddr;
- /*
- * The bitmap tracks which doorbell registers are currently in use.
- * It is split into two halves; the first half is used for normal
- * priority contexts, the second half for high-priority ones.
- */
- offset = 0;
- end = GUC_NUM_DOORBELLS / 2;
- if (is_high_priority(client)) {
- offset = end;
- end += offset;
- }
-
- id = find_next_zero_bit(client->guc->doorbell_bitmap, end, offset);
- if (id == end)
- return -ENOSPC;
-
- __set_bit(id, client->guc->doorbell_bitmap);
- client->doorbell_id = id;
- DRM_DEBUG_DRIVER("client %u (high prio=%s) reserved doorbell: %d\n",
- client->stage_id, yesno(is_high_priority(client)),
- id);
- return 0;
+ return &base[id];
}
-static bool has_doorbell(struct intel_guc_client *client)
+static int guc_workqueue_create(struct intel_guc *guc)
{
- if (client->doorbell_id == GUC_DOORBELL_INVALID)
- return false;
-
- return test_bit(client->doorbell_id, client->guc->doorbell_bitmap);
+ return intel_guc_allocate_and_map_vma(guc, GUC_WQ_SIZE, &guc->workqueue,
+ &guc->workqueue_vaddr);
}
-static void unreserve_doorbell(struct intel_guc_client *client)
+static void guc_workqueue_destroy(struct intel_guc *guc)
{
- GEM_BUG_ON(!has_doorbell(client));
-
- __clear_bit(client->doorbell_id, client->guc->doorbell_bitmap);
- client->doorbell_id = GUC_DOORBELL_INVALID;
+ i915_vma_unpin_and_release(&guc->workqueue, I915_VMA_RELEASE_MAP);
}
/*
- * Tell the GuC to allocate or deallocate a specific doorbell
- */
-
-static int __guc_allocate_doorbell(struct intel_guc *guc, u32 stage_id)
-{
- u32 action[] = {
- INTEL_GUC_ACTION_ALLOCATE_DOORBELL,
- stage_id
- };
-
- return intel_guc_send(guc, action, ARRAY_SIZE(action));
-}
-
-static int __guc_deallocate_doorbell(struct intel_guc *guc, u32 stage_id)
-{
- u32 action[] = {
- INTEL_GUC_ACTION_DEALLOCATE_DOORBELL,
- stage_id
- };
-
- return intel_guc_send(guc, action, ARRAY_SIZE(action));
-}
-
-static struct guc_stage_desc *__get_stage_desc(struct intel_guc_client *client)
-{
- struct guc_stage_desc *base = client->guc->stage_desc_pool_vaddr;
-
- return &base[client->stage_id];
-}
-
-/*
- * Initialise, update, or clear doorbell data shared with the GuC
- *
- * These functions modify shared data and so need access to the mapped
- * client object which contains the page being used for the doorbell
+ * Initialise the process descriptor shared with the GuC firmware.
*/
-
-static void __update_doorbell_desc(struct intel_guc_client *client, u16 new_id)
-{
- struct guc_stage_desc *desc;
-
- /* Update the GuC's idea of the doorbell ID */
- desc = __get_stage_desc(client);
- desc->db_id = new_id;
-}
-
-static struct guc_doorbell_info *__get_doorbell(struct intel_guc_client *client)
-{
- return client->vaddr + client->doorbell_offset;
-}
-
-static bool __doorbell_valid(struct intel_guc *guc, u16 db_id)
-{
- struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
-
- GEM_BUG_ON(db_id >= GUC_NUM_DOORBELLS);
- return intel_uncore_read(uncore, GEN8_DRBREGL(db_id)) & GEN8_DRB_VALID;
-}
-
-static void __init_doorbell(struct intel_guc_client *client)
-{
- struct guc_doorbell_info *doorbell;
-
- doorbell = __get_doorbell(client);
- doorbell->db_status = GUC_DOORBELL_ENABLED;
- doorbell->cookie = 0;
-}
-
-static void __fini_doorbell(struct intel_guc_client *client)
-{
- struct guc_doorbell_info *doorbell;
- u16 db_id = client->doorbell_id;
-
- doorbell = __get_doorbell(client);
- doorbell->db_status = GUC_DOORBELL_DISABLED;
-
- /* Doorbell release flow requires that we wait for GEN8_DRB_VALID bit
- * to go to zero after updating db_status before we call the GuC to
- * release the doorbell
- */
- if (wait_for_us(!__doorbell_valid(client->guc, db_id), 10))
- WARN_ONCE(true, "Doorbell never became invalid after disable\n");
-}
-
-static int create_doorbell(struct intel_guc_client *client)
+static int guc_proc_desc_create(struct intel_guc *guc)
{
- int ret;
-
- if (WARN_ON(!has_doorbell(client)))
- return -ENODEV; /* internal setup error, should never happen */
-
- __update_doorbell_desc(client, client->doorbell_id);
- __init_doorbell(client);
-
- ret = __guc_allocate_doorbell(client->guc, client->stage_id);
- if (ret) {
- __fini_doorbell(client);
- __update_doorbell_desc(client, GUC_DOORBELL_INVALID);
- DRM_DEBUG_DRIVER("Couldn't create client %u doorbell: %d\n",
- client->stage_id, ret);
- return ret;
- }
+ const u32 size = PAGE_ALIGN(sizeof(struct guc_process_desc));
- return 0;
+ return intel_guc_allocate_and_map_vma(guc, size, &guc->proc_desc,
+ &guc->proc_desc_vaddr);
}
-static int destroy_doorbell(struct intel_guc_client *client)
+static void guc_proc_desc_destroy(struct intel_guc *guc)
{
- int ret;
-
- GEM_BUG_ON(!has_doorbell(client));
-
- __fini_doorbell(client);
- ret = __guc_deallocate_doorbell(client->guc, client->stage_id);
- if (ret)
- DRM_ERROR("Couldn't destroy client %u doorbell: %d\n",
- client->stage_id, ret);
-
- __update_doorbell_desc(client, GUC_DOORBELL_INVALID);
-
- return ret;
+ i915_vma_unpin_and_release(&guc->proc_desc, I915_VMA_RELEASE_MAP);
}
-static unsigned long __select_cacheline(struct intel_guc *guc)
-{
- unsigned long offset;
-
- /* Doorbell uses a single cache line within a page */
- offset = offset_in_page(guc->db_cacheline);
-
- /* Moving to next cache line to reduce contention */
- guc->db_cacheline += cache_line_size();
-
- DRM_DEBUG_DRIVER("reserved cacheline 0x%lx, next 0x%x, linesize %u\n",
- offset, guc->db_cacheline, cache_line_size());
- return offset;
-}
-
-static inline struct guc_process_desc *
-__get_process_desc(struct intel_guc_client *client)
-{
- return client->vaddr + client->proc_desc_offset;
-}
-
-/*
- * Initialise the process descriptor shared with the GuC firmware.
- */
-static void guc_proc_desc_init(struct intel_guc_client *client)
+static void guc_proc_desc_init(struct intel_guc *guc)
{
struct guc_process_desc *desc;
- desc = memset(__get_process_desc(client), 0, sizeof(*desc));
+ desc = memset(guc->proc_desc_vaddr, 0, sizeof(*desc));
/*
* XXX: pDoorbell and WQVBaseAddress are pointers in process address
@@ -301,47 +108,27 @@ static void guc_proc_desc_init(struct intel_guc_client *client)
desc->wq_base_addr = 0;
desc->db_base_addr = 0;
- desc->stage_id = client->stage_id;
desc->wq_size_bytes = GUC_WQ_SIZE;
desc->wq_status = WQ_STATUS_ACTIVE;
- desc->priority = client->priority;
+ desc->priority = GUC_CLIENT_PRIORITY_KMD_NORMAL;
}
-static void guc_proc_desc_fini(struct intel_guc_client *client)
+static void guc_proc_desc_fini(struct intel_guc *guc)
{
- struct guc_process_desc *desc;
-
- desc = __get_process_desc(client);
- memset(desc, 0, sizeof(*desc));
+ memset(guc->proc_desc_vaddr, 0, sizeof(struct guc_process_desc));
}
static int guc_stage_desc_pool_create(struct intel_guc *guc)
{
- struct i915_vma *vma;
- void *vaddr;
-
- vma = intel_guc_allocate_vma(guc,
- PAGE_ALIGN(sizeof(struct guc_stage_desc) *
- GUC_MAX_STAGE_DESCRIPTORS));
- if (IS_ERR(vma))
- return PTR_ERR(vma);
-
- vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
- if (IS_ERR(vaddr)) {
- i915_vma_unpin_and_release(&vma, 0);
- return PTR_ERR(vaddr);
- }
-
- guc->stage_desc_pool = vma;
- guc->stage_desc_pool_vaddr = vaddr;
- ida_init(&guc->stage_ids);
+ u32 size = PAGE_ALIGN(sizeof(struct guc_stage_desc) *
+ GUC_MAX_STAGE_DESCRIPTORS);
- return 0;
+ return intel_guc_allocate_and_map_vma(guc, size, &guc->stage_desc_pool,
+ &guc->stage_desc_pool_vaddr);
}
static void guc_stage_desc_pool_destroy(struct intel_guc *guc)
{
- ida_destroy(&guc->stage_ids);
i915_vma_unpin_and_release(&guc->stage_desc_pool, I915_VMA_RELEASE_MAP);
}
@@ -349,63 +136,49 @@ static void guc_stage_desc_pool_destroy(struct intel_guc *guc)
* Initialise/clear the stage descriptor shared with the GuC firmware.
*
* This descriptor tells the GuC where (in GGTT space) to find the important
- * data structures relating to this client (doorbell, process descriptor,
- * write queue, etc).
+ * data structures related to work submission (process descriptor, write queue,
+ * etc).
*/
-static void guc_stage_desc_init(struct intel_guc_client *client)
+static void guc_stage_desc_init(struct intel_guc *guc)
{
- struct intel_guc *guc = client->guc;
struct guc_stage_desc *desc;
- u32 gfx_addr;
- desc = __get_stage_desc(client);
+ /* we only use 1 stage desc, so hardcode it to 0 */
+ desc = __get_stage_desc(guc, 0);
memset(desc, 0, sizeof(*desc));
desc->attribute = GUC_STAGE_DESC_ATTR_ACTIVE |
GUC_STAGE_DESC_ATTR_KERNEL;
- if (is_high_priority(client))
- desc->attribute |= GUC_STAGE_DESC_ATTR_PREEMPT;
- desc->stage_id = client->stage_id;
- desc->priority = client->priority;
- desc->db_id = client->doorbell_id;
- /*
- * The doorbell, process descriptor, and workqueue are all parts
- * of the client object, which the GuC will reference via the GGTT
- */
- gfx_addr = intel_guc_ggtt_offset(guc, client->vma);
- desc->db_trigger_phy = sg_dma_address(client->vma->pages->sgl) +
- client->doorbell_offset;
- desc->db_trigger_cpu = ptr_to_u64(__get_doorbell(client));
- desc->db_trigger_uk = gfx_addr + client->doorbell_offset;
- desc->process_desc = gfx_addr + client->proc_desc_offset;
- desc->wq_addr = gfx_addr + GUC_DB_SIZE;
- desc->wq_size = GUC_WQ_SIZE;
+ desc->stage_id = 0;
+ desc->priority = GUC_CLIENT_PRIORITY_KMD_NORMAL;
- desc->desc_private = ptr_to_u64(client);
+ desc->process_desc = intel_guc_ggtt_offset(guc, guc->proc_desc);
+ desc->wq_addr = intel_guc_ggtt_offset(guc, guc->workqueue);
+ desc->wq_size = GUC_WQ_SIZE;
}
-static void guc_stage_desc_fini(struct intel_guc_client *client)
+static void guc_stage_desc_fini(struct intel_guc *guc)
{
struct guc_stage_desc *desc;
- desc = __get_stage_desc(client);
+ desc = __get_stage_desc(guc, 0);
memset(desc, 0, sizeof(*desc));
}
/* Construct a Work Item and append it to the GuC's Work Queue */
-static void guc_wq_item_append(struct intel_guc_client *client,
+static void guc_wq_item_append(struct intel_guc *guc,
u32 target_engine, u32 context_desc,
u32 ring_tail, u32 fence_id)
{
/* wqi_len is in DWords, and does not include the one-word header */
const size_t wqi_size = sizeof(struct guc_wq_item);
const u32 wqi_len = wqi_size / sizeof(u32) - 1;
- struct guc_process_desc *desc = __get_process_desc(client);
+ struct guc_process_desc *desc = guc->proc_desc_vaddr;
struct guc_wq_item *wqi;
u32 wq_off;
- lockdep_assert_held(&client->wq_lock);
+ lockdep_assert_held(&guc->wq_lock);
/* For now a workqueue item is 4 DWs and the workqueue buffer is 2 pages, so we
* should not see a wqi structure that crosses a page boundary, neither
@@ -425,58 +198,30 @@ static void guc_wq_item_append(struct intel_guc_client *client,
GUC_WQ_SIZE) < wqi_size);
GEM_BUG_ON(wq_off & (wqi_size - 1));
- /* WQ starts from the page after doorbell / process_desc */
- wqi = client->vaddr + wq_off + GUC_DB_SIZE;
-
- if (I915_SELFTEST_ONLY(client->use_nop_wqi)) {
- wqi->header = WQ_TYPE_NOOP | (wqi_len << WQ_LEN_SHIFT);
- } else {
- /* Now fill in the 4-word work queue item */
- wqi->header = WQ_TYPE_INORDER |
- (wqi_len << WQ_LEN_SHIFT) |
- (target_engine << WQ_TARGET_SHIFT) |
- WQ_NO_WCFLUSH_WAIT;
- wqi->context_desc = context_desc;
- wqi->submit_element_info = ring_tail << WQ_RING_TAIL_SHIFT;
- GEM_BUG_ON(ring_tail > WQ_RING_TAIL_MAX);
- wqi->fence_id = fence_id;
- }
+ wqi = guc->workqueue_vaddr + wq_off;
+
+ /* Now fill in the 4-word work queue item */
+ wqi->header = WQ_TYPE_INORDER |
+ (wqi_len << WQ_LEN_SHIFT) |
+ (target_engine << WQ_TARGET_SHIFT) |
+ WQ_NO_WCFLUSH_WAIT;
+ wqi->context_desc = context_desc;
+ wqi->submit_element_info = ring_tail << WQ_RING_TAIL_SHIFT;
+ GEM_BUG_ON(ring_tail > WQ_RING_TAIL_MAX);
+ wqi->fence_id = fence_id;
/* Make the update visible to GuC */
WRITE_ONCE(desc->tail, (wq_off + wqi_size) & (GUC_WQ_SIZE - 1));
}
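The tail update relies on GUC_WQ_SIZE being a power of two, so the AND mask implements the wrap-around. The arithmetic in isolation (constants illustrative):

/* Wrap-around tail advance for a power-of-two ring (sketch). */
#define RING_SIZE (2u * 4096u)	/* must be a power of two */

static u32 advance_tail(u32 off, u32 item_size)
{
	return (off + item_size) & (RING_SIZE - 1);
}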
-static void guc_ring_doorbell(struct intel_guc_client *client)
-{
- struct guc_doorbell_info *db;
- u32 cookie;
-
- lockdep_assert_held(&client->wq_lock);
-
- /* pointer of current doorbell cacheline */
- db = __get_doorbell(client);
-
- /*
- * We're not expecting the doorbell cookie to change behind our back,
- * we also need to treat 0 as a reserved value.
- */
- cookie = READ_ONCE(db->cookie);
- WARN_ON_ONCE(xchg(&db->cookie, cookie + 1 ?: cookie + 2) != cookie);
-
- /* XXX: doorbell was lost and need to acquire it again */
- GEM_BUG_ON(db->db_status != GUC_DOORBELL_ENABLED);
-}
-
static void guc_add_request(struct intel_guc *guc, struct i915_request *rq)
{
- struct intel_guc_client *client = guc->execbuf_client;
struct intel_engine_cs *engine = rq->engine;
- u32 ctx_desc = lower_32_bits(rq->hw_context->lrc_desc);
+ u32 ctx_desc = lower_32_bits(rq->context->lrc_desc);
u32 ring_tail = intel_ring_set_tail(rq->ring, rq->tail) / sizeof(u64);
- guc_wq_item_append(client, engine->guc_id, ctx_desc,
+ guc_wq_item_append(guc, engine->guc_id, ctx_desc,
ring_tail, rq->fence.seqno);
- guc_ring_doorbell(client);
}
/*
@@ -488,10 +233,9 @@ static void guc_add_request(struct intel_guc *guc, struct i915_request *rq)
*/
static void flush_ggtt_writes(struct i915_vma *vma)
{
- struct drm_i915_private *i915 = vma->vm->i915;
-
if (i915_vma_is_map_and_fenceable(vma))
- intel_uncore_posting_read_fw(&i915->uncore, GUC_STATUS);
+ intel_uncore_posting_read_fw(vma->vm->gt->uncore,
+ GUC_STATUS);
}
static void guc_submit(struct intel_engine_cs *engine,
@@ -499,9 +243,8 @@ static void guc_submit(struct intel_engine_cs *engine,
struct i915_request **end)
{
struct intel_guc *guc = &engine->gt->uc.guc;
- struct intel_guc_client *client = guc->execbuf_client;
- spin_lock(&client->wq_lock);
+ spin_lock(&guc->wq_lock);
do {
struct i915_request *rq = *out++;
@@ -510,7 +253,7 @@ static void guc_submit(struct intel_engine_cs *engine,
guc_add_request(guc, rq);
} while (out != end);
- spin_unlock(&client->wq_lock);
+ spin_unlock(&guc->wq_lock);
}
static inline int rq_prio(const struct i915_request *rq)
@@ -529,7 +272,7 @@ static struct i915_request *schedule_in(struct i915_request *rq, int idx)
* required if we generalise the inflight tracking.
*/
- intel_gt_pm_get(rq->engine->gt);
+ __intel_gt_pm_get(rq->engine->gt);
return i915_request_get(rq);
}
@@ -537,7 +280,7 @@ static void schedule_out(struct i915_request *rq)
{
trace_i915_request_out(rq);
- intel_gt_pm_put(rq->engine->gt);
+ intel_gt_pm_put_async(rq->engine->gt);
i915_request_put(rq);
}
@@ -572,7 +315,7 @@ static void __guc_dequeue(struct intel_engine_cs *engine)
int i;
priolist_for_each_request_consume(rq, rn, p, i) {
- if (last && rq->hw_context != last->hw_context) {
+ if (last && rq->context != last->context) {
if (port == last_port)
goto done;
@@ -631,7 +374,7 @@ static void guc_reset_prepare(struct intel_engine_cs *engine)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
- GEM_TRACE("%s\n", engine->name);
+ ENGINE_TRACE(engine, "\n");
/*
* Prevent request submission to the hardware until we have
@@ -658,7 +401,7 @@ cancel_port_requests(struct intel_engine_execlists * const execlists)
memset(execlists->inflight, 0, sizeof(execlists->inflight));
}
-static void guc_reset(struct intel_engine_cs *engine, bool stalled)
+static void guc_reset_rewind(struct intel_engine_cs *engine, bool stalled)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
struct i915_request *rq;
@@ -677,20 +420,20 @@ static void guc_reset(struct intel_engine_cs *engine, bool stalled)
stalled = false;
__i915_request_reset(rq, stalled);
- intel_lr_context_reset(engine, rq->hw_context, rq->head, stalled);
+ intel_lr_context_reset(engine, rq->context, rq->head, stalled);
out_unlock:
spin_unlock_irqrestore(&engine->active.lock, flags);
}
-static void guc_cancel_requests(struct intel_engine_cs *engine)
+static void guc_reset_cancel(struct intel_engine_cs *engine)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
struct i915_request *rq, *rn;
struct rb_node *rb;
unsigned long flags;
- GEM_TRACE("%s\n", engine->name);
+ ENGINE_TRACE(engine, "\n");
/*
* Before we call engine->cancel_requests(), we should have exclusive
@@ -751,8 +494,8 @@ static void guc_reset_finish(struct intel_engine_cs *engine)
/* And kick in case we missed a new request submission. */
tasklet_hi_schedule(&execlists->tasklet);
- GEM_TRACE("%s: depth->%d\n", engine->name,
- atomic_read(&execlists->tasklet.count));
+ ENGINE_TRACE(engine, "depth->%d\n",
+ atomic_read(&execlists->tasklet.count));
}
/*
@@ -761,213 +504,6 @@ static void guc_reset_finish(struct intel_engine_cs *engine)
* path of guc_submit() above.
*/
-/* Check that a doorbell register is in the expected state */
-static bool doorbell_ok(struct intel_guc *guc, u16 db_id)
-{
- bool valid;
-
- GEM_BUG_ON(db_id >= GUC_NUM_DOORBELLS);
-
- valid = __doorbell_valid(guc, db_id);
-
- if (test_bit(db_id, guc->doorbell_bitmap) == valid)
- return true;
-
- DRM_DEBUG_DRIVER("Doorbell %u has unexpected state: valid=%s\n",
- db_id, yesno(valid));
-
- return false;
-}
-
-static bool guc_verify_doorbells(struct intel_guc *guc)
-{
- bool doorbells_ok = true;
- u16 db_id;
-
- for (db_id = 0; db_id < GUC_NUM_DOORBELLS; ++db_id)
- if (!doorbell_ok(guc, db_id))
- doorbells_ok = false;
-
- return doorbells_ok;
-}
-
-/**
- * guc_client_alloc() - Allocate an intel_guc_client
- * @guc: the intel_guc structure
- * @priority: four levels priority _CRITICAL, _HIGH, _NORMAL and _LOW
- * The kernel client to replace ExecList submission is created with
- * NORMAL priority. Priority of a client for scheduler can be HIGH,
- * while a preemption context can use CRITICAL.
- *
- * Return: An intel_guc_client object if success, else NULL.
- */
-static struct intel_guc_client *
-guc_client_alloc(struct intel_guc *guc, u32 priority)
-{
- struct intel_guc_client *client;
- struct i915_vma *vma;
- void *vaddr;
- int ret;
-
- client = kzalloc(sizeof(*client), GFP_KERNEL);
- if (!client)
- return ERR_PTR(-ENOMEM);
-
- client->guc = guc;
- client->priority = priority;
- client->doorbell_id = GUC_DOORBELL_INVALID;
- spin_lock_init(&client->wq_lock);
-
- ret = ida_simple_get(&guc->stage_ids, 0, GUC_MAX_STAGE_DESCRIPTORS,
- GFP_KERNEL);
- if (ret < 0)
- goto err_client;
-
- client->stage_id = ret;
-
- /* The first page is doorbell/proc_desc. Two followed pages are wq. */
- vma = intel_guc_allocate_vma(guc, GUC_DB_SIZE + GUC_WQ_SIZE);
- if (IS_ERR(vma)) {
- ret = PTR_ERR(vma);
- goto err_id;
- }
-
- /* We'll keep just the first (doorbell/proc) page permanently kmap'd. */
- client->vma = vma;
-
- vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
- if (IS_ERR(vaddr)) {
- ret = PTR_ERR(vaddr);
- goto err_vma;
- }
- client->vaddr = vaddr;
-
- ret = reserve_doorbell(client);
- if (ret)
- goto err_vaddr;
-
- client->doorbell_offset = __select_cacheline(guc);
-
- /*
- * Since the doorbell only requires a single cacheline, we can save
- * space by putting the application process descriptor in the same
- * page. Use the half of the page that doesn't include the doorbell.
- */
- if (client->doorbell_offset >= (GUC_DB_SIZE / 2))
- client->proc_desc_offset = 0;
- else
- client->proc_desc_offset = (GUC_DB_SIZE / 2);
-
- DRM_DEBUG_DRIVER("new priority %u client %p: stage_id %u\n",
- priority, client, client->stage_id);
- DRM_DEBUG_DRIVER("doorbell id %u, cacheline offset 0x%lx\n",
- client->doorbell_id, client->doorbell_offset);
-
- return client;
-
-err_vaddr:
- i915_gem_object_unpin_map(client->vma->obj);
-err_vma:
- i915_vma_unpin_and_release(&client->vma, 0);
-err_id:
- ida_simple_remove(&guc->stage_ids, client->stage_id);
-err_client:
- kfree(client);
- return ERR_PTR(ret);
-}
-
-static void guc_client_free(struct intel_guc_client *client)
-{
- unreserve_doorbell(client);
- i915_vma_unpin_and_release(&client->vma, I915_VMA_RELEASE_MAP);
- ida_simple_remove(&client->guc->stage_ids, client->stage_id);
- kfree(client);
-}
-
-static inline bool ctx_save_restore_disabled(struct intel_context *ce)
-{
- u32 sr = ce->lrc_reg_state[CTX_CONTEXT_CONTROL + 1];
-
-#define SR_DISABLED \
- _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT | \
- CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT)
-
- return (sr & SR_DISABLED) == SR_DISABLED;
-
-#undef SR_DISABLED
-}
-
-static int guc_clients_create(struct intel_guc *guc)
-{
- struct intel_guc_client *client;
-
- GEM_BUG_ON(guc->execbuf_client);
-
- client = guc_client_alloc(guc, GUC_CLIENT_PRIORITY_KMD_NORMAL);
- if (IS_ERR(client)) {
- DRM_ERROR("Failed to create GuC client for submission!\n");
- return PTR_ERR(client);
- }
- guc->execbuf_client = client;
-
- return 0;
-}
-
-static void guc_clients_destroy(struct intel_guc *guc)
-{
- struct intel_guc_client *client;
-
- client = fetch_and_zero(&guc->execbuf_client);
- if (client)
- guc_client_free(client);
-}
-
-static int __guc_client_enable(struct intel_guc_client *client)
-{
- int ret;
-
- guc_proc_desc_init(client);
- guc_stage_desc_init(client);
-
- ret = create_doorbell(client);
- if (ret)
- goto fail;
-
- return 0;
-
-fail:
- guc_stage_desc_fini(client);
- guc_proc_desc_fini(client);
- return ret;
-}
-
-static void __guc_client_disable(struct intel_guc_client *client)
-{
- /*
- * By the time we're here, GuC may have already been reset. if that is
- * the case, instead of trying (in vain) to communicate with it, let's
- * just cleanup the doorbell HW and our internal state.
- */
- if (intel_guc_is_running(client->guc))
- destroy_doorbell(client);
- else
- __fini_doorbell(client);
-
- guc_stage_desc_fini(client);
- guc_proc_desc_fini(client);
-}
-
-static int guc_clients_enable(struct intel_guc *guc)
-{
- return __guc_client_enable(guc->execbuf_client);
-}
-
-static void guc_clients_disable(struct intel_guc *guc)
-{
- if (guc->execbuf_client)
- __guc_client_disable(guc->execbuf_client);
-}
-
/*
* Set up the memory resources to be shared with the GuC (via the GGTT)
* at firmware loading time.
@@ -988,13 +524,20 @@ int intel_guc_submission_init(struct intel_guc *guc)
*/
GEM_BUG_ON(!guc->stage_desc_pool);
- WARN_ON(!guc_verify_doorbells(guc));
- ret = guc_clients_create(guc);
+ ret = guc_workqueue_create(guc);
if (ret)
goto err_pool;
+ ret = guc_proc_desc_create(guc);
+ if (ret)
+ goto err_workqueue;
+
+ spin_lock_init(&guc->wq_lock);
+
return 0;
+err_workqueue:
+ guc_workqueue_destroy(guc);
err_pool:
guc_stage_desc_pool_destroy(guc);
return ret;
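The error path above is the usual kernel unwind idiom: each failure label undoes exactly the steps that already succeeded, in reverse order. In miniature, with hypothetical names:

static int init_two_resources(struct owner *o)
{
	int ret;

	ret = create_first(o);
	if (ret)
		return ret;		/* nothing to undo yet */

	ret = create_second(o);
	if (ret)
		goto err_first;		/* undo only the first step */

	return 0;

err_first:
	destroy_first(o);
	return ret;
}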
@@ -1002,83 +545,37 @@ err_pool:
void intel_guc_submission_fini(struct intel_guc *guc)
{
- guc_clients_destroy(guc);
- WARN_ON(!guc_verify_doorbells(guc));
-
- if (guc->stage_desc_pool)
+ if (guc->stage_desc_pool) {
+ guc_proc_desc_destroy(guc);
+ guc_workqueue_destroy(guc);
guc_stage_desc_pool_destroy(guc);
+ }
}
static void guc_interrupts_capture(struct intel_gt *gt)
{
- struct intel_rps *rps = &gt->rps;
struct intel_uncore *uncore = gt->uncore;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- int irqs;
+ u32 irqs = GT_CONTEXT_SWITCH_INTERRUPT;
+ u32 dmask = irqs << 16 | irqs;
- /* tell all command streamers to forward interrupts (but not vblank)
- * to GuC
- */
- irqs = _MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING);
- for_each_engine(engine, gt, id)
- ENGINE_WRITE(engine, RING_MODE_GEN7, irqs);
-
- /* route USER_INTERRUPT to Host, all others are sent to GuC. */
- irqs = GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
- GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
- /* These three registers have the same bit definitions */
- intel_uncore_write(uncore, GUC_BCS_RCS_IER, ~irqs);
- intel_uncore_write(uncore, GUC_VCS2_VCS1_IER, ~irqs);
- intel_uncore_write(uncore, GUC_WD_VECS_IER, ~irqs);
+ GEM_BUG_ON(INTEL_GEN(gt->i915) < 11);
- /*
- * The REDIRECT_TO_GUC bit of the PMINTRMSK register directs all
- * (unmasked) PM interrupts to the GuC. All other bits of this
- * register *disable* generation of a specific interrupt.
- *
- * 'pm_intrmsk_mbz' indicates bits that are NOT to be set when
- * writing to the PM interrupt mask register, i.e. interrupts
- * that must not be disabled.
- *
- * If the GuC is handling these interrupts, then we must not let
- * the PM code disable ANY interrupt that the GuC is expecting.
- * So for each ENABLED (0) bit in this register, we must SET the
- * bit in pm_intrmsk_mbz so that it's left enabled for the GuC.
- * GuC needs ARAT expired interrupt unmasked hence it is set in
- * pm_intrmsk_mbz.
- *
- * Here we CLEAR REDIRECT_TO_GUC bit in pm_intrmsk_mbz, which will
- * result in the register bit being left SET!
- */
- rps->pm_intrmsk_mbz |= ARAT_EXPIRED_INTRMSK;
- rps->pm_intrmsk_mbz &= ~GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
+ /* Don't handle the ctx switch interrupt in GuC submission mode */
+ intel_uncore_rmw(uncore, GEN11_RENDER_COPY_INTR_ENABLE, dmask, 0);
+ intel_uncore_rmw(uncore, GEN11_VCS_VECS_INTR_ENABLE, dmask, 0);
}
static void guc_interrupts_release(struct intel_gt *gt)
{
- struct intel_rps *rps = &gt->rps;
struct intel_uncore *uncore = gt->uncore;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- int irqs;
+ u32 irqs = GT_CONTEXT_SWITCH_INTERRUPT;
+ u32 dmask = irqs << 16 | irqs;
- /*
- * tell all command streamers NOT to forward interrupts or vblank
- * to GuC.
- */
- irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_NEVER);
- irqs |= _MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING);
- for_each_engine(engine, gt, id)
- ENGINE_WRITE(engine, RING_MODE_GEN7, irqs);
-
- /* route all GT interrupts to the host */
- intel_uncore_write(uncore, GUC_BCS_RCS_IER, 0);
- intel_uncore_write(uncore, GUC_VCS2_VCS1_IER, 0);
- intel_uncore_write(uncore, GUC_WD_VECS_IER, 0);
-
- rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
- rps->pm_intrmsk_mbz &= ~ARAT_EXPIRED_INTRMSK;
+ GEM_BUG_ON(INTEL_GEN(gt->i915) < 11);
+
+ /* Handle ctx switch interrupts again */
+ intel_uncore_rmw(uncore, GEN11_RENDER_COPY_INTR_ENABLE, 0, dmask);
+ intel_uncore_rmw(uncore, GEN11_VCS_VECS_INTR_ENABLE, 0, dmask);
}
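The dmask construction assumes the gen11 interrupt-enable registers pack two engines per register, one in each 16-bit half, so the same bits are replicated into both halves (an assumption read off the shifts in this hunk):

/* Sketch: replicate per-engine bits into both 16-bit halves. */
static inline u32 dual_engine_mask(u32 bits)
{
	return bits << 16 | bits;
}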
static void guc_set_default_submission(struct intel_engine_cs *engine)
@@ -1102,11 +599,10 @@ static void guc_set_default_submission(struct intel_engine_cs *engine)
engine->park = engine->unpark = NULL;
engine->reset.prepare = guc_reset_prepare;
- engine->reset.reset = guc_reset;
+ engine->reset.rewind = guc_reset_rewind;
+ engine->reset.cancel = guc_reset_cancel;
engine->reset.finish = guc_reset_finish;
- engine->cancel_requests = guc_cancel_requests;
-
engine->flags &= ~I915_ENGINE_SUPPORTS_STATS;
engine->flags |= I915_ENGINE_NEEDS_BREADCRUMB_TASKLET;
@@ -1119,16 +615,11 @@ static void guc_set_default_submission(struct intel_engine_cs *engine)
GEM_BUG_ON(engine->irq_enable || engine->irq_disable);
}
-int intel_guc_submission_enable(struct intel_guc *guc)
+void intel_guc_submission_enable(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
struct intel_engine_cs *engine;
enum intel_engine_id id;
- int err;
-
- err = i915_inject_probe_error(gt->i915, -ENXIO);
- if (err)
- return err;
/*
* We're using GuC work items for submitting work through GuC. Since
@@ -1143,11 +634,8 @@ int intel_guc_submission_enable(struct intel_guc *guc)
sizeof(struct guc_wq_item) *
I915_NUM_ENGINES > GUC_WQ_SIZE);
- GEM_BUG_ON(!guc->execbuf_client);
-
- err = guc_clients_enable(guc);
- if (err)
- return err;
+ guc_proc_desc_init(guc);
+ guc_stage_desc_init(guc);
/* Take over from manual control of ELSP (execlists) */
guc_interrupts_capture(gt);
@@ -1156,8 +644,6 @@ int intel_guc_submission_enable(struct intel_guc *guc)
engine->set_default_submission = guc_set_default_submission;
engine->set_default_submission(engine);
}
-
- return 0;
}
void intel_guc_submission_disable(struct intel_guc *guc)
@@ -1166,8 +652,12 @@ void intel_guc_submission_disable(struct intel_guc *guc)
GEM_BUG_ON(gt->awake); /* GT should be parked first */
+ /* Note: By the time we're here, GuC may have already been reset */
+
guc_interrupts_release(gt);
- guc_clients_disable(guc);
+
+ guc_stage_desc_fini(guc);
+ guc_proc_desc_fini(guc);
}
static bool __guc_submission_support(struct intel_guc *guc)
@@ -1186,6 +676,7 @@ void intel_guc_submission_init_early(struct intel_guc *guc)
guc->submission_supported = __guc_submission_support(guc);
}
-#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
-#include "selftest_guc.c"
-#endif
+bool intel_engine_in_guc_submission_mode(const struct intel_engine_cs *engine)
+{
+ return engine->set_default_submission == guc_set_default_submission;
+}
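The new predicate identifies the active backend by pointer identity of the set_default_submission vfunc rather than by a separate flag, which avoids keeping a boolean in sync with the vfunc table. The general shape, with placeholder types:

/* Sketch: a backend is "active" when its setup vfunc is installed. */
static bool engine_uses(const struct engine *e,
			void (*setup)(struct engine *))
{
	return e->set_default_submission == setup;
}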
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
index 54d716828352..e402a2932592 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
@@ -6,62 +6,18 @@
#ifndef _INTEL_GUC_SUBMISSION_H_
#define _INTEL_GUC_SUBMISSION_H_
-#include <linux/spinlock.h>
+#include <linux/types.h>
-#include "gt/intel_engine_types.h"
-
-#include "i915_gem.h"
-#include "i915_selftest.h"
-
-struct drm_i915_private;
-
-/*
- * This structure primarily describes the GEM object shared with the GuC.
- * The specs sometimes refer to this object as a "GuC context", but we use
- * the term "client" to avoid confusion with hardware contexts. This
- * GEM object is held for the entire lifetime of our interaction with
- * the GuC, being allocated before the GuC is loaded with its firmware.
- * Because there's no way to update the address used by the GuC after
- * initialisation, the shared object must stay pinned into the GGTT as
- * long as the GuC is in use. We also keep the first page (only) mapped
- * into kernel address space, as it includes shared data that must be
- * updated on every request submission.
- *
- * The single GEM object described here is actually made up of several
- * separate areas, as far as the GuC is concerned. The first page (kept
- * kmap'd) includes the "process descriptor" which holds sequence data for
- * the doorbell, and one cacheline which actually *is* the doorbell; a
- * write to this will "ring the doorbell" (i.e. send an interrupt to the
- * GuC). The subsequent pages of the client object constitute the work
- * queue (a circular array of work items), again described in the process
- * descriptor. Work queue pages are mapped momentarily as required.
- */
-struct intel_guc_client {
- struct i915_vma *vma;
- void *vaddr;
- struct intel_guc *guc;
-
- /* bitmap of (host) engine ids */
- u32 priority;
- u32 stage_id;
- u32 proc_desc_offset;
-
- u16 doorbell_id;
- unsigned long doorbell_offset;
-
- /* Protects GuC client's WQ access */
- spinlock_t wq_lock;
-
- /* For testing purposes, use nop WQ items instead of real ones */
- I915_SELFTEST_DECLARE(bool use_nop_wqi);
-};
+struct intel_guc;
+struct intel_engine_cs;
void intel_guc_submission_init_early(struct intel_guc *guc);
int intel_guc_submission_init(struct intel_guc *guc);
-int intel_guc_submission_enable(struct intel_guc *guc);
+void intel_guc_submission_enable(struct intel_guc *guc);
void intel_guc_submission_disable(struct intel_guc *guc);
void intel_guc_submission_fini(struct intel_guc *guc);
int intel_guc_preempt_work_create(struct intel_guc *guc);
void intel_guc_preempt_work_destroy(struct intel_guc *guc);
+bool intel_engine_in_guc_submission_mode(const struct intel_engine_cs *engine);
#endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c
index d654340d4d03..eee193bf2cc4 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c
@@ -39,5 +39,5 @@ void intel_huc_fw_init_early(struct intel_huc *huc)
int intel_huc_fw_upload(struct intel_huc *huc)
{
/* HW doesn't look at destination address for HuC, so set it to 0 */
- return intel_uc_fw_upload(&huc->fw, huc_to_gt(huc), 0, HUC_UKERNEL);
+ return intel_uc_fw_upload(&huc->fw, 0, HUC_UKERNEL);
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
index 629b19377a29..3ffc6267f96e 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
@@ -123,6 +123,11 @@ static void __uc_free_load_err_log(struct intel_uc *uc)
i915_gem_object_put(log);
}
+static inline bool guc_communication_enabled(struct intel_guc *guc)
+{
+ return intel_guc_ct_enabled(&guc->ct);
+}
+
/*
* Events triggered while CT buffers are disabled are logged in the SCRATCH_15
* register using the same bits used in the CT message payload. Since our
@@ -158,7 +163,7 @@ static void guc_handle_mmio_msg(struct intel_guc *guc)
struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
/* we need communication to be enabled to reply to GuC */
- GEM_BUG_ON(guc->handler == intel_guc_to_host_event_handler_nop);
+ GEM_BUG_ON(!guc_communication_enabled(guc));
if (!guc->mmio_msg)
return;
@@ -185,11 +190,6 @@ static void guc_disable_interrupts(struct intel_guc *guc)
guc->interrupts.disable(guc);
}
-static inline bool guc_communication_enabled(struct intel_guc *guc)
-{
- return guc->send != intel_guc_send_nop;
-}
-
static int guc_enable_communication(struct intel_guc *guc)
{
struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
@@ -205,9 +205,6 @@ static int guc_enable_communication(struct intel_guc *guc)
if (ret)
return ret;
- guc->send = intel_guc_send_ct;
- guc->handler = intel_guc_to_host_event_handler_ct;
-
/* check for mmio messages received before/during the CT enable */
guc_get_mmio_msg(guc);
guc_handle_mmio_msg(guc);
@@ -216,7 +213,7 @@ static int guc_enable_communication(struct intel_guc *guc)
/* check for CT messages received before we enabled interrupts */
spin_lock_irq(&i915->irq_lock);
- intel_guc_to_host_event_handler_ct(guc);
+ intel_guc_ct_event_handler(&guc->ct);
spin_unlock_irq(&i915->irq_lock);
DRM_INFO("GuC communication enabled\n");
@@ -224,7 +221,7 @@ static int guc_enable_communication(struct intel_guc *guc)
return 0;
}
-static void __guc_stop_communication(struct intel_guc *guc)
+static void guc_disable_communication(struct intel_guc *guc)
{
/*
* Events generated during or after CT disable are logged by guc in
@@ -235,23 +232,6 @@ static void __guc_stop_communication(struct intel_guc *guc)
guc_disable_interrupts(guc);
- guc->send = intel_guc_send_nop;
- guc->handler = intel_guc_to_host_event_handler_nop;
-}
-
-static void guc_stop_communication(struct intel_guc *guc)
-{
- intel_guc_ct_stop(&guc->ct);
-
- __guc_stop_communication(guc);
-
- DRM_INFO("GuC communication stopped\n");
-}
-
-static void guc_disable_communication(struct intel_guc *guc)
-{
- __guc_stop_communication(guc);
-
intel_guc_ct_disable(&guc->ct);
/*
@@ -267,28 +247,22 @@ static void guc_disable_communication(struct intel_guc *guc)
void intel_uc_fetch_firmwares(struct intel_uc *uc)
{
- struct drm_i915_private *i915 = uc_to_gt(uc)->i915;
int err;
if (!intel_uc_uses_guc(uc))
return;
- err = intel_uc_fw_fetch(&uc->guc.fw, i915);
+ err = intel_uc_fw_fetch(&uc->guc.fw);
if (err)
return;
if (intel_uc_uses_huc(uc))
- intel_uc_fw_fetch(&uc->huc.fw, i915);
+ intel_uc_fw_fetch(&uc->huc.fw);
}
void intel_uc_cleanup_firmwares(struct intel_uc *uc)
{
- if (!intel_uc_uses_guc(uc))
- return;
-
- if (intel_uc_uses_huc(uc))
- intel_uc_fw_cleanup_fetch(&uc->huc.fw);
-
+ intel_uc_fw_cleanup_fetch(&uc->huc.fw);
intel_uc_fw_cleanup_fetch(&uc->guc.fw);
}
@@ -316,15 +290,8 @@ void intel_uc_init(struct intel_uc *uc)
void intel_uc_fini(struct intel_uc *uc)
{
- struct intel_guc *guc = &uc->guc;
-
- if (!intel_uc_uses_guc(uc))
- return;
-
- if (intel_uc_uses_huc(uc))
- intel_huc_fini(&uc->huc);
-
- intel_guc_fini(guc);
+ intel_huc_fini(&uc->huc);
+ intel_guc_fini(&uc->guc);
__uc_free_load_err_log(uc);
}
@@ -486,11 +453,8 @@ int intel_uc_init_hw(struct intel_uc *uc)
if (ret)
goto err_communication;
- if (intel_uc_supports_guc_submission(uc)) {
- ret = intel_guc_submission_enable(guc);
- if (ret)
- goto err_communication;
- }
+ if (intel_uc_supports_guc_submission(uc))
+ intel_guc_submission_enable(guc);
dev_info(i915->drm.dev, "%s firmware %s version %u.%u %s:%s\n",
intel_uc_fw_type_repr(INTEL_UC_FW_TYPE_GUC), guc->fw.path,
@@ -560,7 +524,7 @@ void intel_uc_reset_prepare(struct intel_uc *uc)
if (!intel_guc_is_running(guc))
return;
- guc_stop_communication(guc);
+ guc_disable_communication(guc);
__uc_sanitize(uc);
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
index 66a30ab7044a..8ee0a0c7f447 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
@@ -11,7 +11,6 @@
#include "intel_uc_fw_abi.h"
#include "i915_drv.h"
-#ifdef CONFIG_DRM_I915_DEBUG_GUC
static inline struct intel_gt *__uc_fw_to_gt(struct intel_uc_fw *uc_fw)
{
GEM_BUG_ON(uc_fw->status == INTEL_UC_FIRMWARE_UNINITIALIZED);
@@ -22,6 +21,7 @@ static inline struct intel_gt *__uc_fw_to_gt(struct intel_uc_fw *uc_fw)
return container_of(uc_fw, struct intel_gt, uc.huc.fw);
}
+#ifdef CONFIG_DRM_I915_DEBUG_GUC
void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
enum intel_uc_fw_status status)
{
@@ -219,10 +219,9 @@ void intel_uc_fw_init_early(struct intel_uc_fw *uc_fw,
INTEL_UC_FIRMWARE_NOT_SUPPORTED);
}
-static void __force_fw_fetch_failures(struct intel_uc_fw *uc_fw,
- struct drm_i915_private *i915,
- int e)
+static void __force_fw_fetch_failures(struct intel_uc_fw *uc_fw, int e)
{
+ struct drm_i915_private *i915 = __uc_fw_to_gt(uc_fw)->i915;
bool user = e == -EINVAL;
if (i915_inject_probe_error(i915, e)) {
@@ -260,14 +259,14 @@ static void __force_fw_fetch_failures(struct intel_uc_fw *uc_fw,
/**
* intel_uc_fw_fetch - fetch uC firmware
* @uc_fw: uC firmware
- * @i915: device private
*
* Fetch uC firmware into GEM obj.
*
* Return: 0 on success, a negative errno code on failure.
*/
-int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw, struct drm_i915_private *i915)
+int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
{
+ struct drm_i915_private *i915 = __uc_fw_to_gt(uc_fw)->i915;
struct device *dev = i915->drm.dev;
struct drm_i915_gem_object *obj;
const struct firmware *fw = NULL;
@@ -282,8 +281,8 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw, struct drm_i915_private *i915)
if (err)
return err;
- __force_fw_fetch_failures(uc_fw, i915, -EINVAL);
- __force_fw_fetch_failures(uc_fw, i915, -ESTALE);
+ __force_fw_fetch_failures(uc_fw, -EINVAL);
+ __force_fw_fetch_failures(uc_fw, -ESTALE);
err = request_firmware(&fw, uc_fw->path, dev);
if (err)
@@ -390,8 +389,9 @@ fail:
return err;
}
-static u32 uc_fw_ggtt_offset(struct intel_uc_fw *uc_fw, struct i915_ggtt *ggtt)
+static u32 uc_fw_ggtt_offset(struct intel_uc_fw *uc_fw)
{
+ struct i915_ggtt *ggtt = __uc_fw_to_gt(uc_fw)->ggtt;
struct drm_mm_node *node = &ggtt->uc_fw;
GEM_BUG_ON(!drm_mm_node_allocated(node));
@@ -401,13 +401,12 @@ static u32 uc_fw_ggtt_offset(struct intel_uc_fw *uc_fw, struct i915_ggtt *ggtt)
return lower_32_bits(node->start);
}
-static void intel_uc_fw_ggtt_bind(struct intel_uc_fw *uc_fw,
- struct intel_gt *gt)
+static void uc_fw_bind_ggtt(struct intel_uc_fw *uc_fw)
{
struct drm_i915_gem_object *obj = uc_fw->obj;
- struct i915_ggtt *ggtt = gt->ggtt;
+ struct i915_ggtt *ggtt = __uc_fw_to_gt(uc_fw)->ggtt;
struct i915_vma dummy = {
- .node.start = uc_fw_ggtt_offset(uc_fw, ggtt),
+ .node.start = uc_fw_ggtt_offset(uc_fw),
.node.size = obj->base.size,
.pages = obj->mm.pages,
.vm = &ggtt->vm,
@@ -422,19 +421,18 @@ static void intel_uc_fw_ggtt_bind(struct intel_uc_fw *uc_fw,
ggtt->vm.insert_entries(&ggtt->vm, &dummy, I915_CACHE_NONE, 0);
}
-static void intel_uc_fw_ggtt_unbind(struct intel_uc_fw *uc_fw,
- struct intel_gt *gt)
+static void uc_fw_unbind_ggtt(struct intel_uc_fw *uc_fw)
{
struct drm_i915_gem_object *obj = uc_fw->obj;
- struct i915_ggtt *ggtt = gt->ggtt;
- u64 start = uc_fw_ggtt_offset(uc_fw, ggtt);
+ struct i915_ggtt *ggtt = __uc_fw_to_gt(uc_fw)->ggtt;
+ u64 start = uc_fw_ggtt_offset(uc_fw);
ggtt->vm.clear_range(&ggtt->vm, start, obj->base.size);
}
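With the gt parameter gone, bind and unbind now bracket only the DMA transfer in intel_uc_fw_upload() below: the firmware object is mapped into its reserved GGTT node just long enough to serve as the DMA source. The sequence, condensed into a hypothetical wrapper:

/* Sketch of the transient-mapping sequence around the DMA. */
static int xfer_bracketed(struct intel_uc_fw *uc_fw, u32 dst, u32 flags)
{
	int err;

	uc_fw_bind_ggtt(uc_fw);		/* map source into the GGTT */
	err = uc_fw_xfer(uc_fw, dst, flags);
	uc_fw_unbind_ggtt(uc_fw);	/* tear the mapping down again */

	return err;
}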
-static int uc_fw_xfer(struct intel_uc_fw *uc_fw, struct intel_gt *gt,
- u32 wopcm_offset, u32 dma_flags)
+static int uc_fw_xfer(struct intel_uc_fw *uc_fw, u32 dst_offset, u32 dma_flags)
{
+ struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
struct intel_uncore *uncore = gt->uncore;
u64 offset;
int ret;
@@ -446,13 +444,13 @@ static int uc_fw_xfer(struct intel_uc_fw *uc_fw, struct intel_gt *gt,
intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
/* Set the source address for the uCode */
- offset = uc_fw_ggtt_offset(uc_fw, gt->ggtt);
+ offset = uc_fw_ggtt_offset(uc_fw);
GEM_BUG_ON(upper_32_bits(offset) & 0xFFFF0000);
intel_uncore_write_fw(uncore, DMA_ADDR_0_LOW, lower_32_bits(offset));
intel_uncore_write_fw(uncore, DMA_ADDR_0_HIGH, upper_32_bits(offset));
/* Set the DMA destination */
- intel_uncore_write_fw(uncore, DMA_ADDR_1_LOW, wopcm_offset);
+ intel_uncore_write_fw(uncore, DMA_ADDR_1_LOW, dst_offset);
intel_uncore_write_fw(uncore, DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);
/*
@@ -484,17 +482,16 @@ static int uc_fw_xfer(struct intel_uc_fw *uc_fw, struct intel_gt *gt,
/**
* intel_uc_fw_upload - load uC firmware using custom loader
* @uc_fw: uC firmware
- * @gt: the intel_gt structure
- * @wopcm_offset: destination offset in wopcm
+ * @dst_offset: destination offset
* @dma_flags: flags for dma ctrl
*
* Loads uC firmware and updates internal flags.
*
* Return: 0 on success, non-zero on failure.
*/
-int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, struct intel_gt *gt,
- u32 wopcm_offset, u32 dma_flags)
+int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, u32 dst_offset, u32 dma_flags)
{
+ struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
int err;
/* make sure the status was cleared the last time we reset the uc */
@@ -508,9 +505,9 @@ int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, struct intel_gt *gt,
return -ENOEXEC;
/* Call custom loader */
- intel_uc_fw_ggtt_bind(uc_fw, gt);
- err = uc_fw_xfer(uc_fw, gt, wopcm_offset, dma_flags);
- intel_uc_fw_ggtt_unbind(uc_fw, gt);
+ uc_fw_bind_ggtt(uc_fw);
+ err = uc_fw_xfer(uc_fw, dst_offset, dma_flags);
+ uc_fw_unbind_ggtt(uc_fw);
if (err)
goto fail;
@@ -547,10 +544,7 @@ int intel_uc_fw_init(struct intel_uc_fw *uc_fw)
void intel_uc_fw_fini(struct intel_uc_fw *uc_fw)
{
- if (!intel_uc_fw_is_available(uc_fw))
- return;
-
- i915_gem_object_unpin_pages(uc_fw->obj);
+ intel_uc_fw_cleanup_fetch(uc_fw);
}
/**
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
index 7a0a5989afc9..1f30543d0d2d 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
@@ -229,10 +229,9 @@ static inline u32 intel_uc_fw_get_upload_size(struct intel_uc_fw *uc_fw)
void intel_uc_fw_init_early(struct intel_uc_fw *uc_fw,
enum intel_uc_fw_type type, bool supported,
enum intel_platform platform, u8 rev);
-int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw, struct drm_i915_private *i915);
+int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw);
void intel_uc_fw_cleanup_fetch(struct intel_uc_fw *uc_fw);
-int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, struct intel_gt *gt,
- u32 wopcm_offset, u32 dma_flags);
+int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, u32 offset, u32 dma_flags);
int intel_uc_fw_init(struct intel_uc_fw *uc_fw);
void intel_uc_fw_fini(struct intel_uc_fw *uc_fw);
size_t intel_uc_fw_copy_rsa(struct intel_uc_fw *uc_fw, void *dst, u32 max_len);
diff --git a/drivers/gpu/drm/i915/gt/uc/selftest_guc.c b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
deleted file mode 100644
index d8a80388bd31..000000000000
--- a/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
+++ /dev/null
@@ -1,299 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2017 Intel Corporation
- */
-
-#include "i915_selftest.h"
-#include "gem/i915_gem_pm.h"
-
-/* max doorbell number + negative test for each client type */
-#define ATTEMPTS (GUC_NUM_DOORBELLS + GUC_CLIENT_PRIORITY_NUM)
-
-static struct intel_guc_client *clients[ATTEMPTS];
-
-static bool available_dbs(struct intel_guc *guc, u32 priority)
-{
- unsigned long offset;
- unsigned long end;
- u16 id;
-
- /* first half is used for normal priority, second half for high */
- offset = 0;
- end = GUC_NUM_DOORBELLS / 2;
- if (priority <= GUC_CLIENT_PRIORITY_HIGH) {
- offset = end;
- end += offset;
- }
-
- id = find_next_zero_bit(guc->doorbell_bitmap, end, offset);
- if (id < end)
- return true;
-
- return false;
-}
-
-static int check_all_doorbells(struct intel_guc *guc)
-{
- u16 db_id;
-
- pr_info_once("Max number of doorbells: %d", GUC_NUM_DOORBELLS);
- for (db_id = 0; db_id < GUC_NUM_DOORBELLS; ++db_id) {
- if (!doorbell_ok(guc, db_id)) {
- pr_err("doorbell %d, not ok\n", db_id);
- return -EIO;
- }
- }
-
- return 0;
-}
-
-static int ring_doorbell_nop(struct intel_guc_client *client)
-{
- struct guc_process_desc *desc = __get_process_desc(client);
- int err;
-
- client->use_nop_wqi = true;
-
- spin_lock_irq(&client->wq_lock);
-
- guc_wq_item_append(client, 0, 0, 0, 0);
- guc_ring_doorbell(client);
-
- spin_unlock_irq(&client->wq_lock);
-
- client->use_nop_wqi = false;
-
- /* if there are no issues GuC will update the WQ head and keep the
- * WQ in active status
- */
- err = wait_for(READ_ONCE(desc->head) == READ_ONCE(desc->tail), 10);
- if (err) {
- pr_err("doorbell %u ring failed!\n", client->doorbell_id);
- return -EIO;
- }
-
- if (desc->wq_status != WQ_STATUS_ACTIVE) {
- pr_err("doorbell %u ring put WQ in bad state (%u)!\n",
- client->doorbell_id, desc->wq_status);
- return -EIO;
- }
-
- return 0;
-}
-
-/*
- * Basic client sanity check, handy to validate create_clients.
- */
-static int validate_client(struct intel_guc_client *client, int client_priority)
-{
- if (client->priority != client_priority ||
- client->doorbell_id == GUC_DOORBELL_INVALID)
- return -EINVAL;
- else
- return 0;
-}
-
-static bool client_doorbell_in_sync(struct intel_guc_client *client)
-{
- return !client || doorbell_ok(client->guc, client->doorbell_id);
-}
-
-/*
- * Check that we're able to synchronize guc_clients with their doorbells
- *
- * We're creating clients and reserving doorbells once, at module load. During
- * module lifetime, GuC, doorbell HW, and i915 state may go out of sync due to
- * GuC being reset. In other words - GuC clients are still around, but the
- * status of their doorbells may be incorrect. This is the reason behind
- * validating that the doorbells status expected by the driver matches what the
- * GuC/HW have.
- */
-static int igt_guc_clients(void *arg)
-{
- struct intel_gt *gt = arg;
- struct intel_guc *guc = &gt->uc.guc;
- intel_wakeref_t wakeref;
- int err = 0;
-
- GEM_BUG_ON(!HAS_GT_UC(gt->i915));
- wakeref = intel_runtime_pm_get(gt->uncore->rpm);
-
- err = check_all_doorbells(guc);
- if (err)
- goto unlock;
-
- /*
- * Get rid of clients created during driver load because the test will
- * recreate them.
- */
- guc_clients_disable(guc);
- guc_clients_destroy(guc);
- if (guc->execbuf_client) {
- pr_err("guc_clients_destroy lied!\n");
- err = -EINVAL;
- goto unlock;
- }
-
- err = guc_clients_create(guc);
- if (err) {
- pr_err("Failed to create clients\n");
- goto unlock;
- }
- GEM_BUG_ON(!guc->execbuf_client);
-
- err = validate_client(guc->execbuf_client,
- GUC_CLIENT_PRIORITY_KMD_NORMAL);
- if (err) {
- pr_err("execbug client validation failed\n");
- goto out;
- }
-
- /* the client should now have reserved a doorbell */
- if (!has_doorbell(guc->execbuf_client)) {
- pr_err("guc_clients_create didn't reserve doorbells\n");
- err = -EINVAL;
- goto out;
- }
-
- /* Now enable the clients */
- guc_clients_enable(guc);
-
- /* each client should now have received a doorbell */
- if (!client_doorbell_in_sync(guc->execbuf_client)) {
- pr_err("failed to initialize the doorbells\n");
- err = -EINVAL;
- goto out;
- }
-
- /*
- * Basic test - an attempt to reallocate a valid doorbell to the
- * client it is currently assigned should not cause a failure.
- */
- err = create_doorbell(guc->execbuf_client);
-
-out:
- /*
- * Leave a clean state for other tests; the driver always destroys the
- * clients during unload.
- */
- guc_clients_disable(guc);
- guc_clients_destroy(guc);
- guc_clients_create(guc);
- guc_clients_enable(guc);
-unlock:
- intel_runtime_pm_put(gt->uncore->rpm, wakeref);
- return err;
-}
-
-/*
- * Create as many clients as there are doorbells. Note that client(s)/
- * doorbell(s) are already created during driver load, but this test creates
- * its own and does not interact with the existing ones.
- */
-static int igt_guc_doorbells(void *arg)
-{
- struct intel_gt *gt = arg;
- struct intel_guc *guc = &gt->uc.guc;
- intel_wakeref_t wakeref;
- int i, err = 0;
- u16 db_id;
-
- GEM_BUG_ON(!HAS_GT_UC(gt->i915));
- wakeref = intel_runtime_pm_get(gt->uncore->rpm);
-
- err = check_all_doorbells(guc);
- if (err)
- goto unlock;
-
- for (i = 0; i < ATTEMPTS; i++) {
- clients[i] = guc_client_alloc(guc, i % GUC_CLIENT_PRIORITY_NUM);
-
- if (!clients[i]) {
- pr_err("[%d] No guc client\n", i);
- err = -EINVAL;
- goto out;
- }
-
- if (IS_ERR(clients[i])) {
- if (PTR_ERR(clients[i]) != -ENOSPC) {
- pr_err("[%d] unexpected error\n", i);
- err = PTR_ERR(clients[i]);
- goto out;
- }
-
- if (available_dbs(guc, i % GUC_CLIENT_PRIORITY_NUM)) {
- pr_err("[%d] non-db related alloc fail\n", i);
- err = -EINVAL;
- goto out;
- }
-
- /* expected, ran out of dbs for this client type */
- continue;
- }
-
- /*
- * The check below is only valid because we keep a doorbell
- * assigned during the whole life of the client.
- */
- if (clients[i]->stage_id >= GUC_NUM_DOORBELLS) {
- pr_err("[%d] more clients than doorbells (%d >= %d)\n",
- i, clients[i]->stage_id, GUC_NUM_DOORBELLS);
- err = -EINVAL;
- goto out;
- }
-
- err = validate_client(clients[i], i % GUC_CLIENT_PRIORITY_NUM);
- if (err) {
- pr_err("[%d] client_alloc sanity check failed!\n", i);
- err = -EINVAL;
- goto out;
- }
-
- db_id = clients[i]->doorbell_id;
-
- err = __guc_client_enable(clients[i]);
- if (err) {
- pr_err("[%d] Failed to create a doorbell\n", i);
- goto out;
- }
-
- /* doorbell id shouldn't change while we are holding the mutex */
- if (db_id != clients[i]->doorbell_id) {
- pr_err("[%d] doorbell id changed (%d != %d)\n",
- i, db_id, clients[i]->doorbell_id);
- err = -EINVAL;
- goto out;
- }
-
- err = check_all_doorbells(guc);
- if (err)
- goto out;
-
- err = ring_doorbell_nop(clients[i]);
- if (err)
- goto out;
- }
-
-out:
- for (i = 0; i < ATTEMPTS; i++)
- if (!IS_ERR_OR_NULL(clients[i])) {
- __guc_client_disable(clients[i]);
- guc_client_free(clients[i]);
- }
-unlock:
- intel_runtime_pm_put(gt->uncore->rpm, wakeref);
- return err;
-}
-
-int intel_guc_live_selftest(struct drm_i915_private *i915)
-{
- static const struct i915_subtest tests[] = {
- SUBTEST(igt_guc_clients),
- SUBTEST(igt_guc_doorbells),
- };
-
- if (!USES_GUC_SUBMISSION(i915))
- return 0;
-
- return intel_gt_live_subtests(tests, &i915->gt);
-}
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.h b/drivers/gpu/drm/i915/gvt/cmd_parser.h
index 286703643002..ab25d151932a 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.h
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.h
@@ -38,6 +38,10 @@
#define GVT_CMD_HASH_BITS 7
+struct intel_gvt;
+struct intel_shadow_wa_ctx;
+struct intel_vgpu_workload;
+
void intel_gvt_clean_cmd_parser(struct intel_gvt *gvt);
int intel_gvt_init_cmd_parser(struct intel_gvt *gvt);
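This hunk and the gvt header hunks that follow all make the same mechanical fix: each header gains an include of <linux/types.h> for the fixed-width types its prototypes use, plus forward declarations for structs it only handles by pointer, so the header compiles standalone in any include order. A minimal sketch of the pattern, using a hypothetical header name:

/* Hypothetical header illustrating the self-containment pattern applied below */
#ifndef _EXAMPLE_GVT_H_
#define _EXAMPLE_GVT_H_

#include <linux/types.h>	/* u32 and friends used in the prototypes */

struct intel_gvt;		/* opaque: only ever passed by pointer */

int example_init(struct intel_gvt *gvt, u32 flags);

#endif /* _EXAMPLE_GVT_H_ */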
diff --git a/drivers/gpu/drm/i915/gvt/display.h b/drivers/gpu/drm/i915/gvt/display.h
index a87f33e6a23c..b59b34046e1e 100644
--- a/drivers/gpu/drm/i915/gvt/display.h
+++ b/drivers/gpu/drm/i915/gvt/display.h
@@ -35,6 +35,11 @@
#ifndef _GVT_DISPLAY_H_
#define _GVT_DISPLAY_H_
+#include <linux/types.h>
+
+struct intel_gvt;
+struct intel_vgpu;
+
#define SBI_REG_MAX 20
#define DPCD_SIZE 0x700
diff --git a/drivers/gpu/drm/i915/gvt/edid.h b/drivers/gpu/drm/i915/gvt/edid.h
index f6dfc8b795ec..dfe0cbc6aad8 100644
--- a/drivers/gpu/drm/i915/gvt/edid.h
+++ b/drivers/gpu/drm/i915/gvt/edid.h
@@ -35,6 +35,10 @@
#ifndef _GVT_EDID_H_
#define _GVT_EDID_H_
+#include <linux/types.h>
+
+struct intel_vgpu;
+
#define EDID_SIZE 128
#define EDID_ADDR 0x50 /* Linux hvm EDID addr */
diff --git a/drivers/gpu/drm/i915/gvt/execlist.h b/drivers/gpu/drm/i915/gvt/execlist.h
index 5ccc2c695848..5c0c1fd30c83 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.h
+++ b/drivers/gpu/drm/i915/gvt/execlist.h
@@ -35,6 +35,8 @@
#ifndef _GVT_EXECLIST_H_
#define _GVT_EXECLIST_H_
+#include <linux/types.h>
+
struct execlist_ctx_descriptor_format {
union {
u32 ldw;
diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.h b/drivers/gpu/drm/i915/gvt/fb_decoder.h
index 60c155085029..67b6ede9e707 100644
--- a/drivers/gpu/drm/i915/gvt/fb_decoder.h
+++ b/drivers/gpu/drm/i915/gvt/fb_decoder.h
@@ -36,6 +36,8 @@
#ifndef _GVT_FB_DECODER_H_
#define _GVT_FB_DECODER_H_
+#include <linux/types.h>
+
#define _PLANE_CTL_FORMAT_SHIFT 24
#define _PLANE_CTL_TILED_SHIFT 10
#define _PIPE_V_SRCSZ_SHIFT 0
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 4b04af569c05..34cb404ba4b7 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -1282,7 +1282,7 @@ static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu,
return -EINVAL;
default:
GEM_BUG_ON(1);
- };
+ }
/* direct shadow */
ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn, page_size,
diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h
index 4862fb12778e..9599c0a762b2 100644
--- a/drivers/gpu/drm/i915/gvt/hypercall.h
+++ b/drivers/gpu/drm/i915/gvt/hypercall.h
@@ -33,6 +33,10 @@
#ifndef _GVT_HYPERCALL_H_
#define _GVT_HYPERCALL_H_
+#include <linux/types.h>
+
+struct device;
+
enum hypervisor_type {
INTEL_GVT_HYPERVISOR_XEN = 0,
INTEL_GVT_HYPERVISOR_KVM,
diff --git a/drivers/gpu/drm/i915/gvt/interrupt.h b/drivers/gpu/drm/i915/gvt/interrupt.h
index 5313fb1b33e1..fcd663811d37 100644
--- a/drivers/gpu/drm/i915/gvt/interrupt.h
+++ b/drivers/gpu/drm/i915/gvt/interrupt.h
@@ -32,6 +32,8 @@
#ifndef _GVT_INTERRUPT_H_
#define _GVT_INTERRUPT_H_
+#include <linux/types.h>
+
enum intel_gvt_event_type {
RCS_MI_USER_INTERRUPT = 0,
RCS_DEBUG,
@@ -135,6 +137,7 @@ enum intel_gvt_event_type {
struct intel_gvt_irq;
struct intel_gvt;
+struct intel_vgpu;
typedef void (*gvt_event_virt_handler_t)(struct intel_gvt_irq *irq,
enum intel_gvt_event_type event, struct intel_vgpu *vgpu);
diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h
index 5874f1cb4306..2e68f4b02c94 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.h
+++ b/drivers/gpu/drm/i915/gvt/mmio.h
@@ -36,6 +36,8 @@
#ifndef _GVT_MMIO_H_
#define _GVT_MMIO_H_
+#include <linux/types.h>
+
struct intel_gvt;
struct intel_vgpu;
diff --git a/drivers/gpu/drm/i915/gvt/page_track.h b/drivers/gpu/drm/i915/gvt/page_track.h
index fa607a71c3c0..f6eb7135583c 100644
--- a/drivers/gpu/drm/i915/gvt/page_track.h
+++ b/drivers/gpu/drm/i915/gvt/page_track.h
@@ -25,6 +25,9 @@
#ifndef _GVT_PAGE_TRACK_H_
#define _GVT_PAGE_TRACK_H_
+#include <linux/types.h>
+
+struct intel_vgpu;
struct intel_vgpu_page_track;
typedef int (*gvt_page_track_handler_t)(
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.h b/drivers/gpu/drm/i915/gvt/sched_policy.h
index 7b59e3e88b8b..3dacdad5f529 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.h
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.h
@@ -34,6 +34,9 @@
#ifndef __GVT_SCHED_POLICY__
#define __GVT_SCHED_POLICY__
+struct intel_gvt;
+struct intel_vgpu;
+
struct intel_gvt_sched_policy_ops {
int (*init)(struct intel_gvt *gvt);
void (*clean)(struct intel_gvt *gvt);
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 5b2a7d072ec9..b3299f88e24e 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -35,12 +35,12 @@
#include <linux/kthread.h>
-#include "gem/i915_gem_context.h"
#include "gem/i915_gem_pm.h"
#include "gt/intel_context.h"
#include "gt/intel_ring.h"
#include "i915_drv.h"
+#include "i915_gem_gtt.h"
#include "gvt.h"
#define RING_CTX_OFF(x) \
@@ -59,7 +59,7 @@ static void set_context_pdp_root_pointer(
static void update_shadow_pdps(struct intel_vgpu_workload *workload)
{
struct drm_i915_gem_object *ctx_obj =
- workload->req->hw_context->state->obj;
+ workload->req->context->state->obj;
struct execlist_ring_context *shadow_ring_context;
struct page *page;
@@ -130,7 +130,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
struct intel_gvt *gvt = vgpu->gvt;
int ring_id = workload->ring_id;
struct drm_i915_gem_object *ctx_obj =
- workload->req->hw_context->state->obj;
+ workload->req->context->state->obj;
struct execlist_ring_context *shadow_ring_context;
struct page *page;
void *dst;
@@ -205,9 +205,9 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
return 0;
}
-static inline bool is_gvt_request(struct i915_request *req)
+static inline bool is_gvt_request(struct i915_request *rq)
{
- return i915_gem_context_force_single_submission(req->gem_context);
+ return intel_context_force_single_submission(rq->context);
}
static void save_ring_hw_state(struct intel_vgpu *vgpu, int ring_id)
@@ -307,7 +307,7 @@ static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
u32 *cs;
int err;
- if (IS_GEN(req->i915, 9) && is_inhibit_context(req->hw_context))
+ if (IS_GEN(req->i915, 9) && is_inhibit_context(req->context))
intel_vgpu_restore_inhibit_context(vgpu, req);
/*
@@ -363,11 +363,10 @@ static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
}
static void set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
- struct i915_gem_context *ctx)
+ struct intel_context *ce)
{
struct intel_vgpu_mm *mm = workload->shadow_mm;
- struct i915_ppgtt *ppgtt =
- i915_vm_to_ppgtt(i915_gem_context_get_vm_rcu(ctx));
+ struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(ce->vm);
int i = 0;
if (mm->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
@@ -380,8 +379,6 @@ static void set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
px_dma(pd) = mm->ppgtt_mm.shadow_pdps[i];
}
}
-
- i915_vm_put(&ppgtt->vm);
}
static int
@@ -529,7 +526,7 @@ static void update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
container_of(wa_ctx, struct intel_vgpu_workload, wa_ctx);
struct i915_request *rq = workload->req;
struct execlist_ring_context *shadow_ring_context =
- (struct execlist_ring_context *)rq->hw_context->lrc_reg_state;
+ (struct execlist_ring_context *)rq->context->lrc_reg_state;
shadow_ring_context->bb_per_ctx_ptr.val =
(shadow_ring_context->bb_per_ctx_ptr.val &
@@ -628,7 +625,7 @@ static int prepare_workload(struct intel_vgpu_workload *workload)
update_shadow_pdps(workload);
- set_context_ppgtt_from_shadow(workload, s->shadow[ring]->gem_context);
+ set_context_ppgtt_from_shadow(workload, s->shadow[ring]);
ret = intel_vgpu_sync_oos_pages(workload->vgpu);
if (ret) {
@@ -787,7 +784,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
struct i915_request *rq = workload->req;
struct intel_vgpu *vgpu = workload->vgpu;
struct intel_gvt *gvt = vgpu->gvt;
- struct drm_i915_gem_object *ctx_obj = rq->hw_context->state->obj;
+ struct drm_i915_gem_object *ctx_obj = rq->context->state->obj;
struct execlist_ring_context *shadow_ring_context;
struct page *page;
void *src;
@@ -1223,18 +1220,14 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
struct drm_i915_private *i915 = vgpu->gvt->dev_priv;
struct intel_vgpu_submission *s = &vgpu->submission;
struct intel_engine_cs *engine;
- struct i915_gem_context *ctx;
struct i915_ppgtt *ppgtt;
enum intel_engine_id i;
int ret;
- ctx = i915_gem_context_create_kernel(i915, I915_PRIORITY_MAX);
- if (IS_ERR(ctx))
- return PTR_ERR(ctx);
+ ppgtt = i915_ppgtt_create(i915);
+ if (IS_ERR(ppgtt))
+ return PTR_ERR(ppgtt);
- i915_gem_context_set_force_single_submission(ctx);
-
- ppgtt = i915_vm_to_ppgtt(i915_gem_context_get_vm_rcu(ctx));
i915_context_ppgtt_root_save(s, ppgtt);
for_each_engine(engine, i915, i) {
@@ -1243,12 +1236,16 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
INIT_LIST_HEAD(&s->workload_q_head[i]);
s->shadow[i] = ERR_PTR(-EINVAL);
- ce = intel_context_create(ctx, engine);
+ ce = intel_context_create(engine);
if (IS_ERR(ce)) {
ret = PTR_ERR(ce);
goto out_shadow_ctx;
}
+ i915_vm_put(ce->vm);
+ ce->vm = i915_vm_get(&ppgtt->vm);
+ intel_context_set_single_submission(ce);
+
if (!USES_GUC_SUBMISSION(i915)) { /* Max ring buffer size */
const unsigned int ring_size = 512 * SZ_4K;
@@ -1281,7 +1278,6 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
bitmap_zero(s->tlb_handle_pending, I915_NUM_ENGINES);
i915_vm_put(&ppgtt->vm);
- i915_gem_context_put(ctx);
return 0;
out_shadow_ctx:
@@ -1294,7 +1290,6 @@ out_shadow_ctx:
intel_context_put(s->shadow[i]);
}
i915_vm_put(&ppgtt->vm);
- i915_gem_context_put(ctx);
return ret;
}
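With the dummy i915_gem_context gone, intel_vgpu_setup_submission() now builds its shadow contexts directly on the engines. A condensed sketch of the new flow, using only the calls visible in this hunk (error handling and ring-size setup elided):

/* Condensed sketch of the setup flow above; error paths elided */
ppgtt = i915_ppgtt_create(i915);		/* one PPGTT shared by all shadows */
for_each_engine(engine, i915, i) {
	ce = intel_context_create(engine);	/* no i915_gem_context required */

	i915_vm_put(ce->vm);			/* drop the default vm ...       */
	ce->vm = i915_vm_get(&ppgtt->vm);	/* ... and share the vGPU PPGTT  */
	intel_context_set_single_submission(ce);
}
i915_vm_put(&ppgtt->vm);			/* contexts hold their own references */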
diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index a19e7d89bc8a..cfe09964622b 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -6,6 +6,7 @@
#include <linux/debugobjects.h>
+#include "gt/intel_context.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_ring.h"
@@ -91,10 +92,9 @@ static void debug_active_init(struct i915_active *ref)
static void debug_active_activate(struct i915_active *ref)
{
- spin_lock_irq(&ref->tree_lock);
+ lockdep_assert_held(&ref->tree_lock);
if (!atomic_read(&ref->count)) /* before the first inc */
debug_object_activate(ref, &active_debug_desc);
- spin_unlock_irq(&ref->tree_lock);
}
static void debug_active_deactivate(struct i915_active *ref)
@@ -186,18 +186,33 @@ active_retire(struct i915_active *ref)
__active_retire(ref);
}
+static inline struct dma_fence **
+__active_fence_slot(struct i915_active_fence *active)
+{
+ return (struct dma_fence ** __force)&active->fence;
+}
+
+static inline bool
+active_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
+{
+ struct i915_active_fence *active =
+ container_of(cb, typeof(*active), cb);
+
+ return cmpxchg(__active_fence_slot(active), fence, NULL) == fence;
+}
+
static void
node_retire(struct dma_fence *fence, struct dma_fence_cb *cb)
{
- i915_active_fence_cb(fence, cb);
- active_retire(container_of(cb, struct active_node, base.cb)->ref);
+ if (active_fence_cb(fence, cb))
+ active_retire(container_of(cb, struct active_node, base.cb)->ref);
}
static void
excl_retire(struct dma_fence *fence, struct dma_fence_cb *cb)
{
- i915_active_fence_cb(fence, cb);
- active_retire(container_of(cb, struct i915_active, excl.cb));
+ if (active_fence_cb(fence, cb))
+ active_retire(container_of(cb, struct i915_active, excl.cb));
}
static struct i915_active_fence *
@@ -244,7 +259,7 @@ active_instance(struct i915_active *ref, struct intel_timeline *tl)
}
node = prealloc;
- __i915_active_fence_init(&node->base, &tl->mutex, NULL, node_retire);
+ __i915_active_fence_init(&node->base, NULL, node_retire);
node->ref = ref;
node->timeline = idx;
@@ -262,7 +277,8 @@ out:
void __i915_active_init(struct i915_active *ref,
int (*active)(struct i915_active *ref),
void (*retire)(struct i915_active *ref),
- struct lock_class_key *key)
+ struct lock_class_key *mkey,
+ struct lock_class_key *wkey)
{
unsigned long bits;
@@ -280,9 +296,12 @@ void __i915_active_init(struct i915_active *ref,
init_llist_head(&ref->preallocated_barriers);
atomic_set(&ref->count, 0);
- __mutex_init(&ref->mutex, "i915_active", key);
- __i915_active_fence_init(&ref->excl, &ref->mutex, NULL, excl_retire);
+ __mutex_init(&ref->mutex, "i915_active", mkey);
+ __i915_active_fence_init(&ref->excl, NULL, excl_retire);
INIT_WORK(&ref->work, active_work);
+#if IS_ENABLED(CONFIG_LOCKDEP)
+ lockdep_init_map(&ref->work.lockdep_map, "i915_active.work", wkey, 0);
+#endif
}
static bool ____active_del_barrier(struct i915_active *ref,
@@ -376,15 +395,8 @@ void i915_active_set_exclusive(struct i915_active *ref, struct dma_fence *f)
/* We expect the caller to manage the exclusive timeline ordering */
GEM_BUG_ON(i915_active_is_idle(ref));
- /*
- * As we don't know which mutex the caller is using, we told a small
- * lie to the debug code that it is using the i915_active.mutex;
- * and now we must stick to that lie.
- */
- mutex_acquire(&ref->mutex.dep_map, 0, 0, _THIS_IP_);
if (!__i915_active_fence_set(&ref->excl, f))
atomic_inc(&ref->count);
- mutex_release(&ref->mutex.dep_map, _THIS_IP_);
}
bool i915_active_acquire_if_busy(struct i915_active *ref)
@@ -407,8 +419,10 @@ int i915_active_acquire(struct i915_active *ref)
if (!atomic_read(&ref->count) && ref->active)
err = ref->active(ref);
if (!err) {
+ spin_lock_irq(&ref->tree_lock); /* vs __active_retire() */
debug_active_activate(ref);
atomic_inc(&ref->count);
+ spin_unlock_irq(&ref->tree_lock);
}
mutex_unlock(&ref->mutex);
@@ -461,6 +475,7 @@ int i915_active_wait(struct i915_active *ref)
if (wait_var_event_interruptible(ref, i915_active_is_idle(ref)))
return -EINTR;
+ flush_work(&ref->work);
return 0;
}
@@ -615,10 +630,6 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
goto unwind;
}
-#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
- node->base.lock =
- &engine->kernel_context->timeline->mutex;
-#endif
RCU_INIT_POINTER(node->base.fence, NULL);
node->base.cb.func = node_retire;
node->timeline = idx;
@@ -639,6 +650,7 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
node->base.cb.node.prev = (void *)engine;
atomic_inc(&ref->count);
}
+ GEM_BUG_ON(rcu_access_pointer(node->base.fence) != ERR_PTR(-EAGAIN));
GEM_BUG_ON(barrier_to_engine(node) != engine);
llist_add(barrier_to_ll(node), &ref->preallocated_barriers);
@@ -702,12 +714,18 @@ void i915_active_acquire_barrier(struct i915_active *ref)
}
}
+static struct dma_fence **ll_to_fence_slot(struct llist_node *node)
+{
+ return __active_fence_slot(&barrier_from_ll(node)->base);
+}
+
void i915_request_add_active_barriers(struct i915_request *rq)
{
struct intel_engine_cs *engine = rq->engine;
struct llist_node *node, *next;
unsigned long flags;
+ GEM_BUG_ON(!intel_context_is_barrier(rq->context));
GEM_BUG_ON(intel_engine_is_virtual(engine));
GEM_BUG_ON(i915_request_timeline(rq) != engine->kernel_context->timeline);
@@ -721,19 +739,13 @@ void i915_request_add_active_barriers(struct i915_request *rq)
*/
spin_lock_irqsave(&rq->lock, flags);
llist_for_each_safe(node, next, node) {
- RCU_INIT_POINTER(barrier_from_ll(node)->base.fence, &rq->fence);
- smp_wmb(); /* serialise with reuse_idle_barrier */
+ /* serialise with reuse_idle_barrier */
+ smp_store_mb(*ll_to_fence_slot(node), &rq->fence);
list_add_tail((struct list_head *)node, &rq->fence.cb_list);
}
spin_unlock_irqrestore(&rq->lock, flags);
}
-#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
-#define active_is_held(active) lockdep_is_held((active)->lock)
-#else
-#define active_is_held(active) true
-#endif
-
/*
* __i915_active_fence_set: Update the last active fence along its timeline
* @active: the active tracker
@@ -744,7 +756,7 @@ void i915_request_add_active_barriers(struct i915_request *rq)
* fence onto this one. Returns the previous fence (if not already completed),
* which the caller must ensure is executed before the new fence. To ensure
* that the order of fences within the timeline of the i915_active_fence is
- * maintained, it must be locked by the caller.
+ * understood, it should be locked by the caller.
*/
struct dma_fence *
__i915_active_fence_set(struct i915_active_fence *active,
@@ -753,34 +765,41 @@ __i915_active_fence_set(struct i915_active_fence *active,
struct dma_fence *prev;
unsigned long flags;
- /* NB: must be serialised by an outer timeline mutex (active->lock) */
- spin_lock_irqsave(fence->lock, flags);
+ if (fence == rcu_access_pointer(active->fence))
+ return fence;
+
GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags));
- prev = rcu_dereference_protected(active->fence, active_is_held(active));
+ /*
+ * Consider that we have two threads arriving (A and B), with
+ * C already resident as the active->fence.
+ *
+ * A does the xchg first, and so it sees C or NULL depending
+ * on the timing of the interrupt handler. If it is NULL, the
+ * previous fence must have been signaled and we know that
+ * we are first on the timeline. If it is still present,
+ * we acquire the lock on that fence and serialise with the interrupt
+ * handler, in the process removing it from any future interrupt
+ * callback. A will then wait on C before executing (if present).
+ *
+ * As B is second, it sees A as the previous fence and so waits for
+ * it to complete its transition and takes over the occupancy for
+ * itself -- remembering that it needs to wait on A before executing.
+ *
+ * Note the strong ordering of the timeline also provides consistent
+ * nesting rules for the fence->lock; the inner lock is always the
+ * older lock.
+ */
+ spin_lock_irqsave(fence->lock, flags);
+ prev = xchg(__active_fence_slot(active), fence);
if (prev) {
GEM_BUG_ON(prev == fence);
spin_lock_nested(prev->lock, SINGLE_DEPTH_NESTING);
__list_del_entry(&active->cb.node);
spin_unlock(prev->lock); /* serialise with prev->cb_list */
-
- /*
- * active->fence is reset by the callback from inside
- * interrupt context. We need to serialise our list
- * manipulation with the fence->lock to prevent the prev
- * being lost inside an interrupt (it can't be replaced as
- * no other caller is allowed to enter __i915_active_fence_set
- * as we hold the timeline lock). After serialising with
- * the callback, we need to double check which ran first,
- * our list_del() [decoupling prev from the callback] or
- * the callback...
- */
- prev = rcu_access_pointer(active->fence);
}
-
- rcu_assign_pointer(active->fence, fence);
+ GEM_BUG_ON(rcu_access_pointer(active->fence) != fence);
list_add_tail(&active->cb.node, &fence->cb_list);
-
spin_unlock_irqrestore(fence->lock, flags);
return prev;
@@ -792,10 +811,6 @@ int i915_active_fence_set(struct i915_active_fence *active,
struct dma_fence *fence;
int err = 0;
-#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
- lockdep_assert_held(active->lock);
-#endif
-
/* Must maintain timeline ordering wrt previous active requests */
rcu_read_lock();
fence = __i915_active_fence_set(active, &rq->fence);
@@ -812,7 +827,7 @@ int i915_active_fence_set(struct i915_active_fence *active,
void i915_active_noop(struct dma_fence *fence, struct dma_fence_cb *cb)
{
- i915_active_fence_cb(fence, cb);
+ active_fence_cb(fence, cb);
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
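The new active_fence_cb()/__i915_active_fence_set() pair replaces the old lock-protected pointer updates with atomic operations on the fence slot: the setter claims the slot with xchg() and learns which fence (if any) preceded it, while the callback clears the slot only if it wins a cmpxchg() against the fence it was armed for. A reduced, standalone illustration of that protocol, where the `slot` variable stands in for active->fence:

/* Reduced illustration of the slot protocol; `slot` stands in for active->fence */
static struct dma_fence *slot;

/* setter: publish the new fence and discover the previous occupant */
struct dma_fence *prev = xchg(&slot, fence); /* NULL: we are first on the timeline */

/* callback: retire only if the slot still holds the fence we armed */
if (cmpxchg(&slot, fence, NULL) == fence)
	active_retire(ref);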
diff --git a/drivers/gpu/drm/i915/i915_active.h b/drivers/gpu/drm/i915/i915_active.h
index 44859356ce97..b571f675c795 100644
--- a/drivers/gpu/drm/i915/i915_active.h
+++ b/drivers/gpu/drm/i915/i915_active.h
@@ -61,19 +61,15 @@ void i915_active_noop(struct dma_fence *fence, struct dma_fence_cb *cb);
*/
static inline void
__i915_active_fence_init(struct i915_active_fence *active,
- struct mutex *lock,
void *fence,
dma_fence_func_t fn)
{
RCU_INIT_POINTER(active->fence, fence);
active->cb.func = fn ?: i915_active_noop;
-#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
- active->lock = lock;
-#endif
}
-#define INIT_ACTIVE_FENCE(A, LOCK) \
- __i915_active_fence_init((A), (LOCK), NULL, NULL)
+#define INIT_ACTIVE_FENCE(A) \
+ __i915_active_fence_init((A), NULL, NULL)
struct dma_fence *
__i915_active_fence_set(struct i915_active_fence *active,
@@ -127,15 +123,6 @@ i915_active_fence_isset(const struct i915_active_fence *active)
return rcu_access_pointer(active->fence);
}
-static inline void
-i915_active_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
-{
- struct i915_active_fence *active =
- container_of(cb, typeof(*active), cb);
-
- RCU_INIT_POINTER(active->fence, NULL);
-}
-
/*
* GPU activity tracking
*
@@ -165,11 +152,15 @@ i915_active_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
void __i915_active_init(struct i915_active *ref,
int (*active)(struct i915_active *ref),
void (*retire)(struct i915_active *ref),
- struct lock_class_key *key);
+ struct lock_class_key *mkey,
+ struct lock_class_key *wkey);
+
+/* Specialise each class of i915_active to avoid impossible lockdep cycles. */
#define i915_active_init(ref, active, retire) do { \
- static struct lock_class_key __key; \
+ static struct lock_class_key __mkey; \
+ static struct lock_class_key __wkey; \
\
- __i915_active_init(ref, active, retire, &__key); \
+ __i915_active_init(ref, active, retire, &__mkey, &__wkey); \
} while (0)
int i915_active_ref(struct i915_active *ref,
@@ -215,5 +206,6 @@ void i915_active_acquire_barrier(struct i915_active *ref);
void i915_request_add_active_barriers(struct i915_request *rq);
void i915_active_print(struct i915_active *ref, struct drm_printer *m);
+void i915_active_unlock_wait(struct i915_active *ref);
#endif /* _I915_ACTIVE_H_ */
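Callers are unchanged: i915_active_init() still takes the same three arguments, but each call site now mints two static lock classes, one classifying ref->mutex and one classifying the lockdep map of the retirement worker, so distinct i915_active users cannot be conflated into a single lockdep class. A hypothetical caller:

/* Hypothetical caller; both lock classes are supplied by the macro itself */
static int my_active(struct i915_active *ref) { return 0; }
static void my_retire(struct i915_active *ref) { }

static void example_setup(struct i915_active *ref)
{
	i915_active_init(ref, my_active, my_retire);
}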
diff --git a/drivers/gpu/drm/i915/i915_active_types.h b/drivers/gpu/drm/i915/i915_active_types.h
index 96aed0ee700a..6360c3e4b765 100644
--- a/drivers/gpu/drm/i915/i915_active_types.h
+++ b/drivers/gpu/drm/i915/i915_active_types.h
@@ -20,21 +20,6 @@
struct i915_active_fence {
struct dma_fence __rcu *fence;
struct dma_fence_cb cb;
-#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
- /*
- * Incorporeal!
- *
- * Updates to the i915_active_request must be serialised under a lock
- * to ensure that the timeline is ordered. Normally, this is the
- * timeline->mutex, but another mutex may be used so long as it is
- * done so consistently.
- *
- * For lockdep tracking of the above, we store the lock we intend
- * to always use for updates of this i915_active_request during
- * construction and assert that is held on every update.
- */
- struct mutex *lock;
-#endif
};
struct active_node;
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index f24096e27bef..a0e437aa65b7 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -235,7 +235,7 @@ static const struct drm_i915_cmd_descriptor gen7_common_cmds[] = {
/*
* MI_BATCH_BUFFER_START requires some special handling. It's not
* really a 'skip' action but it doesn't seem like it's worth adding
- * a new action. See i915_parse_cmds().
+ * a new action. See intel_engine_cmd_parser().
*/
CMD( MI_BATCH_BUFFER_START, SMI, !F, 0xFF, S ),
};
@@ -731,7 +731,7 @@ static u32 gen7_render_get_cmd_length_mask(u32 cmd_header)
return 0xFF;
}
- DRM_DEBUG_DRIVER("CMD: Abnormal rcs cmd length! 0x%08X\n", cmd_header);
+ DRM_DEBUG("CMD: Abnormal rcs cmd length! 0x%08X\n", cmd_header);
return 0;
}
@@ -754,7 +754,7 @@ static u32 gen7_bsd_get_cmd_length_mask(u32 cmd_header)
return 0xFF;
}
- DRM_DEBUG_DRIVER("CMD: Abnormal bsd cmd length! 0x%08X\n", cmd_header);
+ DRM_DEBUG("CMD: Abnormal bsd cmd length! 0x%08X\n", cmd_header);
return 0;
}
@@ -767,7 +767,7 @@ static u32 gen7_blt_get_cmd_length_mask(u32 cmd_header)
else if (client == INSTR_BC_CLIENT)
return 0xFF;
- DRM_DEBUG_DRIVER("CMD: Abnormal blt cmd length! 0x%08X\n", cmd_header);
+ DRM_DEBUG("CMD: Abnormal blt cmd length! 0x%08X\n", cmd_header);
return 0;
}
@@ -778,7 +778,7 @@ static u32 gen9_blt_get_cmd_length_mask(u32 cmd_header)
if (client == INSTR_MI_CLIENT || client == INSTR_BC_CLIENT)
return 0xFF;
- DRM_DEBUG_DRIVER("CMD: Abnormal blt cmd length! 0x%08X\n", cmd_header);
+ DRM_DEBUG("CMD: Abnormal blt cmd length! 0x%08X\n", cmd_header);
return 0;
}
@@ -1127,79 +1127,71 @@ find_reg(const struct intel_engine_cs *engine, u32 addr)
/* Returns a vmap'd pointer to dst_obj, which the caller must unmap */
static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
struct drm_i915_gem_object *src_obj,
- u32 batch_start_offset,
- u32 batch_len,
- bool *needs_clflush_after)
+ u32 offset, u32 length)
{
- unsigned int src_needs_clflush;
- unsigned int dst_needs_clflush;
+ bool needs_clflush;
void *dst, *src;
int ret;
- ret = i915_gem_object_prepare_write(dst_obj, &dst_needs_clflush);
- if (ret)
- return ERR_PTR(ret);
-
dst = i915_gem_object_pin_map(dst_obj, I915_MAP_FORCE_WB);
- i915_gem_object_finish_access(dst_obj);
if (IS_ERR(dst))
return dst;
- ret = i915_gem_object_prepare_read(src_obj, &src_needs_clflush);
+ ret = i915_gem_object_pin_pages(src_obj);
if (ret) {
i915_gem_object_unpin_map(dst_obj);
return ERR_PTR(ret);
}
+ needs_clflush =
+ !(src_obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ);
+
src = ERR_PTR(-ENODEV);
- if (src_needs_clflush &&
- i915_can_memcpy_from_wc(NULL, batch_start_offset, 0)) {
+ if (needs_clflush && i915_has_memcpy_from_wc()) {
src = i915_gem_object_pin_map(src_obj, I915_MAP_WC);
if (!IS_ERR(src)) {
- i915_memcpy_from_wc(dst,
- src + batch_start_offset,
- ALIGN(batch_len, 16));
+ i915_unaligned_memcpy_from_wc(dst,
+ src + offset,
+ length);
i915_gem_object_unpin_map(src_obj);
}
}
if (IS_ERR(src)) {
void *ptr;
- int offset, n;
+ int x, n;
- offset = offset_in_page(batch_start_offset);
-
- /* We can avoid clflushing partial cachelines before the write
+ /*
+ * We can avoid clflushing partial cachelines before the write
 * if we only ever write full cache-lines. Since we know that
* both the source and destination are in multiples of
* PAGE_SIZE, we can simply round up to the next cacheline.
* We don't care about copying too much here as we only
* validate up to the end of the batch.
*/
- if (dst_needs_clflush & CLFLUSH_BEFORE)
- batch_len = roundup(batch_len,
- boot_cpu_data.x86_clflush_size);
+ if (!(dst_obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
+ length = round_up(length,
+ boot_cpu_data.x86_clflush_size);
ptr = dst;
- for (n = batch_start_offset >> PAGE_SHIFT; batch_len; n++) {
- int len = min_t(int, batch_len, PAGE_SIZE - offset);
+ x = offset_in_page(offset);
+ for (n = offset >> PAGE_SHIFT; length; n++) {
+ int len = min_t(int, length, PAGE_SIZE - x);
src = kmap_atomic(i915_gem_object_get_page(src_obj, n));
- if (src_needs_clflush)
- drm_clflush_virt_range(src + offset, len);
- memcpy(ptr, src + offset, len);
+ if (needs_clflush)
+ drm_clflush_virt_range(src + x, len);
+ memcpy(ptr, src + x, len);
kunmap_atomic(src);
ptr += len;
- batch_len -= len;
- offset = 0;
+ length -= len;
+ x = 0;
}
}
- i915_gem_object_finish_access(src_obj);
+ i915_gem_object_unpin_pages(src_obj);
/* dst_obj is returned with vmap pinned */
- *needs_clflush_after = dst_needs_clflush & CLFLUSH_AFTER;
-
return dst;
}
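copy_batch() now derives its flushing requirements from the object's cache_coherent flags instead of the prepare_read()/prepare_write() machinery, and in essence picks between two copy strategies. A simplified sketch of the decision made above:

/* Simplified sketch of the copy strategy chosen by copy_batch() above */
needs_clflush =
	!(src_obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ);

if (needs_clflush && i915_has_memcpy_from_wc()) {
	/* fast path: stream from a WC mapping of the source */
	i915_unaligned_memcpy_from_wc(dst, src + offset, length);
} else {
	/* fallback: kmap page by page, clflush stale lines, then memcpy */
}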
@@ -1211,7 +1203,7 @@ static bool check_cmd(const struct intel_engine_cs *engine,
return true;
if (desc->flags & CMD_DESC_REJECT) {
- DRM_DEBUG_DRIVER("CMD: Rejected command: 0x%08X\n", *cmd);
+ DRM_DEBUG("CMD: Rejected command: 0x%08X\n", *cmd);
return false;
}
@@ -1231,8 +1223,8 @@ static bool check_cmd(const struct intel_engine_cs *engine,
find_reg(engine, reg_addr);
if (!reg) {
- DRM_DEBUG_DRIVER("CMD: Rejected register 0x%08X in command: 0x%08X (%s)\n",
- reg_addr, *cmd, engine->name);
+ DRM_DEBUG("CMD: Rejected register 0x%08X in command: 0x%08X (%s)\n",
+ reg_addr, *cmd, engine->name);
return false;
}
@@ -1242,22 +1234,22 @@ static bool check_cmd(const struct intel_engine_cs *engine,
*/
if (reg->mask) {
if (desc->cmd.value == MI_LOAD_REGISTER_MEM) {
- DRM_DEBUG_DRIVER("CMD: Rejected LRM to masked register 0x%08X\n",
- reg_addr);
+ DRM_DEBUG("CMD: Rejected LRM to masked register 0x%08X\n",
+ reg_addr);
return false;
}
if (desc->cmd.value == MI_LOAD_REGISTER_REG) {
- DRM_DEBUG_DRIVER("CMD: Rejected LRR to masked register 0x%08X\n",
- reg_addr);
+ DRM_DEBUG("CMD: Rejected LRR to masked register 0x%08X\n",
+ reg_addr);
return false;
}
if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1) &&
(offset + 2 > length ||
(cmd[offset + 1] & reg->mask) != reg->value)) {
- DRM_DEBUG_DRIVER("CMD: Rejected LRI to masked register 0x%08X\n",
- reg_addr);
+ DRM_DEBUG("CMD: Rejected LRI to masked register 0x%08X\n",
+ reg_addr);
return false;
}
}
@@ -1284,8 +1276,8 @@ static bool check_cmd(const struct intel_engine_cs *engine,
}
if (desc->bits[i].offset >= length) {
- DRM_DEBUG_DRIVER("CMD: Rejected command 0x%08X, too short to check bitmask (%s)\n",
- *cmd, engine->name);
+ DRM_DEBUG("CMD: Rejected command 0x%08X, too short to check bitmask (%s)\n",
+ *cmd, engine->name);
return false;
}
@@ -1293,11 +1285,11 @@ static bool check_cmd(const struct intel_engine_cs *engine,
desc->bits[i].mask;
if (dword != desc->bits[i].expected) {
- DRM_DEBUG_DRIVER("CMD: Rejected command 0x%08X for bitmask 0x%08X (exp=0x%08X act=0x%08X) (%s)\n",
- *cmd,
- desc->bits[i].mask,
- desc->bits[i].expected,
- dword, engine->name);
+ DRM_DEBUG("CMD: Rejected command 0x%08X for bitmask 0x%08X (exp=0x%08X act=0x%08X) (%s)\n",
+ *cmd,
+ desc->bits[i].mask,
+ desc->bits[i].expected,
+ dword, engine->name);
return false;
}
}
@@ -1306,17 +1298,17 @@ static bool check_cmd(const struct intel_engine_cs *engine,
return true;
}
-static int check_bbstart(const struct i915_gem_context *ctx,
- u32 *cmd, u32 offset, u32 length,
- u32 batch_len,
- u64 batch_start,
- u64 shadow_batch_start)
+static int check_bbstart(u32 *cmd, u32 offset, u32 length,
+ u32 batch_length,
+ u64 batch_addr,
+ u64 shadow_addr,
+ const unsigned long *jump_whitelist)
{
u64 jump_offset, jump_target;
u32 target_cmd_offset, target_cmd_index;
/* For igt compatibility on older platforms */
- if (CMDPARSER_USES_GGTT(ctx->i915)) {
+ if (!jump_whitelist) {
DRM_DEBUG("CMD: Rejecting BB_START for ggtt based submission\n");
return -EACCES;
}
@@ -1327,14 +1319,14 @@ static int check_bbstart(const struct i915_gem_context *ctx,
return -EINVAL;
}
- jump_target = *(u64*)(cmd+1);
- jump_offset = jump_target - batch_start;
+ jump_target = *(u64 *)(cmd + 1);
+ jump_offset = jump_target - batch_addr;
/*
* Any underflow of jump_target is guaranteed to be outside the range
* of a u32, so >= test catches both too large and too small
*/
- if (jump_offset >= batch_len) {
+ if (jump_offset >= batch_length) {
DRM_DEBUG("CMD: BB_START to 0x%llx jumps out of BB\n",
jump_target);
return -EINVAL;
@@ -1342,20 +1334,20 @@ static int check_bbstart(const struct i915_gem_context *ctx,
/*
* This cannot overflow a u32 because we already checked jump_offset
- * is within the BB, and the batch_len is a u32
+ * is within the BB, and the batch_length is a u32
*/
target_cmd_offset = lower_32_bits(jump_offset);
target_cmd_index = target_cmd_offset / sizeof(u32);
- *(u64*)(cmd + 1) = shadow_batch_start + target_cmd_offset;
+ *(u64 *)(cmd + 1) = shadow_addr + target_cmd_offset;
if (target_cmd_index == offset)
return 0;
- if (ctx->jump_whitelist_cmds <= target_cmd_index) {
- DRM_DEBUG("CMD: Rejecting BB_START - truncated whitelist array\n");
- return -EINVAL;
- } else if (!test_bit(target_cmd_index, ctx->jump_whitelist)) {
+ if (IS_ERR(jump_whitelist))
+ return PTR_ERR(jump_whitelist);
+
+ if (!test_bit(target_cmd_index, jump_whitelist)) {
DRM_DEBUG("CMD: BB_START to 0x%llx not a previously executed cmd\n",
jump_target);
return -EINVAL;
@@ -1364,54 +1356,40 @@ static int check_bbstart(const struct i915_gem_context *ctx,
return 0;
}
-static void init_whitelist(struct i915_gem_context *ctx, u32 batch_len)
+static unsigned long *alloc_whitelist(u32 batch_length)
{
- const u32 batch_cmds = DIV_ROUND_UP(batch_len, sizeof(u32));
- const u32 exact_size = BITS_TO_LONGS(batch_cmds);
- u32 next_size = BITS_TO_LONGS(roundup_pow_of_two(batch_cmds));
- unsigned long *next_whitelist;
+ unsigned long *jmp;
- if (CMDPARSER_USES_GGTT(ctx->i915))
- return;
-
- if (batch_cmds <= ctx->jump_whitelist_cmds) {
- bitmap_zero(ctx->jump_whitelist, batch_cmds);
- return;
- }
-
-again:
- next_whitelist = kcalloc(next_size, sizeof(long), GFP_KERNEL);
- if (next_whitelist) {
- kfree(ctx->jump_whitelist);
- ctx->jump_whitelist = next_whitelist;
- ctx->jump_whitelist_cmds =
- next_size * BITS_PER_BYTE * sizeof(long);
- return;
- }
-
- if (next_size > exact_size) {
- next_size = exact_size;
- goto again;
- }
+ /*
+ * We expect batch_length to be less than 256KiB for known users,
+ * i.e. we need at most an 8KiB bitmap allocation which should be
+ * reasonably cheap due to kmalloc caches.
+ */
- DRM_DEBUG("CMD: Failed to extend whitelist. BB_START may be disallowed\n");
- bitmap_zero(ctx->jump_whitelist, ctx->jump_whitelist_cmds);
+ /* Prefer to report transient allocation failure rather than hit oom */
+ jmp = bitmap_zalloc(DIV_ROUND_UP(batch_length, sizeof(u32)),
+ GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
+ if (!jmp)
+ return ERR_PTR(-ENOMEM);
- return;
+ return jmp;
}
#define LENGTH_BIAS 2
+static bool shadow_needs_clflush(struct drm_i915_gem_object *obj)
+{
+ return !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE);
+}
+
/**
- * i915_parse_cmds() - parse a submitted batch buffer for privilege violations
- * @ctx: the context in which the batch is to execute
+ * intel_engine_cmd_parser() - parse a batch buffer for privilege violations
* @engine: the engine on which the batch is to execute
- * @batch_obj: the batch buffer in question
- * @batch_start: Canonical base address of batch
- * @batch_start_offset: byte offset in the batch at which execution starts
- * @batch_len: length of the commands in batch_obj
- * @shadow_batch_obj: copy of the batch buffer in question
- * @shadow_batch_start: Canonical base address of shadow_batch_obj
+ * @batch: the batch buffer in question
+ * @batch_offset: byte offset in the batch at which execution starts
+ * @batch_length: length of the commands in batch_obj
+ * @shadow: validated copy of the batch buffer in question
+ * @trampoline: whether to emit a conditional trampoline at the end of the batch
*
* Parses the specified batch buffer looking for privilege violations as
* described in the overview.
@@ -1419,38 +1397,46 @@ again:
* Return: non-zero if the parser finds violations or otherwise fails; -EACCES
* if the batch appears legal but should use hardware parsing
*/
-
-int intel_engine_cmd_parser(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine,
- struct drm_i915_gem_object *batch_obj,
- u64 batch_start,
- u32 batch_start_offset,
- u32 batch_len,
- struct drm_i915_gem_object *shadow_batch_obj,
- u64 shadow_batch_start)
+int intel_engine_cmd_parser(struct intel_engine_cs *engine,
+ struct i915_vma *batch,
+ u32 batch_offset,
+ u32 batch_length,
+ struct i915_vma *shadow,
+ bool trampoline)
{
u32 *cmd, *batch_end, offset = 0;
struct drm_i915_cmd_descriptor default_desc = noop_desc;
const struct drm_i915_cmd_descriptor *desc = &default_desc;
- bool needs_clflush_after = false;
+ unsigned long *jump_whitelist;
+ u64 batch_addr, shadow_addr;
int ret = 0;
- cmd = copy_batch(shadow_batch_obj, batch_obj,
- batch_start_offset, batch_len,
- &needs_clflush_after);
+ GEM_BUG_ON(!IS_ALIGNED(batch_offset, sizeof(*cmd)));
+ GEM_BUG_ON(!IS_ALIGNED(batch_length, sizeof(*cmd)));
+ GEM_BUG_ON(range_overflows_t(u64, batch_offset, batch_length,
+ batch->size));
+ GEM_BUG_ON(!batch_length);
+
+ cmd = copy_batch(shadow->obj, batch->obj, batch_offset, batch_length);
if (IS_ERR(cmd)) {
- DRM_DEBUG_DRIVER("CMD: Failed to copy batch\n");
+ DRM_DEBUG("CMD: Failed to copy batch\n");
return PTR_ERR(cmd);
}
- init_whitelist(ctx, batch_len);
+ jump_whitelist = NULL;
+ if (!trampoline)
+ /* Defer failure until attempted use */
+ jump_whitelist = alloc_whitelist(batch_length);
+
+ shadow_addr = gen8_canonical_addr(shadow->node.start);
+ batch_addr = gen8_canonical_addr(batch->node.start + batch_offset);
/*
* We use the batch length as size because the shadow object is as
* large or larger and copy_batch() will write MI_NOPs to the extra
* space. Parsing should be faster in some cases this way.
*/
- batch_end = cmd + (batch_len / sizeof(*batch_end));
+ batch_end = cmd + batch_length / sizeof(*batch_end);
do {
u32 length;
@@ -1459,61 +1445,99 @@ int intel_engine_cmd_parser(struct i915_gem_context *ctx,
desc = find_cmd(engine, *cmd, desc, &default_desc);
if (!desc) {
- DRM_DEBUG_DRIVER("CMD: Unrecognized command: 0x%08X\n",
- *cmd);
+ DRM_DEBUG("CMD: Unrecognized command: 0x%08X\n", *cmd);
ret = -EINVAL;
- goto err;
+ break;
}
if (desc->flags & CMD_DESC_FIXED)
length = desc->length.fixed;
else
- length = ((*cmd & desc->length.mask) + LENGTH_BIAS);
+ length = (*cmd & desc->length.mask) + LENGTH_BIAS;
if ((batch_end - cmd) < length) {
- DRM_DEBUG_DRIVER("CMD: Command length exceeds batch length: 0x%08X length=%u batchlen=%td\n",
- *cmd,
- length,
- batch_end - cmd);
+ DRM_DEBUG("CMD: Command length exceeds batch length: 0x%08X length=%u batchlen=%td\n",
+ *cmd,
+ length,
+ batch_end - cmd);
ret = -EINVAL;
- goto err;
+ break;
}
if (!check_cmd(engine, desc, cmd, length)) {
ret = -EACCES;
- goto err;
+ break;
}
if (desc->cmd.value == MI_BATCH_BUFFER_START) {
- ret = check_bbstart(ctx, cmd, offset, length,
- batch_len, batch_start,
- shadow_batch_start);
-
- if (ret)
- goto err;
+ ret = check_bbstart(cmd, offset, length, batch_length,
+ batch_addr, shadow_addr,
+ jump_whitelist);
break;
}
- if (ctx->jump_whitelist_cmds > offset)
- set_bit(offset, ctx->jump_whitelist);
+ if (!IS_ERR_OR_NULL(jump_whitelist))
+ __set_bit(offset, jump_whitelist);
cmd += length;
offset += length;
if (cmd >= batch_end) {
- DRM_DEBUG_DRIVER("CMD: Got to the end of the buffer w/o a BBE cmd!\n");
+ DRM_DEBUG("CMD: Got to the end of the buffer w/o a BBE cmd!\n");
ret = -EINVAL;
- goto err;
+ break;
}
} while (1);
- if (needs_clflush_after) {
- void *ptr = page_mask_bits(shadow_batch_obj->mm.mapping);
+ if (trampoline) {
+ /*
+ * With the trampoline, the shadow is executed twice.
+ *
+ * 1 - starting at offset 0, in privileged mode
+ * 2 - starting at offset batch_length, as non-privileged
+ *
+ * Only if the batch is valid and safe to execute, do we
+ * allow the first privileged execution to proceed. If not,
+ * we terminate the first batch and use the second batchbuffer
+ * entry to chain to the original unsafe non-privileged batch,
+ * leaving it to the HW to validate.
+ */
+ *batch_end = MI_BATCH_BUFFER_END;
+
+ if (ret) {
+ /* Batch unsafe to execute with privileges, cancel! */
+ cmd = page_mask_bits(shadow->obj->mm.mapping);
+ *cmd = MI_BATCH_BUFFER_END;
+
+ /* If batch is unsafe but valid, jump to the original */
+ if (ret == -EACCES) {
+ unsigned int flags;
+
+ flags = MI_BATCH_NON_SECURE_I965;
+ if (IS_HASWELL(engine->i915))
+ flags = MI_BATCH_NON_SECURE_HSW;
+
+ GEM_BUG_ON(!IS_GEN_RANGE(engine->i915, 6, 7));
+ __gen6_emit_bb_start(batch_end,
+ batch_addr,
+ flags);
+
+ ret = 0; /* allow execution */
+ }
+ }
+
+ if (shadow_needs_clflush(shadow->obj))
+ drm_clflush_virt_range(batch_end, 8);
+ }
+
+ if (shadow_needs_clflush(shadow->obj)) {
+ void *ptr = page_mask_bits(shadow->obj->mm.mapping);
drm_clflush_virt_range(ptr, (void *)(cmd + 1) - ptr);
}
-err:
- i915_gem_object_unpin_map(shadow_batch_obj);
+ if (!IS_ERR_OR_NULL(jump_whitelist))
+ kfree(jump_whitelist);
+ i915_gem_object_unpin_map(shadow->obj);
return ret;
}
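In trampoline mode the shadow batch doubles as its own fallback: the parser always terminates the validated copy with MI_BATCH_BUFFER_END, and only when parsing returned -EACCES (a legal batch, but one the parser cannot vouch for) does it cancel the privileged pass and chain to the original, unprivileged batch for the hardware to validate. Condensed from the handling above:

/* Condensed from the trampoline handling above (flush handling omitted) */
*batch_end = MI_BATCH_BUFFER_END;	/* terminate the privileged pass  */

if (ret == -EACCES) {
	cmd = page_mask_bits(shadow->obj->mm.mapping);
	*cmd = MI_BATCH_BUFFER_END;	/* cancel privileged execution    */

	__gen6_emit_bb_start(batch_end, batch_addr, flags); /* chain out */
	ret = 0;			/* let the HW-checked copy run    */
}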
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 8016484ebcd3..d28468eaed57 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -61,24 +61,14 @@ static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
static int i915_capabilities(struct seq_file *m, void *data)
{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- const struct intel_device_info *info = INTEL_INFO(dev_priv);
+ struct drm_i915_private *i915 = node_to_i915(m->private);
struct drm_printer p = drm_seq_file_printer(m);
- const char *msg;
- seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
- seq_printf(m, "platform: %s\n", intel_platform_name(info->platform));
- seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));
+ seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(i915));
- msg = "n/a";
-#ifdef CONFIG_INTEL_IOMMU
- msg = enableddisabled(intel_iommu_gfx_mapped);
-#endif
- seq_printf(m, "iommu: %s\n", msg);
-
- intel_device_info_dump_flags(info, &p);
- intel_device_info_dump_runtime(RUNTIME_INFO(dev_priv), &p);
- intel_driver_caps_print(&dev_priv->caps, &p);
+ intel_device_info_print_static(INTEL_INFO(i915), &p);
+ intel_device_info_print_runtime(RUNTIME_INFO(i915), &p);
+ intel_driver_caps_print(&i915->caps, &p);
kernel_param_lock(THIS_MODULE);
i915_params_dump(&i915_modparams, &p);
@@ -891,7 +881,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
- cagf = intel_gpu_freq(rps, intel_get_cagf(rps, rpstat));
+ cagf = intel_rps_read_actual_frequency(rps);
intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
@@ -1633,21 +1623,6 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct intel_rps *rps = &dev_priv->gt.rps;
- u32 act_freq = rps->cur_freq;
- intel_wakeref_t wakeref;
-
- with_intel_runtime_pm_if_in_use(&dev_priv->runtime_pm, wakeref) {
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- vlv_punit_get(dev_priv);
- act_freq = vlv_punit_read(dev_priv,
- PUNIT_REG_GPU_FREQ_STS);
- vlv_punit_put(dev_priv);
- act_freq = (act_freq >> 8) & 0xff;
- } else {
- act_freq = intel_get_cagf(rps,
- I915_READ(GEN6_RPSTAT1));
- }
- }
seq_printf(m, "RPS enabled? %d\n", rps->enabled);
seq_printf(m, "GPU busy? %s\n", yesno(dev_priv->gt.awake));
@@ -1656,7 +1631,7 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
seq_printf(m, "Frequency requested %d, actual %d\n",
intel_gpu_freq(rps, rps->cur_freq),
- intel_gpu_freq(rps, act_freq));
+ intel_rps_read_actual_frequency(rps));
seq_printf(m, " min hard:%d, soft:%d; max soft:%d, hard:%d\n",
intel_gpu_freq(rps, rps->min_freq),
intel_gpu_freq(rps, rps->min_freq_softlimit),
@@ -1802,30 +1777,12 @@ static void i915_guc_log_info(struct seq_file *m,
static int i915_guc_info(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
- const struct intel_guc *guc = &dev_priv->gt.uc.guc;
- struct intel_guc_client *client = guc->execbuf_client;
if (!USES_GUC(dev_priv))
return -ENODEV;
i915_guc_log_info(m, dev_priv);
- if (!USES_GUC_SUBMISSION(dev_priv))
- return 0;
-
- GEM_BUG_ON(!guc->execbuf_client);
-
- seq_printf(m, "\nDoorbell map:\n");
- seq_printf(m, "\t%*pb\n", GUC_NUM_DOORBELLS, guc->doorbell_bitmap);
- seq_printf(m, "Doorbell next cacheline: 0x%x\n", guc->db_cacheline);
-
- seq_printf(m, "\nGuC execbuf client @ %p:\n", client);
- seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n",
- client->priority,
- client->stage_id,
- client->proc_desc_offset);
- seq_printf(m, "\tDoorbell id %d, offset: 0x%lx\n",
- client->doorbell_id, client->doorbell_offset);
/* Add more as required ... */
return 0;
@@ -2367,7 +2324,7 @@ out:
}
static void intel_seq_print_mode(struct seq_file *m, int tabs,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
int i;
@@ -2378,59 +2335,35 @@ static void intel_seq_print_mode(struct seq_file *m, int tabs,
}
static void intel_encoder_info(struct seq_file *m,
- struct intel_crtc *intel_crtc,
- struct intel_encoder *intel_encoder)
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_device *dev = &dev_priv->drm;
- struct drm_crtc *crtc = &intel_crtc->base;
- struct intel_connector *intel_connector;
- struct drm_encoder *encoder;
-
- encoder = &intel_encoder->base;
- seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
- encoder->base.id, encoder->name);
- for_each_connector_on_encoder(dev, encoder, intel_connector) {
- struct drm_connector *connector = &intel_connector->base;
- seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
- connector->base.id,
- connector->name,
- drm_get_connector_status_name(connector->status));
- if (connector->status == connector_status_connected) {
- struct drm_display_mode *mode = &crtc->mode;
- seq_printf(m, ", mode:\n");
- intel_seq_print_mode(m, 2, mode);
- } else {
- seq_putc(m, '\n');
- }
- }
-}
+ struct drm_connector_list_iter conn_iter;
+ struct drm_connector *connector;
-static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_device *dev = &dev_priv->drm;
- struct drm_crtc *crtc = &intel_crtc->base;
- struct intel_encoder *intel_encoder;
- struct drm_plane_state *plane_state = crtc->primary->state;
- struct drm_framebuffer *fb = plane_state->fb;
+ seq_printf(m, "\t[ENCODER:%d:%s]: connectors:\n",
+ encoder->base.base.id, encoder->base.name);
- if (fb)
- seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
- fb->base.id, plane_state->src_x >> 16,
- plane_state->src_y >> 16, fb->width, fb->height);
- else
- seq_puts(m, "\tprimary plane disabled\n");
- for_each_encoder_on_crtc(dev, crtc, intel_encoder)
- intel_encoder_info(m, intel_crtc, intel_encoder);
+ drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ const struct drm_connector_state *conn_state =
+ connector->state;
+
+ if (conn_state->best_encoder != &encoder->base)
+ continue;
+
+ seq_printf(m, "\t\t[CONNECTOR:%d:%s]\n",
+ connector->base.id, connector->name);
+ }
+ drm_connector_list_iter_end(&conn_iter);
}
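Rather than chasing encoder back-pointers, the dump now walks the connector list with the iterator API and matches on the atomic state's best_encoder. The minimal shape of that loop:

/* Minimal shape of the iterator loop used by intel_encoder_info() above */
struct drm_connector_list_iter conn_iter;
struct drm_connector *connector;

drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
	if (connector->state->best_encoder != &encoder->base)
		continue;

	/* ... report this connector ... */
}
drm_connector_list_iter_end(&conn_iter);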
static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
{
- struct drm_display_mode *mode = panel->fixed_mode;
+ const struct drm_display_mode *mode = panel->fixed_mode;
- seq_printf(m, "\tfixed mode:\n");
- intel_seq_print_mode(m, 2, mode);
+ seq_printf(m, "\tfixed mode: " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
}
static void intel_hdcp_info(struct seq_file *m,
@@ -2508,10 +2441,12 @@ static void intel_connector_info(struct seq_file *m,
struct drm_connector *connector)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
- struct intel_encoder *intel_encoder = intel_connector->encoder;
- struct drm_display_mode *mode;
+ const struct drm_connector_state *conn_state = connector->state;
+ struct intel_encoder *encoder =
+ to_intel_encoder(conn_state->best_encoder);
+ const struct drm_display_mode *mode;
- seq_printf(m, "connector %d: type %s, status: %s\n",
+ seq_printf(m, "[CONNECTOR:%d:%s]: status: %s\n",
connector->base.id, connector->name,
drm_get_connector_status_name(connector->status));
@@ -2525,24 +2460,24 @@ static void intel_connector_info(struct seq_file *m,
drm_get_subpixel_order_name(connector->display_info.subpixel_order));
seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev);
- if (!intel_encoder)
+ if (!encoder)
return;
switch (connector->connector_type) {
case DRM_MODE_CONNECTOR_DisplayPort:
case DRM_MODE_CONNECTOR_eDP:
- if (intel_encoder->type == INTEL_OUTPUT_DP_MST)
+ if (encoder->type == INTEL_OUTPUT_DP_MST)
intel_dp_mst_info(m, intel_connector);
else
intel_dp_info(m, intel_connector);
break;
case DRM_MODE_CONNECTOR_LVDS:
- if (intel_encoder->type == INTEL_OUTPUT_LVDS)
+ if (encoder->type == INTEL_OUTPUT_LVDS)
intel_lvds_info(m, intel_connector);
break;
case DRM_MODE_CONNECTOR_HDMIA:
- if (intel_encoder->type == INTEL_OUTPUT_HDMI ||
- intel_encoder->type == INTEL_OUTPUT_DDI)
+ if (encoder->type == INTEL_OUTPUT_HDMI ||
+ encoder->type == INTEL_OUTPUT_DDI)
intel_hdmi_info(m, intel_connector);
break;
default:
@@ -2589,70 +2524,88 @@ static void plane_rotation(char *buf, size_t bufsize, unsigned int rotation)
rotation);
}
-static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
+static void intel_plane_uapi_info(struct seq_file *m, struct intel_plane *plane)
{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_device *dev = &dev_priv->drm;
- struct intel_plane *intel_plane;
+ const struct intel_plane_state *plane_state =
+ to_intel_plane_state(plane->base.state);
+ const struct drm_framebuffer *fb = plane_state->uapi.fb;
+ struct drm_format_name_buf format_name;
+ struct drm_rect src, dst;
+ char rot_str[48];
- for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
- struct drm_plane_state *state;
- struct drm_plane *plane = &intel_plane->base;
- struct drm_format_name_buf format_name;
- char rot_str[48];
+ src = drm_plane_state_src(&plane_state->uapi);
+ dst = drm_plane_state_dest(&plane_state->uapi);
- if (!plane->state) {
- seq_puts(m, "plane->state is NULL!\n");
- continue;
- }
+ if (fb)
+ drm_get_format_name(fb->format->format, &format_name);
- state = plane->state;
+ plane_rotation(rot_str, sizeof(rot_str),
+ plane_state->uapi.rotation);
- if (state->fb) {
- drm_get_format_name(state->fb->format->format,
- &format_name);
- } else {
- sprintf(format_name.str, "N/A");
- }
+ seq_printf(m, "\t\tuapi: fb=%d,%s,%dx%d, src=" DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT ", rotation=%s\n",
+ fb ? fb->base.id : 0, fb ? format_name.str : "n/a",
+ fb ? fb->width : 0, fb ? fb->height : 0,
+ DRM_RECT_FP_ARG(&src),
+ DRM_RECT_ARG(&dst),
+ rot_str);
+}
+
+static void intel_plane_hw_info(struct seq_file *m, struct intel_plane *plane)
+{
+ const struct intel_plane_state *plane_state =
+ to_intel_plane_state(plane->base.state);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ struct drm_format_name_buf format_name;
+ char rot_str[48];
+
+ if (!fb)
+ return;
+
+ drm_get_format_name(fb->format->format, &format_name);
- plane_rotation(rot_str, sizeof(rot_str), state->rotation);
-
- seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n",
- plane->base.id,
- plane_type(intel_plane->base.type),
- state->crtc_x, state->crtc_y,
- state->crtc_w, state->crtc_h,
- (state->src_x >> 16),
- ((state->src_x & 0xffff) * 15625) >> 10,
- (state->src_y >> 16),
- ((state->src_y & 0xffff) * 15625) >> 10,
- (state->src_w >> 16),
- ((state->src_w & 0xffff) * 15625) >> 10,
- (state->src_h >> 16),
- ((state->src_h & 0xffff) * 15625) >> 10,
- format_name.str,
- rot_str);
+ plane_rotation(rot_str, sizeof(rot_str),
+ plane_state->hw.rotation);
+
+ seq_printf(m, "\t\thw: fb=%d,%s,%dx%d, visible=%s, src=" DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT ", rotation=%s\n",
+ fb->base.id, format_name.str,
+ fb->width, fb->height,
+ yesno(plane_state->uapi.visible),
+ DRM_RECT_FP_ARG(&plane_state->uapi.src),
+ DRM_RECT_ARG(&plane_state->uapi.dst),
+ rot_str);
+}
+
+static void intel_plane_info(struct seq_file *m, struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct intel_plane *plane;
+
+ for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
+ seq_printf(m, "\t[PLANE:%d:%s]: type=%s\n",
+ plane->base.base.id, plane->base.name,
+ plane_type(plane->base.type));
+ intel_plane_uapi_info(m, plane);
+ intel_plane_hw_info(m, plane);
}
}
-static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc)
+static void intel_scaler_info(struct seq_file *m, struct intel_crtc *crtc)
{
- struct intel_crtc_state *pipe_config;
- int num_scalers = intel_crtc->num_scalers;
+ const struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ int num_scalers = crtc->num_scalers;
int i;
- pipe_config = to_intel_crtc_state(intel_crtc->base.state);
-
/* Not all platforms have a scaler */
if (num_scalers) {
seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
num_scalers,
- pipe_config->scaler_state.scaler_users,
- pipe_config->scaler_state.scaler_id);
+ crtc_state->scaler_state.scaler_users,
+ crtc_state->scaler_state.scaler_id);
for (i = 0; i < num_scalers; i++) {
- struct intel_scaler *sc =
- &pipe_config->scaler_state.scalers[i];
+ const struct intel_scaler *sc =
+ &crtc_state->scaler_state.scalers[i];
seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
i, yesno(sc->in_use), sc->mode);
@@ -2663,6 +2616,44 @@ static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc)
}
}
+static void intel_crtc_info(struct seq_file *m, struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ const struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ struct intel_encoder *encoder;
+
+ seq_printf(m, "[CRTC:%d:%s]:\n",
+ crtc->base.base.id, crtc->base.name);
+
+ seq_printf(m, "\tuapi: enable=%s, active=%s, mode=" DRM_MODE_FMT "\n",
+ yesno(crtc_state->uapi.enable),
+ yesno(crtc_state->uapi.active),
+ DRM_MODE_ARG(&crtc_state->uapi.mode));
+
+ if (crtc_state->hw.enable) {
+ seq_printf(m, "\thw: active=%s, adjusted_mode=" DRM_MODE_FMT "\n",
+ yesno(crtc_state->hw.active),
+ DRM_MODE_ARG(&crtc_state->hw.adjusted_mode));
+
+ seq_printf(m, "\tpipe src size=%dx%d, dither=%s, bpp=%d\n",
+ crtc_state->pipe_src_w, crtc_state->pipe_src_h,
+ yesno(crtc_state->dither), crtc_state->pipe_bpp);
+
+ intel_scaler_info(m, crtc);
+ }
+
+ for_each_intel_encoder_mask(&dev_priv->drm, encoder,
+ crtc_state->uapi.encoder_mask)
+ intel_encoder_info(m, crtc, encoder);
+
+ intel_plane_info(m, crtc);
+
+ seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s\n",
+ yesno(!crtc->cpu_fifo_underrun_disabled),
+ yesno(!crtc->pch_fifo_underrun_disabled));
+}
+
static int i915_display_info(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -2674,52 +2665,22 @@ static int i915_display_info(struct seq_file *m, void *unused)
wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+ drm_modeset_lock_all(dev);
+
seq_printf(m, "CRTC info\n");
seq_printf(m, "---------\n");
- for_each_intel_crtc(dev, crtc) {
- struct intel_crtc_state *pipe_config;
-
- drm_modeset_lock(&crtc->base.mutex, NULL);
- pipe_config = to_intel_crtc_state(crtc->base.state);
-
- seq_printf(m, "CRTC %d: pipe: %c, active=%s, (size=%dx%d), dither=%s, bpp=%d\n",
- crtc->base.base.id, pipe_name(crtc->pipe),
- yesno(pipe_config->base.active),
- pipe_config->pipe_src_w, pipe_config->pipe_src_h,
- yesno(pipe_config->dither), pipe_config->pipe_bpp);
-
- if (pipe_config->base.active) {
- struct intel_plane *cursor =
- to_intel_plane(crtc->base.cursor);
-
- intel_crtc_info(m, crtc);
-
- seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x\n",
- yesno(cursor->base.state->visible),
- cursor->base.state->crtc_x,
- cursor->base.state->crtc_y,
- cursor->base.state->crtc_w,
- cursor->base.state->crtc_h,
- cursor->cursor.base);
- intel_scaler_info(m, crtc);
- intel_plane_info(m, crtc);
- }
-
- seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n",
- yesno(!crtc->cpu_fifo_underrun_disabled),
- yesno(!crtc->pch_fifo_underrun_disabled));
- drm_modeset_unlock(&crtc->base.mutex);
- }
+ for_each_intel_crtc(dev, crtc)
+ intel_crtc_info(m, crtc);
seq_printf(m, "\n");
seq_printf(m, "Connector info\n");
seq_printf(m, "--------------\n");
- mutex_lock(&dev->mode_config.mutex);
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter)
intel_connector_info(m, connector);
drm_connector_list_iter_end(&conn_iter);
- mutex_unlock(&dev->mode_config.mutex);
+
+ drm_modeset_unlock_all(dev);
intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
@@ -2755,7 +2716,7 @@ static int i915_rcs_topology(struct seq_file *m, void *unused)
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct drm_printer p = drm_seq_file_printer(m);
- intel_device_info_dump_topology(&RUNTIME_INFO(dev_priv)->sseu, &p);
+ intel_device_info_print_topology(&RUNTIME_INFO(dev_priv)->sseu, &p);
return 0;
}
@@ -4161,11 +4122,11 @@ static int i915_drrs_ctl_set(void *data, u64 val)
crtc_state = to_intel_crtc_state(crtc->base.state);
- if (!crtc_state->base.active ||
+ if (!crtc_state->hw.active ||
!crtc_state->has_drrs)
goto out;
- commit = crtc_state->base.commit;
+ commit = crtc_state->uapi.commit;
if (commit) {
ret = wait_for_completion_interruptible(&commit->hw_done);
if (ret)
@@ -4177,7 +4138,7 @@ static int i915_drrs_ctl_set(void *data, u64 val)
struct intel_encoder *encoder;
struct intel_dp *intel_dp;
- if (!(crtc_state->base.connector_mask &
+ if (!(crtc_state->uapi.connector_mask &
drm_connector_mask(connector)))
continue;
@@ -4236,14 +4197,14 @@ i915_fifo_underrun_reset_write(struct file *filp,
return ret;
crtc_state = to_intel_crtc_state(intel_crtc->base.state);
- commit = crtc_state->base.commit;
+ commit = crtc_state->uapi.commit;
if (commit) {
ret = wait_for_completion_interruptible(&commit->hw_done);
if (!ret)
ret = wait_for_completion_interruptible(&commit->flip_done);
}
- if (!ret && crtc_state->base.active) {
+ if (!ret && crtc_state->hw.active) {
DRM_DEBUG_KMS("Re-arming FIFO underruns on pipe %c\n",
pipe_name(intel_crtc->pipe));
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index fee7dfee1f91..59525094d0e3 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -61,6 +61,7 @@
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_ioctls.h"
+#include "gem/i915_gem_mman.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_rc6.h"
@@ -618,7 +619,6 @@ err_bridge:
*/
static void i915_driver_mmio_release(struct drm_i915_private *dev_priv)
{
- intel_engines_cleanup(&dev_priv->gt);
intel_teardown_mchbar(dev_priv);
intel_uncore_fini_mmio(&dev_priv->uncore);
pci_dev_put(dev_priv->bridge_dev);
@@ -1052,7 +1052,7 @@ intel_get_dram_info(struct drm_i915_private *dev_priv)
*/
dram_info->is_16gb_dimm = !IS_GEN9_LP(dev_priv);
- if (INTEL_GEN(dev_priv) < 9)
+ if (INTEL_GEN(dev_priv) < 9 || !HAS_DISPLAY(dev_priv))
return;
if (IS_GEN9_LP(dev_priv))
@@ -1396,8 +1396,8 @@ static void i915_welcome_messages(struct drm_i915_private *dev_priv)
INTEL_INFO(dev_priv)->platform),
INTEL_GEN(dev_priv));
- intel_device_info_dump_flags(INTEL_INFO(dev_priv), &p);
- intel_device_info_dump_runtime(RUNTIME_INFO(dev_priv), &p);
+ intel_device_info_print_static(INTEL_INFO(dev_priv), &p);
+ intel_device_info_print_runtime(RUNTIME_INFO(dev_priv), &p);
}
if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
@@ -1817,8 +1817,6 @@ static int i915_drm_resume(struct drm_device *dev)
disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
- intel_rc6_ctx_wa_resume(&dev_priv->gt.rc6);
-
intel_gt_sanitize(&dev_priv->gt, true);
ret = i915_ggtt_enable_hw(dev_priv);
@@ -2662,18 +2660,12 @@ const struct dev_pm_ops i915_pm_ops = {
.runtime_resume = intel_runtime_resume,
};
-static const struct vm_operations_struct i915_gem_vm_ops = {
- .fault = i915_gem_fault,
- .open = drm_gem_vm_open,
- .close = drm_gem_vm_close,
-};
-
static const struct file_operations i915_driver_fops = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
.unlocked_ioctl = drm_ioctl,
- .mmap = drm_gem_mmap,
+ .mmap = i915_gem_mmap,
.poll = drm_poll,
.read = drm_read,
.compat_ioctl = i915_compat_ioctl,
@@ -2720,7 +2712,7 @@ static const struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_OFFSET, i915_gem_mmap_offset_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling_ioctl, DRM_RENDER_ALLOW),
@@ -2762,7 +2754,6 @@ static struct drm_driver driver = {
.gem_close_object = i915_gem_close_object,
.gem_free_object_unlocked = i915_gem_free_object,
- .gem_vm_ops = &i915_gem_vm_ops,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
@@ -2773,7 +2764,8 @@ static struct drm_driver driver = {
.get_scanout_position = i915_get_crtc_scanoutpos,
.dumb_create = i915_gem_dumb_create,
- .dumb_map_offset = i915_gem_mmap_gtt,
+ .dumb_map_offset = i915_gem_dumb_mmap_offset,
+
.ioctls = i915_ioctls,
.num_ioctls = ARRAY_SIZE(i915_ioctls),
.fops = &i915_driver_fops,
@@ -2784,7 +2776,3 @@ static struct drm_driver driver = {
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
};
-
-#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
-#include "selftests/mock_drm.c"
-#endif
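/*
 * A minimal userspace sketch of the flow implied by the MMAP_OFFSET
 * rename above (error handling omitted; the DRM fd and the GEM handle
 * are assumed to exist): the ioctl returns a fake offset that is then
 * passed to mmap(2) on the device fd, now serviced by i915_gem_mmap().
 */
struct drm_i915_gem_mmap_offset arg = { .handle = handle };
ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &arg);
void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		 fd, arg.offset);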
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index e29bc137e7ba..d05a968227f7 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -110,8 +110,8 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20191101"
-#define DRIVER_TIMESTAMP 1572604873
+#define DRIVER_DATE "20191223"
+#define DRIVER_TIMESTAMP 1577120893
struct drm_i915_gem_object;
@@ -273,11 +273,11 @@ struct drm_i915_display_funcs {
int (*compute_pipe_wm)(struct intel_crtc_state *crtc_state);
int (*compute_intermediate_wm)(struct intel_crtc_state *crtc_state);
void (*initial_watermarks)(struct intel_atomic_state *state,
- struct intel_crtc_state *crtc_state);
+ struct intel_crtc *crtc);
void (*atomic_update_watermarks)(struct intel_atomic_state *state,
- struct intel_crtc_state *crtc_state);
+ struct intel_crtc *crtc);
void (*optimize_watermarks)(struct intel_atomic_state *state,
- struct intel_crtc_state *crtc_state);
+ struct intel_crtc *crtc);
int (*compute_global_watermarks)(struct intel_atomic_state *state);
void (*update_wm)(struct intel_crtc *crtc);
int (*modeset_calc_cdclk)(struct intel_atomic_state *state);
@@ -290,10 +290,10 @@ struct drm_i915_display_funcs {
struct intel_initial_plane_config *);
int (*crtc_compute_clock)(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state);
- void (*crtc_enable)(struct intel_crtc_state *pipe_config,
- struct intel_atomic_state *old_state);
- void (*crtc_disable)(struct intel_crtc_state *old_crtc_state,
- struct intel_atomic_state *old_state);
+ void (*crtc_enable)(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+ void (*crtc_disable)(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
void (*commit_modeset_enables)(struct intel_atomic_state *state);
void (*commit_modeset_disables)(struct intel_atomic_state *state);
void (*audio_codec_enable)(struct intel_encoder *encoder,
@@ -366,7 +366,6 @@ struct intel_fbc {
unsigned threshold;
unsigned int possible_framebuffer_bits;
unsigned int busy_bits;
- unsigned int visible_pipes_mask;
struct intel_crtc *crtc;
struct drm_mm_node compressed_fb;
@@ -374,8 +373,8 @@ struct intel_fbc {
bool false_color;
- bool enabled;
bool active;
+ bool activated;
bool flip_pending;
bool underrun_detected;
@@ -387,9 +386,6 @@ struct intel_fbc {
* these problems.
*/
struct intel_fbc_state_cache {
- struct i915_vma *vma;
- unsigned long flags;
-
struct {
unsigned int mode_flags;
u32 hsw_bdw_pixel_rate;
@@ -418,6 +414,8 @@ struct intel_fbc {
const struct drm_format_info *format;
unsigned int stride;
} fb;
+ u16 gen9_wa_cfb_stride;
+ s8 fence_id;
} state_cache;
/*
@@ -428,9 +426,6 @@ struct intel_fbc {
* are supposed to read from it in order to program the registers.
*/
struct intel_fbc_reg_params {
- struct i915_vma *vma;
- unsigned long flags;
-
struct {
enum pipe pipe;
enum i9xx_plane_id i9xx_plane;
@@ -443,7 +438,9 @@ struct intel_fbc {
} fb;
int cfb_size;
- unsigned int gen9_wa_cfb_stride;
+ u16 gen9_wa_cfb_stride;
+ s8 fence_id;
+ bool plane_visible;
} params;
const char *no_fbc_reason;
@@ -621,19 +618,18 @@ struct i915_gem_mm {
#define I915_ENGINE_WEDGED_TIMEOUT (60 * HZ) /* Reset but no recovery? */
+/* Amount of SAGV/QGV points, BSpec precisely defines this */
+#define I915_NUM_QGV_POINTS 8
+
struct ddi_vbt_port_info {
/* Non-NULL if port present. */
const struct child_device_config *child;
int max_tmds_clock;
- /*
- * This is an index in the HDMI/DVI DDI buffer translation table.
- * The special value HDMI_LEVEL_SHIFT_UNKNOWN means the VBT didn't
- * populate this field.
- */
-#define HDMI_LEVEL_SHIFT_UNKNOWN 0xff
+ /* This is an index in the HDMI/DVI DDI buffer translation table. */
u8 hdmi_level_shift;
+ u8 hdmi_level_shift_set:1;
u8 supports_dvi:1;
u8 supports_hdmi:1;
@@ -724,8 +720,7 @@ struct intel_vbt_data {
int crt_ddc_pin;
- int child_dev_num;
- struct child_device_config *child_dev;
+ struct list_head display_devices;
struct ddi_vbt_port_info ddi_port_info[I915_MAX_PORTS];
struct sdvo_device_mapping sdvo_mappings[2];
@@ -891,6 +886,10 @@ struct intel_cdclk_state {
u8 voltage_level;
};
+struct i915_selftest_stash {
+ atomic_t counter;
+};
+
struct drm_i915_private {
struct drm_device drm;
@@ -956,9 +955,6 @@ struct drm_i915_private {
struct pci_dev *bridge_dev;
- /* Context used internally to idle the GPU and setup initial state */
- struct i915_gem_context *kernel_context;
-
struct intel_engine_cs *engine[I915_NUM_ENGINES];
struct rb_root uabi_engines;
@@ -1233,7 +1229,8 @@ struct drm_i915_private {
} dram_info;
struct intel_bw_info {
- unsigned int deratedbw[3]; /* for each QGV point */
+ /* for each QGV point */
+ unsigned int deratedbw[I915_NUM_QGV_POINTS];
u8 num_qgv_points;
u8 num_planes;
} max_bw[6];
@@ -1248,8 +1245,6 @@ struct drm_i915_private {
struct intel_gt gt;
struct {
- struct notifier_block pm_notifier;
-
struct i915_gem_contexts {
spinlock_t lock; /* locks list */
struct list_head list;
@@ -1286,6 +1281,8 @@ struct drm_i915_private {
/* Mutex to protect the above hdcp component related values. */
struct mutex hdcp_comp_mutex;
+ I915_SELFTEST_DECLARE(struct i915_selftest_stash selftest;)
+
/*
* NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
* will be rejected. Instead look for a better place.
@@ -1661,7 +1658,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
/* WaRsDisableCoarsePowerGating:skl,cnl */
#define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \
- (IS_CANNONLAKE(dev_priv) || IS_GEN(dev_priv, 9))
+ IS_GEN_RANGE(dev_priv, 9, 10)
#define HAS_GMBUS_IRQ(dev_priv) (INTEL_GEN(dev_priv) >= 4)
#define HAS_GMBUS_BURST_READ(dev_priv) (INTEL_GEN(dev_priv) >= 10 || \
@@ -1842,14 +1839,7 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
unsigned long flags);
#define I915_GEM_OBJECT_UNBIND_ACTIVE BIT(0)
-
-struct i915_vma * __must_check
-i915_gem_object_pin(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm,
- const struct i915_ggtt_view *view,
- u64 size,
- u64 alignment,
- u64 flags);
+#define I915_GEM_OBJECT_UNBIND_BARRIER BIT(1)
void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv);
@@ -1862,9 +1852,6 @@ i915_mutex_lock_interruptible(struct drm_device *dev)
int i915_gem_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
-int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
- u32 handle, u64 *offset);
-int i915_gem_mmap_gtt_version(void);
int __must_check i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno);
@@ -1887,7 +1874,6 @@ void i915_gem_driver_release(struct drm_i915_private *dev_priv);
void i915_gem_suspend(struct drm_i915_private *dev_priv);
void i915_gem_suspend_late(struct drm_i915_private *dev_priv);
void i915_gem_resume(struct drm_i915_private *dev_priv);
-vm_fault_t i915_gem_fault(struct vm_fault *vmf);
int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file);
void i915_gem_release(struct drm_device *dev, struct drm_file *file);
@@ -1956,14 +1942,13 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type);
int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv);
void intel_engine_init_cmd_parser(struct intel_engine_cs *engine);
void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine);
-int intel_engine_cmd_parser(struct i915_gem_context *cxt,
- struct intel_engine_cs *engine,
- struct drm_i915_gem_object *batch_obj,
- u64 user_batch_start,
- u32 batch_start_offset,
- u32 batch_len,
- struct drm_i915_gem_object *shadow_batch_obj,
- u64 shadow_batch_start);
+int intel_engine_cmd_parser(struct intel_engine_cs *engine,
+ struct i915_vma *batch,
+ u32 batch_offset,
+ u32 batch_length,
+ struct i915_vma *shadow,
+ bool trampoline);
+#define I915_CMD_PARSER_TRAMPOLINE_SIZE 8
/* intel_device_info.c */
static inline struct intel_device_info *
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index d034fa413164..9ddcf17230e6 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -44,20 +44,13 @@
#include "gem/i915_gem_clflush.h"
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_ioctls.h"
-#include "gem/i915_gem_pm.h"
-#include "gt/intel_context.h"
+#include "gem/i915_gem_mman.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
-#include "gt/intel_gt_requests.h"
-#include "gt/intel_mocs.h"
-#include "gt/intel_reset.h"
-#include "gt/intel_renderstate.h"
-#include "gt/intel_rps.h"
#include "gt/intel_workarounds.h"
#include "i915_drv.h"
-#include "i915_scatterlist.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
@@ -119,33 +112,65 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
unsigned long flags)
{
- struct i915_vma *vma;
+ struct intel_runtime_pm *rpm = &to_i915(obj->base.dev)->runtime_pm;
LIST_HEAD(still_in_list);
- int ret = 0;
+ intel_wakeref_t wakeref;
+ struct i915_vma *vma;
+ int ret;
+
+ if (!atomic_read(&obj->bind_count))
+ return 0;
+
+ /*
+ * As some machines use ACPI to handle runtime-resume callbacks, and
+ * ACPI is quite kmalloc happy, we cannot resume beneath the vm->mutex
+ * as they are required by the shrinker. Ergo, we wake the device up
+ * first just in case.
+ */
+ wakeref = intel_runtime_pm_get(rpm);
+try_again:
+ ret = 0;
spin_lock(&obj->vma.lock);
while (!ret && (vma = list_first_entry_or_null(&obj->vma.list,
struct i915_vma,
obj_link))) {
struct i915_address_space *vm = vma->vm;
- ret = -EBUSY;
+ list_move_tail(&vma->obj_link, &still_in_list);
+ if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK))
+ continue;
+
+ ret = -EAGAIN;
if (!i915_vm_tryopen(vm))
break;
- list_move_tail(&vma->obj_link, &still_in_list);
+ /* Prevent vma being freed by i915_vma_parked as we unbind */
+ vma = __i915_vma_get(vma);
spin_unlock(&obj->vma.lock);
- if (flags & I915_GEM_OBJECT_UNBIND_ACTIVE ||
- !i915_vma_is_active(vma))
- ret = i915_vma_unbind(vma);
+ if (vma) {
+ ret = -EBUSY;
+ if (flags & I915_GEM_OBJECT_UNBIND_ACTIVE ||
+ !i915_vma_is_active(vma))
+ ret = i915_vma_unbind(vma);
+
+ __i915_vma_put(vma);
+ }
i915_vm_close(vm);
spin_lock(&obj->vma.lock);
}
- list_splice(&still_in_list, &obj->vma.list);
+ list_splice_init(&still_in_list, &obj->vma.list);
spin_unlock(&obj->vma.lock);
+ if (ret == -EAGAIN && flags & I915_GEM_OBJECT_UNBIND_BARRIER) {
+ rcu_barrier(); /* flush the i915_vm_release() */
+ goto try_again;
+ }
+
+ intel_runtime_pm_put(rpm, wakeref);
+
return ret;
}
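/*
 * A hypothetical call site for the reworked unbind above: the new
 * I915_GEM_OBJECT_UNBIND_BARRIER flag opts into the rcu_barrier()
 * retry when an -EAGAIN race with i915_vm_release() is hit.
 */
err = i915_gem_object_unbind(obj,
			     I915_GEM_OBJECT_UNBIND_ACTIVE |
			     I915_GEM_OBJECT_UNBIND_BARRIER);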
@@ -161,7 +186,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
* We manually control the domain here and pretend that it
* remains coherent i.e. in the GTT domain, like shmem_pwrite.
*/
- intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CPU);
+ i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);
if (copy_from_user(vaddr, user_data, args->size))
return -EFAULT;
@@ -169,7 +194,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
drm_clflush_virt_range(vaddr, args->size);
intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt);
- intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_CPU);
+ i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
return 0;
}
@@ -589,7 +614,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
goto out_unpin;
}
- intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CPU);
+ i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);
user_data = u64_to_user_ptr(args->data_ptr);
offset = args->offset;
@@ -631,11 +656,12 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
user_data += page_length;
offset += page_length;
}
- intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_CPU);
+
+ intel_gt_flush_ggtt_writes(ggtt->vm.gt);
+ i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
i915_gem_object_unlock_fence(obj, fence);
out_unpin:
- intel_gt_flush_ggtt_writes(ggtt->vm.gt);
if (drm_mm_node_allocated(&node)) {
ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
remove_mappable_node(ggtt, &node);
@@ -721,7 +747,7 @@ i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
offset = 0;
}
- intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_CPU);
+ i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
i915_gem_object_unlock_fence(obj, fence);
return ret;
@@ -855,7 +881,7 @@ void i915_gem_runtime_suspend(struct drm_i915_private *i915)
list_for_each_entry_safe(obj, on,
&i915->ggtt.userfault_list, userfault_link)
- __i915_gem_object_release_mmap(obj);
+ __i915_gem_object_release_mmap_gtt(obj);
/*
* The fence will be lost when the device powers down. If any were
@@ -892,22 +918,8 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
u64 alignment,
u64 flags)
{
- struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
- struct i915_address_space *vm = &dev_priv->ggtt.vm;
-
- return i915_gem_object_pin(obj, vm, view, size, alignment,
- flags | PIN_GLOBAL);
-}
-
-struct i915_vma *
-i915_gem_object_pin(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm,
- const struct i915_ggtt_view *view,
- u64 size,
- u64 alignment,
- u64 flags)
-{
- struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ struct i915_ggtt *ggtt = &i915->ggtt;
struct i915_vma *vma;
int ret;
@@ -916,17 +928,19 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
if (flags & PIN_MAPPABLE &&
(!view || view->type == I915_GGTT_VIEW_NORMAL)) {
- /* If the required space is larger than the available
+ /*
+ * If the required space is larger than the available
+ * aperture, we will not be able to find a slot for the
* object and unbinding the object now will be in
* vain. Worse, doing so may cause us to ping-pong
* the object in and out of the Global GTT and
* waste a lot of cycles under the mutex.
*/
- if (obj->base.size > dev_priv->ggtt.mappable_end)
+ if (obj->base.size > ggtt->mappable_end)
return ERR_PTR(-E2BIG);
- /* If NONBLOCK is set the caller is optimistically
+ /*
+ * If NONBLOCK is set the caller is optimistically
* trying to cache the full object within the mappable
* aperture, and *must* have a fallback in place for
* situations where we cannot bind the object. We
@@ -942,11 +956,11 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
* we could try to minimise harm to others.
*/
if (flags & PIN_NONBLOCK &&
- obj->base.size > dev_priv->ggtt.mappable_end / 2)
+ obj->base.size > ggtt->mappable_end / 2)
return ERR_PTR(-ENOSPC);
}
- vma = i915_vma_instance(obj, vm, view);
+ vma = i915_vma_instance(obj, &ggtt->vm, view);
if (IS_ERR(vma))
return vma;
@@ -956,7 +970,7 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
return ERR_PTR(-ENOSPC);
if (flags & PIN_MAPPABLE &&
- vma->fence_size > dev_priv->ggtt.mappable_end / 2)
+ vma->fence_size > ggtt->mappable_end / 2)
return ERR_PTR(-ENOSPC);
}
@@ -966,14 +980,14 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
}
if (vma->fence && !i915_gem_object_is_tiled(obj)) {
- mutex_lock(&vma->vm->mutex);
+ mutex_lock(&ggtt->vm.mutex);
ret = i915_vma_revoke_fence(vma);
- mutex_unlock(&vma->vm->mutex);
+ mutex_unlock(&ggtt->vm.mutex);
if (ret)
return ERR_PTR(ret);
}
- ret = i915_vma_pin(vma, size, alignment, flags);
+ ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
if (ret)
return ERR_PTR(ret);
@@ -1054,172 +1068,6 @@ out:
return err;
}
-static int __intel_context_flush_retire(struct intel_context *ce)
-{
- struct intel_timeline *tl;
-
- tl = intel_context_timeline_lock(ce);
- if (IS_ERR(tl))
- return PTR_ERR(tl);
-
- intel_context_timeline_unlock(tl);
- return 0;
-}
-
-static int __intel_engines_record_defaults(struct intel_gt *gt)
-{
- struct i915_request *requests[I915_NUM_ENGINES] = {};
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- int err = 0;
-
- /*
- * As we reset the gpu during very early sanitisation, the current
- * register state on the GPU should reflect its defaults values.
- * We load a context onto the hw (with restore-inhibit), then switch
- * over to a second context to save that default register state. We
- * can then prime every new context with that state so they all start
- * from the same default HW values.
- */
-
- for_each_engine(engine, gt, id) {
- struct intel_context *ce;
- struct i915_request *rq;
-
- /* We must be able to switch to something! */
- GEM_BUG_ON(!engine->kernel_context);
- engine->serial++; /* force the kernel context switch */
-
- ce = intel_context_create(engine->kernel_context->gem_context,
- engine);
- if (IS_ERR(ce)) {
- err = PTR_ERR(ce);
- goto out;
- }
-
- rq = intel_context_create_request(ce);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- intel_context_put(ce);
- goto out;
- }
-
- err = intel_engine_emit_ctx_wa(rq);
- if (err)
- goto err_rq;
-
- err = intel_renderstate_emit(rq);
- if (err)
- goto err_rq;
-
-err_rq:
- requests[id] = i915_request_get(rq);
- i915_request_add(rq);
- if (err)
- goto out;
- }
-
- /* Flush the default context image to memory, and enable powersaving. */
- if (intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME) {
- err = -EIO;
- goto out;
- }
-
- for (id = 0; id < ARRAY_SIZE(requests); id++) {
- struct i915_request *rq;
- struct i915_vma *state;
- void *vaddr;
-
- rq = requests[id];
- if (!rq)
- continue;
-
- GEM_BUG_ON(!test_bit(CONTEXT_ALLOC_BIT,
- &rq->hw_context->flags));
- state = rq->hw_context->state;
- if (!state)
- continue;
-
- /* Serialise with retirement on another CPU */
- err = __intel_context_flush_retire(rq->hw_context);
- if (err)
- goto out;
-
- /* We want to be able to unbind the state from the GGTT */
- GEM_BUG_ON(intel_context_is_pinned(rq->hw_context));
-
- /*
- * As we will hold a reference to the logical state, it will
- * not be torn down with the context, and importantly the
- * object will hold onto its vma (making it possible for a
- * stray GTT write to corrupt our defaults). Unmap the vma
- * from the GTT to prevent such accidents and reclaim the
- * space.
- */
- err = i915_vma_unbind(state);
- if (err)
- goto out;
-
- i915_gem_object_lock(state->obj);
- err = i915_gem_object_set_to_cpu_domain(state->obj, false);
- i915_gem_object_unlock(state->obj);
- if (err)
- goto out;
-
- i915_gem_object_set_cache_coherency(state->obj, I915_CACHE_LLC);
-
- /* Check we can acquire the image of the context state */
- vaddr = i915_gem_object_pin_map(state->obj, I915_MAP_FORCE_WB);
- if (IS_ERR(vaddr)) {
- err = PTR_ERR(vaddr);
- goto out;
- }
-
- rq->engine->default_state = i915_gem_object_get(state->obj);
- i915_gem_object_unpin_map(state->obj);
- }
-
-out:
- /*
- * If we have to abandon now, we expect the engines to be idle
- * and ready to be torn-down. The quickest way we can accomplish
- * this is by declaring ourselves wedged.
- */
- if (err)
- intel_gt_set_wedged(gt);
-
- for (id = 0; id < ARRAY_SIZE(requests); id++) {
- struct intel_context *ce;
- struct i915_request *rq;
-
- rq = requests[id];
- if (!rq)
- continue;
-
- ce = rq->hw_context;
- i915_request_put(rq);
- intel_context_put(ce);
- }
- return err;
-}
-
-static int intel_engines_verify_workarounds(struct intel_gt *gt)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- int err = 0;
-
- if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
- return 0;
-
- for_each_engine(engine, gt, id) {
- if (intel_engine_verify_workarounds(engine, "load"))
- err = -EIO;
- }
-
- return err;
-}
-
int i915_gem_init(struct drm_i915_private *dev_priv)
{
int ret;
@@ -1229,8 +1077,6 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
mkwrite_device_info(dev_priv)->page_sizes =
I915_GTT_PAGE_SIZE_4K;
- intel_timelines_init(dev_priv);
-
ret = i915_gem_init_userptr(dev_priv);
if (ret)
return ret;
@@ -1238,51 +1084,12 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
intel_uc_fetch_firmwares(&dev_priv->gt.uc);
intel_wopcm_init(&dev_priv->wopcm);
- /* This is just a security blanket to placate dragons.
- * On some systems, we very sporadically observe that the first TLBs
- * used by the CS may be stale, despite us poking the TLB reset. If
- * we hold the forcewake during initialisation these problems
- * just magically go away.
- */
- intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
-
ret = i915_init_ggtt(dev_priv);
if (ret) {
GEM_BUG_ON(ret == -EIO);
goto err_unlock;
}
- intel_gt_init(&dev_priv->gt);
-
- ret = intel_engines_setup(&dev_priv->gt);
- if (ret) {
- GEM_BUG_ON(ret == -EIO);
- goto err_unlock;
- }
-
- ret = i915_gem_init_contexts(dev_priv);
- if (ret) {
- GEM_BUG_ON(ret == -EIO);
- goto err_scratch;
- }
-
- ret = intel_engines_init(&dev_priv->gt);
- if (ret) {
- GEM_BUG_ON(ret == -EIO);
- goto err_context;
- }
-
- intel_uc_init(&dev_priv->gt.uc);
-
- ret = intel_gt_init_hw(&dev_priv->gt);
- if (ret)
- goto err_uc_init;
-
- /* Only when the HW is re-initialised, can we replay the requests */
- ret = intel_gt_resume(&dev_priv->gt);
- if (ret)
- goto err_init_hw;
-
/*
* Despite its name intel_init_clock_gating applies both display
* clock gating workarounds; GT mmio workarounds and the occasional
@@ -1294,23 +1101,9 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
*/
intel_init_clock_gating(dev_priv);
- ret = intel_engines_verify_workarounds(&dev_priv->gt);
- if (ret)
- goto err_gt;
-
- ret = __intel_engines_record_defaults(&dev_priv->gt);
- if (ret)
- goto err_gt;
-
- ret = i915_inject_probe_error(dev_priv, -ENODEV);
- if (ret)
- goto err_gt;
-
- ret = i915_inject_probe_error(dev_priv, -EIO);
+ ret = intel_gt_init(&dev_priv->gt);
if (ret)
- goto err_gt;
-
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
+ goto err_unlock;
return 0;
@@ -1320,31 +1113,12 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
* HW as irrevocably wedged, but keep enough state around that the
* driver doesn't explode during runtime.
*/
-err_gt:
- intel_gt_set_wedged_on_init(&dev_priv->gt);
- i915_gem_suspend(dev_priv);
- i915_gem_suspend_late(dev_priv);
-
- i915_gem_drain_workqueue(dev_priv);
-err_init_hw:
- intel_uc_fini_hw(&dev_priv->gt.uc);
-err_uc_init:
- if (ret != -EIO) {
- intel_uc_fini(&dev_priv->gt.uc);
- intel_engines_cleanup(&dev_priv->gt);
- }
-err_context:
- if (ret != -EIO)
- i915_gem_driver_release__contexts(dev_priv);
-err_scratch:
- intel_gt_driver_release(&dev_priv->gt);
err_unlock:
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
+ i915_gem_drain_workqueue(dev_priv);
if (ret != -EIO) {
intel_uc_cleanup_firmwares(&dev_priv->gt.uc);
i915_gem_cleanup_userptr(dev_priv);
- intel_timelines_fini(dev_priv);
}
if (ret == -EIO) {
@@ -1388,27 +1162,24 @@ void i915_gem_driver_remove(struct drm_i915_private *dev_priv)
i915_gem_suspend_late(dev_priv);
intel_gt_driver_remove(&dev_priv->gt);
+ dev_priv->uabi_engines = RB_ROOT;
/* Flush any outstanding unpin_work. */
i915_gem_drain_workqueue(dev_priv);
- intel_uc_fini_hw(&dev_priv->gt.uc);
- intel_uc_fini(&dev_priv->gt.uc);
-
i915_gem_drain_freed_objects(dev_priv);
}
void i915_gem_driver_release(struct drm_i915_private *dev_priv)
{
- intel_engines_cleanup(&dev_priv->gt);
- i915_gem_driver_release__contexts(dev_priv);
intel_gt_driver_release(&dev_priv->gt);
intel_wa_list_free(&dev_priv->gt_wa_list);
intel_uc_cleanup_firmwares(&dev_priv->gt.uc);
i915_gem_cleanup_userptr(dev_priv);
- intel_timelines_fini(dev_priv);
+
+ i915_gem_driver_release__contexts(dev_priv);
i915_gem_drain_freed_objects(dev_priv);
@@ -1430,6 +1201,7 @@ static void i915_gem_init__mm(struct drm_i915_private *i915)
void i915_gem_init_early(struct drm_i915_private *dev_priv)
{
i915_gem_init__mm(dev_priv);
+ i915_gem_init__contexts(dev_priv);
spin_lock_init(&dev_priv->fb_tracking.lock);
}
diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h
index 5ff051764373..1753c84d6c0d 100644
--- a/drivers/gpu/drm/i915/i915_gem.h
+++ b/drivers/gpu/drm/i915/i915_gem.h
@@ -30,6 +30,8 @@
#include <drm/drm_drv.h>
+#include "i915_utils.h"
+
struct drm_i915_private;
#ifdef CONFIG_DRM_I915_DEBUG_GEM
@@ -39,6 +41,7 @@ struct drm_i915_private;
#define GEM_BUG_ON(condition) do { if (unlikely((condition))) { \
GEM_TRACE_ERR("%s:%d GEM_BUG_ON(%s)\n", \
__func__, __LINE__, __stringify(condition)); \
+ GEM_TRACE_DUMP(); \
BUG(); \
} \
} while(0)
@@ -68,9 +71,10 @@ struct drm_i915_private;
pr_err(__VA_ARGS__); \
trace_printk(__VA_ARGS__); \
} while (0)
-#define GEM_TRACE_DUMP() ftrace_dump(DUMP_ALL)
+#define GEM_TRACE_DUMP() \
+ do { ftrace_dump(DUMP_ALL); add_taint_for_CI(TAINT_WARN); } while (0)
#define GEM_TRACE_DUMP_ON(expr) \
- do { if (expr) ftrace_dump(DUMP_ALL); } while (0)
+ do { if (expr) GEM_TRACE_DUMP(); } while (0)
#else
#define GEM_TRACE(...) do { } while (0)
#define GEM_TRACE_ERR(...) do { } while (0)
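/*
 * A minimal sketch (hypothetical call site) of what the GEM_TRACE_DUMP
 * change above buys: any triggered dump now also taints the kernel so
 * CI can flag the run.
 */
err = intel_gt_wait_for_idle(gt, HZ);
GEM_TRACE_DUMP_ON(err); /* ftrace_dump() + add_taint_for_CI(TAINT_WARN) */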
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 7e62c310290f..0697bedebeef 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -359,9 +359,7 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
*/
int i915_gem_evict_vm(struct i915_address_space *vm)
{
- struct list_head eviction_list;
- struct i915_vma *vma, *next;
- int ret;
+ int ret = 0;
lockdep_assert_held(&vm->mutex);
trace_i915_gem_evict_vm(vm);
@@ -377,21 +375,30 @@ int i915_gem_evict_vm(struct i915_address_space *vm)
return ret;
}
- INIT_LIST_HEAD(&eviction_list);
- list_for_each_entry(vma, &vm->bound_list, vm_link) {
- if (i915_vma_is_pinned(vma))
- continue;
+ do {
+ struct i915_vma *vma, *vn;
+ LIST_HEAD(eviction_list);
- __i915_vma_pin(vma);
- list_add(&vma->evict_link, &eviction_list);
- }
+ list_for_each_entry(vma, &vm->bound_list, vm_link) {
+ if (i915_vma_is_pinned(vma))
+ continue;
+
+ __i915_vma_pin(vma);
+ list_add(&vma->evict_link, &eviction_list);
+ }
+ if (list_empty(&eviction_list))
+ break;
+
+ ret = 0;
+ list_for_each_entry_safe(vma, vn, &eviction_list, evict_link) {
+ __i915_vma_unpin(vma);
+ if (ret == 0)
+ ret = __i915_vma_unbind(vma);
+ if (ret != -EINTR) /* "Get me out of here!" */
+ ret = 0;
+ }
+ } while (ret == 0);
- ret = 0;
- list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
- __i915_vma_unpin(vma);
- if (ret == 0)
- ret = __i915_vma_unbind(vma);
- }
return ret;
}
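/*
 * Note on the loop above: each pass pins candidate vmas under the
 * spinlock so they cannot be freed once it is dropped, then unpins and
 * unbinds them outside it; only -EINTR (a signal) terminates the loop,
 * any other unbind error is cleared so the pass repeats until the
 * bound list drains.
 */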
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 6239a9adbf14..1efe58ad0ce9 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -53,6 +53,8 @@
#define DBG(...)
#endif
+#define NALLOC 3 /* 1 normal, 1 for concurrent threads, 1 for preallocation */
+
/**
* DOC: Global GTT views
*
@@ -123,6 +125,16 @@ static void gen6_ggtt_invalidate(struct i915_ggtt *ggtt)
{
struct intel_uncore *uncore = ggtt->vm.gt->uncore;
+ spin_lock_irq(&uncore->lock);
+ intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
+ intel_uncore_read_fw(uncore, GFX_FLSH_CNTL_GEN6);
+ spin_unlock_irq(&uncore->lock);
+}
+
+static void gen8_ggtt_invalidate(struct i915_ggtt *ggtt)
+{
+ struct intel_uncore *uncore = ggtt->vm.gt->uncore;
+
/*
* Note that as an uncached mmio write, this will flush the
* WCB of the writes into the GGTT before it triggers the invalidate.
@@ -135,7 +147,7 @@ static void guc_ggtt_invalidate(struct i915_ggtt *ggtt)
struct intel_uncore *uncore = ggtt->vm.gt->uncore;
struct drm_i915_private *i915 = ggtt->vm.i915;
- gen6_ggtt_invalidate(ggtt);
+ gen8_ggtt_invalidate(ggtt);
if (INTEL_GEN(i915) >= 12)
intel_uncore_write_fw(uncore, GEN12_GUC_TLB_INV_CR,
@@ -522,7 +534,7 @@ void __i915_vm_close(struct i915_address_space *vm)
atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
WARN_ON(__i915_vma_unbind(vma));
- i915_vma_destroy(vma);
+ __i915_vma_put(vma);
i915_gem_object_put(obj);
}
@@ -783,7 +795,7 @@ __set_pd_entry(struct i915_page_directory * const pd,
u64 (*encode)(const dma_addr_t, const enum i915_cache_level))
{
/* Each thread pre-pins the pd, and we may have a thread per pde. */
- GEM_BUG_ON(atomic_read(px_used(pd)) > 2 * ARRAY_SIZE(pd->entry));
+ GEM_BUG_ON(atomic_read(px_used(pd)) > NALLOC * ARRAY_SIZE(pd->entry));
atomic_inc(px_used(pd));
pd->entry[idx] = to;
@@ -1118,7 +1130,7 @@ static int __gen8_ppgtt_alloc(struct i915_address_space * const vm,
atomic_add(count, &pt->used);
/* All other pdes may be simultaneously removed */
- GEM_BUG_ON(atomic_read(&pt->used) > 2 * I915_PDES);
+ GEM_BUG_ON(atomic_read(&pt->used) > NALLOC * I915_PDES);
*start += count;
}
} while (idx++, --len);
@@ -1363,12 +1375,8 @@ static int gen8_init_scratch(struct i915_address_space *vm)
* If everybody agrees to not to write into the scratch page,
* we can reuse it for all vm, keeping contexts and processes separate.
*/
- if (vm->has_read_only &&
- vm->i915->kernel_context &&
- vm->i915->kernel_context->vm) {
- struct i915_address_space *clone =
- rcu_dereference_protected(vm->i915->kernel_context->vm,
- true); /* static */
+ if (vm->has_read_only && vm->gt->vm && !i915_is_ggtt(vm->gt->vm)) {
+ struct i915_address_space *clone = vm->gt->vm;
GEM_BUG_ON(!clone->has_read_only);
@@ -1683,6 +1691,28 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
}
+static void gen6_flush_pd(struct gen6_ppgtt *ppgtt, u64 start, u64 end)
+{
+ struct i915_page_directory * const pd = ppgtt->base.pd;
+ struct i915_page_table *pt;
+ unsigned int pde;
+
+ start = round_down(start, SZ_64K);
+ end = round_up(end, SZ_64K) - start;
+
+ mutex_lock(&ppgtt->flush);
+
+ gen6_for_each_pde(pt, pd, start, end, pde)
+ gen6_write_pde(ppgtt, pde, pt);
+
+ mb();
+ ioread32(ppgtt->pd_addr + pde - 1);
+ gen6_ggtt_invalidate(ppgtt->base.vm.gt->ggtt);
+ mb();
+
+ mutex_unlock(&ppgtt->flush);
+}
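/*
 * A generic sketch of the posting-read idiom used by gen6_flush_pd()
 * (register and variable names are illustrative only): reading back
 * through the same mapping forces buffered PDE writes to land before
 * the TLB invalidate that follows.
 */
iowrite32(pde_val, pd_addr + pde);  /* may sit in a write-combining buffer */
mb();
(void)ioread32(pd_addr + pde);      /* posting read flushes it out */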
+
static int gen6_alloc_va_range(struct i915_address_space *vm,
u64 start, u64 length)
{
@@ -1692,7 +1722,6 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
intel_wakeref_t wakeref;
u64 from = start;
unsigned int pde;
- bool flush = false;
int ret = 0;
wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);
@@ -1717,11 +1746,6 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
spin_lock(&pd->lock);
if (pd->entry[pde] == &vm->scratch[1]) {
pd->entry[pde] = pt;
- if (i915_vma_is_bound(ppgtt->vma,
- I915_VMA_GLOBAL_BIND)) {
- gen6_write_pde(ppgtt, pde, pt);
- flush = true;
- }
} else {
alloc = pt;
pt = pd->entry[pde];
@@ -1732,8 +1756,8 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
}
spin_unlock(&pd->lock);
- if (flush)
- gen6_ggtt_invalidate(vm->gt->ggtt);
+ if (i915_vma_is_bound(ppgtt->vma, I915_VMA_GLOBAL_BIND))
+ gen6_flush_pd(ppgtt, from, start);
goto out;
@@ -1788,11 +1812,12 @@ static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
{
struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
- i915_vma_destroy(ppgtt->vma);
+ __i915_vma_put(ppgtt->vma);
gen6_ppgtt_free_pd(ppgtt);
free_scratch(vm);
+ mutex_destroy(&ppgtt->flush);
mutex_destroy(&ppgtt->pin_mutex);
kfree(ppgtt->base.pd);
}
@@ -1817,17 +1842,11 @@ static int pd_vma_bind(struct i915_vma *vma,
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vma->vm);
struct gen6_ppgtt *ppgtt = vma->private;
u32 ggtt_offset = i915_ggtt_offset(vma) / I915_GTT_PAGE_SIZE;
- struct i915_page_table *pt;
- unsigned int pde;
px_base(ppgtt->base.pd)->ggtt_offset = ggtt_offset * sizeof(gen6_pte_t);
ppgtt->pd_addr = (gen6_pte_t __iomem *)ggtt->gsm + ggtt_offset;
- gen6_for_all_pdes(pt, ppgtt->base.pd, pde)
- gen6_write_pde(ppgtt, pde, pt);
-
- gen6_ggtt_invalidate(ggtt);
-
+ gen6_flush_pd(ppgtt, 0, ppgtt->base.vm.total);
return 0;
}
@@ -1876,6 +1895,7 @@ static struct i915_vma *pd_vma_create(struct gen6_ppgtt *ppgtt, int size)
i915_active_init(&vma->active, NULL, NULL);
+ kref_init(&vma->ref);
mutex_init(&vma->pages_mutex);
vma->vm = i915_vm_get(&ggtt->vm);
vma->ops = &pd_vma_ops;
@@ -1917,9 +1937,7 @@ int gen6_ppgtt_pin(struct i915_ppgtt *base)
* size. We allocate at the top of the GTT to avoid fragmentation.
*/
if (!atomic_read(&ppgtt->pin_count)) {
- err = i915_vma_pin(ppgtt->vma,
- 0, GEN6_PD_ALIGN,
- PIN_GLOBAL | PIN_HIGH);
+ err = i915_ggtt_pin(ppgtt->vma, GEN6_PD_ALIGN, PIN_HIGH);
}
if (!err)
atomic_inc(&ppgtt->pin_count);
@@ -1958,6 +1976,7 @@ static struct i915_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915)
if (!ppgtt)
return ERR_PTR(-ENOMEM);
+ mutex_init(&ppgtt->flush);
mutex_init(&ppgtt->pin_mutex);
ppgtt_init(&ppgtt->base, &i915->gt);
@@ -1994,6 +2013,7 @@ err_scratch:
err_pd:
kfree(ppgtt->base.pd);
err_free:
+ mutex_destroy(&ppgtt->pin_mutex);
kfree(ppgtt);
return ERR_PTR(err);
}
@@ -3041,7 +3061,7 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
ggtt->vm.clear_range = bxt_vtd_ggtt_clear_range__BKL;
}
- ggtt->invalidate = gen6_ggtt_invalidate;
+ ggtt->invalidate = gen8_ggtt_invalidate;
ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
@@ -3281,7 +3301,7 @@ int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv)
void i915_ggtt_enable_guc(struct i915_ggtt *ggtt)
{
- GEM_BUG_ON(ggtt->invalidate != gen6_ggtt_invalidate);
+ GEM_BUG_ON(ggtt->invalidate != gen8_ggtt_invalidate);
ggtt->invalidate = guc_ggtt_invalidate;
@@ -3291,13 +3311,13 @@ void i915_ggtt_enable_guc(struct i915_ggtt *ggtt)
void i915_ggtt_disable_guc(struct i915_ggtt *ggtt)
{
/* XXX Temporary pardon for error unload */
- if (ggtt->invalidate == gen6_ggtt_invalidate)
+ if (ggtt->invalidate == gen8_ggtt_invalidate)
return;
/* We should only be called after i915_ggtt_enable_guc() */
GEM_BUG_ON(ggtt->invalidate != guc_ggtt_invalidate);
- ggtt->invalidate = gen6_ggtt_invalidate;
+ ggtt->invalidate = gen8_ggtt_invalidate;
ggtt->invalidate(ggtt);
}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 402283ce2864..31a4a96ddd0d 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -443,6 +443,7 @@ struct i915_ppgtt {
struct gen6_ppgtt {
struct i915_ppgtt base;
+ struct mutex flush;
struct i915_vma *vma;
gen6_pte_t __iomem *pd_addr;
diff --git a/drivers/gpu/drm/i915/i915_getparam.c b/drivers/gpu/drm/i915/i915_getparam.c
index cf8a8c3ef047..54fce81d5724 100644
--- a/drivers/gpu/drm/i915/i915_getparam.c
+++ b/drivers/gpu/drm/i915/i915_getparam.c
@@ -2,6 +2,7 @@
* SPDX-License-Identifier: MIT
*/
+#include "gem/i915_gem_mman.h"
#include "gt/intel_engine_user.h"
#include "i915_drv.h"
diff --git a/drivers/gpu/drm/i915/i915_globals.c b/drivers/gpu/drm/i915/i915_globals.c
index be127cd28931..3aa213684293 100644
--- a/drivers/gpu/drm/i915/i915_globals.c
+++ b/drivers/gpu/drm/i915/i915_globals.c
@@ -20,7 +20,10 @@ static LIST_HEAD(globals);
static atomic_t active;
static atomic_t epoch;
static struct park_work {
- struct rcu_work work;
+ struct delayed_work work;
+ struct rcu_head rcu;
+ unsigned long flags;
+#define PENDING 0
int epoch;
} park;
@@ -37,11 +40,33 @@ static void i915_globals_shrink(void)
global->shrink();
}
+static void __i915_globals_grace(struct rcu_head *rcu)
+{
+ /* Ratelimit parking as shrinking is quite slow */
+ schedule_delayed_work(&park.work, round_jiffies_up_relative(2 * HZ));
+}
+
+static void __i915_globals_queue_rcu(void)
+{
+ park.epoch = atomic_inc_return(&epoch);
+ if (!atomic_read(&active)) {
+ init_rcu_head(&park.rcu);
+ call_rcu(&park.rcu, __i915_globals_grace);
+ }
+}
+
static void __i915_globals_park(struct work_struct *work)
{
+ destroy_rcu_head(&park.rcu);
+
/* Confirm nothing woke up in the last grace period */
- if (park.epoch == atomic_read(&epoch))
- i915_globals_shrink();
+ if (park.epoch != atomic_read(&epoch)) {
+ __i915_globals_queue_rcu();
+ return;
+ }
+
+ clear_bit(PENDING, &park.flags);
+ i915_globals_shrink();
}
void __init i915_global_register(struct i915_global *global)
@@ -85,7 +110,7 @@ int __init i915_globals_init(void)
}
}
- INIT_RCU_WORK(&park.work, __i915_globals_park);
+ INIT_DELAYED_WORK(&park.work, __i915_globals_park);
return 0;
}
@@ -103,8 +128,9 @@ void i915_globals_park(void)
if (!atomic_dec_and_test(&active))
return;
- park.epoch = atomic_inc_return(&epoch);
- queue_rcu_work(system_wq, &park.work);
+ /* Queue cleanup after the next RCU grace period has freed slabs */
+ if (!test_and_set_bit(PENDING, &park.flags))
+ __i915_globals_queue_rcu();
}
void i915_globals_unpark(void)
@@ -113,12 +139,21 @@ void i915_globals_unpark(void)
atomic_inc(&active);
}
+static void __exit __i915_globals_flush(void)
+{
+ atomic_inc(&active); /* skip shrinking */
+
+ rcu_barrier(); /* wait for the work to be queued */
+ flush_delayed_work(&park.work);
+
+ atomic_dec(&active);
+}
+
void __exit i915_globals_exit(void)
{
- /* Flush any residual park_work */
- atomic_inc(&epoch);
- flush_rcu_work(&park.work);
+ GEM_BUG_ON(atomic_read(&active));
+ __i915_globals_flush();
__i915_globals_cleanup();
/* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */
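/*
 * The shape of the ratelimit adopted above, reduced to a sketch
 * (generic names): the shrink work is queued from an RCU callback, so
 * a park/unpark storm costs at most one shrink per grace period plus
 * the two-second delay.
 */
static void grace_cb(struct rcu_head *rcu)
{
	schedule_delayed_work(&shrink_work, round_jiffies_up_relative(2 * HZ));
}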
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 3c85cb0ee99f..fda0977d2059 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -599,9 +599,10 @@ static void err_print_capabilities(struct drm_i915_error_state_buf *m,
{
struct drm_printer p = i915_error_printer(m);
- intel_device_info_dump_flags(info, &p);
+ intel_device_info_print_static(info, &p);
+ intel_device_info_print_runtime(runtime, &p);
+ intel_device_info_print_topology(&runtime->sseu, &p);
intel_driver_caps_print(caps, &p);
- intel_device_info_dump_topology(&runtime->sseu, &p);
}
static void err_print_params(struct drm_i915_error_state_buf *m,
@@ -1045,7 +1046,7 @@ i915_error_object_create(struct drm_i915_private *i915,
s = kmap(page);
ret = compress_page(compress, s, dst);
- kunmap(s);
+ kunmap(page);
drm_clflush_pages(&page, 1);
@@ -1220,7 +1221,7 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
static void record_request(const struct i915_request *request,
struct drm_i915_error_request *erq)
{
- const struct i915_gem_context *ctx = request->gem_context;
+ const struct i915_gem_context *ctx;
erq->flags = request->fence.flags;
erq->context = request->fence.context;
@@ -1231,8 +1232,11 @@ static void record_request(const struct i915_request *request,
erq->head = request->head;
erq->tail = request->tail;
+ erq->pid = 0;
rcu_read_lock();
- erq->pid = ctx->pid ? pid_nr(ctx->pid) : 0;
+ ctx = rcu_dereference(request->context->gem_context);
+ if (ctx)
+ erq->pid = pid_nr(ctx->pid);
rcu_read_unlock();
}
@@ -1300,25 +1304,34 @@ static void error_record_engine_execlists(const struct intel_engine_cs *engine,
static bool record_context(struct drm_i915_error_context *e,
const struct i915_request *rq)
{
- const struct i915_gem_context *ctx = rq->gem_context;
+ struct i915_gem_context *ctx;
+ struct task_struct *task;
+ bool capture;
- if (ctx->pid) {
- struct task_struct *task;
+ rcu_read_lock();
+ ctx = rcu_dereference(rq->context->gem_context);
+ if (ctx && !kref_get_unless_zero(&ctx->ref))
+ ctx = NULL;
+ rcu_read_unlock();
+ if (!ctx)
+ return false;
- rcu_read_lock();
- task = pid_task(ctx->pid, PIDTYPE_PID);
- if (task) {
- strcpy(e->comm, task->comm);
- e->pid = task->pid;
- }
- rcu_read_unlock();
+ rcu_read_lock();
+ task = pid_task(ctx->pid, PIDTYPE_PID);
+ if (task) {
+ strcpy(e->comm, task->comm);
+ e->pid = task->pid;
}
+ rcu_read_unlock();
e->sched_attr = ctx->sched;
e->guilty = atomic_read(&ctx->guilty_count);
e->active = atomic_read(&ctx->active_count);
- return i915_gem_context_no_error_capture(ctx);
+ capture = i915_gem_context_no_error_capture(ctx);
+
+ i915_gem_context_put(ctx);
+ return capture;
}
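/*
 * The lookup in record_context() above follows the standard pattern for
 * taking a reference to an RCU-protected object that may already be
 * dying; reduced to its core:
 */
rcu_read_lock();
ctx = rcu_dereference(rq->context->gem_context);
if (ctx && !kref_get_unless_zero(&ctx->ref))
	ctx = NULL; /* the final kref_put() already ran; treat as gone */
rcu_read_unlock();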
struct capture_vma {
@@ -1454,7 +1467,7 @@ gem_record_rings(struct i915_gpu_state *error, struct compress *compress)
capture = request_record_user_bo(request, ee, capture);
capture = capture_vma(capture,
- request->hw_context->state,
+ request->context->state,
&ee->ctx);
capture = capture_vma(capture,
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index dae00f7dd7df..42b79f577500 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -2453,6 +2453,25 @@ static inline void gen11_master_intr_enable(void __iomem * const regs)
raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
}
+static void
+gen11_display_irq_handler(struct drm_i915_private *i915)
+{
+ void __iomem * const regs = i915->uncore.regs;
+ const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL);
+
+ disable_rpm_wakeref_asserts(&i915->runtime_pm);
+ /*
+ * GEN11_DISPLAY_INT_CTL has the same format as GEN8_MASTER_IRQ
+ * for the display related bits.
+ */
+ raw_reg_write(regs, GEN11_DISPLAY_INT_CTL, 0x0);
+ gen8_de_irq_handler(i915, disp_ctl);
+ raw_reg_write(regs, GEN11_DISPLAY_INT_CTL,
+ GEN11_DISPLAY_IRQ_ENABLE);
+
+ enable_rpm_wakeref_asserts(&i915->runtime_pm);
+}
+
static __always_inline irqreturn_t
__gen11_irq_handler(struct drm_i915_private * const i915,
u32 (*intr_disable)(void __iomem * const regs),
@@ -2476,17 +2495,8 @@ __gen11_irq_handler(struct drm_i915_private * const i915,
gen11_gt_irq_handler(gt, master_ctl);
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
- if (master_ctl & GEN11_DISPLAY_IRQ) {
- const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL);
-
- disable_rpm_wakeref_asserts(&i915->runtime_pm);
- /*
- * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ
- * for the display related bits.
- */
- gen8_de_irq_handler(i915, disp_ctl);
- enable_rpm_wakeref_asserts(&i915->runtime_pm);
- }
+ if (master_ctl & GEN11_DISPLAY_IRQ)
+ gen11_display_irq_handler(i915);
gu_misc_iir = gen11_gu_misc_irq_ack(gt, master_ctl);
@@ -2788,15 +2798,11 @@ static void gen8_irq_reset(struct drm_i915_private *dev_priv)
ibx_irq_reset(dev_priv);
}
-static void gen11_irq_reset(struct drm_i915_private *dev_priv)
+static void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
{
struct intel_uncore *uncore = &dev_priv->uncore;
enum pipe pipe;
- gen11_master_intr_disable(dev_priv->uncore.regs);
-
- gen11_gt_irq_reset(&dev_priv->gt);
-
intel_uncore_write(uncore, GEN11_DISPLAY_INT_CTL, 0);
if (INTEL_GEN(dev_priv) >= 12) {
@@ -2825,13 +2831,24 @@ static void gen11_irq_reset(struct drm_i915_private *dev_priv)
GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
GEN3_IRQ_RESET(uncore, GEN11_DE_HPD_);
- GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_);
- GEN3_IRQ_RESET(uncore, GEN8_PCU_);
if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
GEN3_IRQ_RESET(uncore, SDE);
}
+static void gen11_irq_reset(struct drm_i915_private *dev_priv)
+{
+ struct intel_uncore *uncore = &dev_priv->uncore;
+
+ gen11_master_intr_disable(dev_priv->uncore.regs);
+
+ gen11_gt_irq_reset(&dev_priv->gt);
+ gen11_display_irq_reset(dev_priv);
+
+ GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_);
+ GEN3_IRQ_RESET(uncore, GEN8_PCU_);
+}
+
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
u8 pipe_mask)
{
@@ -2976,6 +2993,8 @@ static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv,
hotplug_irqs = sde_ddi_mask | sde_tc_mask;
enabled_irqs = intel_hpd_enabled_irqs(dev_priv, pins);
+ I915_WRITE(SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
+
ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
icp_hpd_detection_setup(dev_priv, ddi_enable_mask, tc_enable_mask);
@@ -3081,6 +3100,9 @@ static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
u32 hotplug_irqs, enabled_irqs;
+ if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
+ I915_WRITE(SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
+
hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt);
@@ -3597,7 +3619,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
intel_uncore_write16(&dev_priv->uncore, GEN2_IIR, iir);
if (iir & I915_USER_INTERRUPT)
- intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
+ intel_engine_signal_breadcrumbs(dev_priv->engine[RCS0]);
if (iir & I915_MASTER_ERROR_INTERRUPT)
i8xx_error_irq_handler(dev_priv, eir, eir_stuck);
@@ -3702,7 +3724,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
I915_WRITE(GEN2_IIR, iir);
if (iir & I915_USER_INTERRUPT)
- intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
+ intel_engine_signal_breadcrumbs(dev_priv->engine[RCS0]);
if (iir & I915_MASTER_ERROR_INTERRUPT)
i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
@@ -3844,10 +3866,10 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
I915_WRITE(GEN2_IIR, iir);
if (iir & I915_USER_INTERRUPT)
- intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
+ intel_engine_signal_breadcrumbs(dev_priv->engine[RCS0]);
if (iir & I915_BSD_USER_INTERRUPT)
- intel_engine_breadcrumbs_irq(dev_priv->engine[VCS0]);
+ intel_engine_signal_breadcrumbs(dev_priv->engine[VCS0]);
if (iir & I915_MASTER_ERROR_INTERRUPT)
i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
diff --git a/drivers/gpu/drm/i915/i915_memcpy.c b/drivers/gpu/drm/i915/i915_memcpy.c
index 07b04b0acb77..fdd550405fd3 100644
--- a/drivers/gpu/drm/i915/i915_memcpy.c
+++ b/drivers/gpu/drm/i915/i915_memcpy.c
@@ -27,6 +27,12 @@
#include "i915_memcpy.h"
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
+#define CI_BUG_ON(expr) BUG_ON(expr)
+#else
+#define CI_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
+#endif
+
static DEFINE_STATIC_KEY_FALSE(has_movntdqa);
#ifdef CONFIG_AS_MOVNTDQA
@@ -34,7 +40,6 @@ static void __memcpy_ntdqa(void *dst, const void *src, unsigned long len)
{
kernel_fpu_begin();
- len >>= 4;
while (len >= 4) {
asm("movntdqa (%0), %%xmm0\n"
"movntdqa 16(%0), %%xmm1\n"
@@ -59,6 +64,38 @@ static void __memcpy_ntdqa(void *dst, const void *src, unsigned long len)
kernel_fpu_end();
}
+
+static void __memcpy_ntdqu(void *dst, const void *src, unsigned long len)
+{
+ kernel_fpu_begin();
+
+ while (len >= 4) {
+ asm("movntdqa (%0), %%xmm0\n"
+ "movntdqa 16(%0), %%xmm1\n"
+ "movntdqa 32(%0), %%xmm2\n"
+ "movntdqa 48(%0), %%xmm3\n"
+ "movups %%xmm0, (%1)\n"
+ "movups %%xmm1, 16(%1)\n"
+ "movups %%xmm2, 32(%1)\n"
+ "movups %%xmm3, 48(%1)\n"
+ :: "r" (src), "r" (dst) : "memory");
+ src += 64;
+ dst += 64;
+ len -= 4;
+ }
+ while (len--) {
+ asm("movntdqa (%0), %%xmm0\n"
+ "movups %%xmm0, (%1)\n"
+ :: "r" (src), "r" (dst) : "memory");
+ src += 16;
+ dst += 16;
+ }
+
+ kernel_fpu_end();
+}
+#else
+static void __memcpy_ntdqa(void *dst, const void *src, unsigned long len) {}
+static void __memcpy_ntdqu(void *dst, const void *src, unsigned long len) {}
#endif
/**
@@ -83,17 +120,47 @@ bool i915_memcpy_from_wc(void *dst, const void *src, unsigned long len)
if (unlikely(((unsigned long)dst | (unsigned long)src | len) & 15))
return false;
-#ifdef CONFIG_AS_MOVNTDQA
if (static_branch_likely(&has_movntdqa)) {
if (likely(len))
- __memcpy_ntdqa(dst, src, len);
+ __memcpy_ntdqa(dst, src, len >> 4);
return true;
}
-#endif
return false;
}
+/**
+ * i915_unaligned_memcpy_from_wc: perform a mostly accelerated read from WC
+ * @dst: destination pointer
+ * @src: source pointer
+ * @len: how many bytes to copy
+ *
+ * Like i915_memcpy_from_wc(), the unaligned variant copies @len bytes from
+ * @src to @dst using non-temporal instructions where available. It
+ * accepts that its arguments may be unaligned, but they must be valid for the
+ * potential 16-byte read past the end.
+ */
+void i915_unaligned_memcpy_from_wc(void *dst, void *src, unsigned long len)
+{
+ unsigned long addr;
+
+ CI_BUG_ON(!i915_has_memcpy_from_wc());
+
+ addr = (unsigned long)src;
+ if (!IS_ALIGNED(addr, 16)) {
+ unsigned long x = min(ALIGN(addr, 16) - addr, len);
+
+ memcpy(dst, src, x);
+
+ len -= x;
+ dst += x;
+ src += x;
+ }
+
+ if (likely(len))
+ __memcpy_ntdqu(dst, src, DIV_ROUND_UP(len, 16));
+}
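/*
 * A worked example for the head-alignment math above (values made up):
 * with src == 0x1009 and len == 100, ALIGN(0x1009, 16) == 0x1010, so
 * x = min(7, 100) = 7 bytes go through the plain memcpy(); the
 * remaining 93 bytes start 16-byte aligned and DIV_ROUND_UP(93, 16) == 6
 * blocks are streamed, reading 96 bytes in total -- hence the kernel-doc
 * requirement that the buffer stay valid for a 16-byte read past the
 * end. The aligned variant reports failure instead, so callers keep a
 * fallback:
 */
if (!i915_memcpy_from_wc(dst, src, len))
	memcpy(dst, src, len);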
+
void i915_memcpy_init_early(struct drm_i915_private *dev_priv)
{
/*
diff --git a/drivers/gpu/drm/i915/i915_memcpy.h b/drivers/gpu/drm/i915/i915_memcpy.h
index 970d84b16987..e36d30edd987 100644
--- a/drivers/gpu/drm/i915/i915_memcpy.h
+++ b/drivers/gpu/drm/i915/i915_memcpy.h
@@ -11,7 +11,9 @@
struct drm_i915_private;
void i915_memcpy_init_early(struct drm_i915_private *i915);
+
bool i915_memcpy_from_wc(void *dst, const void *src, unsigned long len);
+void i915_unaligned_memcpy_from_wc(void *dst, void *src, unsigned long len);
/* The movntdqa instructions used for memcpy-from-wc require 16-byte alignment,
* as well as SSE4.1 support. i915_memcpy_from_wc() will report if it cannot
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 1bb701d32a5d..9571611b4b16 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -30,6 +30,7 @@
#include "display/intel_fbdev.h"
#include "i915_drv.h"
+#include "i915_perf.h"
#include "i915_globals.h"
#include "i915_selftest.h"
@@ -436,7 +437,7 @@ static const struct intel_device_info intel_sandybridge_m_gt2_info = {
.has_rc6 = 1, \
.has_rc6p = 1, \
.has_rps = true, \
- .ppgtt_type = INTEL_PPGTT_ALIASING, \
+ .ppgtt_type = INTEL_PPGTT_FULL, \
.ppgtt_size = 31, \
IVB_PIPE_OFFSETS, \
IVB_CURSOR_OFFSETS, \
@@ -493,7 +494,7 @@ static const struct intel_device_info intel_valleyview_info = {
.has_rps = true,
.display.has_gmch = 1,
.display.has_hotplug = 1,
- .ppgtt_type = INTEL_PPGTT_ALIASING,
+ .ppgtt_type = INTEL_PPGTT_FULL,
.ppgtt_size = 31,
.has_snoop = true,
.has_coherent_ggtt = false,
@@ -897,6 +898,8 @@ static const struct pci_device_id pciidlist[] = {
INTEL_WHL_U_GT3_IDS(&intel_coffeelake_gt3_info),
INTEL_CML_GT1_IDS(&intel_coffeelake_gt1_info),
INTEL_CML_GT2_IDS(&intel_coffeelake_gt2_info),
+ INTEL_CML_U_GT1_IDS(&intel_coffeelake_gt1_info),
+ INTEL_CML_U_GT2_IDS(&intel_coffeelake_gt2_info),
INTEL_CNL_IDS(&intel_cannonlake_info),
INTEL_ICL_11_IDS(&intel_icelake_11_info),
INTEL_EHL_IDS(&intel_elkhartlake_info),
@@ -1003,6 +1006,12 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return err > 0 ? -ENOTTY : err;
}
+ err = i915_perf_selftests(pdev);
+ if (err) {
+ i915_pci_remove(pdev);
+ return err > 0 ? -ENOTTY : err;
+ }
+
return 0;
}
@@ -1045,7 +1054,12 @@ static int __init i915_init(void)
return 0;
}
- return pci_register_driver(&i915_pci_driver);
+ err = pci_register_driver(&i915_pci_driver);
+ if (err)
+ return err;
+
+ i915_perf_sysctl_register();
+ return 0;
}
static void __exit i915_exit(void)
@@ -1053,6 +1067,7 @@ static void __exit i915_exit(void)
if (!i915_pci_driver.driver.owner)
return;
+ i915_perf_sysctl_unregister();
pci_unregister_driver(&i915_pci_driver);
i915_globals_exit();
}
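
The i915_init()/i915_exit() hunks above follow the usual rule that teardown mirrors setup in reverse, and that a later step only runs once the earlier one succeeded. A stubbed userspace sketch of that ordering (the function names are stand-ins, not the kernel symbols):

        #include <stdio.h>

        static int  pci_register(void)      { puts("register pci"); return 0; }
        static void pci_unregister(void)    { puts("unregister pci"); }
        static void sysctl_register(void)   { puts("register sysctl"); }
        static void sysctl_unregister(void) { puts("unregister sysctl"); }

        static int driver_init(void)
        {
                int err = pci_register();
                if (err)
                        return err;     /* nothing to unwind yet */

                sysctl_register();      /* only after registration succeeded */
                return 0;
        }

        static void driver_exit(void)
        {
                sysctl_unregister();    /* reverse order of driver_init() */
                pci_unregister();
        }

        int main(void)
        {
                if (!driver_init())
                        driver_exit();
                return 0;
        }
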
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 2ae14bc14931..84350c7bc711 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -387,6 +387,8 @@ struct i915_oa_config_bo {
struct i915_vma *vma;
};
+static struct ctl_table_header *sysctl_header;
+
static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer);
void i915_oa_config_release(struct kref *ref)
@@ -1777,6 +1779,8 @@ static int alloc_noa_wait(struct i915_perf_stream *stream)
*cs++ = MI_MATH_ADD;
*cs++ = MI_MATH_STOREINV(MI_MATH_REG(JUMP_PREDICATE), MI_MATH_REG_CF);
+ *cs++ = MI_ARB_CHECK;
+
/*
* Transfer the result into the predicate register to be used for the
* predicated jump.
@@ -1966,7 +1970,9 @@ static int emit_oa_config(struct i915_perf_stream *stream,
if (err)
goto err_vma_put;
+ intel_engine_pm_get(ce->engine);
rq = i915_request_create(ce);
+ intel_engine_pm_put(ce->engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_vma_unpin;
@@ -2155,7 +2161,7 @@ static int gen8_modify_context(struct intel_context *ce,
lockdep_assert_held(&ce->pin_mutex);
- rq = i915_request_create(ce->engine->kernel_context);
+ rq = intel_engine_create_kernel_request(ce->engine);
if (IS_ERR(rq))
return PTR_ERR(rq);
@@ -2317,9 +2323,6 @@ static int oa_configure_all_contexts(struct i915_perf_stream *stream,
*/
spin_lock(&i915->gem.contexts.lock);
list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
- if (ctx == i915->kernel_context)
- continue;
-
if (!kref_get_unless_zero(&ctx->ref))
continue;
@@ -2455,6 +2458,13 @@ static int gen8_enable_metric_set(struct i915_perf_stream *stream)
return emit_oa_config(stream, oa_config, oa_context(stream));
}
+static u32 oag_report_ctx_switches(const struct i915_perf_stream *stream)
+{
+ return _MASKED_FIELD(GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS,
+ (stream->sample_flags & SAMPLE_OA_REPORT) ?
+ 0 : GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS);
+}
+
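
oag_report_ctx_switches() folds both the set and the clear case into a single _MASKED_FIELD() expression. The convention it relies on: a masked register write carries the affected bits in the top half of the word and their new values in the bottom half, so unrelated bits are left untouched. A userspace sketch with invented values (not the real OA register layout):

        #include <stdint.h>
        #include <stdio.h>

        #define MASKED_FIELD(mask, value) (((uint32_t)(mask) << 16) | (value))

        static uint16_t apply_masked_write(uint16_t reg, uint32_t wr)
        {
                uint16_t mask = wr >> 16;
                uint16_t val  = wr & 0xffff;

                return (reg & ~mask) | (val & mask);
        }

        int main(void)
        {
                uint16_t reg = 0x00f0;

                /* set bit 2 without touching anything else */
                reg = apply_masked_write(reg, MASKED_FIELD(1 << 2, 1 << 2));
                /* clear bit 4 without touching anything else */
                reg = apply_masked_write(reg, MASKED_FIELD(1 << 4, 0));

                printf("reg = 0x%04x\n", reg);  /* 0x00e4 */
                return 0;
        }
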
static int gen12_enable_metric_set(struct i915_perf_stream *stream)
{
struct intel_uncore *uncore = stream->uncore;
@@ -2468,12 +2478,10 @@ static int gen12_enable_metric_set(struct i915_perf_stream *stream)
_MASKED_BIT_ENABLE(GEN12_OAG_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
GEN12_OAG_OA_DEBUG_INCLUDE_CLK_RATIO) |
/*
- * If the user didn't require OA reports, instruct the
- * hardware not to emit ctx switch reports.
+ * If the user didn't require OA reports, instruct
+ * the hardware not to emit ctx switch reports.
*/
- !(stream->sample_flags & SAMPLE_OA_REPORT) ?
- _MASKED_BIT_ENABLE(GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS) :
- _MASKED_BIT_DISABLE(GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS));
+ oag_report_ctx_switches(stream));
intel_uncore_write(uncore, GEN12_OAG_OAGLBCTXCTRL, periodic ?
(GEN12_OAG_OAGLBCTXCTRL_COUNTER_RESUME |
@@ -3101,7 +3109,7 @@ static void i915_perf_enable_locked(struct i915_perf_stream *stream)
stream->ops->enable(stream);
if (stream->hold_preemption)
- i915_gem_context_set_nopreempt(stream->ctx);
+ intel_context_set_nopreempt(stream->pinned_ctx);
}
/**
@@ -3127,7 +3135,7 @@ static void i915_perf_disable_locked(struct i915_perf_stream *stream)
stream->enabled = false;
if (stream->hold_preemption)
- i915_gem_context_clear_nopreempt(stream->ctx);
+ intel_context_clear_nopreempt(stream->pinned_ctx);
if (stream->ops->disable)
stream->ops->disable(stream);
@@ -3981,7 +3989,7 @@ int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
struct i915_perf *perf = &to_i915(dev)->perf;
struct drm_i915_perf_oa_config *args = data;
struct i915_oa_config *oa_config, *tmp;
- static struct i915_oa_reg *regs;
+ struct i915_oa_reg *regs;
int err, id;
if (!perf->i915) {
@@ -4219,7 +4227,7 @@ static struct ctl_table dev_root[] = {
};
/**
- * i915_perf_init - initialize i915-perf state on module load
+ * i915_perf_init - initialize i915-perf state on module bind
* @i915: i915 device instance
*
* Initializes i915-perf state without exposing anything to userspace.
@@ -4336,7 +4344,6 @@ void i915_perf_init(struct drm_i915_private *i915)
oa_sample_rate_hard_limit = 1000 *
(RUNTIME_INFO(i915)->cs_timestamp_frequency_khz / 2);
- perf->sysctl_header = register_sysctl_table(dev_root);
mutex_init(&perf->metrics_lock);
idr_init(&perf->metrics_idr);
@@ -4372,6 +4379,16 @@ static int destroy_config(int id, void *p, void *data)
return 0;
}
+void i915_perf_sysctl_register(void)
+{
+ sysctl_header = register_sysctl_table(dev_root);
+}
+
+void i915_perf_sysctl_unregister(void)
+{
+ unregister_sysctl_table(sysctl_header);
+}
+
/**
* i915_perf_fini - Counter part to i915_perf_init()
* @i915: i915 device instance
@@ -4386,8 +4403,6 @@ void i915_perf_fini(struct drm_i915_private *i915)
idr_for_each(&perf->metrics_idr, destroy_config, perf);
idr_destroy(&perf->metrics_idr);
- unregister_sysctl_table(perf->sysctl_header);
-
memset(&perf->ops, 0, sizeof(perf->ops));
perf->i915 = NULL;
}
diff --git a/drivers/gpu/drm/i915/i915_perf.h b/drivers/gpu/drm/i915/i915_perf.h
index 4ceebce72060..882fdd0a7680 100644
--- a/drivers/gpu/drm/i915/i915_perf.h
+++ b/drivers/gpu/drm/i915/i915_perf.h
@@ -23,6 +23,8 @@ void i915_perf_fini(struct drm_i915_private *i915);
void i915_perf_register(struct drm_i915_private *i915);
void i915_perf_unregister(struct drm_i915_private *i915);
int i915_perf_ioctl_version(void);
+void i915_perf_sysctl_register(void);
+void i915_perf_sysctl_unregister(void);
int i915_perf_open_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
diff --git a/drivers/gpu/drm/i915/i915_perf_types.h b/drivers/gpu/drm/i915/i915_perf_types.h
index 74ddc20a0d37..45e581455f5d 100644
--- a/drivers/gpu/drm/i915/i915_perf_types.h
+++ b/drivers/gpu/drm/i915/i915_perf_types.h
@@ -380,7 +380,6 @@ struct i915_perf {
struct drm_i915_private *i915;
struct kobject *metrics_kobj;
- struct ctl_table_header *sysctl_header;
/*
* Lock associated with adding/modifying/removing OA configs
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index 2814218c5ba1..f3ef6700a5f2 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -144,61 +144,40 @@ static inline s64 ktime_since(const ktime_t kt)
return ktime_to_ns(ktime_sub(ktime_get(), kt));
}
-static u64 __pmu_estimate_rc6(struct i915_pmu *pmu)
-{
- u64 val;
-
- /*
- * We think we are runtime suspended.
- *
- * Report the delta from when the device was suspended to now,
- * on top of the last known real value, as the approximated RC6
- * counter value.
- */
- val = ktime_since(pmu->sleep_last);
- val += pmu->sample[__I915_SAMPLE_RC6].cur;
-
- pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val;
-
- return val;
-}
-
-static u64 __pmu_update_rc6(struct i915_pmu *pmu, u64 val)
-{
- /*
- * If we are coming back from being runtime suspended we must
- * be careful not to report a larger value than returned
- * previously.
- */
- if (val >= pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur) {
- pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur = 0;
- pmu->sample[__I915_SAMPLE_RC6].cur = val;
- } else {
- val = pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur;
- }
-
- return val;
-}
-
static u64 get_rc6(struct intel_gt *gt)
{
struct drm_i915_private *i915 = gt->i915;
struct i915_pmu *pmu = &i915->pmu;
unsigned long flags;
+ bool awake = false;
u64 val;
- val = 0;
if (intel_gt_pm_get_if_awake(gt)) {
val = __get_rc6(gt);
intel_gt_pm_put_async(gt);
+ awake = true;
}
spin_lock_irqsave(&pmu->lock, flags);
- if (val)
- val = __pmu_update_rc6(pmu, val);
+ if (awake) {
+ pmu->sample[__I915_SAMPLE_RC6].cur = val;
+ } else {
+ /*
+ * We think we are runtime suspended.
+ *
+ * Report the delta from when the device was suspended to now,
+ * on top of the last known real value, as the approximated RC6
+ * counter value.
+ */
+ val = ktime_since(pmu->sleep_last);
+ val += pmu->sample[__I915_SAMPLE_RC6].cur;
+ }
+
+ if (val < pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur)
+ val = pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur;
else
- val = __pmu_estimate_rc6(pmu);
+ pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur = val;
spin_unlock_irqrestore(&pmu->lock, flags);
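
The rewritten get_rc6() above keeps three pieces of state: the last hardware counter sample, the time the device parked, and the largest value ever reported. A compact userspace sketch of that accounting (field and helper names are stand-ins): while awake the hardware value is trusted; while parked the sleep time is extrapolated on top of the last sample; and the result is clamped so the counter never appears to run backwards.

        #include <stdint.h>

        struct rc6_state {
                uint64_t hw;            /* last hardware counter sample */
                uint64_t sleep_since;   /* timestamp when the device parked */
                uint64_t last_reported; /* monotonicity clamp */
        };

        static uint64_t rc6_read(struct rc6_state *s, int awake,
                                 uint64_t hw_val, uint64_t now)
        {
                uint64_t val;

                if (awake) {
                        s->hw = hw_val;
                        val = hw_val;
                } else {
                        /* parked: assume the whole sleep was spent in RC6 */
                        val = s->hw + (now - s->sleep_since);
                }

                if (val < s->last_reported)
                        val = s->last_reported; /* never run backwards */
                else
                        s->last_reported = val;

                return val;
        }

        int main(void)
        {
                struct rc6_state s = { .hw = 1000, .sleep_since = 50 };
                uint64_t parked = rc6_read(&s, 0, 0, 80);    /* 1000 + 30 */
                uint64_t awake  = rc6_read(&s, 1, 1020, 90); /* clamped up */

                return (parked == 1030 && awake == 1030) ? 0 : 1;
        }
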
@@ -210,20 +189,11 @@ static void park_rc6(struct drm_i915_private *i915)
struct i915_pmu *pmu = &i915->pmu;
if (pmu->enable & config_enabled_mask(I915_PMU_RC6_RESIDENCY))
- __pmu_update_rc6(pmu, __get_rc6(&i915->gt));
+ pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt);
pmu->sleep_last = ktime_get();
}
-static void unpark_rc6(struct drm_i915_private *i915)
-{
- struct i915_pmu *pmu = &i915->pmu;
-
- /* Estimate how long we slept and accumulate that into rc6 counters */
- if (pmu->enable & config_enabled_mask(I915_PMU_RC6_RESIDENCY))
- __pmu_estimate_rc6(pmu);
-}
-
#else
static u64 get_rc6(struct intel_gt *gt)
@@ -232,7 +202,6 @@ static u64 get_rc6(struct intel_gt *gt)
}
static void park_rc6(struct drm_i915_private *i915) {}
-static void unpark_rc6(struct drm_i915_private *i915) {}
#endif
@@ -281,8 +250,6 @@ void i915_pmu_gt_unparked(struct drm_i915_private *i915)
*/
__i915_pmu_maybe_start_timer(pmu);
- unpark_rc6(i915);
-
spin_unlock_irq(&pmu->lock);
}
@@ -292,6 +259,16 @@ add_sample(struct i915_pmu_sample *sample, u32 val)
sample->cur += val;
}
+static bool exclusive_mmio_access(const struct drm_i915_private *i915)
+{
+ /*
+ * We have to avoid concurrent mmio cache line access on gen7 or
+ * risk a machine hang. For a fun history lesson dig out the old
+ * userspace intel_gpu_top and run it on Ivybridge or Haswell!
+ */
+ return IS_GEN(i915, 7);
+}
+
static void
engines_sample(struct intel_gt *gt, unsigned int period_ns)
{
@@ -302,8 +279,12 @@ engines_sample(struct intel_gt *gt, unsigned int period_ns)
if ((i915->pmu.enable & ENGINE_SAMPLE_MASK) == 0)
return;
+ if (!intel_gt_pm_is_awake(gt))
+ return;
+
for_each_engine(engine, gt, id) {
struct intel_engine_pmu *pmu = &engine->pmu;
+ spinlock_t *mmio_lock;
unsigned long flags;
bool busy;
u32 val;
@@ -311,7 +292,12 @@ engines_sample(struct intel_gt *gt, unsigned int period_ns)
if (!intel_engine_pm_get_if_awake(engine))
continue;
- spin_lock_irqsave(&engine->uncore->lock, flags);
+ mmio_lock = NULL;
+ if (exclusive_mmio_access(i915))
+ mmio_lock = &engine->uncore->lock;
+
+ if (unlikely(mmio_lock))
+ spin_lock_irqsave(mmio_lock, flags);
val = ENGINE_READ_FW(engine, RING_CTL);
if (val == 0) /* powerwell off => engine idle */
@@ -342,7 +328,8 @@ engines_sample(struct intel_gt *gt, unsigned int period_ns)
add_sample(&pmu->sample[I915_SAMPLE_BUSY], period_ns);
skip:
- spin_unlock_irqrestore(&engine->uncore->lock, flags);
+ if (unlikely(mmio_lock))
+ spin_unlock_irqrestore(mmio_lock, flags);
intel_engine_pm_put_async(engine);
}
}
@@ -353,6 +340,13 @@ add_sample_mult(struct i915_pmu_sample *sample, u32 val, u32 mul)
sample->cur += mul_u32_u32(val, mul);
}
+static bool frequency_sampling_enabled(struct i915_pmu *pmu)
+{
+ return pmu->enable &
+ (config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY) |
+ config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY));
+}
+
static void
frequency_sample(struct intel_gt *gt, unsigned int period_ns)
{
@@ -361,19 +355,33 @@ frequency_sample(struct intel_gt *gt, unsigned int period_ns)
struct i915_pmu *pmu = &i915->pmu;
struct intel_rps *rps = &gt->rps;
+ if (!frequency_sampling_enabled(pmu))
+ return;
+
+ /* Report 0/0 (actual/requested) frequency while parked. */
+ if (!intel_gt_pm_get_if_awake(gt))
+ return;
+
if (pmu->enable & config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY)) {
u32 val;
- val = rps->cur_freq;
- if (intel_gt_pm_get_if_awake(gt)) {
- val = intel_uncore_read_notrace(uncore, GEN6_RPSTAT1);
- val = intel_get_cagf(rps, val);
- intel_gt_pm_put_async(gt);
- }
+ /*
+ * We take a quick peek here without using forcewake
+ * so that we don't perturb the system under observation
+ * (forcewake => !rc6 => increased power use). We expect
+ * that if the read fails because it is outside of the
+ * mmio power well, then it will return 0 -- in which
+ * case we assume the system is running at the intended
+ * frequency. Fortunately, the read should rarely fail!
+ */
+ val = intel_uncore_read_fw(uncore, GEN6_RPSTAT1);
+ if (val)
+ val = intel_rps_get_cagf(rps, val);
+ else
+ val = rps->cur_freq;
add_sample_mult(&pmu->sample[__I915_SAMPLE_FREQ_ACT],
- intel_gpu_freq(rps, val),
- period_ns / 1000);
+ intel_gpu_freq(rps, val), period_ns / 1000);
}
if (pmu->enable & config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY)) {
@@ -381,6 +389,8 @@ frequency_sample(struct intel_gt *gt, unsigned int period_ns)
intel_gpu_freq(rps, rps->cur_freq),
period_ns / 1000);
}
+
+ intel_gt_pm_put_async(gt);
}
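
The comment above spells out the trade-off: reading GEN6_RPSTAT1 without forcewake avoids waking the hardware (and so avoids perturbing the very RC6 state being measured), at the cost that the read returns 0 when the power well is down. A stubbed userspace sketch of that fallback — all names and values here are invented:

        #include <stdint.h>

        static unsigned int requested_freq_mhz = 1100;  /* invented value */

        /* Stubs for the mmio read and CAGF decode; 0 models "well down". */
        static uint32_t read_rpstat_no_forcewake(void) { return 0; }
        static unsigned int decode_cagf_mhz(uint32_t raw) { return raw >> 8; }

        static unsigned int sample_actual_freq_mhz(void)
        {
                uint32_t raw = read_rpstat_no_forcewake();

                /* Zero readback => the well was off: report the requested
                 * frequency rather than paying for a forcewake. */
                return raw ? decode_cagf_mhz(raw) : requested_freq_mhz;
        }

        int main(void)
        {
                return sample_actual_freq_mhz() == requested_freq_mhz ? 0 : 1;
        }
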
static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer)
diff --git a/drivers/gpu/drm/i915/i915_pmu.h b/drivers/gpu/drm/i915/i915_pmu.h
index bf52e3983631..6c1647c5daf2 100644
--- a/drivers/gpu/drm/i915/i915_pmu.h
+++ b/drivers/gpu/drm/i915/i915_pmu.h
@@ -18,7 +18,7 @@ enum {
__I915_SAMPLE_FREQ_ACT = 0,
__I915_SAMPLE_FREQ_REQ,
__I915_SAMPLE_RC6,
- __I915_SAMPLE_RC6_ESTIMATED,
+ __I915_SAMPLE_RC6_LAST_REPORTED,
__I915_NUM_PMU_SAMPLERS
};
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 73079b503724..bbfedeb00b7f 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -5042,14 +5042,20 @@ enum {
#define BLM_PCH_POLARITY (1 << 29)
#define BLC_PWM_PCH_CTL2 _MMIO(0xc8254)
-#define UTIL_PIN_CTL _MMIO(0x48400)
-#define UTIL_PIN_ENABLE (1 << 31)
-
-#define UTIL_PIN_PIPE(x) ((x) << 29)
-#define UTIL_PIN_PIPE_MASK (3 << 29)
-#define UTIL_PIN_MODE_PWM (1 << 24)
-#define UTIL_PIN_MODE_MASK (0xf << 24)
-#define UTIL_PIN_POLARITY (1 << 22)
+#define UTIL_PIN_CTL _MMIO(0x48400)
+#define UTIL_PIN_ENABLE (1 << 31)
+#define UTIL_PIN_PIPE_MASK (3 << 29)
+#define UTIL_PIN_PIPE(x) ((x) << 29)
+#define UTIL_PIN_MODE_MASK (0xf << 24)
+#define UTIL_PIN_MODE_DATA (0 << 24)
+#define UTIL_PIN_MODE_PWM (1 << 24)
+#define UTIL_PIN_MODE_VBLANK (4 << 24)
+#define UTIL_PIN_MODE_VSYNC (5 << 24)
+#define UTIL_PIN_MODE_EYE_LEVEL (8 << 24)
+#define UTIL_PIN_OUTPUT_DATA (1 << 23)
+#define UTIL_PIN_POLARITY (1 << 22)
+#define UTIL_PIN_DIRECTION_INPUT (1 << 19)
+#define UTIL_PIN_INPUT_DATA (1 << 16)
/* BXT backlight register definition. */
#define _BXT_BLC_PWM_CTL1 0xC8250
@@ -5751,7 +5757,8 @@ enum {
#define PIPECONF_DOUBLE_WIDE (1 << 30)
#define I965_PIPECONF_ACTIVE (1 << 30)
#define PIPECONF_DSI_PLL_LOCKED (1 << 29) /* vlv & pipe A only */
-#define PIPECONF_FRAME_START_DELAY_MASK (3 << 27)
+#define PIPECONF_FRAME_START_DELAY_MASK (3 << 27) /* pre-hsw */
+#define PIPECONF_FRAME_START_DELAY(x) ((x) << 27) /* pre-hsw: 0-3 */
#define PIPECONF_SINGLE_WIDE 0
#define PIPECONF_PIPE_UNLOCKED 0
#define PIPECONF_PIPE_LOCKED (1 << 25)
@@ -6359,6 +6366,7 @@ enum {
#define DISPPLANE_RGBX101010 (0x8 << 26)
#define DISPPLANE_RGBA101010 (0x9 << 26)
#define DISPPLANE_BGRX101010 (0xa << 26)
+#define DISPPLANE_BGRA101010 (0xb << 26)
#define DISPPLANE_RGBX161616 (0xc << 26)
#define DISPPLANE_RGBX888 (0xe << 26)
#define DISPPLANE_RGBA888 (0xf << 26)
@@ -6633,12 +6641,15 @@ enum {
#define SP_ENABLE (1 << 31)
#define SP_GAMMA_ENABLE (1 << 30)
#define SP_PIXFORMAT_MASK (0xf << 26)
-#define SP_FORMAT_YUV422 (0 << 26)
-#define SP_FORMAT_BGR565 (5 << 26)
-#define SP_FORMAT_BGRX8888 (6 << 26)
-#define SP_FORMAT_BGRA8888 (7 << 26)
-#define SP_FORMAT_RGBX1010102 (8 << 26)
-#define SP_FORMAT_RGBA1010102 (9 << 26)
+#define SP_FORMAT_YUV422 (0x0 << 26)
+#define SP_FORMAT_8BPP (0x2 << 26)
+#define SP_FORMAT_BGR565 (0x5 << 26)
+#define SP_FORMAT_BGRX8888 (0x6 << 26)
+#define SP_FORMAT_BGRA8888 (0x7 << 26)
+#define SP_FORMAT_RGBX1010102 (0x8 << 26)
+#define SP_FORMAT_RGBA1010102 (0x9 << 26)
+#define SP_FORMAT_BGRX1010102 (0xa << 26) /* CHV pipe B */
+#define SP_FORMAT_BGRA1010102 (0xb << 26) /* CHV pipe B */
#define SP_FORMAT_RGBX8888 (0xe << 26)
#define SP_FORMAT_RGBA8888 (0xf << 26)
#define SP_ALPHA_PREMULTIPLY (1 << 23) /* CHV pipe B */
@@ -6789,6 +6800,7 @@ enum {
#define PLANE_CTL_YUV422_VYUY (3 << 16)
#define PLANE_CTL_RENDER_DECOMPRESSION_ENABLE (1 << 15)
#define PLANE_CTL_TRICKLE_FEED_DISABLE (1 << 14)
+#define PLANE_CTL_CLEAR_COLOR_DISABLE (1 << 13) /* TGL+ */
#define PLANE_CTL_PLANE_GAMMA_DISABLE (1 << 13) /* Pre-GLK */
#define PLANE_CTL_TILED_MASK (0x7 << 10)
#define PLANE_CTL_TILED_LINEAR (0 << 10)
@@ -7506,11 +7518,15 @@ enum {
#define GEN8_DE_PORT_IMR _MMIO(0x44444)
#define GEN8_DE_PORT_IIR _MMIO(0x44448)
#define GEN8_DE_PORT_IER _MMIO(0x4444c)
+#define DSI1_NON_TE (1 << 31)
+#define DSI0_NON_TE (1 << 30)
#define ICL_AUX_CHANNEL_E (1 << 29)
#define CNL_AUX_CHANNEL_F (1 << 28)
#define GEN9_AUX_CHANNEL_D (1 << 27)
#define GEN9_AUX_CHANNEL_C (1 << 26)
#define GEN9_AUX_CHANNEL_B (1 << 25)
+#define DSI1_TE (1 << 24)
+#define DSI0_TE (1 << 23)
#define BXT_DE_PORT_HP_DDIC (1 << 5)
#define BXT_DE_PORT_HP_DDIB (1 << 4)
#define BXT_DE_PORT_HP_DDIA (1 << 3)
@@ -7725,6 +7741,8 @@ enum {
[TRANSCODER_B] = _CHICKEN_TRANS_B, \
[TRANSCODER_C] = _CHICKEN_TRANS_C, \
[TRANSCODER_D] = _CHICKEN_TRANS_D))
+#define HSW_FRAME_START_DELAY_MASK (3 << 27)
+#define HSW_FRAME_START_DELAY(x) ((x) << 27) /* 0-3 */
#define VSC_DATA_SEL_SOFTWARE_CONTROL (1 << 25) /* GLK and CNL+ */
#define DDI_TRAINING_OVERRIDE_ENABLE (1 << 19)
#define DDI_TRAINING_OVERRIDE_VALUE (1 << 18)
@@ -7748,6 +7766,14 @@ enum {
#define GEN7_MSG_CTL _MMIO(0x45010)
#define WAIT_FOR_PCH_RESET_ACK (1 << 1)
#define WAIT_FOR_PCH_FLR_ACK (1 << 0)
+
+#define BW_BUDDY1_CTL _MMIO(0x45140)
+#define BW_BUDDY2_CTL _MMIO(0x45150)
+#define BW_BUDDY_DISABLE REG_BIT(31)
+
+#define BW_BUDDY1_PAGE_MASK _MMIO(0x45144)
+#define BW_BUDDY2_PAGE_MASK _MMIO(0x45154)
+
#define HSW_NDE_RSTWRN_OPT _MMIO(0x46408)
#define RESET_PCH_HANDSHAKE_ENABLE (1 << 4)
@@ -7905,6 +7931,10 @@ enum {
#define PIXEL_ROUNDING_TRUNC_FB_PASSTHRU (1 << 15)
#define PER_PIXEL_ALPHA_BYPASS_EN (1 << 7)
+#define FF_MODE2 _MMIO(0x6604)
+#define FF_MODE2_TDS_TIMER_MASK REG_GENMASK(23, 16)
+#define FF_MODE2_TDS_TIMER_128 REG_FIELD_PREP(FF_MODE2_TDS_TIMER_MASK, 4)
+
/* PCH */
#define PCH_DISPLAY_BASE 0xc0000u
@@ -8093,6 +8123,10 @@ enum {
#define SHOTPLUG_CTL_TC _MMIO(0xc4034)
#define ICP_TC_HPD_ENABLE(tc_port) (8 << (tc_port) * 4)
+
+#define SHPD_FILTER_CNT _MMIO(0xc4038)
+#define SHPD_FILTER_CNT_500_ADJ 0x001D9
+
/* Icelake DSC Rate Control Range Parameter Registers */
#define DSCA_RC_RANGE_PARAMETERS_0 _MMIO(0x6B240)
#define DSCA_RC_RANGE_PARAMETERS_0_UDW _MMIO(0x6B240 + 4)
@@ -8443,10 +8477,8 @@ enum {
#define TRANS_STATE_MASK (1 << 30)
#define TRANS_STATE_DISABLE (0 << 30)
#define TRANS_STATE_ENABLE (1 << 30)
-#define TRANS_FSYNC_DELAY_HB1 (0 << 27)
-#define TRANS_FSYNC_DELAY_HB2 (1 << 27)
-#define TRANS_FSYNC_DELAY_HB3 (2 << 27)
-#define TRANS_FSYNC_DELAY_HB4 (3 << 27)
+#define TRANS_FRAME_START_DELAY_MASK (3 << 27) /* ibx */
+#define TRANS_FRAME_START_DELAY(x) ((x) << 27) /* ibx: 0-3 */
#define TRANS_INTERLACE_MASK (7 << 21)
#define TRANS_PROGRESSIVE (0 << 21)
#define TRANS_INTERLACED (3 << 21)
@@ -8467,6 +8499,7 @@ enum {
#define TRANS_CHICKEN2_TIMING_OVERRIDE (1 << 31)
#define TRANS_CHICKEN2_FDI_POLARITY_REVERSED (1 << 29)
#define TRANS_CHICKEN2_FRAME_START_DELAY_MASK (3 << 27)
+#define TRANS_CHICKEN2_FRAME_START_DELAY(x) ((x) << 27) /* 0-3 */
#define TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER (1 << 26)
#define TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH (1 << 25)
@@ -9405,11 +9438,9 @@ enum skl_power_gate {
#define _ICL_AUX_REG_IDX(pw_idx) ((pw_idx) - ICL_PW_CTL_IDX_AUX_A)
#define _ICL_AUX_ANAOVRD1_A 0x162398
#define _ICL_AUX_ANAOVRD1_B 0x6C398
-#define _TGL_AUX_ANAOVRD1_C 0x160398
#define ICL_AUX_ANAOVRD1(pw_idx) _MMIO(_PICK(_ICL_AUX_REG_IDX(pw_idx), \
_ICL_AUX_ANAOVRD1_A, \
- _ICL_AUX_ANAOVRD1_B, \
- _TGL_AUX_ANAOVRD1_C))
+ _ICL_AUX_ANAOVRD1_B))
#define ICL_AUX_ANAOVRD1_LDO_BYPASS (1 << 7)
#define ICL_AUX_ANAOVRD1_ENABLE (1 << 0)
@@ -9671,6 +9702,7 @@ enum skl_power_gate {
#define TRANS_DDI_EDP_INPUT_A_ONOFF (4 << 12)
#define TRANS_DDI_EDP_INPUT_B_ONOFF (5 << 12)
#define TRANS_DDI_EDP_INPUT_C_ONOFF (6 << 12)
+#define TRANS_DDI_EDP_INPUT_D_ONOFF (7 << 12)
#define TRANS_DDI_MST_TRANSPORT_SELECT_MASK REG_GENMASK(11, 10)
#define TRANS_DDI_MST_TRANSPORT_SELECT(trans) \
REG_FIELD_PREP(TRANS_DDI_MST_TRANSPORT_SELECT_MASK, trans)
@@ -10776,6 +10808,57 @@ enum skl_power_gate {
#define ICL_ESC_CLK_DIV_SHIFT 0
#define DSI_MAX_ESC_CLK 20000 /* in KHz */
+#define _DSI_CMD_FRMCTL_0 0x6b034
+#define _DSI_CMD_FRMCTL_1 0x6b834
+#define DSI_CMD_FRMCTL(port) _MMIO_PORT(port, \
+ _DSI_CMD_FRMCTL_0,\
+ _DSI_CMD_FRMCTL_1)
+#define DSI_FRAME_UPDATE_REQUEST (1 << 31)
+#define DSI_PERIODIC_FRAME_UPDATE_ENABLE (1 << 29)
+#define DSI_NULL_PACKET_ENABLE (1 << 28)
+#define DSI_FRAME_IN_PROGRESS (1 << 0)
+
+#define _DSI_INTR_MASK_REG_0 0x6b070
+#define _DSI_INTR_MASK_REG_1 0x6b870
+#define DSI_INTR_MASK_REG(port) _MMIO_PORT(port, \
+ _DSI_INTR_MASK_REG_0,\
+ _DSI_INTR_MASK_REG_1)
+
+#define _DSI_INTR_IDENT_REG_0 0x6b074
+#define _DSI_INTR_IDENT_REG_1 0x6b874
+#define DSI_INTR_IDENT_REG(port) _MMIO_PORT(port, \
+ _DSI_INTR_IDENT_REG_0,\
+ _DSI_INTR_IDENT_REG_1)
+#define DSI_TE_EVENT (1 << 31)
+#define DSI_RX_DATA_OR_BTA_TERMINATED (1 << 30)
+#define DSI_TX_DATA (1 << 29)
+#define DSI_ULPS_ENTRY_DONE (1 << 28)
+#define DSI_NON_TE_TRIGGER_RECEIVED (1 << 27)
+#define DSI_HOST_CHKSUM_ERROR (1 << 26)
+#define DSI_HOST_MULTI_ECC_ERROR (1 << 25)
+#define DSI_HOST_SINGL_ECC_ERROR (1 << 24)
+#define DSI_HOST_CONTENTION_DETECTED (1 << 23)
+#define DSI_HOST_FALSE_CONTROL_ERROR (1 << 22)
+#define DSI_HOST_TIMEOUT_ERROR (1 << 21)
+#define DSI_HOST_LOW_POWER_TX_SYNC_ERROR (1 << 20)
+#define DSI_HOST_ESCAPE_MODE_ENTRY_ERROR (1 << 19)
+#define DSI_FRAME_UPDATE_DONE (1 << 16)
+#define DSI_PROTOCOL_VIOLATION_REPORTED (1 << 15)
+#define DSI_INVALID_TX_LENGTH (1 << 13)
+#define DSI_INVALID_VC (1 << 12)
+#define DSI_INVALID_DATA_TYPE (1 << 11)
+#define DSI_PERIPHERAL_CHKSUM_ERROR (1 << 10)
+#define DSI_PERIPHERAL_MULTI_ECC_ERROR (1 << 9)
+#define DSI_PERIPHERAL_SINGLE_ECC_ERROR (1 << 8)
+#define DSI_PERIPHERAL_CONTENTION_DETECTED (1 << 7)
+#define DSI_PERIPHERAL_FALSE_CTRL_ERROR (1 << 6)
+#define DSI_PERIPHERAL_TIMEOUT_ERROR (1 << 5)
+#define DSI_PERIPHERAL_LP_TX_SYNC_ERROR (1 << 4)
+#define DSI_PERIPHERAL_ESC_MODE_ENTRY_CMD_ERR (1 << 3)
+#define DSI_EOT_SYNC_ERROR (1 << 2)
+#define DSI_SOT_SYNC_ERROR (1 << 1)
+#define DSI_SOT_ERROR (1 << 0)
+
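
The new DSI registers above come in per-port pairs 0x800 apart, which is all _MMIO_PORT() needs: pick one of two base offsets by port index. A trivial userspace sketch of that indexing (PICK_PORT is a stand-in for the kernel helper):

        #include <stdio.h>

        #define PICK_PORT(port, a, b)   ((port) == 0 ? (a) : (b))
        #define DSI_CMD_FRMCTL(port)    PICK_PORT(port, 0x6b034u, 0x6b834u)

        int main(void)
        {
                for (int port = 0; port < 2; port++)
                        printf("DSI_CMD_FRMCTL(%d) = 0x%x\n",
                               port, DSI_CMD_FRMCTL(port));
                return 0;
        }
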
/* Gen4+ Timestamp and Pipe Frame time stamp registers */
#define GEN4_TIMESTAMP _MMIO(0x2358)
#define ILK_TIMESTAMP_HI _MMIO(0x70070)
@@ -11380,6 +11463,7 @@ enum skl_power_gate {
#define CMD_MODE_TE_GATE (0x1 << 28)
#define VIDEO_MODE_SYNC_EVENT (0x2 << 28)
#define VIDEO_MODE_SYNC_PULSE (0x3 << 28)
+#define TE_SOURCE_GPIO (1 << 27)
#define LINK_READY (1 << 20)
#define PIX_FMT_MASK (0x3 << 16)
#define PIX_FMT_SHIFT 16
@@ -11632,13 +11716,18 @@ enum skl_power_gate {
/* MOCS (Memory Object Control State) registers */
#define GEN9_LNCFCMOCS(i) _MMIO(0xb020 + (i) * 4) /* L3 Cache Control */
-#define GEN9_GFX_MOCS(i) _MMIO(0xc800 + (i) * 4) /* Graphics MOCS registers */
-#define GEN9_MFX0_MOCS(i) _MMIO(0xc900 + (i) * 4) /* Media 0 MOCS registers */
-#define GEN9_MFX1_MOCS(i) _MMIO(0xca00 + (i) * 4) /* Media 1 MOCS registers */
-#define GEN9_VEBOX_MOCS(i) _MMIO(0xcb00 + (i) * 4) /* Video MOCS registers */
-#define GEN9_BLT_MOCS(i) _MMIO(0xcc00 + (i) * 4) /* Blitter MOCS registers */
-/* Media decoder 2 MOCS registers */
-#define GEN11_MFX2_MOCS(i) _MMIO(0x10000 + (i) * 4)
+#define __GEN9_RCS0_MOCS0 0xc800
+#define GEN9_GFX_MOCS(i) _MMIO(__GEN9_RCS0_MOCS0 + (i) * 4)
+#define __GEN9_VCS0_MOCS0 0xc900
+#define GEN9_MFX0_MOCS(i) _MMIO(__GEN9_VCS0_MOCS0 + (i) * 4)
+#define __GEN9_VCS1_MOCS0 0xca00
+#define GEN9_MFX1_MOCS(i) _MMIO(__GEN9_VCS1_MOCS0 + (i) * 4)
+#define __GEN9_VECS0_MOCS0 0xcb00
+#define GEN9_VEBOX_MOCS(i) _MMIO(__GEN9_VECS0_MOCS0 + (i) * 4)
+#define __GEN9_BCS0_MOCS0 0xcc00
+#define GEN9_BLT_MOCS(i) _MMIO(__GEN9_BCS0_MOCS0 + (i) * 4)
+#define __GEN11_VCS2_MOCS0 0x10000
+#define GEN11_MFX2_MOCS(i) _MMIO(__GEN11_VCS2_MOCS0 + (i) * 4)
#define GEN10_SCRATCH_LNCF2 _MMIO(0xb0a0)
#define PMFLUSHDONE_LNICRSDROP (1 << 20)
@@ -11994,7 +12083,7 @@ enum skl_power_gate {
/* This register controls the Display State Buffer (DSB) engines. */
#define _DSBSL_INSTANCE_BASE 0x70B00
#define DSBSL_INSTANCE(pipe, id) (_DSBSL_INSTANCE_BASE + \
- (pipe) * 0x1000 + (id) * 100)
+ (pipe) * 0x1000 + (id) * 0x100)
#define DSB_HEAD(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x0)
#define DSB_TAIL(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x4)
#define DSB_CTRL(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x8)
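
The DSBSL_INSTANCE() change fixes a classic one-character bug worth spelling out: the per-id stride is 0x100 bytes, but the old macro multiplied by decimal 100 (0x64), landing every id > 0 on the wrong address. A quick check of both strides:

        #include <stdio.h>

        #define DSB_BASE 0x70B00
        #define DSB_OLD(pipe, id) (DSB_BASE + (pipe) * 0x1000 + (id) * 100)
        #define DSB_NEW(pipe, id) (DSB_BASE + (pipe) * 0x1000 + (id) * 0x100)

        int main(void)
        {
                printf("id=1 old: 0x%x\n", DSB_OLD(0, 1));  /* 0x70b64 */
                printf("id=1 new: 0x%x\n", DSB_NEW(0, 1));  /* 0x70c00 */
                return 0;
        }
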
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index bbd71af00a91..44a0d1a950c5 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -57,11 +57,13 @@ static struct i915_global_request {
static const char *i915_fence_get_driver_name(struct dma_fence *fence)
{
- return "i915";
+ return dev_name(to_request(fence)->i915->drm.dev);
}
static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
{
+ const struct i915_gem_context *ctx;
+
/*
* The timeline struct (as part of the ppgtt underneath a context)
* may be freed when the request is no longer in use by the GPU.
@@ -74,7 +76,11 @@ static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
return "signaled";
- return to_request(fence)->gem_context->name ?: "[i915]";
+ ctx = i915_request_gem_context(to_request(fence));
+ if (!ctx)
+ return "[" DRIVER_NAME "]";
+
+ return ctx->name;
}
static bool i915_fence_signaled(struct dma_fence *fence)
@@ -188,7 +194,7 @@ static void free_capture_list(struct i915_request *request)
{
struct i915_capture_list *capture;
- capture = request->capture_list;
+ capture = fetch_and_zero(&request->capture_list);
while (capture) {
struct i915_capture_list *next = capture->next;
@@ -214,7 +220,7 @@ static void remove_from_engine(struct i915_request *rq)
spin_lock(&engine->active.lock);
locked = engine;
}
- list_del(&rq->sched.link);
+ list_del_init(&rq->sched.link);
spin_unlock_irq(&locked->active.lock);
}
@@ -223,10 +229,7 @@ bool i915_request_retire(struct i915_request *rq)
if (!i915_request_completed(rq))
return false;
- GEM_TRACE("%s fence %llx:%lld, current %d\n",
- rq->engine->name,
- rq->fence.context, rq->fence.seqno,
- hwsp_seqno(rq));
+ RQ_TRACE(rq, "\n");
GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit));
trace_i915_request_retire(rq);
@@ -272,8 +275,8 @@ bool i915_request_retire(struct i915_request *rq)
remove_from_client(rq);
list_del(&rq->link);
- intel_context_exit(rq->hw_context);
- intel_context_unpin(rq->hw_context);
+ intel_context_exit(rq->context);
+ intel_context_unpin(rq->context);
free_capture_list(rq);
i915_sched_node_fini(&rq->sched);
@@ -287,10 +290,7 @@ void i915_request_retire_upto(struct i915_request *rq)
struct intel_timeline * const tl = i915_request_timeline(rq);
struct i915_request *tmp;
- GEM_TRACE("%s fence %llx:%lld, current %d\n",
- rq->engine->name,
- rq->fence.context, rq->fence.seqno,
- hwsp_seqno(rq));
+ RQ_TRACE(rq, "\n");
GEM_BUG_ON(!i915_request_completed(rq));
@@ -300,11 +300,11 @@ void i915_request_retire_upto(struct i915_request *rq)
}
static int
-__i915_request_await_execution(struct i915_request *rq,
- struct i915_request *signal,
- void (*hook)(struct i915_request *rq,
- struct dma_fence *signal),
- gfp_t gfp)
+__await_execution(struct i915_request *rq,
+ struct i915_request *signal,
+ void (*hook)(struct i915_request *rq,
+ struct dma_fence *signal),
+ gfp_t gfp)
{
struct execute_cb *cb;
@@ -341,6 +341,8 @@ __i915_request_await_execution(struct i915_request *rq,
}
spin_unlock_irq(&signal->lock);
+ /* Copy across semaphore status as we need the same behaviour */
+ rq->sched.flags |= signal->sched.flags;
return 0;
}
@@ -349,10 +351,7 @@ bool __i915_request_submit(struct i915_request *request)
struct intel_engine_cs *engine = request->engine;
bool result = false;
- GEM_TRACE("%s fence %llx:%lld, current %d\n",
- engine->name,
- request->fence.context, request->fence.seqno,
- hwsp_seqno(request));
+ RQ_TRACE(request, "\n");
GEM_BUG_ON(!irqs_disabled());
lockdep_assert_held(&engine->active.lock);
@@ -376,7 +375,7 @@ bool __i915_request_submit(struct i915_request *request)
if (i915_request_completed(request))
goto xfer;
- if (i915_gem_context_is_banned(request->gem_context))
+ if (intel_context_is_banned(request->context))
i915_request_skip(request, -EIO);
/*
@@ -415,7 +414,7 @@ xfer: /* We may be recursing from the signal callback of another i915 fence */
if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags) &&
!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &request->fence.flags) &&
!i915_request_enable_breadcrumb(request))
- intel_engine_queue_breadcrumbs(engine);
+ intel_engine_signal_breadcrumbs(engine);
__notify_execute_cb(request);
@@ -441,10 +440,7 @@ void __i915_request_unsubmit(struct i915_request *request)
{
struct intel_engine_cs *engine = request->engine;
- GEM_TRACE("%s fence %llx:%lld, current %d\n",
- engine->name,
- request->fence.context, request->fence.seqno,
- hwsp_seqno(request));
+ RQ_TRACE(request, "\n");
GEM_BUG_ON(!irqs_disabled());
lockdep_assert_held(&engine->active.lock);
@@ -586,6 +582,21 @@ out:
return kmem_cache_alloc(global.slab_requests, gfp);
}
+static void __i915_request_ctor(void *arg)
+{
+ struct i915_request *rq = arg;
+
+ spin_lock_init(&rq->lock);
+ i915_sched_node_init(&rq->sched);
+ i915_sw_fence_init(&rq->submit, submit_notify);
+ i915_sw_fence_init(&rq->semaphore, semaphore_notify);
+
+ rq->file_priv = NULL;
+ rq->capture_list = NULL;
+
+ INIT_LIST_HEAD(&rq->execute_cb);
+}
+
struct i915_request *
__i915_request_create(struct intel_context *ce, gfp_t gfp)
{
@@ -643,35 +654,32 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
goto err_free;
rq->i915 = ce->engine->i915;
- rq->hw_context = ce;
- rq->gem_context = ce->gem_context;
+ rq->context = ce;
rq->engine = ce->engine;
rq->ring = ce->ring;
rq->execution_mask = ce->engine->mask;
+ rq->flags = 0;
- rcu_assign_pointer(rq->timeline, tl);
+ RCU_INIT_POINTER(rq->timeline, tl);
+ RCU_INIT_POINTER(rq->hwsp_cacheline, tl->hwsp_cacheline);
rq->hwsp_seqno = tl->hwsp_seqno;
- rq->hwsp_cacheline = tl->hwsp_cacheline;
rq->rcustate = get_state_synchronize_rcu(); /* acts as smp_mb() */
- spin_lock_init(&rq->lock);
dma_fence_init(&rq->fence, &i915_fence_ops, &rq->lock,
tl->fence_context, seqno);
/* We bump the ref for the fence chain */
- i915_sw_fence_init(&i915_request_get(rq)->submit, submit_notify);
- i915_sw_fence_init(&i915_request_get(rq)->semaphore, semaphore_notify);
+ i915_sw_fence_reinit(&i915_request_get(rq)->submit);
+ i915_sw_fence_reinit(&i915_request_get(rq)->semaphore);
- i915_sched_node_init(&rq->sched);
+ i915_sched_node_reinit(&rq->sched);
- /* No zalloc, must clear what we need by hand */
- rq->file_priv = NULL;
+ /* No zalloc, everything must be cleared after use */
rq->batch = NULL;
- rq->capture_list = NULL;
- rq->flags = 0;
-
- INIT_LIST_HEAD(&rq->execute_cb);
+ GEM_BUG_ON(rq->file_priv);
+ GEM_BUG_ON(rq->capture_list);
+ GEM_BUG_ON(!list_empty(&rq->execute_cb));
/*
* Reserve space in the ring buffer for all the commands required to
@@ -753,34 +761,37 @@ err_unlock:
static int
i915_request_await_start(struct i915_request *rq, struct i915_request *signal)
{
- struct intel_timeline *tl;
struct dma_fence *fence;
int err;
GEM_BUG_ON(i915_request_timeline(rq) ==
rcu_access_pointer(signal->timeline));
+ fence = NULL;
rcu_read_lock();
- tl = rcu_dereference(signal->timeline);
- if (i915_request_started(signal) || !kref_get_unless_zero(&tl->kref))
- tl = NULL;
- rcu_read_unlock();
- if (!tl) /* already started or maybe even completed */
- return 0;
+ spin_lock_irq(&signal->lock);
+ if (!i915_request_started(signal) &&
+ !list_is_first(&signal->link,
+ &rcu_dereference(signal->timeline)->requests)) {
+ struct i915_request *prev = list_prev_entry(signal, link);
- fence = ERR_PTR(-EBUSY);
- if (mutex_trylock(&tl->mutex)) {
- fence = NULL;
- if (!i915_request_started(signal) &&
- !list_is_first(&signal->link, &tl->requests)) {
- signal = list_prev_entry(signal, link);
- fence = dma_fence_get(&signal->fence);
+ /*
+ * Peek at the request before us in the timeline. That
+ * request will only be valid before it is retired, so
+ * after acquiring a reference to it, confirm that it is
+ * still part of the signaler's timeline.
+ */
+ if (i915_request_get_rcu(prev)) {
+ if (list_next_entry(prev, link) == signal)
+ fence = &prev->fence;
+ else
+ i915_request_put(prev);
}
- mutex_unlock(&tl->mutex);
}
- intel_timeline_put(tl);
- if (IS_ERR_OR_NULL(fence))
- return PTR_ERR_OR_ZERO(fence);
+ spin_unlock_irq(&signal->lock);
+ rcu_read_unlock();
+ if (!fence)
+ return 0;
err = 0;
if (intel_timeline_sync_is_later(i915_request_timeline(rq), fence))
@@ -811,31 +822,21 @@ already_busywaiting(struct i915_request *rq)
}
static int
-emit_semaphore_wait(struct i915_request *to,
- struct i915_request *from,
- gfp_t gfp)
+__emit_semaphore_wait(struct i915_request *to,
+ struct i915_request *from,
+ u32 seqno)
{
const int has_token = INTEL_GEN(to->i915) >= 12;
u32 hwsp_offset;
- int len;
+ int len, err;
u32 *cs;
GEM_BUG_ON(INTEL_GEN(to->i915) < 8);
- /* Just emit the first semaphore we see as request space is limited. */
- if (already_busywaiting(to) & from->engine->mask)
- goto await_fence;
-
- if (i915_request_await_start(to, from) < 0)
- goto await_fence;
-
- /* Only submit our spinner after the signaler is running! */
- if (__i915_request_await_execution(to, from, NULL, gfp))
- goto await_fence;
-
/* We need to pin the signaler's HWSP until we are finished reading. */
- if (intel_timeline_read_hwsp(from, to, &hwsp_offset))
- goto await_fence;
+ err = intel_timeline_read_hwsp(from, to, &hwsp_offset);
+ if (err)
+ return err;
len = 4;
if (has_token)
@@ -858,7 +859,7 @@ emit_semaphore_wait(struct i915_request *to,
MI_SEMAPHORE_POLL |
MI_SEMAPHORE_SAD_GTE_SDD) +
has_token;
- *cs++ = from->fence.seqno;
+ *cs++ = seqno;
*cs++ = hwsp_offset;
*cs++ = 0;
if (has_token) {
@@ -867,6 +868,28 @@ emit_semaphore_wait(struct i915_request *to,
}
intel_ring_advance(to, cs);
+ return 0;
+}
+
+static int
+emit_semaphore_wait(struct i915_request *to,
+ struct i915_request *from,
+ gfp_t gfp)
+{
+ /* Just emit the first semaphore we see as request space is limited. */
+ if (already_busywaiting(to) & from->engine->mask)
+ goto await_fence;
+
+ if (i915_request_await_start(to, from) < 0)
+ goto await_fence;
+
+ /* Only submit our spinner after the signaler is running! */
+ if (__await_execution(to, from, NULL, gfp))
+ goto await_fence;
+
+ if (__emit_semaphore_wait(to, from, from->fence.seqno))
+ goto await_fence;
+
to->sched.semaphores |= from->engine->mask;
to->sched.flags |= I915_SCHED_HAS_SEMAPHORE_CHAIN;
return 0;
@@ -894,18 +917,16 @@ i915_request_await_request(struct i915_request *to, struct i915_request *from)
return ret;
}
- if (to->engine == from->engine) {
+ if (to->engine == from->engine)
ret = i915_sw_fence_await_sw_fence_gfp(&to->submit,
&from->submit,
I915_FENCE_GFP);
- } else if (intel_engine_has_semaphores(to->engine) &&
- to->gem_context->sched.priority >= I915_PRIORITY_NORMAL) {
+ else if (intel_context_use_semaphores(to->context))
ret = emit_semaphore_wait(to, from, I915_FENCE_GFP);
- } else {
+ else
ret = i915_sw_fence_await_dma_fence(&to->submit,
&from->fence, 0,
I915_FENCE_GFP);
- }
if (ret < 0)
return ret;
@@ -945,8 +966,10 @@ i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
do {
fence = *child++;
- if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
+ i915_sw_fence_set_error_once(&rq->submit, fence->error);
continue;
+ }
/*
* Requests on the same timeline are explicitly ordered, along
@@ -980,6 +1003,57 @@ i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
return 0;
}
+static bool intel_timeline_sync_has_start(struct intel_timeline *tl,
+ struct dma_fence *fence)
+{
+ return __intel_timeline_sync_is_later(tl,
+ fence->context,
+ fence->seqno - 1);
+}
+
+static int intel_timeline_sync_set_start(struct intel_timeline *tl,
+ const struct dma_fence *fence)
+{
+ return __intel_timeline_sync_set(tl, fence->context, fence->seqno - 1);
+}
+
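
The *_sync_has_start()/*_sync_set_start() helpers above encode "request N has started" as "the timeline has advanced past seqno N - 1", since per-context seqnos are assigned in submission order. A minimal sketch of that arithmetic with a wrap-safe comparison (my reading of the helpers, with invented names):

        #include <stdbool.h>
        #include <stdint.h>

        static uint32_t timeline_progress;      /* last seqno known complete */

        static bool seqno_passed(uint32_t progress, uint32_t seqno)
        {
                return (int32_t)(progress - seqno) >= 0;        /* wrap-safe */
        }

        static bool sync_has_start(uint32_t seqno)
        {
                /* "seqno has started" == "seqno - 1 has completed" */
                return seqno_passed(timeline_progress, seqno - 1);
        }

        int main(void)
        {
                timeline_progress = 41;
                return sync_has_start(42) ? 0 : 1;  /* 42 started: 41 passed */
        }
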
+static int
+__i915_request_await_execution(struct i915_request *to,
+ struct i915_request *from,
+ void (*hook)(struct i915_request *rq,
+ struct dma_fence *signal))
+{
+ int err;
+
+ /* Submit both requests at the same time */
+ err = __await_execution(to, from, hook, I915_FENCE_GFP);
+ if (err)
+ return err;
+
+ /* Squash repeated dependencies to the same timelines */
+ if (intel_timeline_sync_has_start(i915_request_timeline(to),
+ &from->fence))
+ return 0;
+
+ /* Ensure both start together [after all semaphores in signal] */
+ if (intel_engine_has_semaphores(to->engine))
+ err = __emit_semaphore_wait(to, from, from->fence.seqno - 1);
+ else
+ err = i915_request_await_start(to, from);
+ if (err < 0)
+ return err;
+
+ /* Couple the dependency tree for PI on this exposed to->fence */
+ if (to->engine->schedule) {
+ err = i915_sched_node_add_dependency(&to->sched, &from->sched);
+ if (err < 0)
+ return err;
+ }
+
+ return intel_timeline_sync_set_start(i915_request_timeline(to),
+ &from->fence);
+}
+
int
i915_request_await_execution(struct i915_request *rq,
struct dma_fence *fence,
@@ -1002,8 +1076,10 @@ i915_request_await_execution(struct i915_request *rq,
do {
fence = *child++;
- if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
+ i915_sw_fence_set_error_once(&rq->submit, fence->error);
continue;
+ }
/*
* We don't squash repeated fence dependencies here as we
@@ -1013,8 +1089,7 @@ i915_request_await_execution(struct i915_request *rq,
if (dma_fence_is_i915(fence))
ret = __i915_request_await_execution(rq,
to_request(fence),
- hook,
- I915_FENCE_GFP);
+ hook);
else
ret = i915_sw_fence_await_dma_fence(&rq->submit, fence,
I915_FENCE_TIMEOUT,
@@ -1180,8 +1255,7 @@ struct i915_request *__i915_request_commit(struct i915_request *rq)
struct intel_ring *ring = rq->ring;
u32 *cs;
- GEM_TRACE("%s fence %llx:%lld\n",
- engine->name, rq->fence.context, rq->fence.seqno);
+ RQ_TRACE(rq, "\n");
/*
* To ensure that this call will not fail, space for its emissions
@@ -1227,8 +1301,8 @@ void __i915_request_queue(struct i915_request *rq,
void i915_request_add(struct i915_request *rq)
{
- struct i915_sched_attr attr = rq->gem_context->sched;
struct intel_timeline * const tl = i915_request_timeline(rq);
+ struct i915_sched_attr attr = {};
struct i915_request *prev;
lockdep_assert_held(&tl->mutex);
@@ -1238,6 +1312,9 @@ void i915_request_add(struct i915_request *rq)
prev = __i915_request_commit(rq);
+ if (rcu_access_pointer(rq->context->gem_context))
+ attr = i915_request_gem_context(rq)->sched;
+
/*
* Boost actual workloads past semaphores!
*
@@ -1533,10 +1610,14 @@ static struct i915_global_request global = { {
int __init i915_global_request_init(void)
{
- global.slab_requests = KMEM_CACHE(i915_request,
- SLAB_HWCACHE_ALIGN |
- SLAB_RECLAIM_ACCOUNT |
- SLAB_TYPESAFE_BY_RCU);
+ global.slab_requests =
+ kmem_cache_create("i915_request",
+ sizeof(struct i915_request),
+ __alignof__(struct i915_request),
+ SLAB_HWCACHE_ALIGN |
+ SLAB_RECLAIM_ACCOUNT |
+ SLAB_TYPESAFE_BY_RCU,
+ __i915_request_ctor);
if (!global.slab_requests)
return -ENOMEM;
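
Switching from KMEM_CACHE() to kmem_cache_create() with __i915_request_ctor is what lets the hunks earlier in this file replace init with reinit: with SLAB_TYPESAFE_BY_RCU the constructor runs when fresh backing memory is populated, not on every allocation, so ctor-owned fields (locks, list heads, fences) persist across free/alloc cycles as long as they are left valid at free. A userspace sketch of that split, with a one-deep freelist standing in for the slab:

        #include <stdlib.h>

        struct request {
                int lock;   /* stands in for ctor-owned state: locks, lists */
                int seqno;  /* per-use state, reset on every allocation */
        };

        static struct request *freelist;   /* one-deep "slab" stand-in */

        static void request_ctor(struct request *rq)
        {
                rq->lock = 1;               /* once per object lifetime */
        }

        static struct request *request_alloc(void)
        {
                struct request *rq = freelist;

                if (rq) {
                        freelist = NULL;    /* reused: ctor state intact */
                } else {
                        rq = malloc(sizeof(*rq));
                        if (!rq)
                                return NULL;
                        request_ctor(rq);   /* fresh memory: run the ctor */
                }

                rq->seqno = 0;              /* cheap per-allocation reinit */
                return rq;
        }

        static void request_free(struct request *rq)
        {
                if (!freelist)              /* must stay valid for reuse */
                        freelist = rq;
                else
                        free(rq);
        }

        int main(void)
        {
                struct request *rq = request_alloc();

                request_free(rq);
                rq = request_alloc();       /* same memory, ctor not re-run */
                request_free(rq);
                return 0;
        }
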
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 96991d64759c..565322640378 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -28,8 +28,10 @@
#include <linux/dma-fence.h>
#include <linux/lockdep.h>
+#include "gem/i915_gem_context_types.h"
#include "gt/intel_context_types.h"
#include "gt/intel_engine_types.h"
+#include "gt/intel_timeline_types.h"
#include "i915_gem.h"
#include "i915_scheduler.h"
@@ -41,14 +43,19 @@
struct drm_file;
struct drm_i915_gem_object;
struct i915_request;
-struct intel_timeline;
-struct intel_timeline_cacheline;
struct i915_capture_list {
struct i915_capture_list *next;
struct i915_vma *vma;
};
+#define RQ_TRACE(rq, fmt, ...) do { \
+ const struct i915_request *rq__ = (rq); \
+ ENGINE_TRACE(rq__->engine, "fence %llx:%lld, current %d" fmt, \
+ rq__->fence.context, rq__->fence.seqno, \
+ hwsp_seqno(rq__), ##__VA_ARGS__); \
+} while (0)
+
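
RQ_TRACE() evaluates the request once into a local, prepends the common fence context:seqno prefix, and relies on the GNU ##__VA_ARGS__ extension so callers may pass a bare format string. A userspace sketch with printf() standing in for ENGINE_TRACE():

        #include <stdio.h>

        struct request { unsigned long long ctx; long long seqno; };

        #define RQ_TRACE(rq, fmt, ...) do { \
                const struct request *rq__ = (rq); \
                printf("fence %llx:%lld " fmt, \
                       rq__->ctx, rq__->seqno, ##__VA_ARGS__); \
        } while (0)

        int main(void)
        {
                struct request rq = { .ctx = 0xbeef, .seqno = 3 };

                RQ_TRACE(&rq, "\n");            /* no trailing arguments */
                RQ_TRACE(&rq, "err=%d\n", -5);  /* with trailing arguments */
                return 0;
        }
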
enum {
/*
* I915_FENCE_FLAG_ACTIVE - this request is currently submitted to HW.
@@ -109,9 +116,8 @@ struct i915_request {
* i915_request_free() will then decrement the refcount on the
* context.
*/
- struct i915_gem_context *gem_context;
struct intel_engine_cs *engine;
- struct intel_context *hw_context;
+ struct intel_context *context;
struct intel_ring *ring;
struct intel_timeline __rcu *timeline;
struct list_head signal_link;
@@ -144,6 +150,10 @@ struct i915_request {
union {
wait_queue_entry_t submitq;
struct i915_sw_dma_fence_cb dmaq;
+ struct i915_request_duration_cb {
+ struct dma_fence_cb cb;
+ ktime_t emitted;
+ } duration;
};
struct list_head execute_cb;
struct i915_sw_fence semaphore;
@@ -176,7 +186,7 @@ struct i915_request {
* inside the timeline's HWSP vma, but it is only valid while this
* request has not completed and guarded by the timeline mutex.
*/
- struct intel_timeline_cacheline *hwsp_cacheline;
+ struct intel_timeline_cacheline __rcu *hwsp_cacheline;
/** Position in the ring of the start of the request */
u32 head;
@@ -454,6 +464,13 @@ i915_request_timeline(struct i915_request *rq)
lockdep_is_held(&rcu_access_pointer(rq->timeline)->mutex));
}
+static inline struct i915_gem_context *
+i915_request_gem_context(struct i915_request *rq)
+{
+ /* Valid only while the request is being constructed (or retired). */
+ return rcu_dereference_protected(rq->context->gem_context, true);
+}
+
static inline struct intel_timeline *
i915_request_active_timeline(struct i915_request *rq)
{
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index 010d67f48ad9..bf87c70bfdd9 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -213,7 +213,7 @@ static void kick_submission(struct intel_engine_cs *engine,
* If we are already the currently executing context, don't
* bother evaluating if we should preempt ourselves.
*/
- if (inflight->hw_context == rq->hw_context)
+ if (inflight->context == rq->context)
goto unlock;
engine->execlists.queue_priority_hint = prio;
@@ -387,9 +387,19 @@ void i915_sched_node_init(struct i915_sched_node *node)
INIT_LIST_HEAD(&node->signalers_list);
INIT_LIST_HEAD(&node->waiters_list);
INIT_LIST_HEAD(&node->link);
+
+ i915_sched_node_reinit(node);
+}
+
+void i915_sched_node_reinit(struct i915_sched_node *node)
+{
node->attr.priority = I915_PRIORITY_INVALID;
node->semaphores = 0;
node->flags = 0;
+
+ GEM_BUG_ON(!list_empty(&node->signalers_list));
+ GEM_BUG_ON(!list_empty(&node->waiters_list));
+ GEM_BUG_ON(!list_empty(&node->link));
}
static struct i915_dependency *
@@ -474,13 +484,13 @@ void i915_sched_node_fini(struct i915_sched_node *node)
* so we may be called out-of-order.
*/
list_for_each_entry_safe(dep, tmp, &node->signalers_list, signal_link) {
- GEM_BUG_ON(!node_signaled(dep->signaler));
GEM_BUG_ON(!list_empty(&dep->dfs_link));
list_del(&dep->wait_link);
if (dep->flags & I915_DEPENDENCY_ALLOC)
i915_dependency_free(dep);
}
+ INIT_LIST_HEAD(&node->signalers_list);
/* Remove ourselves from everyone who depends upon us */
list_for_each_entry_safe(dep, tmp, &node->waiters_list, wait_link) {
@@ -491,6 +501,7 @@ void i915_sched_node_fini(struct i915_sched_node *node)
if (dep->flags & I915_DEPENDENCY_ALLOC)
i915_dependency_free(dep);
}
+ INIT_LIST_HEAD(&node->waiters_list);
spin_unlock_irq(&schedule_lock);
}
diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h
index 07d243acf553..d1dc4efef77b 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.h
+++ b/drivers/gpu/drm/i915/i915_scheduler.h
@@ -26,6 +26,7 @@
sched.link)
void i915_sched_node_init(struct i915_sched_node *node);
+void i915_sched_node_reinit(struct i915_sched_node *node);
bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
struct i915_sched_node *signal,
diff --git a/drivers/gpu/drm/i915/i915_selftest.h b/drivers/gpu/drm/i915/i915_selftest.h
index 4d88205de51b..98bcb6fa0ab4 100644
--- a/drivers/gpu/drm/i915/i915_selftest.h
+++ b/drivers/gpu/drm/i915/i915_selftest.h
@@ -36,6 +36,7 @@ struct i915_selftest {
char *filter;
int mock;
int live;
+ int perf;
};
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
@@ -45,6 +46,7 @@ extern struct i915_selftest i915_selftest;
int i915_mock_selftests(void);
int i915_live_selftests(struct pci_dev *pdev);
+int i915_perf_selftests(struct pci_dev *pdev);
/* We extract the function declarations from i915_mock_selftests.h and
* i915_live_selftests.h Add your unit test declarations there!
@@ -61,6 +63,7 @@ int i915_live_selftests(struct pci_dev *pdev);
#undef selftest
#define selftest(name, func) int func(struct drm_i915_private *i915);
#include "selftests/i915_live_selftests.h"
+#include "selftests/i915_perf_selftests.h"
#undef selftest
struct i915_subtest {
@@ -109,6 +112,7 @@ int __i915_subtests(const char *caller,
static inline int i915_mock_selftests(void) { return 0; }
static inline int i915_live_selftests(struct pci_dev *pdev) { return 0; }
+static inline int i915_perf_selftests(struct pci_dev *pdev) { return 0; }
#define I915_SELFTEST_DECLARE(x)
#define I915_SELFTEST_ONLY(x) 0
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index 6a88db291252..51ba97daf2a0 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -12,6 +12,12 @@
#include "i915_sw_fence.h"
#include "i915_selftest.h"
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
+#define I915_SW_FENCE_BUG_ON(expr) BUG_ON(expr)
+#else
+#define I915_SW_FENCE_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
+#endif
+
#define I915_SW_FENCE_FLAG_ALLOC BIT(3) /* after WQ_FLAG_* for safety */
static DEFINE_SPINLOCK(i915_sw_fence_lock);
@@ -218,13 +224,21 @@ void __i915_sw_fence_init(struct i915_sw_fence *fence,
{
BUG_ON(!fn || (unsigned long)fn & ~I915_SW_FENCE_MASK);
+ __init_waitqueue_head(&fence->wait, name, key);
+ fence->flags = (unsigned long)fn;
+
+ i915_sw_fence_reinit(fence);
+}
+
+void i915_sw_fence_reinit(struct i915_sw_fence *fence)
+{
debug_fence_init(fence);
- __init_waitqueue_head(&fence->wait, name, key);
atomic_set(&fence->pending, 1);
fence->error = 0;
- fence->flags = (unsigned long)fn;
+ I915_SW_FENCE_BUG_ON(!fence->flags);
+ I915_SW_FENCE_BUG_ON(!list_empty(&fence->wait.head));
}
void i915_sw_fence_commit(struct i915_sw_fence *fence)
@@ -414,8 +428,10 @@ static void dma_i915_sw_fence_wake_timer(struct dma_fence *dma,
struct i915_sw_fence *fence;
fence = xchg(&cb->base.fence, NULL);
- if (fence)
+ if (fence) {
+ i915_sw_fence_set_error_once(fence, dma->error);
i915_sw_fence_complete(fence);
+ }
irq_work_queue(&cb->work);
}
@@ -443,8 +459,10 @@ int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
debug_fence_assert(fence);
might_sleep_if(gfpflags_allow_blocking(gfp));
- if (dma_fence_is_signaled(dma))
+ if (dma_fence_is_signaled(dma)) {
+ i915_sw_fence_set_error_once(fence, dma->error);
return 0;
+ }
cb = kmalloc(timeout ?
sizeof(struct i915_sw_dma_fence_cb_timer) :
@@ -454,7 +472,12 @@ int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
if (!gfpflags_allow_blocking(gfp))
return -ENOMEM;
- return dma_fence_wait(dma, false);
+ ret = dma_fence_wait(dma, false);
+ if (ret)
+ return ret;
+
+ i915_sw_fence_set_error_once(fence, dma->error);
+ return 0;
}
cb->fence = fence;
@@ -504,8 +527,10 @@ int __i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
debug_fence_assert(fence);
- if (dma_fence_is_signaled(dma))
+ if (dma_fence_is_signaled(dma)) {
+ i915_sw_fence_set_error_once(fence, dma->error);
return 0;
+ }
cb->fence = fence;
i915_sw_fence_await(fence);
@@ -539,8 +564,7 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
struct dma_fence **shared;
unsigned int count, i;
- ret = dma_resv_get_fences_rcu(resv,
- &excl, &count, &shared);
+ ret = dma_resv_get_fences_rcu(resv, &excl, &count, &shared);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.h b/drivers/gpu/drm/i915/i915_sw_fence.h
index ab7d58bd0b9d..19e806ce43bc 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.h
+++ b/drivers/gpu/drm/i915/i915_sw_fence.h
@@ -54,6 +54,8 @@ do { \
__i915_sw_fence_init((fence), (fn), NULL, NULL)
#endif
+void i915_sw_fence_reinit(struct i915_sw_fence *fence);
+
#ifdef CONFIG_DRM_I915_SW_FENCE_DEBUG_OBJECTS
void i915_sw_fence_fini(struct i915_sw_fence *fence);
#else
@@ -110,7 +112,8 @@ static inline void i915_sw_fence_wait(struct i915_sw_fence *fence)
static inline void
i915_sw_fence_set_error_once(struct i915_sw_fence *fence, int error)
{
- cmpxchg(&fence->error, 0, error);
+ if (unlikely(error))
+ cmpxchg(&fence->error, 0, error);
}
#endif /* _I915_SW_FENCE_H_ */
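
The i915_sw_fence_set_error_once() change adds a cheap unlikely(error) fast path in front of the cmpxchg(), since most callers pass 0; the first non-zero error still wins and later ones are ignored. A userspace sketch with C11 atomics standing in for the kernel's cmpxchg():

        #include <stdatomic.h>
        #include <stdio.h>

        static _Atomic int fence_error;

        static void set_error_once(int error)
        {
                int expected = 0;

                if (error)      /* fast path: most callers pass 0 */
                        atomic_compare_exchange_strong(&fence_error,
                                                       &expected, error);
        }

        int main(void)
        {
                set_error_once(0);      /* no-op, skips the atomic entirely */
                set_error_once(-5);     /* first non-zero error sticks */
                set_error_once(-22);    /* ignored: error already set */

                printf("error = %d\n", atomic_load(&fence_error)); /* -5 */
                return 0;
        }
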
diff --git a/drivers/gpu/drm/i915/i915_sw_fence_work.c b/drivers/gpu/drm/i915/i915_sw_fence_work.c
index 07552cd544f2..997b2998f1f2 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence_work.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence_work.c
@@ -6,6 +6,13 @@
#include "i915_sw_fence_work.h"
+static void fence_complete(struct dma_fence_work *f)
+{
+ if (f->ops->release)
+ f->ops->release(f);
+ dma_fence_signal(&f->dma);
+}
+
static void fence_work(struct work_struct *work)
{
struct dma_fence_work *f = container_of(work, typeof(*f), work);
@@ -14,7 +21,8 @@ static void fence_work(struct work_struct *work)
err = f->ops->work(f);
if (err)
dma_fence_set_error(&f->dma, err);
- dma_fence_signal(&f->dma);
+
+ fence_complete(f);
dma_fence_put(&f->dma);
}
@@ -32,7 +40,7 @@ fence_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
dma_fence_get(&f->dma);
queue_work(system_unbound_wq, &f->work);
} else {
- dma_fence_signal(&f->dma);
+ fence_complete(f);
}
break;
@@ -60,9 +68,6 @@ static void fence_release(struct dma_fence *fence)
{
struct dma_fence_work *f = container_of(fence, typeof(*f), dma);
- if (f->ops->release)
- f->ops->release(f);
-
i915_sw_fence_fini(&f->chain);
BUILD_BUG_ON(offsetof(typeof(*f), dma));
@@ -78,12 +83,11 @@ static const struct dma_fence_ops fence_ops = {
void dma_fence_work_init(struct dma_fence_work *f,
const struct dma_fence_work_ops *ops)
{
+ f->ops = ops;
spin_lock_init(&f->lock);
dma_fence_init(&f->dma, &fence_ops, &f->lock, 0, 0);
i915_sw_fence_init(&f->chain, fence_notify);
INIT_WORK(&f->work, fence_work);
-
- f->ops = ops;
}
int dma_fence_work_chain(struct dma_fence_work *f, struct dma_fence *signal)
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 65476909d1bf..ad2b1b833d7b 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -259,33 +259,18 @@ static const struct bin_attribute dpf_attrs_1 = {
static ssize_t gt_act_freq_mhz_show(struct device *kdev,
struct device_attribute *attr, char *buf)
{
- struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
- struct intel_rps *rps = &dev_priv->gt.rps;
- intel_wakeref_t wakeref;
- u32 freq;
-
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
-
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- vlv_punit_get(dev_priv);
- freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
- vlv_punit_put(dev_priv);
-
- freq = (freq >> 8) & 0xff;
- } else {
- freq = intel_get_cagf(rps, I915_READ(GEN6_RPSTAT1));
- }
-
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+ struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
+ struct intel_rps *rps = &i915->gt.rps;
- return snprintf(buf, PAGE_SIZE, "%d\n", intel_gpu_freq(rps, freq));
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ intel_rps_read_actual_frequency(rps));
}
static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
struct device_attribute *attr, char *buf)
{
- struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
- struct intel_rps *rps = &dev_priv->gt.rps;
+ struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
+ struct intel_rps *rps = &i915->gt.rps;
return snprintf(buf, PAGE_SIZE, "%d\n",
intel_gpu_freq(rps, rps->cur_freq));
@@ -293,8 +278,8 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
static ssize_t gt_boost_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
- struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
- struct intel_rps *rps = &dev_priv->gt.rps;
+ struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
+ struct intel_rps *rps = &i915->gt.rps;
return snprintf(buf, PAGE_SIZE, "%d\n",
intel_gpu_freq(rps, rps->boost_freq));
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 7ef7a1e1664c..233a97a2c276 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -341,7 +341,7 @@ TRACE_EVENT(intel_disable_plane,
/* pipe updates */
-TRACE_EVENT(i915_pipe_update_start,
+TRACE_EVENT(intel_pipe_update_start,
TP_PROTO(struct intel_crtc *crtc),
TP_ARGS(crtc),
@@ -366,7 +366,7 @@ TRACE_EVENT(i915_pipe_update_start,
__entry->scanline, __entry->min, __entry->max)
);
-TRACE_EVENT(i915_pipe_update_vblank_evaded,
+TRACE_EVENT(intel_pipe_update_vblank_evaded,
TP_PROTO(struct intel_crtc *crtc),
TP_ARGS(crtc),
@@ -391,7 +391,7 @@ TRACE_EVENT(i915_pipe_update_vblank_evaded,
__entry->scanline, __entry->min, __entry->max)
);
-TRACE_EVENT(i915_pipe_update_end,
+TRACE_EVENT(intel_pipe_update_end,
TP_PROTO(struct intel_crtc *crtc, u32 frame, int scanline_end),
TP_ARGS(crtc, frame, scanline_end),
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index 04139ba1191e..b0ade76bec90 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -69,7 +69,7 @@ bool i915_error_injected(void);
#else
-#define i915_inject_probe_error(_i915, _err) 0
+#define i915_inject_probe_error(i915, e) ({ BUILD_BUG_ON_INVALID(i915); 0; })
#define i915_error_injected() false
#endif
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index e5512f26e20a..cbd783c31adb 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -28,7 +28,9 @@
#include "display/intel_frontbuffer.h"
#include "gt/intel_engine.h"
+#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_gt.h"
+#include "gt/intel_gt_requests.h"
#include "i915_drv.h"
#include "i915_globals.h"
@@ -112,6 +114,7 @@ vma_create(struct drm_i915_gem_object *obj,
if (vma == NULL)
return ERR_PTR(-ENOMEM);
+ kref_init(&vma->ref);
mutex_init(&vma->pages_mutex);
vma->vm = i915_vm_get(vm);
vma->ops = &vm->vma_ops;
@@ -290,6 +293,7 @@ i915_vma_instance(struct drm_i915_gem_object *obj,
struct i915_vma_work {
struct dma_fence_work base;
struct i915_vma *vma;
+ struct drm_i915_gem_object *pinned;
enum i915_cache_level cache_level;
unsigned int flags;
};
@@ -304,15 +308,21 @@ static int __vma_bind(struct dma_fence_work *work)
if (err)
atomic_or(I915_VMA_ERROR, &vma->flags);
- if (vma->obj)
- __i915_gem_object_unpin_pages(vma->obj);
-
return err;
}
+static void __vma_release(struct dma_fence_work *work)
+{
+ struct i915_vma_work *vw = container_of(work, typeof(*vw), base);
+
+ if (vw->pinned)
+ __i915_gem_object_unpin_pages(vw->pinned);
+}
+
static const struct dma_fence_work_ops bind_ops = {
.name = "bind",
.work = __vma_bind,
+ .release = __vma_release,
};
struct i915_vma_work *i915_vma_work(void)
@@ -393,8 +403,10 @@ int i915_vma_bind(struct i915_vma *vma,
i915_active_set_exclusive(&vma->active, &work->base.dma);
work->base.dma.error = 0; /* enable the queue_work() */
- if (vma->obj)
+ if (vma->obj) {
__i915_gem_object_pin_pages(vma->obj);
+ work->pinned = vma->obj;
+ }
} else {
GEM_BUG_ON((bind_flags & ~vma_flags) & vma->vm->bind_async_flags);
ret = vma->ops->bind_vma(vma, cache_level, bind_flags);
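
In the __vma_bind hunks, the page unpin moves out of the work function and into a new release callback on the dma_fence_work, and i915_vma_bind records the pinned object on the work item. The effect is that the pin taken before queuing the async bind is dropped exactly once on every path, whether the bind runs, fails, or is torn down early. A minimal sketch of that ownership handoff, with invented names:

#include <stdio.h>

struct object { int pin_count; };

struct work {
        void (*fn)(struct work *);
        void (*release)(struct work *);
        struct object *pinned;      /* held reference, owned by the work */
};

static void bind_fn(struct work *w)      { printf("bind runs\n"); }
static void bind_release(struct work *w)
{
        if (w->pinned)
                w->pinned->pin_count--; /* unpin on the release path */
}

static void run_work(struct work *w)
{
        w->fn(w);
        w->release(w);              /* always paired with queueing */
}

int main(void)
{
        struct object obj = { 0 };
        struct work w = { bind_fn, bind_release, NULL };

        obj.pin_count++;            /* pin before publishing the work */
        w.pinned = &obj;
        run_work(&w);
        printf("pin_count after release: %d\n", obj.pin_count); /* 0 */
        return 0;
}
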
@@ -939,6 +951,38 @@ err_pages:
return err;
}
+static void flush_idle_contexts(struct intel_gt *gt)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ for_each_engine(engine, gt, id)
+ intel_engine_flush_barriers(engine);
+
+ intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
+}
+
+int i915_ggtt_pin(struct i915_vma *vma, u32 align, unsigned int flags)
+{
+ struct i915_address_space *vm = vma->vm;
+ int err;
+
+ GEM_BUG_ON(!i915_vma_is_ggtt(vma));
+
+ do {
+ err = i915_vma_pin(vma, 0, align, flags | PIN_GLOBAL);
+ if (err != -ENOSPC)
+ return err;
+
+ /* Unlike i915_vma_pin, we don't take no for an answer! */
+ flush_idle_contexts(vm->gt);
+ if (mutex_lock_interruptible(&vm->mutex) == 0) {
+ i915_gem_evict_vm(vm);
+ mutex_unlock(&vm->mutex);
+ }
+ } while (1);
+}
+
void i915_vma_close(struct i915_vma *vma)
{
struct intel_gt *gt = vma->vm->gt;
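
The new i915_ggtt_pin() helper above encodes a retry policy: attempt the pin, and on -ENOSPC flush idle contexts and evict the address space before trying again, looping until the pin succeeds or fails with a different error. A userspace sketch of the same control flow, with the reclaim steps stubbed out:

#include <errno.h>
#include <stdio.h>

static int space = 0;

static int try_pin(void)     { return space ? 0 : -ENOSPC; }
static void flush_idle(void) { /* flush barriers, wait for idle */ }
static void evict_all(void)  { space = 1; /* pretend eviction freed room */ }

static int pin_with_retry(void)
{
        int err;

        do {
                err = try_pin();
                if (err != -ENOSPC)
                        return err;     /* success or a hard error */

                flush_idle();
                evict_all();
        } while (1);
}

int main(void)
{
        printf("pin: %d\n", pin_with_retry());
        return 0;
}
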
@@ -978,8 +1022,10 @@ void i915_vma_reopen(struct i915_vma *vma)
__i915_vma_remove_closed(vma);
}
-void i915_vma_destroy(struct i915_vma *vma)
+void i915_vma_release(struct kref *ref)
{
+ struct i915_vma *vma = container_of(ref, typeof(*vma), ref);
+
if (drm_mm_node_allocated(&vma->node)) {
mutex_lock(&vma->vm->mutex);
atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
@@ -1019,7 +1065,9 @@ void i915_vma_parked(struct intel_gt *gt)
if (!kref_get_unless_zero(&obj->base.refcount))
continue;
- if (!i915_vm_tryopen(vm)) {
+ if (i915_vm_tryopen(vm)) {
+ list_del_init(&vma->closed_link);
+ } else {
i915_gem_object_put(obj);
obj = NULL;
}
@@ -1027,7 +1075,7 @@ void i915_vma_parked(struct intel_gt *gt)
spin_unlock_irq(&gt->closed_lock);
if (obj) {
- i915_vma_destroy(vma);
+ __i915_vma_put(vma);
i915_gem_object_put(obj);
}
@@ -1054,17 +1102,16 @@ static void __i915_vma_iounmap(struct i915_vma *vma)
void i915_vma_revoke_mmap(struct i915_vma *vma)
{
- struct drm_vma_offset_node *node = &vma->obj->base.vma_node;
+ struct drm_vma_offset_node *node;
u64 vma_offset;
- lockdep_assert_held(&vma->vm->mutex);
-
if (!i915_vma_has_userfault(vma))
return;
GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
GEM_BUG_ON(!vma->obj->userfault_count);
+ node = &vma->mmo->vma_node;
vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping,
drm_vma_node_offset_addr(node) + vma_offset,
@@ -1104,8 +1151,14 @@ int i915_vma_move_to_active(struct i915_vma *vma,
return err;
if (flags & EXEC_OBJECT_WRITE) {
- if (intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CS))
- i915_active_add_request(&obj->frontbuffer->write, rq);
+ struct intel_frontbuffer *front;
+
+ front = __intel_frontbuffer_get(obj);
+ if (unlikely(front)) {
+ if (intel_frontbuffer_invalidate(front, ORIGIN_CS))
+ i915_active_add_request(&front->write, rq);
+ intel_frontbuffer_put(front);
+ }
dma_resv_add_excl_fence(vma->resv, &rq->fence);
obj->write_domain = I915_GEM_DOMAIN_RENDER;
@@ -1146,7 +1199,7 @@ int __i915_vma_unbind(struct i915_vma *vma)
GEM_BUG_ON(i915_vma_is_active(vma));
if (i915_vma_is_pinned(vma)) {
vma_print_allocator(vma, "is pinned");
- return -EBUSY;
+ return -EAGAIN;
}
GEM_BUG_ON(i915_vma_is_active(vma));
@@ -1186,7 +1239,7 @@ int __i915_vma_unbind(struct i915_vma *vma)
i915_vma_detach(vma);
vma_unbind_pages(vma);
- drm_mm_remove_node(&vma->node); /* pairs with i915_vma_destroy() */
+ drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */
return 0;
}
@@ -1195,6 +1248,9 @@ int i915_vma_unbind(struct i915_vma *vma)
struct i915_address_space *vm = vma->vm;
int err;
+ if (!drm_mm_node_allocated(&vma->node))
+ return 0;
+
err = mutex_lock_interruptible(&vm->mutex);
if (err)
return err;
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 465932813bc5..5fffa3c58908 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -51,18 +51,26 @@ enum i915_cache_level;
*/
struct i915_vma {
struct drm_mm_node node;
- struct drm_i915_gem_object *obj;
+
struct i915_address_space *vm;
const struct i915_vma_ops *ops;
- struct i915_fence_reg *fence;
+
+ struct drm_i915_gem_object *obj;
struct dma_resv *resv; /** Alias of obj->resv */
+
struct sg_table *pages;
void __iomem *iomap;
void *private; /* owned by creator */
+
+ struct i915_fence_reg *fence;
+
u64 size;
u64 display_alignment;
struct i915_page_sizes page_sizes;
+ /* mmap-offset associated with fencing for this vma */
+ struct i915_mmap_offset *mmo;
+
u32 fence_size;
u32 fence_alignment;
@@ -71,6 +79,7 @@ struct i915_vma {
* handles (but same file) for execbuf, i.e. the number of aliases
* that exist in the ctx->handle_vmas LUT for this vma.
*/
+ struct kref ref;
atomic_t open_count;
atomic_t flags;
/**
@@ -333,7 +342,20 @@ int __must_check i915_vma_unbind(struct i915_vma *vma);
void i915_vma_unlink_ctx(struct i915_vma *vma);
void i915_vma_close(struct i915_vma *vma);
void i915_vma_reopen(struct i915_vma *vma);
-void i915_vma_destroy(struct i915_vma *vma);
+
+static inline struct i915_vma *__i915_vma_get(struct i915_vma *vma)
+{
+ if (kref_get_unless_zero(&vma->ref))
+ return vma;
+
+ return NULL;
+}
+
+void i915_vma_release(struct kref *ref);
+static inline void __i915_vma_put(struct i915_vma *vma)
+{
+ kref_put(&vma->ref, i915_vma_release);
+}
#define assert_vma_held(vma) dma_resv_assert_held((vma)->resv)
@@ -349,6 +371,7 @@ static inline void i915_vma_unlock(struct i915_vma *vma)
int __must_check
i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags);
+int i915_ggtt_pin(struct i915_vma *vma, u32 align, unsigned int flags);
static inline int i915_vma_pin_count(const struct i915_vma *vma)
{
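
With the new kref in struct i915_vma, lookups use __i915_vma_get(), which only succeeds while the count is non-zero, and __i915_vma_put() drops the reference, with i915_vma_release() running on the final put. A sketch of that pair using C11 atomics in place of the kernel's kref:

#include <stdatomic.h>
#include <stdio.h>

struct vma {
        atomic_int ref;
};

static struct vma *vma_get(struct vma *v)
{
        int old = atomic_load(&v->ref);

        /* kref_get_unless_zero(): fail once the count has hit zero */
        while (old != 0)
                if (atomic_compare_exchange_weak(&v->ref, &old, old + 1))
                        return v;
        return NULL;
}

static void vma_release(struct vma *v) { printf("released\n"); }

static void vma_put(struct vma *v)
{
        if (atomic_fetch_sub(&v->ref, 1) == 1)
                vma_release(v);
}

int main(void)
{
        struct vma v = { 1 };

        if (vma_get(&v))
                vma_put(&v);
        vma_put(&v);            /* last put triggers release */
        return 0;
}
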
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index a5b571364cf6..1acb5db77431 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -73,9 +73,30 @@ const char *intel_platform_name(enum intel_platform platform)
return platform_names[platform];
}
-void intel_device_info_dump_flags(const struct intel_device_info *info,
- struct drm_printer *p)
+static const char *iommu_name(void)
{
+ const char *msg = "n/a";
+
+#ifdef CONFIG_INTEL_IOMMU
+ msg = enableddisabled(intel_iommu_gfx_mapped);
+#endif
+
+ return msg;
+}
+
+void intel_device_info_print_static(const struct intel_device_info *info,
+ struct drm_printer *p)
+{
+ drm_printf(p, "engines: %x\n", info->engine_mask);
+ drm_printf(p, "gen: %d\n", info->gen);
+ drm_printf(p, "gt: %d\n", info->gt);
+ drm_printf(p, "iommu: %s\n", iommu_name());
+ drm_printf(p, "memory-regions: %x\n", info->memory_regions);
+ drm_printf(p, "page-sizes: %x\n", info->page_sizes);
+ drm_printf(p, "platform: %s\n", intel_platform_name(info->platform));
+ drm_printf(p, "ppgtt-size: %d\n", info->ppgtt_size);
+ drm_printf(p, "ppgtt-type: %d\n", info->ppgtt_type);
+
#define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, yesno(info->name));
DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG
@@ -106,8 +127,8 @@ static void sseu_dump(const struct sseu_dev_info *sseu, struct drm_printer *p)
drm_printf(p, "has EU power gating: %s\n", yesno(sseu->has_eu_pg));
}
-void intel_device_info_dump_runtime(const struct intel_runtime_info *info,
- struct drm_printer *p)
+void intel_device_info_print_runtime(const struct intel_runtime_info *info,
+ struct drm_printer *p)
{
sseu_dump(&info->sseu, p);
@@ -148,8 +169,8 @@ static void sseu_set_eus(struct sseu_dev_info *sseu, int slice, int subslice,
}
}
-void intel_device_info_dump_topology(const struct sseu_dev_info *sseu,
- struct drm_printer *p)
+void intel_device_info_print_topology(const struct sseu_dev_info *sseu,
+ struct drm_printer *p)
{
int s, ss;
@@ -808,6 +829,8 @@ static const u16 subplatform_ult_ids[] = {
INTEL_WHL_U_GT1_IDS(0),
INTEL_WHL_U_GT2_IDS(0),
INTEL_WHL_U_GT3_IDS(0),
+ INTEL_CML_U_GT1_IDS(0),
+ INTEL_CML_U_GT2_IDS(0),
};
static const u16 subplatform_ulx_ids[] = {
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index 4bdf8a6cfb47..2725cb7fc169 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -230,12 +230,13 @@ const char *intel_platform_name(enum intel_platform platform);
void intel_device_info_subplatform_init(struct drm_i915_private *dev_priv);
void intel_device_info_runtime_init(struct drm_i915_private *dev_priv);
-void intel_device_info_dump_flags(const struct intel_device_info *info,
- struct drm_printer *p);
-void intel_device_info_dump_runtime(const struct intel_runtime_info *info,
+
+void intel_device_info_print_static(const struct intel_device_info *info,
struct drm_printer *p);
-void intel_device_info_dump_topology(const struct sseu_dev_info *sseu,
+void intel_device_info_print_runtime(const struct intel_runtime_info *info,
struct drm_printer *p);
+void intel_device_info_print_topology(const struct sseu_dev_info *sseu,
+ struct drm_printer *p);
void intel_device_info_init_mmio(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_memory_region.c b/drivers/gpu/drm/i915/intel_memory_region.c
index baaeaecc64af..e24c280e5930 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/intel_memory_region.c
@@ -73,6 +73,9 @@ __intel_memory_region_get_pages_buddy(struct intel_memory_region *mem,
min_order = ilog2(size) - ilog2(mem->mm.chunk_size);
}
+ if (size > BIT(mem->mm.max_order) * mem->mm.chunk_size)
+ return -E2BIG;
+
n_pages = size >> ilog2(mem->mm.chunk_size);
mutex_lock(&mem->mm_lock);
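
The guard added to __intel_memory_region_get_pages_buddy() rejects requests larger than one maximal buddy block, chunk_size shifted by max_order, returning -E2BIG up front rather than walking free lists that can never satisfy the request. A tiny sketch of the bound, with an invented struct name:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

struct buddy { uint64_t chunk_size; unsigned int max_order; };

static int check_size(const struct buddy *mm, uint64_t size)
{
        if (size > ((uint64_t)1 << mm->max_order) * mm->chunk_size)
                return -E2BIG;  /* larger than one maximal block */
        return 0;
}

int main(void)
{
        struct buddy mm = { .chunk_size = 4096, .max_order = 10 };

        printf("4MiB: %d\n", check_size(&mm, 4u << 20)); /* 0: fits */
        printf("8MiB: %d\n", check_size(&mm, 8u << 20)); /* -E2BIG */
        return 0;
}
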
diff --git a/drivers/gpu/drm/i915/intel_pch.c b/drivers/gpu/drm/i915/intel_pch.c
index 8fd92b9130a7..43b68b5fc562 100644
--- a/drivers/gpu/drm/i915/intel_pch.c
+++ b/drivers/gpu/drm/i915/intel_pch.c
@@ -89,6 +89,7 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
WARN_ON(!IS_ELKHARTLAKE(dev_priv));
return PCH_MCC;
case INTEL_PCH_TGP_DEVICE_ID_TYPE:
+ case INTEL_PCH_TGP2_DEVICE_ID_TYPE:
DRM_DEBUG_KMS("Found Tiger Lake LP PCH\n");
WARN_ON(!IS_TIGERLAKE(dev_priv));
return PCH_TGP;
diff --git a/drivers/gpu/drm/i915/intel_pch.h b/drivers/gpu/drm/i915/intel_pch.h
index d26c25dd8d54..3053d1ce398b 100644
--- a/drivers/gpu/drm/i915/intel_pch.h
+++ b/drivers/gpu/drm/i915/intel_pch.h
@@ -47,6 +47,7 @@ enum intel_pch {
#define INTEL_PCH_ICP_DEVICE_ID_TYPE 0x3480
#define INTEL_PCH_MCC_DEVICE_ID_TYPE 0x4B00
#define INTEL_PCH_TGP_DEVICE_ID_TYPE 0xA080
+#define INTEL_PCH_TGP2_DEVICE_ID_TYPE 0x4380
#define INTEL_PCH_JSP_DEVICE_ID_TYPE 0x4D80
#define INTEL_PCH_JSP2_DEVICE_ID_TYPE 0x3880
#define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 29061685f764..31ec82337e4f 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -463,7 +463,7 @@ static const int pessimal_latency_ns = 5000;
static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
enum pipe pipe = crtc->pipe;
@@ -794,10 +794,10 @@ static int intel_wm_num_levels(struct drm_i915_private *dev_priv)
static bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
- struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
/* FIXME check the 'enable' instead */
- if (!crtc_state->base.active)
+ if (!crtc_state->hw.active)
return false;
/*
@@ -809,9 +809,28 @@ static bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state,
* around this problem with the watermark code.
*/
if (plane->id == PLANE_CURSOR)
- return plane_state->base.fb != NULL;
+ return plane_state->hw.fb != NULL;
else
- return plane_state->base.visible;
+ return plane_state->uapi.visible;
+}
+
+static bool intel_crtc_active(struct intel_crtc *crtc)
+{
+ /* Be paranoid as we can arrive here with only partial
+ * state retrieved from the hardware during setup.
+ *
+ * We can ditch the adjusted_mode.crtc_clock check as soon
+ * as Haswell has gained clock readout/fastboot support.
+ *
+ * We can ditch the crtc->primary->state->fb check as soon as we can
+ * properly reconstruct framebuffers.
+ *
+ * FIXME: The intel_crtc->active here should be switched to
+ * crtc->state->active once we have proper CRTC states wired up
+ * for atomic.
+ */
+ return crtc->active && crtc->base.primary->state->fb &&
+ crtc->config->hw.adjusted_mode.crtc_clock;
}
static struct intel_crtc *single_enabled_crtc(struct drm_i915_private *dev_priv)
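
The bulk of the intel_pm.c churn that follows is one mechanical split: intel_crtc_state and intel_plane_state now carry two embedded states instead of a single `base`. Hardware-facing values move under ->hw (hw.fb, hw.rotation, hw.adjusted_mode, hw.active), while atomic-core plumbing and the clipped rectangles stay under ->uapi (uapi.crtc, uapi.state, uapi.src, uapi.dst, uapi.visible). A schematic sketch of the layout, with invented struct and member names:

#include <stdio.h>

/* what userspace asked for, plus atomic-core bookkeeping */
struct uapi_state { int active; int visible; /* crtc, state, src, dst */ };

/* what the hardware is actually programmed with */
struct hw_state { int active; int enable; /* fb, adjusted_mode, ... */ };

struct crtc_state_sketch {
        struct uapi_state uapi;
        struct hw_state hw;
};

int main(void)
{
        struct crtc_state_sketch s = { .uapi = { 1, 1 }, .hw = { 1, 1 } };

        /* watermark code now reads hw.*, and uapi.* only for ABI state */
        printf("uapi.active=%d hw.active=%d\n", s.uapi.active, s.hw.active);
        return 0;
}
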
@@ -850,7 +869,7 @@ static void pineview_update_wm(struct intel_crtc *unused_crtc)
crtc = single_enabled_crtc(dev_priv);
if (crtc) {
const struct drm_display_mode *adjusted_mode =
- &crtc->config->base.adjusted_mode;
+ &crtc->config->hw.adjusted_mode;
const struct drm_framebuffer *fb =
crtc->base.primary->state->fb;
int cpp = fb->format->cpp[0];
@@ -1083,10 +1102,10 @@ static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state,
int level)
{
- struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
const struct drm_display_mode *adjusted_mode =
- &crtc_state->base.adjusted_mode;
+ &crtc_state->hw.adjusted_mode;
unsigned int latency = dev_priv->wm.pri_latency[level] * 10;
unsigned int clock, htotal, cpp, width, wm;
@@ -1096,7 +1115,7 @@ static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state,
if (!intel_wm_plane_visible(crtc_state, plane_state))
return 0;
- cpp = plane_state->base.fb->format->cpp[0];
+ cpp = plane_state->hw.fb->format->cpp[0];
/*
* Not 100% sure which way ELK should go here as the
@@ -1116,7 +1135,7 @@ static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state,
clock = adjusted_mode->crtc_clock;
htotal = adjusted_mode->crtc_htotal;
- width = drm_rect_width(&plane_state->base.dst);
+ width = drm_rect_width(&plane_state->uapi.dst);
if (plane->id == PLANE_CURSOR) {
wm = intel_wm_method2(clock, htotal, width, cpp, latency);
@@ -1143,7 +1162,7 @@ static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state,
static bool g4x_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
int level, enum plane_id plane_id, u16 value)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
bool dirty = false;
for (; level < intel_wm_num_levels(dev_priv); level++) {
@@ -1159,7 +1178,7 @@ static bool g4x_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
static bool g4x_raw_fbc_wm_set(struct intel_crtc_state *crtc_state,
int level, u16 value)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
bool dirty = false;
/* NORMAL level doesn't have an FBC watermark */
@@ -1182,7 +1201,7 @@ static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *crtc_state,
static bool g4x_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
- struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
int num_levels = intel_wm_num_levels(to_i915(plane->base.dev));
enum plane_id plane_id = plane->id;
bool dirty = false;
@@ -1261,7 +1280,7 @@ static bool g4x_raw_plane_wm_is_valid(const struct intel_crtc_state *crtc_state,
static bool g4x_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state,
int level)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
if (level > dev_priv->wm.max_level)
return false;
@@ -1299,9 +1318,9 @@ static void g4x_invalidate_wms(struct intel_crtc *crtc,
static int g4x_compute_pipe_wm(struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct intel_atomic_state *state =
- to_intel_atomic_state(crtc_state->base.state);
+ to_intel_atomic_state(crtc_state->uapi.state);
struct g4x_wm_state *wm_state = &crtc_state->wm.g4x.optimal;
int num_active_planes = hweight8(crtc_state->active_planes &
~BIT(PLANE_CURSOR));
@@ -1316,8 +1335,8 @@ static int g4x_compute_pipe_wm(struct intel_crtc_state *crtc_state)
for_each_oldnew_intel_plane_in_state(state, plane,
old_plane_state,
new_plane_state, i) {
- if (new_plane_state->base.crtc != &crtc->base &&
- old_plane_state->base.crtc != &crtc->base)
+ if (new_plane_state->hw.crtc != &crtc->base &&
+ old_plane_state->hw.crtc != &crtc->base)
continue;
if (g4x_raw_plane_wm_compute(crtc_state, new_plane_state))
@@ -1388,17 +1407,17 @@ static int g4x_compute_pipe_wm(struct intel_crtc_state *crtc_state)
static int g4x_compute_intermediate_wm(struct intel_crtc_state *new_crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
struct g4x_wm_state *intermediate = &new_crtc_state->wm.g4x.intermediate;
const struct g4x_wm_state *optimal = &new_crtc_state->wm.g4x.optimal;
struct intel_atomic_state *intel_state =
- to_intel_atomic_state(new_crtc_state->base.state);
+ to_intel_atomic_state(new_crtc_state->uapi.state);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(intel_state, crtc);
const struct g4x_wm_state *active = &old_crtc_state->wm.g4x.optimal;
enum plane_id plane_id;
- if (!new_crtc_state->base.active || drm_atomic_crtc_needs_modeset(&new_crtc_state->base)) {
+ if (!new_crtc_state->hw.active || drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi)) {
*intermediate = *optimal;
intermediate->cxsr = false;
@@ -1528,10 +1547,11 @@ static void g4x_program_watermarks(struct drm_i915_private *dev_priv)
}
static void g4x_initial_watermarks(struct intel_atomic_state *state,
- struct intel_crtc_state *crtc_state)
+ struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
mutex_lock(&dev_priv->wm.wm_mutex);
crtc->wm.active.g4x = crtc_state->wm.g4x.intermediate;
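
All of the *_initial_watermarks, *_optimize_watermarks and vlv_atomic_update_fifo hooks change signature the same way: instead of being handed a crtc_state, they take the top-level atomic state plus the crtc, and look the new crtc_state up via intel_atomic_get_new_crtc_state(). A minimal table-based stand-in for that lookup:

#include <stddef.h>
#include <stdio.h>

#define MAX_CRTCS 4

struct crtc { int idx; };
struct crtc_state { int active; };

struct atomic_state {
        struct crtc_state *new_state[MAX_CRTCS];
};

static struct crtc_state *
get_new_crtc_state(struct atomic_state *s, struct crtc *c)
{
        return s->new_state[c->idx]; /* NULL if crtc not in this commit */
}

static void initial_watermarks(struct atomic_state *s, struct crtc *c)
{
        const struct crtc_state *cs = get_new_crtc_state(s, c);

        if (cs)
                printf("crtc %d: active=%d\n", c->idx, cs->active);
}

int main(void)
{
        struct crtc c = { 1 };
        struct crtc_state cs = { 1 };
        struct atomic_state s = { .new_state = { [1] = &cs } };

        initial_watermarks(&s, &c);
        return 0;
}
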
@@ -1540,10 +1560,11 @@ static void g4x_initial_watermarks(struct intel_atomic_state *state,
}
static void g4x_optimize_watermarks(struct intel_atomic_state *state,
- struct intel_crtc_state *crtc_state)
+ struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
if (!crtc_state->wm.need_postvbl_update)
return;
@@ -1589,10 +1610,10 @@ static u16 vlv_compute_wm_level(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state,
int level)
{
- struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
const struct drm_display_mode *adjusted_mode =
- &crtc_state->base.adjusted_mode;
+ &crtc_state->hw.adjusted_mode;
unsigned int clock, htotal, cpp, width, wm;
if (dev_priv->wm.pri_latency[level] == 0)
@@ -1601,7 +1622,7 @@ static u16 vlv_compute_wm_level(const struct intel_crtc_state *crtc_state,
if (!intel_wm_plane_visible(crtc_state, plane_state))
return 0;
- cpp = plane_state->base.fb->format->cpp[0];
+ cpp = plane_state->hw.fb->format->cpp[0];
clock = adjusted_mode->crtc_clock;
htotal = adjusted_mode->crtc_htotal;
width = crtc_state->pipe_src_w;
@@ -1630,7 +1651,7 @@ static bool vlv_need_sprite0_fifo_workaround(unsigned int active_planes)
static int vlv_compute_fifo(struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
const struct g4x_pipe_wm *raw =
&crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2];
struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
@@ -1742,7 +1763,7 @@ static u16 vlv_invert_wm_value(u16 wm, u16 fifo_size)
static bool vlv_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
int level, enum plane_id plane_id, u16 value)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
int num_levels = intel_wm_num_levels(dev_priv);
bool dirty = false;
@@ -1759,7 +1780,7 @@ static bool vlv_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
static bool vlv_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
- struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
enum plane_id plane_id = plane->id;
int num_levels = intel_wm_num_levels(to_i915(plane->base.dev));
int level;
@@ -1817,16 +1838,16 @@ static bool vlv_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state,
static int vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_atomic_state *state =
- to_intel_atomic_state(crtc_state->base.state);
+ to_intel_atomic_state(crtc_state->uapi.state);
struct vlv_wm_state *wm_state = &crtc_state->wm.vlv.optimal;
const struct vlv_fifo_state *fifo_state =
&crtc_state->wm.vlv.fifo_state;
int num_active_planes = hweight8(crtc_state->active_planes &
~BIT(PLANE_CURSOR));
- bool needs_modeset = drm_atomic_crtc_needs_modeset(&crtc_state->base);
+ bool needs_modeset = drm_atomic_crtc_needs_modeset(&crtc_state->uapi);
const struct intel_plane_state *old_plane_state;
const struct intel_plane_state *new_plane_state;
struct intel_plane *plane;
@@ -1837,8 +1858,8 @@ static int vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state)
for_each_oldnew_intel_plane_in_state(state, plane,
old_plane_state,
new_plane_state, i) {
- if (new_plane_state->base.crtc != &crtc->base &&
- old_plane_state->base.crtc != &crtc->base)
+ if (new_plane_state->hw.crtc != &crtc->base &&
+ old_plane_state->hw.crtc != &crtc->base)
continue;
if (vlv_raw_plane_wm_compute(crtc_state, new_plane_state))
@@ -1923,11 +1944,12 @@ static int vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state)
(((value) << DSPARB_ ## plane ## _SHIFT_VLV) & DSPARB_ ## plane ## _MASK_VLV)
static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
- struct intel_crtc_state *crtc_state)
+ struct intel_crtc *crtc)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_uncore *uncore = &dev_priv->uncore;
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
const struct vlv_fifo_state *fifo_state =
&crtc_state->wm.vlv.fifo_state;
int sprite0_start, sprite1_start, fifo_size;
@@ -2021,17 +2043,17 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
static int vlv_compute_intermediate_wm(struct intel_crtc_state *new_crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
struct vlv_wm_state *intermediate = &new_crtc_state->wm.vlv.intermediate;
const struct vlv_wm_state *optimal = &new_crtc_state->wm.vlv.optimal;
struct intel_atomic_state *intel_state =
- to_intel_atomic_state(new_crtc_state->base.state);
+ to_intel_atomic_state(new_crtc_state->uapi.state);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(intel_state, crtc);
const struct vlv_wm_state *active = &old_crtc_state->wm.vlv.optimal;
int level;
- if (!new_crtc_state->base.active || drm_atomic_crtc_needs_modeset(&new_crtc_state->base)) {
+ if (!new_crtc_state->hw.active || drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi)) {
*intermediate = *optimal;
intermediate->cxsr = false;
@@ -2147,10 +2169,11 @@ static void vlv_program_watermarks(struct drm_i915_private *dev_priv)
}
static void vlv_initial_watermarks(struct intel_atomic_state *state,
- struct intel_crtc_state *crtc_state)
+ struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
mutex_lock(&dev_priv->wm.wm_mutex);
crtc->wm.active.vlv = crtc_state->wm.vlv.intermediate;
@@ -2159,10 +2182,11 @@ static void vlv_initial_watermarks(struct intel_atomic_state *state,
}
static void vlv_optimize_watermarks(struct intel_atomic_state *state,
- struct intel_crtc_state *crtc_state)
+ struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
if (!crtc_state->wm.need_postvbl_update)
return;
@@ -2187,7 +2211,7 @@ static void i965_update_wm(struct intel_crtc *unused_crtc)
/* self-refresh has much higher latency */
static const int sr_latency_ns = 12000;
const struct drm_display_mode *adjusted_mode =
- &crtc->config->base.adjusted_mode;
+ &crtc->config->hw.adjusted_mode;
const struct drm_framebuffer *fb =
crtc->base.primary->state->fb;
int clock = adjusted_mode->crtc_clock;
@@ -2268,7 +2292,7 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
crtc = intel_get_crtc_for_plane(dev_priv, PLANE_A);
if (intel_crtc_active(crtc)) {
const struct drm_display_mode *adjusted_mode =
- &crtc->config->base.adjusted_mode;
+ &crtc->config->hw.adjusted_mode;
const struct drm_framebuffer *fb =
crtc->base.primary->state->fb;
int cpp;
@@ -2295,7 +2319,7 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
crtc = intel_get_crtc_for_plane(dev_priv, PLANE_B);
if (intel_crtc_active(crtc)) {
const struct drm_display_mode *adjusted_mode =
- &crtc->config->base.adjusted_mode;
+ &crtc->config->hw.adjusted_mode;
const struct drm_framebuffer *fb =
crtc->base.primary->state->fb;
int cpp;
@@ -2343,7 +2367,7 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
/* self-refresh has much higher latency */
static const int sr_latency_ns = 6000;
const struct drm_display_mode *adjusted_mode =
- &enabled->config->base.adjusted_mode;
+ &enabled->config->hw.adjusted_mode;
const struct drm_framebuffer *fb =
enabled->base.primary->state->fb;
int clock = adjusted_mode->crtc_clock;
@@ -2401,7 +2425,7 @@ static void i845_update_wm(struct intel_crtc *unused_crtc)
if (crtc == NULL)
return;
- adjusted_mode = &crtc->config->base.adjusted_mode;
+ adjusted_mode = &crtc->config->hw.adjusted_mode;
planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
&i845_wm_info,
dev_priv->display.get_fifo_size(dev_priv, PLANE_A),
@@ -2483,7 +2507,7 @@ static u32 ilk_compute_pri_wm(const struct intel_crtc_state *crtc_state,
if (!intel_wm_plane_visible(crtc_state, plane_state))
return 0;
- cpp = plane_state->base.fb->format->cpp[0];
+ cpp = plane_state->hw.fb->format->cpp[0];
method1 = ilk_wm_method1(crtc_state->pixel_rate, cpp, mem_value);
@@ -2491,8 +2515,8 @@ static u32 ilk_compute_pri_wm(const struct intel_crtc_state *crtc_state,
return method1;
method2 = ilk_wm_method2(crtc_state->pixel_rate,
- crtc_state->base.adjusted_mode.crtc_htotal,
- drm_rect_width(&plane_state->base.dst),
+ crtc_state->hw.adjusted_mode.crtc_htotal,
+ drm_rect_width(&plane_state->uapi.dst),
cpp, mem_value);
return min(method1, method2);
@@ -2515,12 +2539,12 @@ static u32 ilk_compute_spr_wm(const struct intel_crtc_state *crtc_state,
if (!intel_wm_plane_visible(crtc_state, plane_state))
return 0;
- cpp = plane_state->base.fb->format->cpp[0];
+ cpp = plane_state->hw.fb->format->cpp[0];
method1 = ilk_wm_method1(crtc_state->pixel_rate, cpp, mem_value);
method2 = ilk_wm_method2(crtc_state->pixel_rate,
- crtc_state->base.adjusted_mode.crtc_htotal,
- drm_rect_width(&plane_state->base.dst),
+ crtc_state->hw.adjusted_mode.crtc_htotal,
+ drm_rect_width(&plane_state->uapi.dst),
cpp, mem_value);
return min(method1, method2);
}
@@ -2541,11 +2565,11 @@ static u32 ilk_compute_cur_wm(const struct intel_crtc_state *crtc_state,
if (!intel_wm_plane_visible(crtc_state, plane_state))
return 0;
- cpp = plane_state->base.fb->format->cpp[0];
+ cpp = plane_state->hw.fb->format->cpp[0];
return ilk_wm_method2(crtc_state->pixel_rate,
- crtc_state->base.adjusted_mode.crtc_htotal,
- drm_rect_width(&plane_state->base.dst),
+ crtc_state->hw.adjusted_mode.crtc_htotal,
+ drm_rect_width(&plane_state->uapi.dst),
cpp, mem_value);
}
@@ -2559,9 +2583,10 @@ static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *crtc_state,
if (!intel_wm_plane_visible(crtc_state, plane_state))
return 0;
- cpp = plane_state->base.fb->format->cpp[0];
+ cpp = plane_state->hw.fb->format->cpp[0];
- return ilk_wm_fbc(pri_val, drm_rect_width(&plane_state->base.dst), cpp);
+ return ilk_wm_fbc(pri_val, drm_rect_width(&plane_state->uapi.dst),
+ cpp);
}
static unsigned int
@@ -2766,12 +2791,12 @@ static u32
hsw_compute_linetime_wm(const struct intel_crtc_state *crtc_state)
{
const struct intel_atomic_state *intel_state =
- to_intel_atomic_state(crtc_state->base.state);
+ to_intel_atomic_state(crtc_state->uapi.state);
const struct drm_display_mode *adjusted_mode =
- &crtc_state->base.adjusted_mode;
+ &crtc_state->hw.adjusted_mode;
u32 linetime, ips_linetime;
- if (!crtc_state->base.active)
+ if (!crtc_state->hw.active)
return 0;
if (WARN_ON(adjusted_mode->crtc_clock == 0))
return 0;
@@ -3081,11 +3106,9 @@ static bool ilk_validate_pipe_wm(const struct drm_i915_private *dev_priv,
/* Compute new watermarks for the pipe */
static int ilk_compute_pipe_wm(struct intel_crtc_state *crtc_state)
{
- struct drm_atomic_state *state = crtc_state->base.state;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct intel_pipe_wm *pipe_wm;
- struct drm_device *dev = state->dev;
- const struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_plane *plane;
const struct intel_plane_state *plane_state;
const struct intel_plane_state *pristate = NULL;
@@ -3105,12 +3128,12 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *crtc_state)
curstate = plane_state;
}
- pipe_wm->pipe_enabled = crtc_state->base.active;
+ pipe_wm->pipe_enabled = crtc_state->hw.active;
if (sprstate) {
- pipe_wm->sprites_enabled = sprstate->base.visible;
- pipe_wm->sprites_scaled = sprstate->base.visible &&
- (drm_rect_width(&sprstate->base.dst) != drm_rect_width(&sprstate->base.src) >> 16 ||
- drm_rect_height(&sprstate->base.dst) != drm_rect_height(&sprstate->base.src) >> 16);
+ pipe_wm->sprites_enabled = sprstate->uapi.visible;
+ pipe_wm->sprites_scaled = sprstate->uapi.visible &&
+ (drm_rect_width(&sprstate->uapi.dst) != drm_rect_width(&sprstate->uapi.src) >> 16 ||
+ drm_rect_height(&sprstate->uapi.dst) != drm_rect_height(&sprstate->uapi.src) >> 16);
}
usable_level = max_level;
@@ -3162,11 +3185,11 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *crtc_state)
*/
static int ilk_compute_intermediate_wm(struct intel_crtc_state *newstate)
{
- struct intel_crtc *intel_crtc = to_intel_crtc(newstate->base.crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(newstate->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
struct intel_pipe_wm *a = &newstate->wm.ilk.intermediate;
struct intel_atomic_state *intel_state =
- to_intel_atomic_state(newstate->base.state);
+ to_intel_atomic_state(newstate->uapi.state);
const struct intel_crtc_state *oldstate =
intel_atomic_get_old_crtc_state(intel_state, intel_crtc);
const struct intel_pipe_wm *b = &oldstate->wm.ilk.optimal;
@@ -3178,7 +3201,7 @@ static int ilk_compute_intermediate_wm(struct intel_crtc_state *newstate)
* and after the vblank.
*/
*a = newstate->wm.ilk.optimal;
- if (!newstate->base.active || drm_atomic_crtc_needs_modeset(&newstate->base) ||
+ if (!newstate->hw.active || drm_atomic_crtc_needs_modeset(&newstate->uapi) ||
intel_state->skip_intermediate_wm)
return 0;
@@ -3588,10 +3611,8 @@ static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
dev_priv->wm.hw = *results;
}
-bool ilk_disable_lp_wm(struct drm_device *dev)
+bool ilk_disable_lp_wm(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
}
@@ -3780,7 +3801,7 @@ bool intel_can_enable_sagv(struct intel_atomic_state *state)
crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
crtc_state = to_intel_crtc_state(crtc->base.state);
- if (crtc->base.state->adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
+ if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
return false;
for_each_intel_plane_on_crtc(dev, crtc, plane) {
@@ -3830,7 +3851,7 @@ static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv,
if (INTEL_GEN(dev_priv) < 11)
return ddb_size - 4; /* 4 blocks for bypass path allocation */
- adjusted_mode = &crtc_state->base.adjusted_mode;
+ adjusted_mode = &crtc_state->hw.adjusted_mode;
total_data_bw = total_data_rate * drm_mode_vrefresh(adjusted_mode);
/*
@@ -3859,16 +3880,16 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
struct skl_ddb_entry *alloc, /* out */
int *num_active /* out */)
{
- struct drm_atomic_state *state = crtc_state->base.state;
+ struct drm_atomic_state *state = crtc_state->uapi.state;
struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
- struct drm_crtc *for_crtc = crtc_state->base.crtc;
+ struct drm_crtc *for_crtc = crtc_state->uapi.crtc;
const struct intel_crtc *crtc;
u32 pipe_width = 0, total_width = 0, width_before_pipe = 0;
enum pipe for_pipe = to_intel_crtc(for_crtc)->pipe;
u16 ddb_size;
u32 i;
- if (WARN_ON(!state) || !crtc_state->base.active) {
+ if (WARN_ON(!state) || !crtc_state->hw.active) {
alloc->start = 0;
alloc->end = 0;
*num_active = hweight8(dev_priv->active_pipes);
@@ -3907,11 +3928,11 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
*/
for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
const struct drm_display_mode *adjusted_mode =
- &crtc_state->base.adjusted_mode;
+ &crtc_state->hw.adjusted_mode;
enum pipe pipe = crtc->pipe;
int hdisplay, vdisplay;
- if (!crtc_state->base.enable)
+ if (!crtc_state->hw.enable)
continue;
drm_mode_get_hv_timing(adjusted_mode, &hdisplay, &vdisplay);
@@ -3942,7 +3963,7 @@ static unsigned int
skl_cursor_allocation(const struct intel_crtc_state *crtc_state,
int num_active)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
int level, max_level = ilk_wm_max_level(dev_priv);
struct skl_wm_level wm = {};
int ret, min_ddb_alloc = 0;
@@ -4082,10 +4103,10 @@ skl_plane_downscale_amount(const struct intel_crtc_state *crtc_state,
*
* n.b., src is 16.16 fixed point, dst is whole integer.
*/
- src_w = drm_rect_width(&plane_state->base.src) >> 16;
- src_h = drm_rect_height(&plane_state->base.src) >> 16;
- dst_w = drm_rect_width(&plane_state->base.dst);
- dst_h = drm_rect_height(&plane_state->base.dst);
+ src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
+ src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
+ dst_w = drm_rect_width(&plane_state->uapi.dst);
+ dst_h = drm_rect_height(&plane_state->uapi.dst);
fp_w_ratio = div_fixed16(src_w, dst_w);
fp_h_ratio = div_fixed16(src_h, dst_h);
@@ -4100,21 +4121,21 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state,
int color_plane)
{
- struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
- const struct drm_framebuffer *fb = plane_state->base.fb;
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
u32 data_rate;
u32 width = 0, height = 0;
uint_fixed_16_16_t down_scale_amount;
u64 rate;
- if (!plane_state->base.visible)
+ if (!plane_state->uapi.visible)
return 0;
if (plane->id == PLANE_CURSOR)
return 0;
if (color_plane == 1 &&
- !drm_format_info_is_yuv_semiplanar(fb->format))
+ !intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier))
return 0;
/*
@@ -4122,8 +4143,8 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *crtc_state,
* the 90/270 degree plane rotation cases (to match the
* GTT mapping), hence no need to account for rotation here.
*/
- width = drm_rect_width(&plane_state->base.src) >> 16;
- height = drm_rect_height(&plane_state->base.src) >> 16;
+ width = drm_rect_width(&plane_state->uapi.src) >> 16;
+ height = drm_rect_height(&plane_state->uapi.src) >> 16;
/* UV plane does 1/2 pixel sub-sampling */
if (color_plane == 1) {
@@ -4146,7 +4167,7 @@ skl_get_total_relative_data_rate(struct intel_crtc_state *crtc_state,
u64 *plane_data_rate,
u64 *uv_plane_data_rate)
{
- struct drm_atomic_state *state = crtc_state->base.state;
+ struct drm_atomic_state *state = crtc_state->uapi.state;
struct intel_plane *plane;
const struct intel_plane_state *plane_state;
u64 total_data_rate = 0;
@@ -4181,7 +4202,7 @@ icl_get_total_relative_data_rate(struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state;
u64 total_data_rate = 0;
- if (WARN_ON(!crtc_state->base.state))
+ if (WARN_ON(!crtc_state->uapi.state))
return 0;
/* Calculate and cache data rate for each plane */
@@ -4225,8 +4246,8 @@ static int
skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state,
struct skl_ddb_allocation *ddb /* out */)
{
- struct drm_atomic_state *state = crtc_state->base.state;
- struct drm_crtc *crtc = crtc_state->base.crtc;
+ struct drm_atomic_state *state = crtc_state->uapi.state;
+ struct drm_crtc *crtc = crtc_state->uapi.crtc;
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct skl_ddb_entry *alloc = &crtc_state->wm.skl.ddb;
@@ -4248,7 +4269,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state,
if (WARN_ON(!state))
return 0;
- if (!crtc_state->base.active) {
+ if (!crtc_state->hw.active) {
alloc->start = alloc->end = 0;
return 0;
}
@@ -4291,8 +4312,8 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state,
&crtc_state->wm.skl.optimal.planes[plane_id];
if (plane_id == PLANE_CURSOR) {
- if (WARN_ON(wm->wm[level].min_ddb_alloc >
- total[PLANE_CURSOR])) {
+ if (wm->wm[level].min_ddb_alloc > total[PLANE_CURSOR]) {
+ WARN_ON(wm->wm[level].min_ddb_alloc != U16_MAX);
blocks = U32_MAX;
break;
}
@@ -4490,7 +4511,7 @@ intel_get_linetime_us(const struct intel_crtc_state *crtc_state)
u32 crtc_htotal;
uint_fixed_16_16_t linetime_us;
- if (!crtc_state->base.active)
+ if (!crtc_state->hw.active)
return u32_to_fixed16(0);
pixel_rate = crtc_state->pixel_rate;
@@ -4498,7 +4519,7 @@ intel_get_linetime_us(const struct intel_crtc_state *crtc_state)
if (WARN_ON(pixel_rate == 0))
return u32_to_fixed16(0);
- crtc_htotal = crtc_state->base.adjusted_mode.crtc_htotal;
+ crtc_htotal = crtc_state->hw.adjusted_mode.crtc_htotal;
linetime_us = div_fixed16(crtc_htotal * 1000, pixel_rate);
return linetime_us;
@@ -4533,12 +4554,13 @@ skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
u32 plane_pixel_rate, struct skl_wm_params *wp,
int color_plane)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 interm_pbpl;
/* only planar format has two planes */
- if (color_plane == 1 && !drm_format_info_is_yuv_semiplanar(format)) {
+ if (color_plane == 1 &&
+ !intel_format_info_is_yuv_semiplanar(format, modifier)) {
DRM_DEBUG_KMS("Non planar format have single plane\n");
return -EINVAL;
}
@@ -4550,7 +4572,7 @@ skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
wp->x_tiled = modifier == I915_FORMAT_MOD_X_TILED;
wp->rc_surface = modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
- wp->is_planar = drm_format_info_is_yuv_semiplanar(format);
+ wp->is_planar = intel_format_info_is_yuv_semiplanar(format, modifier);
wp->width = width;
if (color_plane == 1 && wp->is_planar)
@@ -4622,7 +4644,7 @@ skl_compute_plane_wm_params(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state,
struct skl_wm_params *wp, int color_plane)
{
- const struct drm_framebuffer *fb = plane_state->base.fb;
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
int width;
/*
@@ -4630,11 +4652,11 @@ skl_compute_plane_wm_params(const struct intel_crtc_state *crtc_state,
* the 90/270 degree plane rotation cases (to match the
* GTT mapping), hence no need to account for rotation here.
*/
- width = drm_rect_width(&plane_state->base.src) >> 16;
+ width = drm_rect_width(&plane_state->uapi.src) >> 16;
return skl_compute_wm_params(crtc_state, width,
fb->format, fb->modifier,
- plane_state->base.rotation,
+ plane_state->hw.rotation,
skl_adjusted_plane_pixel_rate(crtc_state, plane_state),
wp, color_plane);
}
@@ -4654,7 +4676,7 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
const struct skl_wm_level *result_prev,
struct skl_wm_level *result /* out */)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
u32 latency = dev_priv->wm.skl_latency[level];
uint_fixed_16_16_t method1, method2;
uint_fixed_16_16_t selected_result;
@@ -4680,14 +4702,14 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
method1 = skl_wm_method1(dev_priv, wp->plane_pixel_rate,
wp->cpp, latency, wp->dbuf_block_size);
method2 = skl_wm_method2(wp->plane_pixel_rate,
- crtc_state->base.adjusted_mode.crtc_htotal,
+ crtc_state->hw.adjusted_mode.crtc_htotal,
latency,
wp->plane_blocks_per_line);
if (wp->y_tiled) {
selected_result = max_fixed16(method2, wp->y_tile_minimum);
} else {
- if ((wp->cpp * crtc_state->base.adjusted_mode.crtc_htotal /
+ if ((wp->cpp * crtc_state->hw.adjusted_mode.crtc_htotal /
wp->dbuf_block_size < 1) &&
(wp->plane_bytes_per_line / wp->dbuf_block_size < 1)) {
selected_result = method2;
@@ -4778,7 +4800,7 @@ skl_compute_wm_levels(const struct intel_crtc_state *crtc_state,
const struct skl_wm_params *wm_params,
struct skl_wm_level *levels)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
int level, max_level = ilk_wm_max_level(dev_priv);
struct skl_wm_level *result_prev = &levels[0];
@@ -4795,7 +4817,7 @@ skl_compute_wm_levels(const struct intel_crtc_state *crtc_state,
static u32
skl_compute_linetime_wm(const struct intel_crtc_state *crtc_state)
{
- struct drm_atomic_state *state = crtc_state->base.state;
+ struct drm_atomic_state *state = crtc_state->uapi.state;
struct drm_i915_private *dev_priv = to_i915(state->dev);
uint_fixed_16_16_t linetime_us;
u32 linetime_wm;
@@ -4814,7 +4836,7 @@ static void skl_compute_transition_wm(const struct intel_crtc_state *crtc_state,
const struct skl_wm_params *wp,
struct skl_plane_wm *wm)
{
- struct drm_device *dev = crtc_state->base.crtc->dev;
+ struct drm_device *dev = crtc_state->uapi.crtc->dev;
const struct drm_i915_private *dev_priv = to_i915(dev);
u16 trans_min, trans_y_tile_min;
const u16 trans_amount = 10; /* This is a configurable amount */
@@ -4912,8 +4934,8 @@ static int skl_build_plane_wm_uv(struct intel_crtc_state *crtc_state,
static int skl_build_plane_wm(struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
- struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
- const struct drm_framebuffer *fb = plane_state->base.fb;
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
enum plane_id plane_id = plane->id;
int ret;
@@ -4938,7 +4960,7 @@ static int skl_build_plane_wm(struct intel_crtc_state *crtc_state,
static int icl_build_plane_wm(struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
- enum plane_id plane_id = to_intel_plane(plane_state->base.plane)->id;
+ enum plane_id plane_id = to_intel_plane(plane_state->uapi.plane)->id;
int ret;
/* Watermarks calculated in master */
@@ -4946,7 +4968,7 @@ static int icl_build_plane_wm(struct intel_crtc_state *crtc_state,
return 0;
if (plane_state->planar_linked_plane) {
- const struct drm_framebuffer *fb = plane_state->base.fb;
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
enum plane_id y_plane_id = plane_state->planar_linked_plane->id;
WARN_ON(!intel_wm_plane_visible(crtc_state, plane_state));
@@ -4974,7 +4996,7 @@ static int icl_build_plane_wm(struct intel_crtc_state *crtc_state,
static int skl_build_pipe_wm(struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
struct intel_plane *plane;
const struct intel_plane_state *plane_state;
@@ -5151,8 +5173,8 @@ static int
skl_ddb_add_affected_planes(const struct intel_crtc_state *old_crtc_state,
struct intel_crtc_state *new_crtc_state)
{
- struct intel_atomic_state *state = to_intel_atomic_state(new_crtc_state->base.state);
- struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+ struct intel_atomic_state *state = to_intel_atomic_state(new_crtc_state->uapi.state);
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_plane *plane;
@@ -5356,7 +5378,7 @@ skl_ddb_add_affected_pipes(struct intel_atomic_state *state)
if (ret)
return ret;
- state->active_pipe_changes = ~0;
+ state->active_pipe_changes = INTEL_INFO(dev_priv)->pipe_mask;
/*
* We usually only initialize state->active_pipes if we
@@ -5382,7 +5404,7 @@ skl_ddb_add_affected_pipes(struct intel_atomic_state *state)
* to grab the lock on *all* CRTC's.
*/
if (state->active_pipe_changes || state->modeset) {
- state->wm_results.dirty_pipes = ~0;
+ state->wm_results.dirty_pipes = INTEL_INFO(dev_priv)->pipe_mask;
ret = intel_add_all_pipes(state);
if (ret)
@@ -5436,7 +5458,7 @@ static int skl_wm_add_affected_planes(struct intel_atomic_state *state,
* power well the hardware state will go out of sync
* with the software state.
*/
- if (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) &&
+ if (!drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi) &&
skl_plane_wm_equals(dev_priv,
&old_crtc_state->wm.skl.optimal.planes[plane_id],
&new_crtc_state->wm.skl.optimal.planes[plane_id]))
@@ -5500,11 +5522,12 @@ skl_compute_wm(struct intel_atomic_state *state)
}
static void skl_atomic_update_crtc_wm(struct intel_atomic_state *state,
- struct intel_crtc_state *crtc_state)
+ struct intel_crtc *crtc)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
enum pipe pipe = crtc->pipe;
if ((state->wm_results.dirty_pipes & BIT(crtc->pipe)) == 0)
@@ -5514,10 +5537,11 @@ static void skl_atomic_update_crtc_wm(struct intel_atomic_state *state,
}
static void skl_initial_wm(struct intel_atomic_state *state,
- struct intel_crtc_state *crtc_state)
+ struct intel_crtc *crtc)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
struct skl_ddb_values *results = &state->wm_results;
if ((results->dirty_pipes & BIT(crtc->pipe)) == 0)
@@ -5525,8 +5549,8 @@ static void skl_initial_wm(struct intel_atomic_state *state,
mutex_lock(&dev_priv->wm.wm_mutex);
- if (crtc_state->base.active_changed)
- skl_atomic_update_crtc_wm(state, crtc_state);
+ if (crtc_state->uapi.active_changed)
+ skl_atomic_update_crtc_wm(state, crtc);
mutex_unlock(&dev_priv->wm.wm_mutex);
}
@@ -5582,10 +5606,11 @@ static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
}
static void ilk_initial_watermarks(struct intel_atomic_state *state,
- struct intel_crtc_state *crtc_state)
+ struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
mutex_lock(&dev_priv->wm.wm_mutex);
crtc->wm.active.ilk = crtc_state->wm.ilk.intermediate;
@@ -5594,10 +5619,11 @@ static void ilk_initial_watermarks(struct intel_atomic_state *state,
}
static void ilk_optimize_watermarks(struct intel_atomic_state *state,
- struct intel_crtc_state *crtc_state)
+ struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
if (!crtc_state->wm.need_postvbl_update)
return;
@@ -5938,7 +5964,7 @@ void g4x_wm_sanitize(struct drm_i915_private *dev_priv)
enum plane_id plane_id = plane->id;
int level;
- if (plane_state->base.visible)
+ if (plane_state->uapi.visible)
continue;
for (level = 0; level < 3; level++) {
@@ -6093,7 +6119,7 @@ void vlv_wm_sanitize(struct drm_i915_private *dev_priv)
enum plane_id plane_id = plane->id;
int level;
- if (plane_state->base.visible)
+ if (plane_state->uapi.visible)
continue;
for (level = 0; level < wm_state->num_levels; level++) {
@@ -6369,7 +6395,6 @@ static void cpt_init_clock_gating(struct drm_i915_private *dev_priv)
val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
if (dev_priv->vbt.fdi_rx_polarity_inverted)
val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
- val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
I915_WRITE(TRANS_CHICKEN2(pipe), val);
diff --git a/drivers/gpu/drm/i915/intel_pm.h b/drivers/gpu/drm/i915/intel_pm.h
index b579c724b915..c06c6a846d9a 100644
--- a/drivers/gpu/drm/i915/intel_pm.h
+++ b/drivers/gpu/drm/i915/intel_pm.h
@@ -54,7 +54,7 @@ void skl_write_plane_wm(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state);
void skl_write_cursor_wm(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state);
-bool ilk_disable_lp_wm(struct drm_device *dev);
+bool ilk_disable_lp_wm(struct drm_i915_private *dev_priv);
void intel_init_ipc(struct drm_i915_private *dev_priv);
void intel_enable_ipc(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_region_lmem.c b/drivers/gpu/drm/i915/intel_region_lmem.c
index 583118095635..eddb392917aa 100644
--- a/drivers/gpu/drm/i915/intel_region_lmem.c
+++ b/drivers/gpu/drm/i915/intel_region_lmem.c
@@ -51,8 +51,10 @@ static int init_fake_lmem_bar(struct intel_memory_region *mem)
static void release_fake_lmem_bar(struct intel_memory_region *mem)
{
- if (drm_mm_node_allocated(&mem->fake_mappable))
- drm_mm_remove_node(&mem->fake_mappable);
+ if (!drm_mm_node_allocated(&mem->fake_mappable))
+ return;
+
+ drm_mm_remove_node(&mem->fake_mappable);
dma_unmap_resource(&mem->i915->drm.pdev->dev,
mem->remap_addr,
diff --git a/drivers/gpu/drm/i915/intel_wakeref.h b/drivers/gpu/drm/i915/intel_wakeref.h
index da6e8fd506e6..8d945db94b7a 100644
--- a/drivers/gpu/drm/i915/intel_wakeref.h
+++ b/drivers/gpu/drm/i915/intel_wakeref.h
@@ -59,9 +59,7 @@ void __intel_wakeref_put_last(struct intel_wakeref *wf, unsigned long flags);
/**
* intel_wakeref_get: Acquire the wakeref
- * @i915: the drm_i915_private device
* @wf: the wakeref
- * @fn: callback for acquired the wakeref, called only on first acquire.
*
* Acquire a hold on the wakeref. The first user to do so, will acquire
* the runtime pm wakeref and then call the @fn underneath the wakeref
@@ -76,6 +74,7 @@ void __intel_wakeref_put_last(struct intel_wakeref *wf, unsigned long flags);
static inline int
intel_wakeref_get(struct intel_wakeref *wf)
{
+ might_sleep();
if (unlikely(!atomic_inc_not_zero(&wf->count)))
return __intel_wakeref_get_first(wf);
@@ -83,6 +82,22 @@ intel_wakeref_get(struct intel_wakeref *wf)
}
/**
+ * __intel_wakeref_get: Acquire the wakeref, again
+ * @wf: the wakeref
+ *
+ * Increment the wakeref counter; this is only valid if the wakeref is
+ * already held by the caller.
+ *
+ * See intel_wakeref_get().
+ */
+static inline void
+__intel_wakeref_get(struct intel_wakeref *wf)
+{
+ INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
+ atomic_inc(&wf->count);
+}
+
+/**
* intel_wakeref_get_if_in_use: Acquire the wakeref
* @wf: the wakeref
*
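
The wakeref changes make the sleeping behaviour explicit: intel_wakeref_get() gains a might_sleep() because its first-user slow path can sleep, and the new __intel_wakeref_get() is the re-acquire variant that asserts a reference is already held. A userspace sketch of the two paths (the real code serializes competing first users under a mutex, elided here):

#include <assert.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int count;

static int slow_get_first(void)
{
        /* would acquire runtime pm here; may sleep */
        atomic_store(&count, 1);
        return 0;
}

static int wakeref_get(void)
{
        int old = atomic_load(&count);

        while (old != 0)        /* inc_not_zero fast path */
                if (atomic_compare_exchange_weak(&count, &old, old + 1))
                        return 0;
        return slow_get_first();
}

static void wakeref_get_again(void)
{
        assert(atomic_load(&count) > 0); /* only valid if already held */
        atomic_fetch_add(&count, 1);
}

int main(void)
{
        wakeref_get();          /* slow path: first user */
        wakeref_get_again();    /* fast, caller already holds one */
        printf("count = %d\n", atomic_load(&count)); /* 2 */
        return 0;
}
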
diff --git a/drivers/gpu/drm/i915/selftests/i915_active.c b/drivers/gpu/drm/i915/selftests/i915_active.c
index 260b0ee5d1e3..ef572a0c2566 100644
--- a/drivers/gpu/drm/i915/selftests/i915_active.c
+++ b/drivers/gpu/drm/i915/selftests/i915_active.c
@@ -99,7 +99,7 @@ __live_active_setup(struct drm_i915_private *i915)
for_each_uabi_engine(engine, i915) {
struct i915_request *rq;
- rq = i915_request_create(engine->kernel_context);
+ rq = intel_engine_create_kernel_request(engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
break;
@@ -155,7 +155,11 @@ static int live_active_wait(void *arg)
i915_active_wait(&active->base);
if (!READ_ONCE(active->retired)) {
+ struct drm_printer p = drm_err_printer(__func__);
+
pr_err("i915_active not retired after waiting!\n");
+ i915_active_print(&active->base, &p);
+
err = -EINVAL;
}
@@ -184,7 +188,11 @@ static int live_active_retire(void *arg)
err = -EIO;
if (!READ_ONCE(active->retired)) {
+ struct drm_printer p = drm_err_printer(__func__);
+
pr_err("i915_active not retired after flushing!\n");
+ i915_active_print(&active->base, &p);
+
err = -EINVAL;
}
@@ -250,3 +258,36 @@ void i915_active_print(struct i915_active *ref, struct drm_printer *m)
i915_active_release(ref);
}
}
+
+static void spin_unlock_wait(spinlock_t *lock)
+{
+ spin_lock_irq(lock);
+ spin_unlock_irq(lock);
+}
+
+void i915_active_unlock_wait(struct i915_active *ref)
+{
+ if (i915_active_acquire_if_busy(ref)) {
+ struct active_node *it, *n;
+
+ rcu_read_lock();
+ rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
+ struct dma_fence *f;
+
+ /* Wait for all active callbacks */
+ f = rcu_dereference(it->base.fence);
+ if (f)
+ spin_unlock_wait(f->lock);
+ }
+ rcu_read_unlock();
+
+ i915_active_release(ref);
+ }
+
+ /* And wait for the retire callback */
+ spin_lock_irq(&ref->tree_lock);
+ spin_unlock_irq(&ref->tree_lock);
+
+ /* ... which may have been on a thread instead */
+ flush_work(&ref->work);
+}
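
The helper added above revives the lock/unlock-to-wait idiom: taking and immediately dropping a lock guarantees that any critical section in flight when we arrived has finished, without holding the lock across the caller's own work. A pthread rendering of the same idea:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* returns only after any current holder of @m has released it */
static void mutex_unlock_wait(pthread_mutex_t *m)
{
        pthread_mutex_lock(m);
        pthread_mutex_unlock(m);
}

int main(void)
{
        mutex_unlock_wait(&lock); /* no holder: returns immediately */
        printf("all prior critical sections drained\n");
        return 0;
}
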
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c
index d83f6bf6d9d4..b37fc53973cc 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem.c
@@ -9,6 +9,7 @@
#include "gem/selftests/igt_gem_utils.h"
#include "gem/selftests/mock_context.h"
#include "gt/intel_gt.h"
+#include "gt/intel_gt_pm.h"
#include "i915_selftest.h"
@@ -136,7 +137,7 @@ static int igt_gem_suspend(void *arg)
{
struct drm_i915_private *i915 = arg;
struct i915_gem_context *ctx;
- struct drm_file *file;
+ struct file *file;
int err;
file = mock_file(i915);
@@ -163,7 +164,7 @@ static int igt_gem_suspend(void *arg)
err = switch_to_context(ctx);
out:
- mock_file_free(i915, file);
+ fput(file);
return err;
}
@@ -171,7 +172,7 @@ static int igt_gem_hibernate(void *arg)
{
struct drm_i915_private *i915 = arg;
struct i915_gem_context *ctx;
- struct drm_file *file;
+ struct file *file;
int err;
file = mock_file(i915);
@@ -198,7 +199,7 @@ static int igt_gem_hibernate(void *arg)
err = switch_to_context(ctx);
out:
- mock_file_free(i915, file);
+ fput(file);
return err;
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
index 42e948144f1b..06ef88510209 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
@@ -198,8 +198,8 @@ static int igt_overcommit(void *arg)
quirk_add(obj, &objects);
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
- if (!IS_ERR(vma) || PTR_ERR(vma) != -ENOSPC) {
- pr_err("Failed to evict+insert, i915_gem_object_ggtt_pin returned err=%d\n", (int)PTR_ERR(vma));
+ if (vma != ERR_PTR(-ENOSPC)) {
+ pr_err("Failed to evict+insert, i915_gem_object_ggtt_pin returned err=%d\n", (int)PTR_ERR_OR_ZERO(vma));
err = -EINVAL;
goto cleanup;
}
@@ -466,7 +466,7 @@ static int igt_evict_contexts(void *arg)
/* Overfill the GGTT with context objects and so try to evict one. */
for_each_engine(engine, gt, id) {
struct i915_sw_fence fence;
- struct drm_file *file;
+ struct file *file;
file = mock_file(i915);
if (IS_ERR(file)) {
@@ -515,7 +515,7 @@ static int igt_evict_contexts(void *arg)
pr_info("Submitted %lu contexts/requests on %s\n",
count, engine->name);
- mock_file_free(i915, file);
+ fput(file);
if (err)
break;
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 3f7e80fb3bbd..80cde5bda922 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -212,10 +212,12 @@ static int lowlevel_hole(struct drm_i915_private *i915,
unsigned long end_time)
{
I915_RND_STATE(seed_prng);
+ struct i915_vma *mock_vma;
unsigned int size;
- struct i915_vma mock_vma;
- memset(&mock_vma, 0, sizeof(struct i915_vma));
+ mock_vma = kzalloc(sizeof(*mock_vma), GFP_KERNEL);
+ if (!mock_vma)
+ return -ENOMEM;
/* Keep creating larger objects until one cannot fit into the hole */
for (size = 12; (hole_end - hole_start) >> size; size++) {
@@ -239,8 +241,10 @@ static int lowlevel_hole(struct drm_i915_private *i915,
if (order)
break;
} while (count >>= 1);
- if (!count)
+ if (!count) {
+ kfree(mock_vma);
return -ENOMEM;
+ }
GEM_BUG_ON(!order);
GEM_BUG_ON(count * BIT_ULL(size) > vm->total);
@@ -283,12 +287,12 @@ static int lowlevel_hole(struct drm_i915_private *i915,
vm->allocate_va_range(vm, addr, BIT_ULL(size)))
break;
- mock_vma.pages = obj->mm.pages;
- mock_vma.node.size = BIT_ULL(size);
- mock_vma.node.start = addr;
+ mock_vma->pages = obj->mm.pages;
+ mock_vma->node.size = BIT_ULL(size);
+ mock_vma->node.start = addr;
with_intel_runtime_pm(&i915->runtime_pm, wakeref)
- vm->insert_entries(vm, &mock_vma,
+ vm->insert_entries(vm, mock_vma,
I915_CACHE_NONE, 0);
}
count = n;
@@ -311,6 +315,7 @@ static int lowlevel_hole(struct drm_i915_private *i915,
cleanup_freed_objects(i915);
}
+ kfree(mock_vma);
return 0;
}
@@ -1001,9 +1006,9 @@ static int exercise_ppgtt(struct drm_i915_private *dev_priv,
u64 hole_start, u64 hole_end,
unsigned long end_time))
{
- struct drm_file *file;
struct i915_ppgtt *ppgtt;
IGT_TIMEOUT(end_time);
+ struct file *file;
int err;
if (!HAS_FULL_PPGTT(dev_priv))
@@ -1026,7 +1031,7 @@ static int exercise_ppgtt(struct drm_i915_private *dev_priv,
i915_vm_put(&ppgtt->vm);
out_free:
- mock_file_free(dev_priv, file);
+ fput(file);
return err;
}
@@ -1161,11 +1166,13 @@ static int igt_ggtt_page(void *arg)
goto out_free;
memset(&tmp, 0, sizeof(tmp));
+ mutex_lock(&ggtt->vm.mutex);
err = drm_mm_insert_node_in_range(&ggtt->vm.mm, &tmp,
count * PAGE_SIZE, 0,
I915_COLOR_UNEVICTABLE,
0, ggtt->mappable_end,
DRM_MM_INSERT_LOW);
+ mutex_unlock(&ggtt->vm.mutex);
if (err)
goto out_unpin;
@@ -1217,7 +1224,9 @@ static int igt_ggtt_page(void *arg)
out_remove:
ggtt->vm.clear_range(&ggtt->vm, tmp.start, tmp.size);
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ mutex_lock(&ggtt->vm.mutex);
drm_mm_remove_node(&tmp);
+ mutex_unlock(&ggtt->vm.mutex);
out_unpin:
i915_gem_object_unpin_pages(obj);
out_free:
@@ -1782,9 +1791,9 @@ static int igt_cs_tlb(void *arg)
struct i915_address_space *vm;
struct i915_gem_context *ctx;
struct intel_context *ce;
- struct drm_file *file;
struct i915_vma *vma;
I915_RND_STATE(prng);
+ struct file *file;
unsigned int i;
u32 *result;
u32 *batch;
@@ -2022,7 +2031,7 @@ out_put_bbe:
out_vm:
i915_vm_put(vm);
out_unlock:
- mock_file_free(i915, file);
+ fput(file);
return err;
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
index 4b3cac73e291..476fba2ed8bb 100644
--- a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
@@ -16,6 +16,7 @@ selftest(gt_engines, intel_engine_live_selftests)
selftest(gt_timelines, intel_timeline_live_selftests)
selftest(gt_contexts, intel_context_live_selftests)
selftest(gt_lrc, intel_lrc_live_selftests)
+selftest(gt_mocs, intel_mocs_live_selftests)
selftest(gt_pm, intel_gt_pm_live_selftests)
selftest(gt_heartbeat, intel_heartbeat_live_selftests)
selftest(requests, i915_request_live_selftests)
@@ -36,5 +37,6 @@ selftest(reset, intel_reset_live_selftests)
selftest(memory_region, intel_memory_region_live_selftests)
selftest(hangcheck, intel_hangcheck_live_selftests)
selftest(execlists, intel_execlists_live_selftests)
-selftest(guc, intel_guc_live_selftest)
selftest(perf, i915_perf_live_selftests)
+/* Here be dragons: keep last to run last! */
+selftest(late_gt_pm, intel_gt_pm_late_selftests)
diff --git a/drivers/gpu/drm/i915/selftests/i915_perf.c b/drivers/gpu/drm/i915/selftests/i915_perf.c
index aabd07f67e49..d1a1568c47ba 100644
--- a/drivers/gpu/drm/i915/selftests/i915_perf.c
+++ b/drivers/gpu/drm/i915/selftests/i915_perf.c
@@ -132,7 +132,7 @@ static int live_noa_delay(void *arg)
for (i = 0; i < 4; i++)
intel_write_status_page(stream->engine, 0x100 + i, 0);
- rq = i915_request_create(stream->engine->kernel_context);
+ rq = intel_engine_create_kernel_request(stream->engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto out;
diff --git a/drivers/gpu/drm/i915/selftests/i915_perf_selftests.h b/drivers/gpu/drm/i915/selftests/i915_perf_selftests.h
new file mode 100644
index 000000000000..f7129a243daa
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/i915_perf_selftests.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* List each unit test as selftest(name, function)
+ *
+ * The name is used as both an enum and expanded as subtest__name to create
+ * a module parameter. It must be unique and legal for a C identifier.
+ *
+ * The function should be of type int function(void). It may be conditionally
+ * compiled using #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST).
+ *
+ * Tests are executed in order by igt/i915_selftest
+ */
+selftest(engine_cs, intel_engine_cs_perf_selftests)
+selftest(blt, i915_gem_object_blt_perf_selftests)
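The header is an X-macro list: each consumer defines selftest() to taste, includes the file, and undefines it again, so the one list yields an enum, a lookup table and a set of module parameters without duplication. A condensed sketch of that double expansion, mirroring the two entries above:

	/* stand-in for #include "i915_perf_selftests.h" */
	#define PERF_SELFTESTS \
		selftest(engine_cs, intel_engine_cs_perf_selftests) \
		selftest(blt, i915_gem_object_blt_perf_selftests)

	/* first expansion: an enum of test ids */
	enum {
	#define selftest(name, func) perf_##name,
		PERF_SELFTESTS
	#undef selftest
	};

	/* second expansion: a table indexed by those ids */
	#define selftest(n, f) [perf_##n] = { .name = #n },
	static struct { const char *name; } perf_tests[] = {
		PERF_SELFTESTS
	};
	#undef selftest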
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index 8618a4dc0701..f89d9c42f1fa 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -27,11 +27,13 @@
#include "gem/i915_gem_pm.h"
#include "gem/selftests/mock_context.h"
+#include "gt/intel_engine_pm.h"
#include "gt/intel_gt.h"
#include "i915_random.h"
#include "i915_selftest.h"
#include "igt_live_test.h"
+#include "igt_spinner.h"
#include "lib_sw_fence.h"
#include "mock_drm.h"
@@ -540,6 +542,7 @@ static int live_nop_request(void *arg)
if (err)
return err;
+ intel_engine_pm_get(engine);
for_each_prime_number_from(prime, 1, 8192) {
struct i915_request *request = NULL;
@@ -578,6 +581,7 @@ static int live_nop_request(void *arg)
if (__igt_timeout(end_time, NULL))
break;
}
+ intel_engine_pm_put(engine);
err = igt_live_test_end(&t);
if (err)
@@ -692,10 +696,13 @@ static int live_empty_request(void *arg)
if (err)
goto out_batch;
+ intel_engine_pm_get(engine);
+
/* Warmup / preload */
request = empty_request(engine, batch);
if (IS_ERR(request)) {
err = PTR_ERR(request);
+ intel_engine_pm_put(engine);
goto out_batch;
}
i915_request_wait(request, 0, MAX_SCHEDULE_TIMEOUT);
@@ -708,6 +715,7 @@ static int live_empty_request(void *arg)
request = empty_request(engine, batch);
if (IS_ERR(request)) {
err = PTR_ERR(request);
+ intel_engine_pm_put(engine);
goto out_batch;
}
}
@@ -721,6 +729,7 @@ static int live_empty_request(void *arg)
break;
}
i915_request_put(request);
+ intel_engine_pm_put(engine);
err = igt_live_test_end(&t);
if (err)
@@ -740,10 +749,8 @@ out_batch:
static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
{
- struct i915_gem_context *ctx = i915->kernel_context;
struct drm_i915_gem_object *obj;
const int gen = INTEL_GEN(i915);
- struct i915_address_space *vm;
struct i915_vma *vma;
u32 *cmd;
int err;
@@ -752,9 +759,7 @@ static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
if (IS_ERR(obj))
return ERR_CAST(obj);
- vm = i915_gem_context_get_vm_rcu(ctx);
- vma = i915_vma_instance(obj, vm, NULL);
- i915_vm_put(vm);
+ vma = i915_vma_instance(obj, i915->gt.vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err;
@@ -845,7 +850,7 @@ static int live_all_engines(void *arg)
idx = 0;
for_each_uabi_engine(engine, i915) {
- request[idx] = i915_request_create(engine->kernel_context);
+ request[idx] = intel_engine_create_kernel_request(engine);
if (IS_ERR(request[idx])) {
err = PTR_ERR(request[idx]);
pr_err("%s: Request allocation failed with err=%d\n",
@@ -962,7 +967,7 @@ static int live_sequential_engines(void *arg)
goto out_free;
}
- request[idx] = i915_request_create(engine->kernel_context);
+ request[idx] = intel_engine_create_kernel_request(engine);
if (IS_ERR(request[idx])) {
err = PTR_ERR(request[idx]);
pr_err("%s: Request allocation failed for %s with err=%d\n",
@@ -1067,15 +1072,18 @@ static int __live_parallel_engine1(void *arg)
struct intel_engine_cs *engine = arg;
IGT_TIMEOUT(end_time);
unsigned long count;
+ int err = 0;
count = 0;
+ intel_engine_pm_get(engine);
do {
struct i915_request *rq;
- int err;
rq = i915_request_create(engine->kernel_context);
- if (IS_ERR(rq))
- return PTR_ERR(rq);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ break;
+ }
i915_request_get(rq);
i915_request_add(rq);
@@ -1085,13 +1093,14 @@ static int __live_parallel_engine1(void *arg)
err = -ETIME;
i915_request_put(rq);
if (err)
- return err;
+ break;
count++;
} while (!__igt_timeout(end_time, NULL));
+ intel_engine_pm_put(engine);
pr_info("%s: %lu request + sync\n", engine->name, count);
- return 0;
+ return err;
}
static int __live_parallel_engineN(void *arg)
@@ -1099,21 +1108,100 @@ static int __live_parallel_engineN(void *arg)
struct intel_engine_cs *engine = arg;
IGT_TIMEOUT(end_time);
unsigned long count;
+ int err = 0;
count = 0;
+ intel_engine_pm_get(engine);
do {
struct i915_request *rq;
rq = i915_request_create(engine->kernel_context);
- if (IS_ERR(rq))
- return PTR_ERR(rq);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ break;
+ }
i915_request_add(rq);
count++;
} while (!__igt_timeout(end_time, NULL));
+ intel_engine_pm_put(engine);
pr_info("%s: %lu requests\n", engine->name, count);
- return 0;
+ return err;
+}
+
+static bool wake_all(struct drm_i915_private *i915)
+{
+ if (atomic_dec_and_test(&i915->selftest.counter)) {
+ wake_up_var(&i915->selftest.counter);
+ return true;
+ }
+
+ return false;
+}
+
+static int wait_for_all(struct drm_i915_private *i915)
+{
+ if (wake_all(i915))
+ return 0;
+
+ if (wait_var_event_timeout(&i915->selftest.counter,
+ !atomic_read(&i915->selftest.counter),
+ i915_selftest.timeout_jiffies))
+ return 0;
+
+ return -ETIME;
+}
+
+static int __live_parallel_spin(void *arg)
+{
+ struct intel_engine_cs *engine = arg;
+ struct igt_spinner spin;
+ struct i915_request *rq;
+ int err = 0;
+
+ /*
+ * Create a spinner running for eternity on each engine. If a second
+ * spinner is incorrectly placed on the same engine, it will not be
+ * able to start in time.
+ */
+
+ if (igt_spinner_init(&spin, engine->gt)) {
+ wake_all(engine->i915);
+ return -ENOMEM;
+ }
+
+ intel_engine_pm_get(engine);
+ rq = igt_spinner_create_request(&spin,
+ engine->kernel_context,
+ MI_NOOP); /* no preemption */
+ intel_engine_pm_put(engine);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ if (err == -ENODEV)
+ err = 0;
+ wake_all(engine->i915);
+ goto out_spin;
+ }
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (igt_wait_for_spinner(&spin, rq)) {
+ /* Occupy this engine for the whole test */
+ err = wait_for_all(engine->i915);
+ } else {
+ pr_err("Failed to start spinner on %s\n", engine->name);
+ err = -EINVAL;
+ }
+ igt_spinner_end(&spin);
+
+ if (err == 0 && i915_request_wait(rq, 0, HZ / 5) < 0)
+ err = -EIO;
+ i915_request_put(rq);
+
+out_spin:
+ igt_spinner_fini(&spin);
+ return err;
}
static int live_parallel_engines(void *arg)
@@ -1122,6 +1210,7 @@ static int live_parallel_engines(void *arg)
static int (* const func[])(void *arg) = {
__live_parallel_engine1,
__live_parallel_engineN,
+ __live_parallel_spin,
NULL,
};
const unsigned int nengines = num_uabi_engines(i915);
@@ -1140,13 +1229,17 @@ static int live_parallel_engines(void *arg)
return -ENOMEM;
for (fn = func; !err && *fn; fn++) {
+ char name[KSYM_NAME_LEN];
struct igt_live_test t;
unsigned int idx;
- err = igt_live_test_begin(&t, i915, __func__, "");
+ snprintf(name, sizeof(name), "%pS", fn);
+ err = igt_live_test_begin(&t, i915, __func__, name);
if (err)
break;
+ atomic_set(&i915->selftest.counter, nengines);
+
idx = 0;
for_each_uabi_engine(engine, i915) {
tsk[idx] = kthread_run(*fn, engine,
@@ -1230,9 +1323,9 @@ static int live_breadcrumbs_smoketest(void *arg)
struct task_struct **threads;
struct igt_live_test live;
intel_wakeref_t wakeref;
- struct drm_file *file;
struct smoketest *smoke;
unsigned int n, idx;
+ struct file *file;
int ret = 0;
/*
@@ -1354,7 +1447,7 @@ out_threads:
out_smoke:
kfree(smoke);
out_file:
- mock_file_free(i915, file);
+ fput(file);
out_rpm:
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
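wake_all()/wait_for_all() above form a simple rendezvous: each engine thread decrements a shared counter, the last arrival wakes the others, and everyone proceeds once the counter hits zero. A self-contained sketch of the same pattern, with illustrative names:

	#include <linux/atomic.h>
	#include <linux/errno.h>
	#include <linux/wait_bit.h>

	static atomic_t arrivals; /* preset to the number of participants */

	static bool rendezvous_signal(void)
	{
		if (atomic_dec_and_test(&arrivals)) {
			wake_up_var(&arrivals); /* last one in wakes the rest */
			return true;
		}

		return false;
	}

	static int rendezvous_wait(unsigned long timeout)
	{
		if (rendezvous_signal())
			return 0;

		if (wait_var_event_timeout(&arrivals,
					   !atomic_read(&arrivals),
					   timeout))
			return 0;

		return -ETIME;
	}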
diff --git a/drivers/gpu/drm/i915/selftests/i915_selftest.c b/drivers/gpu/drm/i915/selftests/i915_selftest.c
index a6cca4ad96f6..d3bf9eefb682 100644
--- a/drivers/gpu/drm/i915/selftests/i915_selftest.c
+++ b/drivers/gpu/drm/i915/selftests/i915_selftest.c
@@ -57,6 +57,12 @@ enum {
#undef selftest
};
+enum {
+#define selftest(name, func) perf_##name,
+#include "i915_perf_selftests.h"
+#undef selftest
+};
+
struct selftest {
bool enabled;
const char *name;
@@ -78,6 +84,12 @@ static struct selftest live_selftests[] = {
};
#undef selftest
+#define selftest(n, f) [perf_##n] = { .name = #n, { .live = f } },
+static struct selftest perf_selftests[] = {
+#include "i915_perf_selftests.h"
+};
+#undef selftest
+
/* Embed the line number into the parameter name so that we can order tests */
#define selftest(n, func) selftest_0(n, func, param(n))
#define param(n) __PASTE(igt__, __PASTE(__LINE__, __mock_##n))
@@ -93,6 +105,13 @@ module_param_named(id, live_selftests[live_##n].enabled, bool, 0400);
#include "i915_live_selftests.h"
#undef selftest_0
#undef param
+
+#define param(n) __PASTE(igt__, __PASTE(__LINE__, __perf_##n))
+#define selftest_0(n, func, id) \
+module_param_named(id, perf_selftests[perf_##n].enabled, bool, 0400);
+#include "i915_perf_selftests.h"
+#undef selftest_0
+#undef param
#undef selftest
static void set_default_test_all(struct selftest *st, unsigned int count)
@@ -200,6 +219,27 @@ int i915_live_selftests(struct pci_dev *pdev)
return 0;
}
+int i915_perf_selftests(struct pci_dev *pdev)
+{
+ int err;
+
+ if (!i915_selftest.perf)
+ return 0;
+
+ err = run_selftests(perf, pdev_to_i915(pdev));
+ if (err) {
+ i915_selftest.perf = err;
+ return err;
+ }
+
+ if (i915_selftest.perf < 0) {
+ i915_selftest.perf = -ENOTTY;
+ return 1;
+ }
+
+ return 0;
+}
+
static bool apply_subtest_filter(const char *caller, const char *name)
{
char *filter, *sep, *tok;
@@ -365,3 +405,6 @@ MODULE_PARM_DESC(mock_selftests, "Run selftests before loading, using mock hardw
module_param_named_unsafe(live_selftests, i915_selftest.live, int, 0400);
MODULE_PARM_DESC(live_selftests, "Run selftests after driver initialisation on the live system (0:disabled [default], 1:run tests then continue, -1:run tests then exit module)");
+
+module_param_named_unsafe(perf_selftests, i915_selftest.perf, int, 0400);
+MODULE_PARM_DESC(perf_selftests, "Run performance-oriented selftests after driver initialisation on the live system (0:disabled [default], 1:run tests then continue, -1:run tests then exit module)");
diff --git a/drivers/gpu/drm/i915/selftests/igt_mmap.c b/drivers/gpu/drm/i915/selftests/igt_mmap.c
new file mode 100644
index 000000000000..583a4ff8b8c9
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/igt_mmap.c
@@ -0,0 +1,39 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <drm/drm_file.h>
+
+#include "i915_drv.h"
+#include "igt_mmap.h"
+
+unsigned long igt_mmap_node(struct drm_i915_private *i915,
+ struct drm_vma_offset_node *node,
+ unsigned long addr,
+ unsigned long prot,
+ unsigned long flags)
+{
+ struct file *file;
+ int err;
+
+ /* Pretend to open("/dev/dri/card0") */
+ file = mock_drm_getfile(i915->drm.primary, O_RDWR);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
+ err = drm_vma_node_allow(node, file->private_data);
+ if (err) {
+ addr = err;
+ goto out_file;
+ }
+
+ addr = vm_mmap(file, addr, drm_vma_node_size(node) << PAGE_SHIFT,
+ prot, flags, drm_vma_node_offset_addr(node));
+
+ drm_vma_node_revoke(node, file->private_data);
+out_file:
+ fput(file);
+ return addr;
+}
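A hedged sketch of a caller, following the pattern the mmap selftests use: hand the helper an object's vma offset node, poke the mapping, then tear it down. The setup around the node is assumed for illustration:

	static int probe_mmap(struct drm_i915_private *i915,
			      struct drm_vma_offset_node *node)
	{
		unsigned long addr;

		addr = igt_mmap_node(i915, node, 0,
				     PROT_READ | PROT_WRITE, MAP_SHARED);
		if (IS_ERR_VALUE(addr))
			return (int)addr;

		/* ... read/write through the mapping here ... */

		vm_munmap(addr, drm_vma_node_size(node) << PAGE_SHIFT);
		return 0;
	}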
diff --git a/drivers/gpu/drm/i915/selftests/igt_mmap.h b/drivers/gpu/drm/i915/selftests/igt_mmap.h
new file mode 100644
index 000000000000..6e716cb59d7e
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/igt_mmap.h
@@ -0,0 +1,19 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef IGT_MMAP_H
+#define IGT_MMAP_H
+
+struct drm_i915_private;
+struct drm_vma_offset_node;
+
+unsigned long igt_mmap_node(struct drm_i915_private *i915,
+ struct drm_vma_offset_node *node,
+ unsigned long addr,
+ unsigned long prot,
+ unsigned long flags);
+
+#endif /* IGT_MMAP_H */
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c
index ee8450b871da..e8a58fe49c39 100644
--- a/drivers/gpu/drm/i915/selftests/igt_spinner.c
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c
@@ -15,8 +15,6 @@ int igt_spinner_init(struct igt_spinner *spin, struct intel_gt *gt)
void *vaddr;
int err;
- GEM_BUG_ON(INTEL_GEN(gt->i915) < 8);
-
memset(spin, 0, sizeof(*spin));
spin->gt = gt;
@@ -95,11 +93,15 @@ igt_spinner_create_request(struct igt_spinner *spin,
struct intel_engine_cs *engine = ce->engine;
struct i915_request *rq = NULL;
struct i915_vma *hws, *vma;
+ unsigned int flags;
u32 *batch;
int err;
GEM_BUG_ON(spin->gt != ce->vm->gt);
+ if (!intel_engine_can_store_dword(ce->engine))
+ return ERR_PTR(-ENODEV);
+
vma = i915_vma_instance(spin->obj, ce->vm, NULL);
if (IS_ERR(vma))
return ERR_CAST(vma);
@@ -132,16 +134,37 @@ igt_spinner_create_request(struct igt_spinner *spin,
batch = spin->batch;
- *batch++ = MI_STORE_DWORD_IMM_GEN4;
- *batch++ = lower_32_bits(hws_address(hws, rq));
- *batch++ = upper_32_bits(hws_address(hws, rq));
+ if (INTEL_GEN(rq->i915) >= 8) {
+ *batch++ = MI_STORE_DWORD_IMM_GEN4;
+ *batch++ = lower_32_bits(hws_address(hws, rq));
+ *batch++ = upper_32_bits(hws_address(hws, rq));
+ } else if (INTEL_GEN(rq->i915) >= 6) {
+ *batch++ = MI_STORE_DWORD_IMM_GEN4;
+ *batch++ = 0;
+ *batch++ = hws_address(hws, rq);
+ } else if (INTEL_GEN(rq->i915) >= 4) {
+ *batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *batch++ = 0;
+ *batch++ = hws_address(hws, rq);
+ } else {
+ *batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
+ *batch++ = hws_address(hws, rq);
+ }
*batch++ = rq->fence.seqno;
*batch++ = arbitration_command;
- *batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
+ if (INTEL_GEN(rq->i915) >= 8)
+ *batch++ = MI_BATCH_BUFFER_START | BIT(8) | 1;
+ else if (IS_HASWELL(rq->i915))
+ *batch++ = MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW;
+ else if (INTEL_GEN(rq->i915) >= 6)
+ *batch++ = MI_BATCH_BUFFER_START;
+ else
+ *batch++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
*batch++ = lower_32_bits(vma->node.start);
*batch++ = upper_32_bits(vma->node.start);
+
*batch++ = MI_BATCH_BUFFER_END; /* not reached */
intel_gt_chipset_flush(engine->gt);
@@ -153,7 +176,10 @@ igt_spinner_create_request(struct igt_spinner *spin,
goto cancel_rq;
}
- err = engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0);
+ flags = 0;
+ if (INTEL_GEN(rq->i915) <= 5)
+ flags |= I915_DISPATCH_SECURE;
+ err = engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, flags);
cancel_rq:
if (err) {
diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
index 19e1cca8f143..04d0aa7b349e 100644
--- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
@@ -32,7 +32,7 @@ static void close_objects(struct intel_memory_region *mem,
if (i915_gem_object_has_pinned_pages(obj))
i915_gem_object_unpin_pages(obj);
/* No polluting the memory region between tests */
- __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
+ __i915_gem_object_put_pages(obj);
list_del(&obj->st_link);
i915_gem_object_put(obj);
}
@@ -122,7 +122,7 @@ put:
static void igt_object_release(struct drm_i915_gem_object *obj)
{
i915_gem_object_unpin_pages(obj);
- __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
+ __i915_gem_object_put_pages(obj);
list_del(&obj->st_link);
i915_gem_object_put(obj);
}
@@ -404,7 +404,7 @@ static int igt_lmem_write_gpu(void *arg)
struct drm_i915_private *i915 = arg;
struct drm_i915_gem_object *obj;
struct i915_gem_context *ctx;
- struct drm_file *file;
+ struct file *file;
I915_RND_STATE(prng);
u32 sz;
int err;
@@ -439,7 +439,7 @@ static int igt_lmem_write_gpu(void *arg)
out_put:
i915_gem_object_put(obj);
out_file:
- mock_file_free(i915, file);
+ fput(file);
return err;
}
@@ -506,7 +506,9 @@ static int igt_lmem_write_cpu(void *arg)
}
/* Put the pages into a known state -- from the gpu for added fun */
+ intel_engine_pm_get(engine);
err = i915_gem_object_fill_blt(obj, engine->kernel_context, 0xdeadbeaf);
+ intel_engine_pm_put(engine);
if (err)
goto out_unpin;
diff --git a/drivers/gpu/drm/i915/selftests/mock_drm.c b/drivers/gpu/drm/i915/selftests/mock_drm.c
deleted file mode 100644
index 09c704153456..000000000000
--- a/drivers/gpu/drm/i915/selftests/mock_drm.c
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Copyright © 2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- */
-
-#include "mock_drm.h"
-
-struct drm_file *mock_file(struct drm_i915_private *i915)
-{
- struct file *filp;
- struct inode *inode;
- struct drm_file *file;
- int err;
-
- inode = kzalloc(sizeof(*inode), GFP_KERNEL);
- if (!inode) {
- err = -ENOMEM;
- goto err;
- }
-
- inode->i_rdev = i915->drm.primary->index;
-
- filp = kzalloc(sizeof(*filp), GFP_KERNEL);
- if (!filp) {
- err = -ENOMEM;
- goto err_inode;
- }
-
- err = drm_open(inode, filp);
- if (err)
- goto err_filp;
-
- file = filp->private_data;
- memset(&file->filp, POISON_INUSE, sizeof(file->filp));
- file->authenticated = true;
-
- kfree(filp);
- kfree(inode);
- return file;
-
-err_filp:
- kfree(filp);
-err_inode:
- kfree(inode);
-err:
- return ERR_PTR(err);
-}
-
-void mock_file_free(struct drm_i915_private *i915, struct drm_file *file)
-{
- struct file filp = { .private_data = file };
-
- drm_release(NULL, &filp);
-}
diff --git a/drivers/gpu/drm/i915/selftests/mock_drm.h b/drivers/gpu/drm/i915/selftests/mock_drm.h
index b39beee9f8f6..9916b6f95526 100644
--- a/drivers/gpu/drm/i915/selftests/mock_drm.h
+++ b/drivers/gpu/drm/i915/selftests/mock_drm.h
@@ -25,7 +25,21 @@
#ifndef __MOCK_DRM_H
#define __MOCK_DRM_H
-struct drm_file *mock_file(struct drm_i915_private *i915);
-void mock_file_free(struct drm_i915_private *i915, struct drm_file *file);
+#include <drm/drm_file.h>
+
+#include "i915_drv.h"
+
+struct drm_file;
+struct file;
+
+static inline struct file *mock_file(struct drm_i915_private *i915)
+{
+ return mock_drm_getfile(i915->drm.primary, O_RDWR);
+}
+
+static inline struct drm_file *to_drm_file(struct file *f)
+{
+ return f->private_data;
+}
#endif /* !__MOCK_DRM_H */
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 27ed3cee6a9b..ac641f5360e1 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -54,25 +54,17 @@ void mock_device_flush(struct drm_i915_private *i915)
static void mock_device_release(struct drm_device *dev)
{
struct drm_i915_private *i915 = to_i915(dev);
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
mock_device_flush(i915);
+ intel_gt_driver_remove(&i915->gt);
i915_gem_drain_workqueue(i915);
-
- for_each_engine(engine, &i915->gt, id)
- mock_engine_free(engine);
- i915_gem_driver_release__contexts(i915);
-
- intel_timelines_fini(i915);
-
- drain_workqueue(i915->wq);
i915_gem_drain_freed_objects(i915);
mock_fini_ggtt(&i915->ggtt);
destroy_workqueue(i915->wq);
+ intel_gt_driver_late_release(&i915->gt);
intel_memory_regions_driver_release(i915);
drm_mode_config_cleanup(&i915->drm);
@@ -180,9 +172,8 @@ struct drm_i915_private *mock_gem_device(void)
mock_init_contexts(i915);
- intel_timelines_init(i915);
-
mock_init_ggtt(i915, &i915->ggtt);
+ i915->gt.vm = i915_vm_get(&i915->ggtt.vm);
mkwrite_device_info(i915)->engine_mask = BIT(0);
@@ -190,10 +181,6 @@ struct drm_i915_private *mock_gem_device(void)
if (!i915->engine[RCS0])
goto err_unlock;
- i915->kernel_context = mock_context(i915, NULL);
- if (!i915->kernel_context)
- goto err_engine;
-
if (mock_engine_init(i915->engine[RCS0]))
goto err_context;
@@ -202,13 +189,11 @@ struct drm_i915_private *mock_gem_device(void)
return i915;
err_context:
- i915_gem_driver_release__contexts(i915);
-err_engine:
- mock_engine_free(i915->engine[RCS0]);
+ intel_gt_driver_remove(&i915->gt);
err_unlock:
- intel_timelines_fini(i915);
destroy_workqueue(i915->wq);
err_drv:
+ intel_gt_driver_late_release(&i915->gt);
intel_memory_regions_driver_release(i915);
drm_mode_config_cleanup(&i915->drm);
drm_dev_fini(&i915->drm);
diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.h b/drivers/gpu/drm/i915/selftests/mock_gtt.h
index 3387393286de..e3f224f43beb 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gtt.h
+++ b/drivers/gpu/drm/i915/selftests/mock_gtt.h
@@ -25,6 +25,9 @@
#ifndef __MOCK_GTT_H
#define __MOCK_GTT_H
+struct drm_i915_private;
+struct i915_ggtt;
+
void mock_init_ggtt(struct drm_i915_private *i915, struct i915_ggtt *ggtt);
void mock_fini_ggtt(struct i915_ggtt *ggtt);
diff --git a/drivers/gpu/drm/i915/selftests/mock_region.h b/drivers/gpu/drm/i915/selftests/mock_region.h
index 24608089d833..329bf74dfaca 100644
--- a/drivers/gpu/drm/i915/selftests/mock_region.h
+++ b/drivers/gpu/drm/i915/selftests/mock_region.h
@@ -6,6 +6,11 @@
#ifndef __MOCK_REGION_H
#define __MOCK_REGION_H
+#include <linux/types.h>
+
+struct drm_i915_private;
+struct intel_memory_region;
+
struct intel_memory_region *
mock_region_create(struct drm_i915_private *i915,
resource_size_t start,
diff --git a/drivers/gpu/drm/i915/selftests/mock_uncore.h b/drivers/gpu/drm/i915/selftests/mock_uncore.h
index 8a2cc553f466..7acf1ef4d488 100644
--- a/drivers/gpu/drm/i915/selftests/mock_uncore.h
+++ b/drivers/gpu/drm/i915/selftests/mock_uncore.h
@@ -25,6 +25,9 @@
#ifndef __MOCK_UNCORE_H
#define __MOCK_UNCORE_H
+struct drm_i915_private;
+struct intel_uncore;
+
void mock_uncore_init(struct intel_uncore *uncore,
struct drm_i915_private *i915);
diff --git a/drivers/mfd/intel_soc_pmic_core.c b/drivers/mfd/intel_soc_pmic_core.c
index c9f35378d391..47188df3080d 100644
--- a/drivers/mfd/intel_soc_pmic_core.c
+++ b/drivers/mfd/intel_soc_pmic_core.c
@@ -38,7 +38,7 @@ static struct gpiod_lookup_table panel_gpio_table = {
/* PWM consumed by the Intel GFX */
static struct pwm_lookup crc_pwm_lookup[] = {
- PWM_LOOKUP("crystal_cove_pwm", 0, "0000:00:02.0", "pwm_backlight", 0, PWM_POLARITY_NORMAL),
+ PWM_LOOKUP("crystal_cove_pwm", 0, "0000:00:02.0", "pwm_pmic_backlight", 0, PWM_POLARITY_NORMAL),
};
static int intel_soc_pmic_i2c_probe(struct i2c_client *i2c,
diff --git a/include/drm/drm_atomic_state_helper.h b/include/drm/drm_atomic_state_helper.h
index e4577cc11689..8171dea4cc22 100644
--- a/include/drm/drm_atomic_state_helper.h
+++ b/include/drm/drm_atomic_state_helper.h
@@ -37,6 +37,8 @@ struct drm_private_state;
struct drm_modeset_acquire_ctx;
struct drm_device;
+void __drm_atomic_helper_crtc_state_reset(struct drm_crtc_state *state,
+ struct drm_crtc *crtc);
void __drm_atomic_helper_crtc_reset(struct drm_crtc *crtc,
struct drm_crtc_state *state);
void drm_atomic_helper_crtc_reset(struct drm_crtc *crtc);
@@ -48,6 +50,8 @@ void __drm_atomic_helper_crtc_destroy_state(struct drm_crtc_state *state);
void drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
struct drm_crtc_state *state);
+void __drm_atomic_helper_plane_state_reset(struct drm_plane_state *state,
+ struct drm_plane *plane);
void __drm_atomic_helper_plane_reset(struct drm_plane *plane,
struct drm_plane_state *state);
void drm_atomic_helper_plane_reset(struct drm_plane *plane);
@@ -59,6 +63,8 @@ void __drm_atomic_helper_plane_destroy_state(struct drm_plane_state *state);
void drm_atomic_helper_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state);
+void __drm_atomic_helper_connector_state_reset(struct drm_connector_state *conn_state,
+ struct drm_connector *connector);
void __drm_atomic_helper_connector_reset(struct drm_connector *connector,
struct drm_connector_state *conn_state);
void drm_atomic_helper_connector_reset(struct drm_connector *connector);
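The new __*_state_reset() helpers split initialising a state struct from attaching it to its object, which matters for drivers that subclass the state. A minimal sketch for a CRTC, assuming a hypothetical foo driver (freeing of any previous state is elided):

	#include <linux/slab.h>
	#include <drm/drm_atomic_state_helper.h>
	#include <drm/drm_crtc.h>

	struct foo_crtc_state {
		struct drm_crtc_state base;
		u32 custom;
	};

	static void foo_crtc_reset(struct drm_crtc *crtc)
	{
		struct foo_crtc_state *state;

		state = kzalloc(sizeof(*state), GFP_KERNEL);
		if (!state)
			return;

		/* Initialise only the state object... */
		__drm_atomic_helper_crtc_state_reset(&state->base, crtc);
		/* ...attaching it to the CRTC is now a separate step. */
		crtc->state = &state->base;
	}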
diff --git a/include/drm/drm_file.h b/include/drm/drm_file.h
index 67af60bb527a..8b099b347817 100644
--- a/include/drm/drm_file.h
+++ b/include/drm/drm_file.h
@@ -42,6 +42,7 @@ struct dma_fence;
struct drm_file;
struct drm_device;
struct device;
+struct file;
/*
* FIXME: Not sure we want to have drm_minor here in the end, but to avoid
@@ -387,4 +388,6 @@ void drm_event_cancel_free(struct drm_device *dev,
void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e);
void drm_send_event(struct drm_device *dev, struct drm_pending_event *e);
+struct file *mock_drm_getfile(struct drm_minor *minor, unsigned int flags);
+
#endif /* _DRM_FILE_H_ */
diff --git a/include/drm/drm_util.h b/include/drm/drm_util.h
index 07b8e9f04599..79952d8c4bba 100644
--- a/include/drm/drm_util.h
+++ b/include/drm/drm_util.h
@@ -41,7 +41,7 @@
* Use EXPORT_SYMBOL_FOR_TESTS_ONLY() for functions that shall
* only be visible for drmselftests.
*/
-#if defined(CONFIG_DRM_DEBUG_SELFTEST_MODULE)
+#if defined(CONFIG_DRM_EXPORT_FOR_TESTS)
#define EXPORT_SYMBOL_FOR_TESTS_ONLY(x) EXPORT_SYMBOL(x)
#else
#define EXPORT_SYMBOL_FOR_TESTS_ONLY(x)
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
index b1f66b117c74..1d2c12219f44 100644
--- a/include/drm/i915_pciids.h
+++ b/include/drm/i915_pciids.h
@@ -446,23 +446,18 @@
/* CML GT1 */
#define INTEL_CML_GT1_IDS(info) \
- INTEL_VGA_DEVICE(0x9B21, info), \
- INTEL_VGA_DEVICE(0x9BAA, info), \
- INTEL_VGA_DEVICE(0x9BAB, info), \
- INTEL_VGA_DEVICE(0x9BAC, info), \
- INTEL_VGA_DEVICE(0x9BA0, info), \
INTEL_VGA_DEVICE(0x9BA5, info), \
INTEL_VGA_DEVICE(0x9BA8, info), \
INTEL_VGA_DEVICE(0x9BA4, info), \
INTEL_VGA_DEVICE(0x9BA2, info)
+#define INTEL_CML_U_GT1_IDS(info) \
+ INTEL_VGA_DEVICE(0x9B21, info), \
+ INTEL_VGA_DEVICE(0x9BAA, info), \
+ INTEL_VGA_DEVICE(0x9BAC, info)
+
/* CML GT2 */
#define INTEL_CML_GT2_IDS(info) \
- INTEL_VGA_DEVICE(0x9B41, info), \
- INTEL_VGA_DEVICE(0x9BCA, info), \
- INTEL_VGA_DEVICE(0x9BCB, info), \
- INTEL_VGA_DEVICE(0x9BCC, info), \
- INTEL_VGA_DEVICE(0x9BC0, info), \
INTEL_VGA_DEVICE(0x9BC5, info), \
INTEL_VGA_DEVICE(0x9BC8, info), \
INTEL_VGA_DEVICE(0x9BC4, info), \
@@ -471,6 +466,11 @@
INTEL_VGA_DEVICE(0x9BE6, info), \
INTEL_VGA_DEVICE(0x9BF6, info)
+#define INTEL_CML_U_GT2_IDS(info) \
+ INTEL_VGA_DEVICE(0x9B41, info), \
+ INTEL_VGA_DEVICE(0x9BCA, info), \
+ INTEL_VGA_DEVICE(0x9BCC, info)
+
#define INTEL_KBL_IDS(info) \
INTEL_KBL_GT1_IDS(info), \
INTEL_KBL_GT2_IDS(info), \
@@ -536,7 +536,9 @@
INTEL_WHL_U_GT3_IDS(info), \
INTEL_AML_CFL_GT2_IDS(info), \
INTEL_CML_GT1_IDS(info), \
- INTEL_CML_GT2_IDS(info)
+ INTEL_CML_GT2_IDS(info), \
+ INTEL_CML_U_GT1_IDS(info), \
+ INTEL_CML_U_GT2_IDS(info)
/* CNL */
#define INTEL_CNL_PORT_F_IDS(info) \
@@ -579,12 +581,15 @@
INTEL_VGA_DEVICE(0x8A51, info), \
INTEL_VGA_DEVICE(0x8A5D, info)
-/* EHL */
+/* EHL/JSL */
#define INTEL_EHL_IDS(info) \
INTEL_VGA_DEVICE(0x4500, info), \
INTEL_VGA_DEVICE(0x4571, info), \
INTEL_VGA_DEVICE(0x4551, info), \
- INTEL_VGA_DEVICE(0x4541, info)
+ INTEL_VGA_DEVICE(0x4541, info), \
+ INTEL_VGA_DEVICE(0x4E71, info), \
+ INTEL_VGA_DEVICE(0x4E61, info), \
+ INTEL_VGA_DEVICE(0x4E51, info)
/* TGL */
#define INTEL_TGL_12_IDS(info) \
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index c50d01ef1414..664f52c6dd4c 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -627,6 +627,13 @@ do { \
lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_); \
lock_release(&(lock)->dep_map, _THIS_IP_); \
} while (0)
+# define might_lock_nested(lock, subclass) \
+do { \
+ typecheck(struct lockdep_map *, &(lock)->dep_map); \
+ lock_acquire(&(lock)->dep_map, subclass, 0, 1, 1, NULL, \
+ _THIS_IP_); \
+ lock_release(&(lock)->dep_map, _THIS_IP_); \
+} while (0)
#define lockdep_assert_irqs_enabled() do { \
WARN_ONCE(debug_locks && !current->lockdep_recursion && \
@@ -649,6 +656,7 @@ do { \
#else
# define might_lock(lock) do { } while (0)
# define might_lock_read(lock) do { } while (0)
+# define might_lock_nested(lock, subclass) do { } while (0)
# define lockdep_assert_irqs_enabled() do { } while (0)
# define lockdep_assert_irqs_disabled() do { } while (0)
# define lockdep_assert_in_irq() do { } while (0)
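might_lock_nested() mirrors might_lock() for locks taken with a lockdep subclass: it declares a potential acquisition so lockdep records the dependency even on paths that never actually lock. A hedged sketch, using the stock SINGLE_DEPTH_NESTING subclass:

	#include <linux/lockdep.h>
	#include <linux/mutex.h>

	struct obj {
		struct mutex lock;
		void *pages;
	};

	static void *obj_get_pages(struct obj *o)
	{
		/* The slow path below may lock; tell lockdep up front. */
		might_lock_nested(&o->lock, SINGLE_DEPTH_NESTING);

		if (o->pages) /* fast path, lock never taken */
			return o->pages;

		mutex_lock_nested(&o->lock, SINGLE_DEPTH_NESTING);
		/* ... populate o->pages ... */
		mutex_unlock(&o->lock);

		return o->pages;
	}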
diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h
index 8caaaf7ff91b..5ba481f49931 100644
--- a/include/uapi/drm/drm_fourcc.h
+++ b/include/uapi/drm/drm_fourcc.h
@@ -411,6 +411,17 @@ extern "C" {
#define I915_FORMAT_MOD_Yf_TILED_CCS fourcc_mod_code(INTEL, 5)
/*
+ * Intel color control surfaces (CCS) for Gen-12 render compression.
+ *
+ * The main surface is Y-tiled and at plane index 0, the CCS is linear and
+ * at index 1. A 64B CCS cache line corresponds to an area of 4x1 tiles in
+ * the main surface. In other words, 4 bits in the CCS map to a main surface cache
+ * line pair. The main surface pitch is required to be a multiple of four
+ * Y-tile widths.
+ */
+#define I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS fourcc_mod_code(INTEL, 6)
+
+/*
* Tiled, NV12MT, grouped in 64 (pixels) x 32 (lines) -sized macroblocks
*
* Macroblocks are laid in a Z-shape, and each pixel data is following the
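Those numbers fix the CCS size: one 64B CCS cache line per 4x1 main-surface tiles, and a Y-tile is 4KiB (128B stride x 32 rows), so the CCS holds one byte per 256 bytes of main surface. A hedged helper spelling out the arithmetic (the Y-tile size is the usual value, not stated in the comment above):

	#define Y_TILE_BYTES 4096 /* 128B stride x 32 rows */

	static unsigned long gen12_rc_ccs_bytes(unsigned long main_bytes)
	{
		unsigned long tiles = main_bytes / Y_TILE_BYTES;

		/* 64B of CCS per 4x1 tiles == main_bytes / 256 */
		return (tiles / 4) * 64;
	}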
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 5400d7e057f1..829c0a48577f 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -395,6 +395,7 @@ typedef struct _drm_i915_sarea {
#define DRM_IOCTL_I915_GEM_PWRITE DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite)
#define DRM_IOCTL_I915_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap)
#define DRM_IOCTL_I915_GEM_MMAP_GTT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_gtt)
+#define DRM_IOCTL_I915_GEM_MMAP_OFFSET DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_offset)
#define DRM_IOCTL_I915_GEM_SET_DOMAIN DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain)
#define DRM_IOCTL_I915_GEM_SW_FINISH DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish)
#define DRM_IOCTL_I915_GEM_SET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling)
@@ -793,6 +794,37 @@ struct drm_i915_gem_mmap_gtt {
__u64 offset;
};
+struct drm_i915_gem_mmap_offset {
+ /** Handle for the object being mapped. */
+ __u32 handle;
+ __u32 pad;
+ /**
+ * Fake offset to use for subsequent mmap call
+ *
+ * This is a fixed-size type for 32/64 compatibility.
+ */
+ __u64 offset;
+
+ /**
+ * Flags for extended behaviour.
+ *
+ * It is mandatory that one of the MMAP_OFFSET types
+ * (GTT, WC, WB, UC, etc.) be included.
+ */
+ __u64 flags;
+#define I915_MMAP_OFFSET_GTT 0
+#define I915_MMAP_OFFSET_WC 1
+#define I915_MMAP_OFFSET_WB 2
+#define I915_MMAP_OFFSET_UC 3
+
+ /*
+ * Zero-terminated chain of extensions.
+ *
+ * No current extensions defined; mbz.
+ */
+ __u64 extensions;
+};
+
struct drm_i915_gem_set_domain {
/** Handle for the object */
__u32 handle;
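A hedged userspace sketch of driving the new ioctl: ask for a fake offset with one mandatory mapping type, then feed that offset to mmap(). The GEM handle is assumed to have been created beforehand (e.g. via GEM_CREATE):

	#include <stdint.h>
	#include <stddef.h>
	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include <drm/i915_drm.h>

	static void *map_wc(int drm_fd, uint32_t handle, size_t size)
	{
		struct drm_i915_gem_mmap_offset arg = {
			.handle = handle,
			.flags = I915_MMAP_OFFSET_WC, /* one type is mandatory */
		};

		if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &arg))
			return MAP_FAILED;

		/* arg.offset is a fake token for mmap(), not an address. */
		return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
			    drm_fd, arg.offset);
	}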