-rw-r--r--  Documentation/gpu/i915.rst | 8
-rw-r--r--  drivers/gpu/drm/drm_dp_helper.c | 79
-rw-r--r--  drivers/gpu/drm/drm_dp_mst_topology.c | 3
-rw-r--r--  drivers/gpu/drm/i915/Kconfig.profile | 25
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 6
-rw-r--r--  drivers/gpu/drm/i915/display/icl_dsi.c | 18
-rw-r--r--  drivers/gpu/drm/i915/display/intel_atomic_plane.c | 24
-rw-r--r--  drivers/gpu/drm/i915/display/intel_atomic_plane.h | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_audio.c | 10
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bios.c | 1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bios.h | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_cdclk.c | 22
-rw-r--r--  drivers/gpu/drm/i915/display/intel_color.c | 434
-rw-r--r--  drivers/gpu/drm/i915/display/intel_crt.c | 1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_csr.c | 4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_ddi.c | 461
-rw-r--r--  drivers/gpu/drm/i915/display/intel_ddi.h | 4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display.c | 131
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display.h | 10
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_debugfs.c | 9
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_power.c | 18
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_power.h | 1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_types.h | 63
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp.c | 21
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp.h | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c | 24
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_mst.c | 35
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpll_mgr.c | 848
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpll_mgr.h | 14
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dsb.c | 28
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dsi_vbt.c | 1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dvo.c | 1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fbc.c | 62
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fbdev.c | 1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fifo_underrun.c | 29
-rw-r--r--  drivers/gpu/drm/i915/display/intel_gmbus.c | 34
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdcp.c | 115
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdcp.h | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdmi.c | 10
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdmi.h | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hotplug.c | 73
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hotplug.h | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_lpe_audio.c | 23
-rw-r--r--  drivers/gpu/drm/i915/display/intel_lvds.c | 44
-rw-r--r--  drivers/gpu/drm/i915/display/intel_opregion.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_overlay.c | 12
-rw-r--r--  drivers/gpu/drm/i915/display/intel_panel.c | 18
-rw-r--r--  drivers/gpu/drm/i915/display/intel_pipe_crc.c | 17
-rw-r--r--  drivers/gpu/drm/i915/display/intel_pipe_crc.h | 4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_psr.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_quirks.c | 10
-rw-r--r--  drivers/gpu/drm/i915/display/intel_sdvo.c | 1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_sdvo.h | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_sprite.c | 80
-rw-r--r--  drivers/gpu/drm/i915/display/intel_tv.c | 1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_vdsc.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/vlv_dsi.c | 55
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_client_blt.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_context.c | 362
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_context.h | 1
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 734
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_internal.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_mman.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object.h | 2
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object_blt.c | 18
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_phys.c | 7
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_shrinker.c | 1
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_stolen.c | 1
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_tiling.c | 1
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_userptr.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c | 15
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/mock_context.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/gen7_renderclear.c | 402
-rw-r--r--  drivers/gpu/drm/i915/gt/gen7_renderclear.h | 15
-rw-r--r--  drivers/gpu/drm/i915/gt/gen8_ppgtt.c | 27
-rw-r--r--  drivers/gpu/drm/i915/gt/hsw_clear_kernel.c | 61
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_context.c | 5
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_context_param.c | 63
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_context_param.h | 14
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_context_types.h | 4
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine.h | 15
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_cs.c | 37
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_types.h | 1
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_ggtt.c | 15
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gpu_commands.h | 17
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gtt.c | 29
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gtt.h | 7
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_lrc.c | 178
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_rc6.c | 9
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_reset.c | 21
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_ring_submission.c | 149
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_rps.c | 45
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_timeline.c | 8
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_workarounds.c | 135
-rw-r--r--  drivers/gpu/drm/i915/gt/ivb_clear_kernel.c | 61
-rw-r--r--  drivers/gpu/drm/i915/gt/mock_engine.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c | 30
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_hangcheck.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_lrc.c | 790
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_ring_submission.c | 296
-rw-r--r--  drivers/gpu/drm/i915/gt/sysfs_engines.c | 445
-rw-r--r--  drivers/gpu/drm/i915/gt/sysfs_engines.h | 13
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/aperture_gm.c | 84
-rw-r--r--  drivers/gpu/drm/i915/gvt/cfg_space.c | 27
-rw-r--r--  drivers/gpu/drm/i915/gvt/cmd_parser.c | 208
-rw-r--r--  drivers/gpu/drm/i915/gvt/debugfs.c | 45
-rw-r--r--  drivers/gpu/drm/i915/gvt/display.c | 25
-rw-r--r--  drivers/gpu/drm/i915/gvt/dmabuf.c | 8
-rw-r--r--  drivers/gpu/drm/i915/gvt/edid.c | 25
-rw-r--r--  drivers/gpu/drm/i915/gvt/execlist.c | 103
-rw-r--r--  drivers/gpu/drm/i915/gvt/execlist.h | 5
-rw-r--r--  drivers/gpu/drm/i915/gvt/fb_decoder.c | 6
-rw-r--r--  drivers/gpu/drm/i915/gvt/firmware.c | 16
-rw-r--r--  drivers/gpu/drm/i915/gvt/gtt.c | 63
-rw-r--r--  drivers/gpu/drm/i915/gvt/gvt.c | 43
-rw-r--r--  drivers/gpu/drm/i915/gvt/gvt.h | 62
-rw-r--r--  drivers/gpu/drm/i915/gvt/handlers.c | 203
-rw-r--r--  drivers/gpu/drm/i915/gvt/interrupt.c | 21
-rw-r--r--  drivers/gpu/drm/i915/gvt/kvmgt.c | 309
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio.c | 32
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio.h | 4
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio_context.c | 127
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio_context.h | 5
-rw-r--r--  drivers/gpu/drm/i915/gvt/sched_policy.c | 25
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.c | 253
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.h | 9
-rw-r--r--  drivers/gpu/drm/i915/gvt/vgpu.c | 18
-rw-r--r--  drivers/gpu/drm/i915/i915_active.c | 123
-rw-r--r--  drivers/gpu/drm/i915/i915_active.h | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_buddy.c | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 216
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 517
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 143
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_evict.c | 17
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_fence_reg.c | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_ioc32.c | 7
-rw-r--r--  drivers/gpu/drm/i915/i915_ioc32.h | 17
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 31
-rw-r--r--  drivers/gpu/drm/i915/i915_pci.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_perf.c | 15
-rw-r--r--  drivers/gpu/drm/i915/i915_pmu.h | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 26
-rw-r--r--  drivers/gpu/drm/i915/i915_request.c | 234
-rw-r--r--  drivers/gpu/drm/i915/i915_request.h | 14
-rw-r--r--  drivers/gpu/drm/i915/i915_scheduler.c | 10
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_sysfs.c | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_utils.c | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_utils.h | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_vgpu.c | 31
-rw-r--r--  drivers/gpu/drm/i915/i915_vgpu.h | 25
-rw-r--r--  drivers/gpu/drm/i915/i915_vma.c | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_vma_types.h | 11
-rw-r--r--  drivers/gpu/drm/i915/intel_device_info.c | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_dram.c | 500
-rw-r--r--  drivers/gpu/drm/i915/intel_dram.h | 14
-rw-r--r--  drivers/gpu/drm/i915/intel_gvt.c | 6
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 257
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_active.c | 78
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_buddy.c | 25
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_live_selftests.h | 1
-rw-r--r--  drivers/gpu/drm/i915/selftests/igt_spinner.c | 2
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_gem_device.c | 1
-rw-r--r--  include/drm/drm_dp_helper.h | 21
-rw-r--r--  include/drm/i915_mei_hdcp_interface.h | 1
-rw-r--r--  include/uapi/drm/i915_drm.h | 21
172 files changed, 6992 insertions, 4132 deletions
diff --git a/Documentation/gpu/i915.rst b/Documentation/gpu/i915.rst
index e539c42a3e78..f6d363b6756e 100644
--- a/Documentation/gpu/i915.rst
+++ b/Documentation/gpu/i915.rst
@@ -207,10 +207,10 @@ DPIO
CSR firmware support for DMC
----------------------------
-.. kernel-doc:: drivers/gpu/drm/i915/intel_csr.c
+.. kernel-doc:: drivers/gpu/drm/i915/display/intel_csr.c
:doc: csr support for dmc
-.. kernel-doc:: drivers/gpu/drm/i915/intel_csr.c
+.. kernel-doc:: drivers/gpu/drm/i915/display/intel_csr.c
:internal:
Video BIOS Table (VBT)
@@ -332,7 +332,7 @@ This process is dubbed relocation.
GEM BO Management Implementation Details
----------------------------------------
-.. kernel-doc:: drivers/gpu/drm/i915/i915_vma.h
+.. kernel-doc:: drivers/gpu/drm/i915/i915_vma_types.h
:doc: Virtual Memory Address
Buffer Object Eviction
@@ -382,7 +382,7 @@ Logical Rings, Logical Ring Contexts and Execlists
Global GTT views
----------------
-.. kernel-doc:: drivers/gpu/drm/i915/i915_gem_gtt.c
+.. kernel-doc:: drivers/gpu/drm/i915/i915_vma_types.h
:doc: Global GTT views
.. kernel-doc:: drivers/gpu/drm/i915/i915_gem_gtt.c
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index a4b98f8055f4..c6fbe6e6bc9d 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -1280,6 +1280,85 @@ drm_dp_get_quirks(const struct drm_dp_dpcd_ident *ident, bool is_branch)
#undef DEVICE_ID_ANY
#undef DEVICE_ID
+struct edid_quirk {
+ u8 mfg_id[2];
+ u8 prod_id[2];
+ u32 quirks;
+};
+
+#define MFG(first, second) { (first), (second) }
+#define PROD_ID(first, second) { (first), (second) }
+
+/*
+ * Some devices have unreliable OUIDs where they don't set the device ID
+ * correctly, and as a result we need to use the EDID for finding additional
+ * DP quirks in such cases.
+ */
+static const struct edid_quirk edid_quirk_list[] = {
+ /* Optional 4K AMOLED panel in the ThinkPad X1 Extreme 2nd Generation
+ * only supports DPCD backlight controls
+ */
+ { MFG(0x4c, 0x83), PROD_ID(0x41, 0x41), BIT(DP_QUIRK_FORCE_DPCD_BACKLIGHT) },
+ /*
+ * Some Dell CML 2020 systems have panels that support both AUX and PWM
+ * backlight control, and some only support AUX backlight control. All
+ * said panels start up in AUX mode by default, and we don't have any
+ * support for disabling HDR mode on these panels which would be
+ * required to switch to PWM backlight control mode (plus, I'm not
+ * even sure we want PWM backlight controls over DPCD backlight
+ * controls anyway...). Until we have a better way of detecting these,
+ * force DPCD backlight mode on all of them.
+ */
+ { MFG(0x06, 0xaf), PROD_ID(0x9b, 0x32), BIT(DP_QUIRK_FORCE_DPCD_BACKLIGHT) },
+ { MFG(0x06, 0xaf), PROD_ID(0xeb, 0x41), BIT(DP_QUIRK_FORCE_DPCD_BACKLIGHT) },
+ { MFG(0x4d, 0x10), PROD_ID(0xc7, 0x14), BIT(DP_QUIRK_FORCE_DPCD_BACKLIGHT) },
+ { MFG(0x4d, 0x10), PROD_ID(0xe6, 0x14), BIT(DP_QUIRK_FORCE_DPCD_BACKLIGHT) },
+};
+
+#undef MFG
+#undef PROD_ID
+
+/**
+ * drm_dp_get_edid_quirks() - Check the EDID of a DP device to find additional
+ * DP-specific quirks
+ * @edid: The EDID to check
+ *
+ * While OUIDs are meant to be used to recognize a DisplayPort device, a lot
+ * of manufacturers don't seem to like following standards and neglect to fill
+ * the dev-ID in, making it impossible to only use OUIDs for determining
+ * quirks in some cases. This function can be used to check the EDID and look
+ * up any additional DP quirks. The bits returned by this function correspond
+ * to the quirk bits in &drm_dp_quirk.
+ *
+ * Returns: a bitmask of quirks, if any. The driver can check this using
+ * drm_dp_has_quirk().
+ */
+u32 drm_dp_get_edid_quirks(const struct edid *edid)
+{
+ const struct edid_quirk *quirk;
+ u32 quirks = 0;
+ int i;
+
+ if (!edid)
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(edid_quirk_list); i++) {
+ quirk = &edid_quirk_list[i];
+ if (memcmp(quirk->mfg_id, edid->mfg_id,
+ sizeof(edid->mfg_id)) == 0 &&
+ memcmp(quirk->prod_id, edid->prod_code,
+ sizeof(edid->prod_code)) == 0)
+ quirks |= quirk->quirks;
+ }
+
+ DRM_DEBUG_KMS("DP sink: EDID mfg %*phD prod-ID %*phD quirks: 0x%04x\n",
+ (int)sizeof(edid->mfg_id), edid->mfg_id,
+ (int)sizeof(edid->prod_code), edid->prod_code, quirks);
+
+ return quirks;
+}
+EXPORT_SYMBOL(drm_dp_get_edid_quirks);
+
/**
* drm_dp_read_desc - read sink/branch descriptor from DPCD
* @aux: DisplayPort AUX channel
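
The EDID quirk bits are meant to be combined with the existing DPCD descriptor quirks; the second argument added to drm_dp_has_quirk() (see the drm_dp_mst_topology.c hunk below) carries the EDID-derived mask. A minimal sketch of a consumer, not taken from this series; the function name and flow are illustrative, assuming the caller already has the sink's EDID:

    #include <drm/drm_dp_helper.h>
    #include <drm/drm_edid.h>

    /*
     * Sketch only: combine DPCD descriptor quirks with the new
     * EDID-based quirks before picking a backlight control method.
     */
    static bool sink_wants_dpcd_backlight(struct drm_dp_aux *aux,
    					  const struct edid *edid,
    					  bool is_branch)
    {
    	struct drm_dp_desc desc;
    	u32 edid_quirks = drm_dp_get_edid_quirks(edid);

    	if (drm_dp_read_desc(aux, &desc, is_branch))
    		return false;

    	return drm_dp_has_quirk(&desc, edid_quirks,
    				DP_QUIRK_FORCE_DPCD_BACKLIGHT);
    }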
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 61e7beada832..6c62ad8f4414 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -5472,7 +5472,8 @@ struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port)
if (drm_dp_read_desc(port->mgr->aux, &desc, true))
return NULL;
- if (drm_dp_has_quirk(&desc, DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD) &&
+ if (drm_dp_has_quirk(&desc, 0,
+ DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD) &&
port->mgr->dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14 &&
port->parent == port->mgr->mst_primary) {
u8 downstreamport;
diff --git a/drivers/gpu/drm/i915/Kconfig.profile b/drivers/gpu/drm/i915/Kconfig.profile
index c280b6ae38eb..0bfd276c19fe 100644
--- a/drivers/gpu/drm/i915/Kconfig.profile
+++ b/drivers/gpu/drm/i915/Kconfig.profile
@@ -20,6 +20,9 @@ config DRM_I915_HEARTBEAT_INTERVAL
check the health of the GPU and undertake regular house-keeping of
internal driver state.
+ This is adjustable via
+ /sys/class/drm/card?/engine/*/heartbeat_interval_ms
+
May be 0 to disable heartbeats and therefore disable automatic GPU
hang detection.
@@ -33,11 +36,18 @@ config DRM_I915_PREEMPT_TIMEOUT
expires, the HW will be reset to allow the more important context
to execute.
+ This is adjustable via
+ /sys/class/drm/card?/engine/*/preempt_timeout_ms
+
May be 0 to disable the timeout.
-config DRM_I915_SPIN_REQUEST
- int "Busywait for request completion (us)"
- default 5 # microseconds
+ The compiled in default may get overridden at driver probe time on
+ certain platforms and certain engines which will be reflected in the
+ sysfs control.
+
+config DRM_I915_MAX_REQUEST_BUSYWAIT
+ int "Busywait for request completion limit (ns)"
+ default 8000 # nanoseconds
help
Before sleeping waiting for a request (GPU operation) to complete,
we may spend some time polling for its completion. As the IRQ may
@@ -45,6 +55,9 @@ config DRM_I915_SPIN_REQUEST
check if the request will complete in the time it would have taken
us to enable the interrupt.
+ This is adjustable via
+ /sys/class/drm/card?/engine/*/max_busywait_duration_ns
+
May be 0 to disable the initial spin. In practice, we estimate
the cost of enabling the interrupt (if currently disabled) to be
a few microseconds.
@@ -60,6 +73,9 @@ config DRM_I915_STOP_TIMEOUT
that the reset itself may take longer and so be more disruptive to
interactive or low latency workloads.
+ This is adjustable via
+ /sys/class/drm/card?/engine/*/stop_timeout_ms
+
config DRM_I915_TIMESLICE_DURATION
int "Scheduling quantum for userspace batches (ms, jiffy granularity)"
default 1 # milliseconds
@@ -73,4 +89,7 @@ config DRM_I915_TIMESLICE_DURATION
is scheduled for execution for the timeslice duration, before
switching to the next context.
+ This is adjustable via
+ /sys/class/drm/card?/engine/*/timeslice_duration_ms
+
May be 0 to disable timeslicing.
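
With these changes the Kconfig values become initial defaults only; the per-engine sysfs files added by gt/sysfs_engines.c can be read and written at runtime. A small userspace sketch, assuming card0 and an engine named rcs0 on the target system:

    #include <stdio.h>

    int main(void)
    {
    	/* Path is an assumption about a particular machine. */
    	const char *path =
    		"/sys/class/drm/card0/engine/rcs0/heartbeat_interval_ms";
    	unsigned long ms;
    	FILE *f = fopen(path, "r");

    	if (!f)
    		return 1;
    	if (fscanf(f, "%lu", &ms) == 1)
    		printf("heartbeat interval: %lu ms\n", ms);
    	fclose(f);
    	return 0;
    }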
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index bc28c31c4f78..9f887a86e555 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -47,6 +47,7 @@ i915-y += i915_drv.o \
i915_sysfs.o \
i915_utils.o \
intel_device_info.o \
+ intel_dram.o \
intel_memory_region.o \
intel_pch.o \
intel_pm.o \
@@ -79,9 +80,11 @@ gt-y += \
gt/debugfs_gt.o \
gt/debugfs_gt_pm.o \
gt/gen6_ppgtt.o \
+ gt/gen7_renderclear.o \
gt/gen8_ppgtt.o \
gt/intel_breadcrumbs.o \
gt/intel_context.o \
+ gt/intel_context_param.o \
gt/intel_context_sseu.o \
gt/intel_engine_cs.o \
gt/intel_engine_heartbeat.o \
@@ -107,7 +110,8 @@ gt-y += \
gt/intel_rps.o \
gt/intel_sseu.o \
gt/intel_timeline.o \
- gt/intel_workarounds.o
+ gt/intel_workarounds.o \
+ gt/sysfs_engines.o
# autogenerated null render state
gt-y += \
gt/gen6_renderstate.o \
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index d842e280699d..17cee6f80d8b 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -599,13 +599,13 @@ static void gen11_dsi_gate_clocks(struct intel_encoder *encoder)
u32 tmp;
enum phy phy;
- mutex_lock(&dev_priv->dpll_lock);
+ mutex_lock(&dev_priv->dpll.lock);
tmp = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0);
for_each_dsi_phy(phy, intel_dsi->phys)
tmp |= ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy);
intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, tmp);
- mutex_unlock(&dev_priv->dpll_lock);
+ mutex_unlock(&dev_priv->dpll.lock);
}
static void gen11_dsi_ungate_clocks(struct intel_encoder *encoder)
@@ -615,13 +615,13 @@ static void gen11_dsi_ungate_clocks(struct intel_encoder *encoder)
u32 tmp;
enum phy phy;
- mutex_lock(&dev_priv->dpll_lock);
+ mutex_lock(&dev_priv->dpll.lock);
tmp = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0);
for_each_dsi_phy(phy, intel_dsi->phys)
tmp &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy);
intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, tmp);
- mutex_unlock(&dev_priv->dpll_lock);
+ mutex_unlock(&dev_priv->dpll.lock);
}
static void gen11_dsi_map_pll(struct intel_encoder *encoder,
@@ -633,7 +633,7 @@ static void gen11_dsi_map_pll(struct intel_encoder *encoder,
enum phy phy;
u32 val;
- mutex_lock(&dev_priv->dpll_lock);
+ mutex_lock(&dev_priv->dpll.lock);
val = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0);
for_each_dsi_phy(phy, intel_dsi->phys) {
@@ -652,7 +652,7 @@ static void gen11_dsi_map_pll(struct intel_encoder *encoder,
intel_de_posting_read(dev_priv, ICL_DPCLKA_CFGCR0);
- mutex_unlock(&dev_priv->dpll_lock);
+ mutex_unlock(&dev_priv->dpll.lock);
}
static void
@@ -1350,15 +1350,15 @@ static void gen11_dsi_get_timings(struct intel_encoder *encoder,
static void gen11_dsi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
intel_dsc_get_config(encoder, pipe_config);
/* FIXME: adapt icl_ddi_clock_get() for DSI and use that? */
- pipe_config->port_clock =
- cnl_calc_wrpll_link(dev_priv, &pipe_config->dpll_hw_state);
+ pipe_config->port_clock = intel_dpll_get_freq(i915,
+ pipe_config->shared_dpll);
pipe_config->hw.adjusted_mode.crtc_clock = intel_dsi->pclk;
if (intel_dsi->dual_link)
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
index c86d7a35c816..457b258683d3 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
@@ -133,15 +133,37 @@ intel_plane_destroy_state(struct drm_plane *plane,
kfree(plane_state);
}
+unsigned int intel_plane_pixel_rate(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ unsigned int src_w, src_h, dst_w, dst_h;
+ unsigned int pixel_rate = crtc_state->pixel_rate;
+
+ src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
+ src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
+ dst_w = drm_rect_width(&plane_state->uapi.dst);
+ dst_h = drm_rect_height(&plane_state->uapi.dst);
+
+ /* Downscaling limits the maximum pixel rate */
+ dst_w = min(src_w, dst_w);
+ dst_h = min(src_h, dst_h);
+
+ return DIV_ROUND_UP_ULL(mul_u32_u32(pixel_rate, src_w * src_h),
+ dst_w * dst_h);
+}
+
unsigned int intel_plane_data_rate(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
const struct drm_framebuffer *fb = plane_state->hw.fb;
unsigned int cpp;
+ unsigned int pixel_rate;
if (!plane_state->uapi.visible)
return 0;
+ pixel_rate = intel_plane_pixel_rate(crtc_state, plane_state);
+
cpp = fb->format->cpp[0];
/*
@@ -153,7 +175,7 @@ unsigned int intel_plane_data_rate(const struct intel_crtc_state *crtc_state,
if (fb->format->is_yuv && fb->format->num_planes > 1)
cpp *= 4;
- return cpp * crtc_state->pixel_rate;
+ return pixel_rate * cpp;
}
int intel_plane_calc_min_cdclk(struct intel_atomic_state *state,
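
The new intel_plane_pixel_rate() scales the CRTC pixel rate by the ratio of source to destination area, so only downscaling raises a plane's effective rate: clamping dst to src means upscaling leaves it unchanged. A standalone restatement of the arithmetic with a worked case: a 3840x2160 source scanned out as 1920x1080 at a 148500 kHz CRTC clock gives 148500 * (3840*2160) / (1920*1080) = 594000 kHz.

    #include <stdint.h>

    static unsigned int plane_pixel_rate(unsigned int crtc_pixel_rate,
    				     unsigned int src_w, unsigned int src_h,
    				     unsigned int dst_w, unsigned int dst_h)
    {
    	/* Upscaling does not increase the rate: clamp dst to src. */
    	if (dst_w > src_w)
    		dst_w = src_w;
    	if (dst_h > src_h)
    		dst_h = src_h;

    	/* Round-up division, as DIV_ROUND_UP_ULL does in the patch. */
    	return ((uint64_t)crtc_pixel_rate * src_w * src_h +
    		(uint64_t)dst_w * dst_h - 1) / ((uint64_t)dst_w * dst_h);
    }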
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.h b/drivers/gpu/drm/i915/display/intel_atomic_plane.h
index 2bcf15e34728..a6bbf42bae1f 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.h
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.h
@@ -18,6 +18,9 @@ struct intel_plane_state;
extern const struct drm_plane_helper_funcs intel_plane_helper_funcs;
+unsigned int intel_plane_pixel_rate(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state);
+
unsigned int intel_plane_data_rate(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state);
void intel_plane_copy_uapi_to_hw_state(struct intel_plane_state *plane_state,
diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c
index 30fb7c887ff0..62f234f641de 100644
--- a/drivers/gpu/drm/i915/display/intel_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_audio.c
@@ -149,6 +149,10 @@ static const struct {
{ 74250, AUD_CONFIG_PIXEL_CLOCK_HDMI_74250 },
{ 148352, AUD_CONFIG_PIXEL_CLOCK_HDMI_148352 },
{ 148500, AUD_CONFIG_PIXEL_CLOCK_HDMI_148500 },
+ { 296703, AUD_CONFIG_PIXEL_CLOCK_HDMI_296703 },
+ { 297000, AUD_CONFIG_PIXEL_CLOCK_HDMI_297000 },
+ { 593407, AUD_CONFIG_PIXEL_CLOCK_HDMI_593407 },
+ { 594000, AUD_CONFIG_PIXEL_CLOCK_HDMI_594000 },
};
/* HDMI N/CTS table */
@@ -234,6 +238,7 @@ static const struct hdmi_aud_ncts hdmi_aud_ncts_36bpp[] = {
/* get AUD_CONFIG_PIXEL_CLOCK_HDMI_* value for mode */
static u32 audio_config_hdmi_pixel_clock(const struct intel_crtc_state *crtc_state)
{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
int i;
@@ -243,6 +248,9 @@ static u32 audio_config_hdmi_pixel_clock(const struct intel_crtc_state *crtc_sta
break;
}
+ if (INTEL_GEN(dev_priv) < 12 && adjusted_mode->crtc_clock > 148500)
+ i = ARRAY_SIZE(hdmi_audio_clock);
+
if (i == ARRAY_SIZE(hdmi_audio_clock)) {
DRM_DEBUG_KMS("HDMI audio pixel clock setting for %d not found, falling back to defaults\n",
adjusted_mode->crtc_clock);
@@ -844,7 +852,7 @@ static void glk_force_audio_cdclk(struct drm_i915_private *dev_priv,
struct intel_crtc *crtc;
int ret;
- crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A);
+ crtc = intel_get_first_crtc(dev_priv);
if (!crtc)
return;
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index 2049cf5b54f3..839124647202 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -26,7 +26,6 @@
*/
#include <drm/drm_dp_helper.h>
-#include <drm/i915_drm.h>
#include "display/intel_display.h"
#include "display/intel_display_types.h"
diff --git a/drivers/gpu/drm/i915/display/intel_bios.h b/drivers/gpu/drm/i915/display/intel_bios.h
index c17199caeff8..e29e79faa01b 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.h
+++ b/drivers/gpu/drm/i915/display/intel_bios.h
@@ -32,8 +32,6 @@
#include <linux/types.h>
-#include <drm/i915_drm.h>
-
struct drm_i915_private;
struct intel_crtc_state;
struct intel_encoder;
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index 0741d643455b..979a0241fdcb 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -1868,6 +1868,8 @@ static void intel_set_cdclk(struct drm_i915_private *dev_priv,
const struct intel_cdclk_config *cdclk_config,
enum pipe pipe)
{
+ struct intel_encoder *encoder;
+
if (!intel_cdclk_changed(&dev_priv->cdclk.hw, cdclk_config))
return;
@@ -1876,8 +1878,28 @@ static void intel_set_cdclk(struct drm_i915_private *dev_priv,
intel_dump_cdclk_config(cdclk_config, "Changing CDCLK to");
+ /*
+ * Lock aux/gmbus while we change cdclk in case those
+ * functions use cdclk. Not all platforms/ports do,
+ * but we'll lock them all for simplicity.
+ */
+ mutex_lock(&dev_priv->gmbus_mutex);
+ for_each_intel_dp(&dev_priv->drm, encoder) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ mutex_lock_nest_lock(&intel_dp->aux.hw_mutex,
+ &dev_priv->gmbus_mutex);
+ }
+
dev_priv->display.set_cdclk(dev_priv, cdclk_config, pipe);
+ for_each_intel_dp(&dev_priv->drm, encoder) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ mutex_unlock(&intel_dp->aux.hw_mutex);
+ }
+ mutex_unlock(&dev_priv->gmbus_mutex);
+
if (drm_WARN(&dev_priv->drm,
intel_cdclk_changed(&dev_priv->cdclk.hw, cdclk_config),
"cdclk state doesn't match!\n")) {
diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c
index 36dd52d2a9ee..c1cce93a1c25 100644
--- a/drivers/gpu/drm/i915/display/intel_color.c
+++ b/drivers/gpu/drm/i915/display/intel_color.c
@@ -348,48 +348,56 @@ static void icl_load_csc_matrix(const struct intel_crtc_state *crtc_state)
crtc_state->csc_mode);
}
-/*
- * Set up the pipe CSC unit on CherryView.
- */
-static void cherryview_load_csc_matrix(const struct intel_crtc_state *crtc_state)
+static void chv_load_cgm_csc(struct intel_crtc *crtc,
+ const struct drm_property_blob *blob)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct drm_color_ctm *ctm = blob->data;
enum pipe pipe = crtc->pipe;
+ u16 coeffs[9];
+ int i;
- if (crtc_state->hw.ctm) {
- const struct drm_color_ctm *ctm = crtc_state->hw.ctm->data;
- u16 coeffs[9] = {};
- int i;
-
- for (i = 0; i < ARRAY_SIZE(coeffs); i++) {
- u64 abs_coeff =
- ((1ULL << 63) - 1) & ctm->matrix[i];
-
- /* Round coefficient. */
- abs_coeff += 1 << (32 - 13);
- /* Clamp to hardware limits. */
- abs_coeff = clamp_val(abs_coeff, 0, CTM_COEFF_8_0 - 1);
-
- /* Write coefficients in S3.12 format. */
- if (ctm->matrix[i] & (1ULL << 63))
- coeffs[i] = 1 << 15;
- coeffs[i] |= ((abs_coeff >> 32) & 7) << 12;
- coeffs[i] |= (abs_coeff >> 20) & 0xfff;
- }
+ for (i = 0; i < ARRAY_SIZE(coeffs); i++) {
+ u64 abs_coeff = ((1ULL << 63) - 1) & ctm->matrix[i];
+
+ /* Round coefficient. */
+ abs_coeff += 1 << (32 - 13);
+ /* Clamp to hardware limits. */
+ abs_coeff = clamp_val(abs_coeff, 0, CTM_COEFF_8_0 - 1);
- intel_de_write(dev_priv, CGM_PIPE_CSC_COEFF01(pipe),
- coeffs[1] << 16 | coeffs[0]);
- intel_de_write(dev_priv, CGM_PIPE_CSC_COEFF23(pipe),
- coeffs[3] << 16 | coeffs[2]);
- intel_de_write(dev_priv, CGM_PIPE_CSC_COEFF45(pipe),
- coeffs[5] << 16 | coeffs[4]);
- intel_de_write(dev_priv, CGM_PIPE_CSC_COEFF67(pipe),
- coeffs[7] << 16 | coeffs[6]);
- intel_de_write(dev_priv, CGM_PIPE_CSC_COEFF8(pipe), coeffs[8]);
+ coeffs[i] = 0;
+
+ /* Write coefficients in S3.12 format. */
+ if (ctm->matrix[i] & (1ULL << 63))
+ coeffs[i] |= 1 << 15;
+
+ coeffs[i] |= ((abs_coeff >> 32) & 7) << 12;
+ coeffs[i] |= (abs_coeff >> 20) & 0xfff;
}
- intel_de_write(dev_priv, CGM_PIPE_MODE(pipe), crtc_state->cgm_mode);
+ intel_de_write(dev_priv, CGM_PIPE_CSC_COEFF01(pipe),
+ coeffs[1] << 16 | coeffs[0]);
+ intel_de_write(dev_priv, CGM_PIPE_CSC_COEFF23(pipe),
+ coeffs[3] << 16 | coeffs[2]);
+ intel_de_write(dev_priv, CGM_PIPE_CSC_COEFF45(pipe),
+ coeffs[5] << 16 | coeffs[4]);
+ intel_de_write(dev_priv, CGM_PIPE_CSC_COEFF67(pipe),
+ coeffs[7] << 16 | coeffs[6]);
+ intel_de_write(dev_priv, CGM_PIPE_CSC_COEFF8(pipe),
+ coeffs[8]);
+}
+
+/* convert hw value with given bit_precision to lut property val */
+static u32 intel_color_lut_pack(u32 val, int bit_precision)
+{
+ u32 max = 0xffff >> (16 - bit_precision);
+
+ val = clamp_val(val, 0, max);
+
+ if (bit_precision < 16)
+ val <<= 16 - bit_precision;
+
+ return val;
}
static u32 i9xx_lut_8(const struct drm_color_lut *color)
@@ -399,6 +407,13 @@ static u32 i9xx_lut_8(const struct drm_color_lut *color)
drm_color_lut_extract(color->blue, 8);
}
+static void i9xx_lut_8_pack(struct drm_color_lut *entry, u32 val)
+{
+ entry->red = intel_color_lut_pack(REG_FIELD_GET(LGC_PALETTE_RED_MASK, val), 8);
+ entry->green = intel_color_lut_pack(REG_FIELD_GET(LGC_PALETTE_GREEN_MASK, val), 8);
+ entry->blue = intel_color_lut_pack(REG_FIELD_GET(LGC_PALETTE_BLUE_MASK, val), 8);
+}
+
/* i965+ "10.6" bit interpolated format "even DW" (low 8 bits) */
static u32 i965_lut_10p6_ldw(const struct drm_color_lut *color)
{
@@ -415,48 +430,34 @@ static u32 i965_lut_10p6_udw(const struct drm_color_lut *color)
(color->blue >> 8);
}
-static u32 ilk_lut_10(const struct drm_color_lut *color)
+static void i965_lut_10p6_pack(struct drm_color_lut *entry, u32 ldw, u32 udw)
{
- return drm_color_lut_extract(color->red, 10) << 20 |
- drm_color_lut_extract(color->green, 10) << 10 |
- drm_color_lut_extract(color->blue, 10);
+ entry->red = REG_FIELD_GET(PALETTE_RED_MASK, udw) << 8 |
+ REG_FIELD_GET(PALETTE_RED_MASK, ldw);
+ entry->green = REG_FIELD_GET(PALETTE_GREEN_MASK, udw) << 8 |
+ REG_FIELD_GET(PALETTE_GREEN_MASK, ldw);
+ entry->blue = REG_FIELD_GET(PALETTE_BLUE_MASK, udw) << 8 |
+ REG_FIELD_GET(PALETTE_BLUE_MASK, ldw);
}
-/* Loads the legacy palette/gamma unit for the CRTC. */
-static void i9xx_load_luts_internal(const struct intel_crtc_state *crtc_state,
- const struct drm_property_blob *blob)
+static u16 i965_lut_11p6_max_pack(u32 val)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum pipe pipe = crtc->pipe;
- int i;
-
- if (HAS_GMCH(dev_priv)) {
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
- assert_dsi_pll_enabled(dev_priv);
- else
- assert_pll_enabled(dev_priv, pipe);
- }
-
- if (blob) {
- const struct drm_color_lut *lut = blob->data;
-
- for (i = 0; i < 256; i++) {
- u32 word = i9xx_lut_8(&lut[i]);
+ /* PIPEGCMAX is 11.6, clamp to 10.6 */
+ return clamp_val(val, 0, 0xffff);
+}
- if (HAS_GMCH(dev_priv))
- intel_de_write(dev_priv, PALETTE(pipe, i),
- word);
- else
- intel_de_write(dev_priv, LGC_PALETTE(pipe, i),
- word);
- }
- }
+static u32 ilk_lut_10(const struct drm_color_lut *color)
+{
+ return drm_color_lut_extract(color->red, 10) << 20 |
+ drm_color_lut_extract(color->green, 10) << 10 |
+ drm_color_lut_extract(color->blue, 10);
}
-static void i9xx_load_luts(const struct intel_crtc_state *crtc_state)
+static void ilk_lut_10_pack(struct drm_color_lut *entry, u32 val)
{
- i9xx_load_luts_internal(crtc_state, crtc_state->hw.gamma_lut);
+ entry->red = intel_color_lut_pack(REG_FIELD_GET(PREC_PALETTE_RED_MASK, val), 10);
+ entry->green = intel_color_lut_pack(REG_FIELD_GET(PREC_PALETTE_GREEN_MASK, val), 10);
+ entry->blue = intel_color_lut_pack(REG_FIELD_GET(PREC_PALETTE_BLUE_MASK, val), 10);
}
static void i9xx_color_commit(const struct intel_crtc_state *crtc_state)
@@ -525,6 +526,35 @@ static void skl_color_commit(const struct intel_crtc_state *crtc_state)
ilk_load_csc_matrix(crtc_state);
}
+static void i9xx_load_lut_8(struct intel_crtc *crtc,
+ const struct drm_property_blob *blob)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct drm_color_lut *lut;
+ enum pipe pipe = crtc->pipe;
+ int i;
+
+ if (!blob)
+ return;
+
+ lut = blob->data;
+
+ for (i = 0; i < 256; i++)
+ intel_de_write(dev_priv, PALETTE(pipe, i),
+ i9xx_lut_8(&lut[i]));
+}
+
+static void i9xx_load_luts(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
+
+ assert_pll_enabled(dev_priv, crtc->pipe);
+
+ i9xx_load_lut_8(crtc, gamma_lut);
+}
+
static void i965_load_lut_10p6(struct intel_crtc *crtc,
const struct drm_property_blob *blob)
{
@@ -548,14 +578,38 @@ static void i965_load_lut_10p6(struct intel_crtc *crtc,
static void i965_load_luts(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
+ assert_dsi_pll_enabled(dev_priv);
+ else
+ assert_pll_enabled(dev_priv, crtc->pipe);
+
if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT)
- i9xx_load_luts(crtc_state);
+ i9xx_load_lut_8(crtc, gamma_lut);
else
i965_load_lut_10p6(crtc, gamma_lut);
}
+static void ilk_load_lut_8(struct intel_crtc *crtc,
+ const struct drm_property_blob *blob)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct drm_color_lut *lut;
+ enum pipe pipe = crtc->pipe;
+ int i;
+
+ if (!blob)
+ return;
+
+ lut = blob->data;
+
+ for (i = 0; i < 256; i++)
+ intel_de_write(dev_priv, LGC_PALETTE(pipe, i),
+ i9xx_lut_8(&lut[i]));
+}
+
static void ilk_load_lut_10(struct intel_crtc *crtc,
const struct drm_property_blob *blob)
{
@@ -566,7 +620,7 @@ static void ilk_load_lut_10(struct intel_crtc *crtc,
for (i = 0; i < lut_size; i++)
intel_de_write(dev_priv, PREC_PALETTE(pipe, i),
- ilk_lut_10(&lut[i]));
+ ilk_lut_10(&lut[i]));
}
static void ilk_load_luts(const struct intel_crtc_state *crtc_state)
@@ -575,7 +629,7 @@ static void ilk_load_luts(const struct intel_crtc_state *crtc_state)
const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT)
- i9xx_load_luts(crtc_state);
+ ilk_load_lut_8(crtc, gamma_lut);
else
ilk_load_lut_10(crtc, gamma_lut);
}
@@ -685,7 +739,7 @@ static void ivb_load_luts(const struct intel_crtc_state *crtc_state)
const struct drm_property_blob *degamma_lut = crtc_state->hw.degamma_lut;
if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT) {
- i9xx_load_luts(crtc_state);
+ ilk_load_lut_8(crtc, gamma_lut);
} else if (crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT) {
ivb_load_lut_10(crtc, degamma_lut, PAL_PREC_SPLIT_MODE |
PAL_PREC_INDEX_VALUE(0));
@@ -708,7 +762,7 @@ static void bdw_load_luts(const struct intel_crtc_state *crtc_state)
const struct drm_property_blob *degamma_lut = crtc_state->hw.degamma_lut;
if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT) {
- i9xx_load_luts(crtc_state);
+ ilk_load_lut_8(crtc, gamma_lut);
} else if (crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT) {
bdw_load_lut_10(crtc, degamma_lut, PAL_PREC_SPLIT_MODE |
PAL_PREC_INDEX_VALUE(0));
@@ -729,9 +783,8 @@ static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state)
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- const u32 lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size;
+ int i, lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size;
const struct drm_color_lut *lut = crtc_state->hw.degamma_lut->data;
- u32 i;
/*
* When setting the auto-increment bit, the hardware seems to
@@ -770,8 +823,7 @@ static void glk_load_degamma_lut_linear(const struct intel_crtc_state *crtc_stat
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- const u32 lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size;
- u32 i;
+ int i, lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size;
/*
* When setting the auto-increment bit, the hardware seems to
@@ -812,7 +864,7 @@ static void glk_load_luts(const struct intel_crtc_state *crtc_state)
glk_load_degamma_lut_linear(crtc_state);
if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT) {
- i9xx_load_luts(crtc_state);
+ ilk_load_lut_8(crtc, gamma_lut);
} else {
bdw_load_lut_10(crtc, gamma_lut, PAL_PREC_INDEX_VALUE(0));
ivb_load_lut_ext_max(crtc);
@@ -856,7 +908,7 @@ icl_program_gamma_superfine_segment(const struct intel_crtc_state *crtc_state)
const struct drm_color_lut *lut = blob->data;
struct intel_dsb *dsb = intel_dsb_get(crtc);
enum pipe pipe = crtc->pipe;
- u32 i;
+ int i;
/*
* Program Super Fine segment (let's call it seg1)...
@@ -889,7 +941,7 @@ icl_program_gamma_multi_segment(const struct intel_crtc_state *crtc_state)
const struct drm_color_lut *entry;
struct intel_dsb *dsb = intel_dsb_get(crtc);
enum pipe pipe = crtc->pipe;
- u32 i;
+ int i;
/*
* Program Fine segment (let's call it seg2)...
@@ -948,7 +1000,7 @@ static void icl_load_luts(const struct intel_crtc_state *crtc_state)
switch (crtc_state->gamma_mode & GAMMA_MODE_MODE_MASK) {
case GAMMA_MODE_MODE_8BIT:
- i9xx_load_luts(crtc_state);
+ ilk_load_lut_8(crtc, gamma_lut);
break;
case GAMMA_MODE_MODE_12BIT_MULTI_SEGMENTED:
icl_program_gamma_superfine_segment(crtc_state);
@@ -974,6 +1026,13 @@ static u32 chv_cgm_degamma_udw(const struct drm_color_lut *color)
return drm_color_lut_extract(color->red, 14);
}
+static void chv_cgm_gamma_pack(struct drm_color_lut *entry, u32 ldw, u32 udw)
+{
+ entry->green = intel_color_lut_pack(REG_FIELD_GET(CGM_PIPE_GAMMA_GREEN_MASK, ldw), 10);
+ entry->blue = intel_color_lut_pack(REG_FIELD_GET(CGM_PIPE_GAMMA_BLUE_MASK, ldw), 10);
+ entry->red = intel_color_lut_pack(REG_FIELD_GET(CGM_PIPE_GAMMA_RED_MASK, udw), 10);
+}
+
static void chv_load_cgm_degamma(struct intel_crtc *crtc,
const struct drm_property_blob *blob)
{
@@ -1020,21 +1079,24 @@ static void chv_load_cgm_gamma(struct intel_crtc *crtc,
static void chv_load_luts(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct drm_property_blob *degamma_lut = crtc_state->hw.degamma_lut;
+ const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
+ const struct drm_property_blob *ctm = crtc_state->hw.ctm;
- cherryview_load_csc_matrix(crtc_state);
+ if (crtc_state->cgm_mode & CGM_PIPE_MODE_CSC)
+ chv_load_cgm_csc(crtc, ctm);
- if (crtc_state_is_legacy_gamma(crtc_state)) {
- i9xx_load_luts(crtc_state);
- return;
- }
-
- if (degamma_lut)
+ if (crtc_state->cgm_mode & CGM_PIPE_MODE_DEGAMMA)
chv_load_cgm_degamma(crtc, degamma_lut);
- if (gamma_lut)
+ if (crtc_state->cgm_mode & CGM_PIPE_MODE_GAMMA)
chv_load_cgm_gamma(crtc, gamma_lut);
+ else
+ i965_load_luts(crtc_state);
+
+ intel_de_write(dev_priv, CGM_PIPE_MODE(crtc->pipe),
+ crtc_state->cgm_mode);
}
void intel_color_load_luts(const struct intel_crtc_state *crtc_state)
@@ -1660,28 +1722,13 @@ bool intel_color_lut_equal(struct drm_property_blob *blob1,
return true;
}
-/* convert hw value with given bit_precision to lut property val */
-static u32 intel_color_lut_pack(u32 val, u32 bit_precision)
+static struct drm_property_blob *i9xx_read_lut_8(struct intel_crtc *crtc)
{
- u32 max = 0xffff >> (16 - bit_precision);
-
- val = clamp_val(val, 0, max);
-
- if (bit_precision < 16)
- val <<= 16 - bit_precision;
-
- return val;
-}
-
-static struct drm_property_blob *
-i9xx_read_lut_8(const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
struct drm_property_blob *blob;
- struct drm_color_lut *blob_data;
- u32 i, val;
+ struct drm_color_lut *lut;
+ int i;
blob = drm_property_create_blob(&dev_priv->drm,
sizeof(struct drm_color_lut) * LEGACY_LUT_LENGTH,
@@ -1689,20 +1736,12 @@ i9xx_read_lut_8(const struct intel_crtc_state *crtc_state)
if (IS_ERR(blob))
return NULL;
- blob_data = blob->data;
+ lut = blob->data;
for (i = 0; i < LEGACY_LUT_LENGTH; i++) {
- if (HAS_GMCH(dev_priv))
- val = intel_de_read(dev_priv, PALETTE(pipe, i));
- else
- val = intel_de_read(dev_priv, LGC_PALETTE(pipe, i));
-
- blob_data[i].red = intel_color_lut_pack(REG_FIELD_GET(
- LGC_PALETTE_RED_MASK, val), 8);
- blob_data[i].green = intel_color_lut_pack(REG_FIELD_GET(
- LGC_PALETTE_GREEN_MASK, val), 8);
- blob_data[i].blue = intel_color_lut_pack(REG_FIELD_GET(
- LGC_PALETTE_BLUE_MASK, val), 8);
+ u32 val = intel_de_read(dev_priv, PALETTE(pipe, i));
+
+ i9xx_lut_8_pack(&lut[i], val);
}
return blob;
@@ -1710,22 +1749,21 @@ i9xx_read_lut_8(const struct intel_crtc_state *crtc_state)
static void i9xx_read_luts(struct intel_crtc_state *crtc_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
if (!crtc_state->gamma_enable)
return;
- crtc_state->hw.gamma_lut = i9xx_read_lut_8(crtc_state);
+ crtc_state->hw.gamma_lut = i9xx_read_lut_8(crtc);
}
-static struct drm_property_blob *
-i965_read_lut_10p6(const struct intel_crtc_state *crtc_state)
+static struct drm_property_blob *i965_read_lut_10p6(struct intel_crtc *crtc)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- u32 lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
+ int i, lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
enum pipe pipe = crtc->pipe;
struct drm_property_blob *blob;
- struct drm_color_lut *blob_data;
- u32 i, val1, val2;
+ struct drm_color_lut *lut;
blob = drm_property_create_blob(&dev_priv->drm,
sizeof(struct drm_color_lut) * lut_size,
@@ -1733,51 +1771,42 @@ i965_read_lut_10p6(const struct intel_crtc_state *crtc_state)
if (IS_ERR(blob))
return NULL;
- blob_data = blob->data;
+ lut = blob->data;
for (i = 0; i < lut_size - 1; i++) {
- val1 = intel_de_read(dev_priv, PALETTE(pipe, 2 * i + 0));
- val2 = intel_de_read(dev_priv, PALETTE(pipe, 2 * i + 1));
-
- blob_data[i].red = REG_FIELD_GET(PALETTE_RED_MASK, val2) << 8 |
- REG_FIELD_GET(PALETTE_RED_MASK, val1);
- blob_data[i].green = REG_FIELD_GET(PALETTE_GREEN_MASK, val2) << 8 |
- REG_FIELD_GET(PALETTE_GREEN_MASK, val1);
- blob_data[i].blue = REG_FIELD_GET(PALETTE_BLUE_MASK, val2) << 8 |
- REG_FIELD_GET(PALETTE_BLUE_MASK, val1);
+ u32 ldw = intel_de_read(dev_priv, PALETTE(pipe, 2 * i + 0));
+ u32 udw = intel_de_read(dev_priv, PALETTE(pipe, 2 * i + 1));
+
+ i965_lut_10p6_pack(&lut[i], ldw, udw);
}
- blob_data[i].red = REG_FIELD_GET(PIPEGCMAX_RGB_MASK,
- intel_de_read(dev_priv, PIPEGCMAX(pipe, 0)));
- blob_data[i].green = REG_FIELD_GET(PIPEGCMAX_RGB_MASK,
- intel_de_read(dev_priv, PIPEGCMAX(pipe, 1)));
- blob_data[i].blue = REG_FIELD_GET(PIPEGCMAX_RGB_MASK,
- intel_de_read(dev_priv, PIPEGCMAX(pipe, 2)));
+ lut[i].red = i965_lut_11p6_max_pack(intel_de_read(dev_priv, PIPEGCMAX(pipe, 0)));
+ lut[i].green = i965_lut_11p6_max_pack(intel_de_read(dev_priv, PIPEGCMAX(pipe, 1)));
+ lut[i].blue = i965_lut_11p6_max_pack(intel_de_read(dev_priv, PIPEGCMAX(pipe, 2)));
return blob;
}
static void i965_read_luts(struct intel_crtc_state *crtc_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
if (!crtc_state->gamma_enable)
return;
if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT)
- crtc_state->hw.gamma_lut = i9xx_read_lut_8(crtc_state);
+ crtc_state->hw.gamma_lut = i9xx_read_lut_8(crtc);
else
- crtc_state->hw.gamma_lut = i965_read_lut_10p6(crtc_state);
+ crtc_state->hw.gamma_lut = i965_read_lut_10p6(crtc);
}
-static struct drm_property_blob *
-chv_read_cgm_lut(const struct intel_crtc_state *crtc_state)
+static struct drm_property_blob *chv_read_cgm_gamma(struct intel_crtc *crtc)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- u32 lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
+ int i, lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
enum pipe pipe = crtc->pipe;
struct drm_property_blob *blob;
- struct drm_color_lut *blob_data;
- u32 i, val;
+ struct drm_color_lut *lut;
blob = drm_property_create_blob(&dev_priv->drm,
sizeof(struct drm_color_lut) * lut_size,
@@ -1785,18 +1814,13 @@ chv_read_cgm_lut(const struct intel_crtc_state *crtc_state)
if (IS_ERR(blob))
return NULL;
- blob_data = blob->data;
+ lut = blob->data;
for (i = 0; i < lut_size; i++) {
- val = intel_de_read(dev_priv, CGM_PIPE_GAMMA(pipe, i, 0));
- blob_data[i].green = intel_color_lut_pack(REG_FIELD_GET(
- CGM_PIPE_GAMMA_GREEN_MASK, val), 10);
- blob_data[i].blue = intel_color_lut_pack(REG_FIELD_GET(
- CGM_PIPE_GAMMA_BLUE_MASK, val), 10);
-
- val = intel_de_read(dev_priv, CGM_PIPE_GAMMA(pipe, i, 1));
- blob_data[i].red = intel_color_lut_pack(REG_FIELD_GET(
- CGM_PIPE_GAMMA_RED_MASK, val), 10);
+ u32 ldw = intel_de_read(dev_priv, CGM_PIPE_GAMMA(pipe, i, 0));
+ u32 udw = intel_de_read(dev_priv, CGM_PIPE_GAMMA(pipe, i, 1));
+
+ chv_cgm_gamma_pack(&lut[i], ldw, udw);
}
return blob;
@@ -1804,22 +1828,46 @@ chv_read_cgm_lut(const struct intel_crtc_state *crtc_state)
static void chv_read_luts(struct intel_crtc_state *crtc_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
if (crtc_state->cgm_mode & CGM_PIPE_MODE_GAMMA)
- crtc_state->hw.gamma_lut = chv_read_cgm_lut(crtc_state);
+ crtc_state->hw.gamma_lut = chv_read_cgm_gamma(crtc);
else
i965_read_luts(crtc_state);
}
-static struct drm_property_blob *
-ilk_read_lut_10(const struct intel_crtc_state *crtc_state)
+static struct drm_property_blob *ilk_read_lut_8(struct intel_crtc *crtc)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- u32 lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
enum pipe pipe = crtc->pipe;
struct drm_property_blob *blob;
- struct drm_color_lut *blob_data;
- u32 i, val;
+ struct drm_color_lut *lut;
+ int i;
+
+ blob = drm_property_create_blob(&dev_priv->drm,
+ sizeof(struct drm_color_lut) * LEGACY_LUT_LENGTH,
+ NULL);
+ if (IS_ERR(blob))
+ return NULL;
+
+ lut = blob->data;
+
+ for (i = 0; i < LEGACY_LUT_LENGTH; i++) {
+ u32 val = intel_de_read(dev_priv, LGC_PALETTE(pipe, i));
+
+ i9xx_lut_8_pack(&lut[i], val);
+ }
+
+ return blob;
+}
+
+static struct drm_property_blob *ilk_read_lut_10(struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ int i, lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
+ enum pipe pipe = crtc->pipe;
+ struct drm_property_blob *blob;
+ struct drm_color_lut *lut;
blob = drm_property_create_blob(&dev_priv->drm,
sizeof(struct drm_color_lut) * lut_size,
@@ -1827,17 +1875,12 @@ ilk_read_lut_10(const struct intel_crtc_state *crtc_state)
if (IS_ERR(blob))
return NULL;
- blob_data = blob->data;
+ lut = blob->data;
for (i = 0; i < lut_size; i++) {
- val = intel_de_read(dev_priv, PREC_PALETTE(pipe, i));
-
- blob_data[i].red = intel_color_lut_pack(REG_FIELD_GET(
- PREC_PALETTE_RED_MASK, val), 10);
- blob_data[i].green = intel_color_lut_pack(REG_FIELD_GET(
- PREC_PALETTE_GREEN_MASK, val), 10);
- blob_data[i].blue = intel_color_lut_pack(REG_FIELD_GET(
- PREC_PALETTE_BLUE_MASK, val), 10);
+ u32 val = intel_de_read(dev_priv, PREC_PALETTE(pipe, i));
+
+ ilk_lut_10_pack(&lut[i], val);
}
return blob;
@@ -1845,6 +1888,8 @@ ilk_read_lut_10(const struct intel_crtc_state *crtc_state)
static void ilk_read_luts(struct intel_crtc_state *crtc_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
if (!crtc_state->gamma_enable)
return;
@@ -1852,21 +1897,19 @@ static void ilk_read_luts(struct intel_crtc_state *crtc_state)
return;
if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT)
- crtc_state->hw.gamma_lut = i9xx_read_lut_8(crtc_state);
+ crtc_state->hw.gamma_lut = ilk_read_lut_8(crtc);
else
- crtc_state->hw.gamma_lut = ilk_read_lut_10(crtc_state);
+ crtc_state->hw.gamma_lut = ilk_read_lut_10(crtc);
}
-static struct drm_property_blob *
-glk_read_lut_10(const struct intel_crtc_state *crtc_state, u32 prec_index)
+static struct drm_property_blob *glk_read_lut_10(struct intel_crtc *crtc,
+ u32 prec_index)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- int hw_lut_size = ivb_lut_10_size(prec_index);
+ int i, hw_lut_size = ivb_lut_10_size(prec_index);
enum pipe pipe = crtc->pipe;
struct drm_property_blob *blob;
- struct drm_color_lut *blob_data;
- u32 i, val;
+ struct drm_color_lut *lut;
blob = drm_property_create_blob(&dev_priv->drm,
sizeof(struct drm_color_lut) * hw_lut_size,
@@ -1874,20 +1917,15 @@ glk_read_lut_10(const struct intel_crtc_state *crtc_state, u32 prec_index)
if (IS_ERR(blob))
return NULL;
- blob_data = blob->data;
+ lut = blob->data;
intel_de_write(dev_priv, PREC_PAL_INDEX(pipe),
prec_index | PAL_PREC_AUTO_INCREMENT);
for (i = 0; i < hw_lut_size; i++) {
- val = intel_de_read(dev_priv, PREC_PAL_DATA(pipe));
-
- blob_data[i].red = intel_color_lut_pack(REG_FIELD_GET(
- PREC_PAL_DATA_RED_MASK, val), 10);
- blob_data[i].green = intel_color_lut_pack(REG_FIELD_GET(
- PREC_PAL_DATA_GREEN_MASK, val), 10);
- blob_data[i].blue = intel_color_lut_pack(REG_FIELD_GET(
- PREC_PAL_DATA_BLUE_MASK, val), 10);
+ u32 val = intel_de_read(dev_priv, PREC_PAL_DATA(pipe));
+
+ ilk_lut_10_pack(&lut[i], val);
}
intel_de_write(dev_priv, PREC_PAL_INDEX(pipe), 0);
@@ -1897,13 +1935,15 @@ glk_read_lut_10(const struct intel_crtc_state *crtc_state, u32 prec_index)
static void glk_read_luts(struct intel_crtc_state *crtc_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
if (!crtc_state->gamma_enable)
return;
if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT)
- crtc_state->hw.gamma_lut = i9xx_read_lut_8(crtc_state);
+ crtc_state->hw.gamma_lut = ilk_read_lut_8(crtc);
else
- crtc_state->hw.gamma_lut = glk_read_lut_10(crtc_state, PAL_PREC_INDEX_VALUE(0));
+ crtc_state->hw.gamma_lut = glk_read_lut_10(crtc, PAL_PREC_INDEX_VALUE(0));
}
void intel_color_init(struct intel_crtc *crtc)
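
The coefficient handling factored out into chv_load_cgm_csc() converts each drm_color_ctm entry from S31.32 sign-magnitude fixed point to the hardware's S3.12 format: round by half an output ULP, clamp below 8.0, then pack sign, three integer bits, and twelve fractional bits. An illustrative standalone version of that conversion, assuming CTM_COEFF_8_0 equals 8 << 32; e.g. 1.0 packs to 0x1000 and -0.5 to 0x8800:

    #include <stdint.h>

    static uint16_t ctm_to_s3_12(uint64_t s31_32)
    {
    	uint64_t abs_coeff = s31_32 & ~(1ULL << 63);	/* drop sign bit */
    	uint16_t out = 0;

    	abs_coeff += 1 << (32 - 13);		/* round to S3.12 */
    	if (abs_coeff > (8ULL << 32) - 1)	/* clamp below 8.0 */
    		abs_coeff = (8ULL << 32) - 1;

    	if (s31_32 & (1ULL << 63))		/* sign-magnitude sign */
    		out |= 1 << 15;
    	out |= ((abs_coeff >> 32) & 7) << 12;	/* 3 integer bits */
    	out |= (abs_coeff >> 20) & 0xfff;	/* 12 fractional bits */

    	return out;
    }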
diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c
index f49c98f6cb7e..78f9b6cde810 100644
--- a/drivers/gpu/drm/i915/display/intel_crt.c
+++ b/drivers/gpu/drm/i915/display/intel_crt.c
@@ -32,7 +32,6 @@
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/drm_probe_helper.h>
-#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "intel_connector.h"
diff --git a/drivers/gpu/drm/i915/display/intel_csr.c b/drivers/gpu/drm/i915/display/intel_csr.c
index 57320c12839f..3112572cfb7d 100644
--- a/drivers/gpu/drm/i915/display/intel_csr.c
+++ b/drivers/gpu/drm/i915/display/intel_csr.c
@@ -40,8 +40,8 @@
#define GEN12_CSR_MAX_FW_SIZE ICL_CSR_MAX_FW_SIZE
-#define TGL_CSR_PATH "i915/tgl_dmc_ver2_04.bin"
-#define TGL_CSR_VERSION_REQUIRED CSR_VERSION(2, 4)
+#define TGL_CSR_PATH "i915/tgl_dmc_ver2_06.bin"
+#define TGL_CSR_VERSION_REQUIRED CSR_VERSION(2, 6)
#define TGL_CSR_MAX_FW_SIZE 0x6000
MODULE_FIRMWARE(TGL_CSR_PATH);
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index 9f7d1d7189ae..73d0f4648c06 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -1325,164 +1325,6 @@ intel_ddi_get_crtc_encoder(struct intel_crtc *crtc)
return ret;
}
-static int hsw_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv,
- i915_reg_t reg)
-{
- int refclk;
- int n, p, r;
- u32 wrpll;
-
- wrpll = intel_de_read(dev_priv, reg);
- switch (wrpll & WRPLL_REF_MASK) {
- case WRPLL_REF_SPECIAL_HSW:
- /*
- * muxed-SSC for BDW.
- * non-SSC for non-ULT HSW. Check FUSE_STRAP3
- * for the non-SSC reference frequency.
- */
- if (IS_HASWELL(dev_priv) && !IS_HSW_ULT(dev_priv)) {
- if (intel_de_read(dev_priv, FUSE_STRAP3) & HSW_REF_CLK_SELECT)
- refclk = 24;
- else
- refclk = 135;
- break;
- }
- /* fall through */
- case WRPLL_REF_PCH_SSC:
- /*
- * We could calculate spread here, but our checking
- * code only cares about 5% accuracy, and spread is a max of
- * 0.5% downspread.
- */
- refclk = 135;
- break;
- case WRPLL_REF_LCPLL:
- refclk = 2700;
- break;
- default:
- MISSING_CASE(wrpll);
- return 0;
- }
-
- r = wrpll & WRPLL_DIVIDER_REF_MASK;
- p = (wrpll & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT;
- n = (wrpll & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT;
-
- /* Convert to KHz, p & r have a fixed point portion */
- return (refclk * n * 100) / (p * r);
-}
-
-static int skl_calc_wrpll_link(const struct intel_dpll_hw_state *pll_state)
-{
- u32 p0, p1, p2, dco_freq;
-
- p0 = pll_state->cfgcr2 & DPLL_CFGCR2_PDIV_MASK;
- p2 = pll_state->cfgcr2 & DPLL_CFGCR2_KDIV_MASK;
-
- if (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_MODE(1))
- p1 = (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8;
- else
- p1 = 1;
-
-
- switch (p0) {
- case DPLL_CFGCR2_PDIV_1:
- p0 = 1;
- break;
- case DPLL_CFGCR2_PDIV_2:
- p0 = 2;
- break;
- case DPLL_CFGCR2_PDIV_3:
- p0 = 3;
- break;
- case DPLL_CFGCR2_PDIV_7:
- p0 = 7;
- break;
- }
-
- switch (p2) {
- case DPLL_CFGCR2_KDIV_5:
- p2 = 5;
- break;
- case DPLL_CFGCR2_KDIV_2:
- p2 = 2;
- break;
- case DPLL_CFGCR2_KDIV_3:
- p2 = 3;
- break;
- case DPLL_CFGCR2_KDIV_1:
- p2 = 1;
- break;
- }
-
- dco_freq = (pll_state->cfgcr1 & DPLL_CFGCR1_DCO_INTEGER_MASK)
- * 24 * 1000;
-
- dco_freq += (((pll_state->cfgcr1 & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9)
- * 24 * 1000) / 0x8000;
-
- if (WARN_ON(p0 == 0 || p1 == 0 || p2 == 0))
- return 0;
-
- return dco_freq / (p0 * p1 * p2 * 5);
-}
-
-int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv,
- struct intel_dpll_hw_state *pll_state)
-{
- u32 p0, p1, p2, dco_freq, ref_clock;
-
- p0 = pll_state->cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
- p2 = pll_state->cfgcr1 & DPLL_CFGCR1_KDIV_MASK;
-
- if (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1))
- p1 = (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
- DPLL_CFGCR1_QDIV_RATIO_SHIFT;
- else
- p1 = 1;
-
-
- switch (p0) {
- case DPLL_CFGCR1_PDIV_2:
- p0 = 2;
- break;
- case DPLL_CFGCR1_PDIV_3:
- p0 = 3;
- break;
- case DPLL_CFGCR1_PDIV_5:
- p0 = 5;
- break;
- case DPLL_CFGCR1_PDIV_7:
- p0 = 7;
- break;
- }
-
- switch (p2) {
- case DPLL_CFGCR1_KDIV_1:
- p2 = 1;
- break;
- case DPLL_CFGCR1_KDIV_2:
- p2 = 2;
- break;
- case DPLL_CFGCR1_KDIV_3:
- p2 = 3;
- break;
- }
-
- ref_clock = cnl_hdmi_pll_ref_clock(dev_priv);
-
- dco_freq = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK)
- * ref_clock;
-
- dco_freq += (((pll_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
- DPLL_CFGCR0_DCO_FRACTION_SHIFT) * ref_clock) / 0x8000;
-
- if (drm_WARN_ON(&dev_priv->drm, p0 == 0 || p1 == 0 || p2 == 0))
- return 0;
-
- return dco_freq / (p0 * p1 * p2 * 5);
-}
-
static int icl_calc_tbt_pll_link(struct drm_i915_private *dev_priv,
enum port port)
{
@@ -1505,77 +1347,6 @@ static int icl_calc_tbt_pll_link(struct drm_i915_private *dev_priv,
}
}
-static int icl_calc_mg_pll_link(struct drm_i915_private *dev_priv,
- const struct intel_dpll_hw_state *pll_state)
-{
- u32 m1, m2_int, m2_frac, div1, div2, ref_clock;
- u64 tmp;
-
- ref_clock = dev_priv->cdclk.hw.ref;
-
- if (INTEL_GEN(dev_priv) >= 12) {
- m1 = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBPREDIV_MASK;
- m1 = m1 >> DKL_PLL_DIV0_FBPREDIV_SHIFT;
- m2_int = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBDIV_INT_MASK;
-
- if (pll_state->mg_pll_bias & DKL_PLL_BIAS_FRAC_EN_H) {
- m2_frac = pll_state->mg_pll_bias &
- DKL_PLL_BIAS_FBDIV_FRAC_MASK;
- m2_frac = m2_frac >> DKL_PLL_BIAS_FBDIV_SHIFT;
- } else {
- m2_frac = 0;
- }
- } else {
- m1 = pll_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK;
- m2_int = pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;
-
- if (pll_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) {
- m2_frac = pll_state->mg_pll_div0 &
- MG_PLL_DIV0_FBDIV_FRAC_MASK;
- m2_frac = m2_frac >> MG_PLL_DIV0_FBDIV_FRAC_SHIFT;
- } else {
- m2_frac = 0;
- }
- }
-
- switch (pll_state->mg_clktop2_hsclkctl &
- MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) {
- case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2:
- div1 = 2;
- break;
- case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3:
- div1 = 3;
- break;
- case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5:
- div1 = 5;
- break;
- case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7:
- div1 = 7;
- break;
- default:
- MISSING_CASE(pll_state->mg_clktop2_hsclkctl);
- return 0;
- }
-
- div2 = (pll_state->mg_clktop2_hsclkctl &
- MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK) >>
- MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT;
-
- /* div2 value of 0 is same as 1 means no div */
- if (div2 == 0)
- div2 = 1;
-
- /*
- * Adjust the original formula to delay the division by 2^22 in order to
- * minimize possible rounding errors.
- */
- tmp = (u64)m1 * m2_int * ref_clock +
- (((u64)m1 * m2_frac * ref_clock) >> 22);
- tmp = div_u64(tmp, 5 * div1 * div2);
-
- return tmp;
-}
-
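The fixed-point ordering described in the removed comment above is easy to see with made-up numbers (a minimal sketch, not a real PLL readout): keeping the 22-bit fraction fully scaled until the final shift preserves precision that an early truncation throws away.

	/* hypothetical: m1 = 2, m2_frac = 0x200000 (0.5 in .22 fixed point), ref = 38400 kHz */
	u64 late  = ((u64)2 * 0x200000 * 38400) >> 22;	/* = 38400 kHz, exact */
	u32 early = (0x200000 >> 22) * 2 * 38400;	/* = 0, fraction lost */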
static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
{
int dotclock;
@@ -1601,215 +1372,22 @@ static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
pipe_config->hw.adjusted_mode.crtc_clock = dotclock;
}
-static void icl_ddi_clock_get(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dpll_hw_state *pll_state = &pipe_config->dpll_hw_state;
- enum port port = encoder->port;
- enum phy phy = intel_port_to_phy(dev_priv, port);
- int link_clock;
-
- if (intel_phy_is_combo(dev_priv, phy)) {
- link_clock = cnl_calc_wrpll_link(dev_priv, pll_state);
- } else {
- enum intel_dpll_id pll_id = intel_get_shared_dpll_id(dev_priv,
- pipe_config->shared_dpll);
-
- if (pll_id == DPLL_ID_ICL_TBTPLL)
- link_clock = icl_calc_tbt_pll_link(dev_priv, port);
- else
- link_clock = icl_calc_mg_pll_link(dev_priv, pll_state);
- }
-
- pipe_config->port_clock = link_clock;
-
- ddi_dotclock_get(pipe_config);
-}
-
-static void cnl_ddi_clock_get(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dpll_hw_state *pll_state = &pipe_config->dpll_hw_state;
- int link_clock;
-
- if (pll_state->cfgcr0 & DPLL_CFGCR0_HDMI_MODE) {
- link_clock = cnl_calc_wrpll_link(dev_priv, pll_state);
- } else {
- link_clock = pll_state->cfgcr0 & DPLL_CFGCR0_LINK_RATE_MASK;
-
- switch (link_clock) {
- case DPLL_CFGCR0_LINK_RATE_810:
- link_clock = 81000;
- break;
- case DPLL_CFGCR0_LINK_RATE_1080:
- link_clock = 108000;
- break;
- case DPLL_CFGCR0_LINK_RATE_1350:
- link_clock = 135000;
- break;
- case DPLL_CFGCR0_LINK_RATE_1620:
- link_clock = 162000;
- break;
- case DPLL_CFGCR0_LINK_RATE_2160:
- link_clock = 216000;
- break;
- case DPLL_CFGCR0_LINK_RATE_2700:
- link_clock = 270000;
- break;
- case DPLL_CFGCR0_LINK_RATE_3240:
- link_clock = 324000;
- break;
- case DPLL_CFGCR0_LINK_RATE_4050:
- link_clock = 405000;
- break;
- default:
- drm_WARN(&dev_priv->drm, 1, "Unsupported link rate\n");
- break;
- }
- link_clock *= 2;
- }
-
- pipe_config->port_clock = link_clock;
-
- ddi_dotclock_get(pipe_config);
-}
-
-static void skl_ddi_clock_get(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config)
-{
- struct intel_dpll_hw_state *pll_state = &pipe_config->dpll_hw_state;
- int link_clock;
-
- /*
- * ctrl1 register is already shifted for each pll, just use 0 to get
- * the internal shift for each field
- */
- if (pll_state->ctrl1 & DPLL_CTRL1_HDMI_MODE(0)) {
- link_clock = skl_calc_wrpll_link(pll_state);
- } else {
- link_clock = pll_state->ctrl1 & DPLL_CTRL1_LINK_RATE_MASK(0);
- link_clock >>= DPLL_CTRL1_LINK_RATE_SHIFT(0);
-
- switch (link_clock) {
- case DPLL_CTRL1_LINK_RATE_810:
- link_clock = 81000;
- break;
- case DPLL_CTRL1_LINK_RATE_1080:
- link_clock = 108000;
- break;
- case DPLL_CTRL1_LINK_RATE_1350:
- link_clock = 135000;
- break;
- case DPLL_CTRL1_LINK_RATE_1620:
- link_clock = 162000;
- break;
- case DPLL_CTRL1_LINK_RATE_2160:
- link_clock = 216000;
- break;
- case DPLL_CTRL1_LINK_RATE_2700:
- link_clock = 270000;
- break;
- default:
- drm_WARN(encoder->base.dev, 1,
- "Unsupported link rate\n");
- break;
- }
- link_clock *= 2;
- }
-
- pipe_config->port_clock = link_clock;
-
- ddi_dotclock_get(pipe_config);
-}
-
-static void hsw_ddi_clock_get(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- int link_clock = 0;
- u32 val, pll;
-
- val = hsw_pll_to_ddi_pll_sel(pipe_config->shared_dpll);
- switch (val & PORT_CLK_SEL_MASK) {
- case PORT_CLK_SEL_LCPLL_810:
- link_clock = 81000;
- break;
- case PORT_CLK_SEL_LCPLL_1350:
- link_clock = 135000;
- break;
- case PORT_CLK_SEL_LCPLL_2700:
- link_clock = 270000;
- break;
- case PORT_CLK_SEL_WRPLL1:
- link_clock = hsw_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL(0));
- break;
- case PORT_CLK_SEL_WRPLL2:
- link_clock = hsw_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL(1));
- break;
- case PORT_CLK_SEL_SPLL:
- pll = intel_de_read(dev_priv, SPLL_CTL) & SPLL_FREQ_MASK;
- if (pll == SPLL_FREQ_810MHz)
- link_clock = 81000;
- else if (pll == SPLL_FREQ_1350MHz)
- link_clock = 135000;
- else if (pll == SPLL_FREQ_2700MHz)
- link_clock = 270000;
- else {
- drm_WARN(&dev_priv->drm, 1, "bad spll freq\n");
- return;
- }
- break;
- default:
- drm_WARN(&dev_priv->drm, 1, "bad port clock sel\n");
- return;
- }
-
- pipe_config->port_clock = link_clock * 2;
-
- ddi_dotclock_get(pipe_config);
-}
-
-static int bxt_calc_pll_link(const struct intel_dpll_hw_state *pll_state)
-{
- struct dpll clock;
-
- clock.m1 = 2;
- clock.m2 = (pll_state->pll0 & PORT_PLL_M2_MASK) << 22;
- if (pll_state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
- clock.m2 |= pll_state->pll2 & PORT_PLL_M2_FRAC_MASK;
- clock.n = (pll_state->pll1 & PORT_PLL_N_MASK) >> PORT_PLL_N_SHIFT;
- clock.p1 = (pll_state->ebb0 & PORT_PLL_P1_MASK) >> PORT_PLL_P1_SHIFT;
- clock.p2 = (pll_state->ebb0 & PORT_PLL_P2_MASK) >> PORT_PLL_P2_SHIFT;
-
- return chv_calc_dpll_params(100000, &clock);
-}
-
-static void bxt_ddi_clock_get(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config)
-{
- pipe_config->port_clock =
- bxt_calc_pll_link(&pipe_config->dpll_hw_state);
-
- ddi_dotclock_get(pipe_config);
-}
-
static void intel_ddi_clock_get(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
- if (INTEL_GEN(dev_priv) >= 11)
- icl_ddi_clock_get(encoder, pipe_config);
- else if (IS_CANNONLAKE(dev_priv))
- cnl_ddi_clock_get(encoder, pipe_config);
- else if (IS_GEN9_LP(dev_priv))
- bxt_ddi_clock_get(encoder, pipe_config);
- else if (IS_GEN9_BC(dev_priv))
- skl_ddi_clock_get(encoder, pipe_config);
- else if (INTEL_GEN(dev_priv) <= 8)
- hsw_ddi_clock_get(encoder, pipe_config);
+ if (intel_phy_is_tc(dev_priv, phy) &&
+ intel_get_shared_dpll_id(dev_priv, pipe_config->shared_dpll) ==
+ DPLL_ID_ICL_TBTPLL)
+ pipe_config->port_clock = icl_calc_tbt_pll_link(dev_priv,
+ encoder->port);
+ else
+ pipe_config->port_clock =
+ intel_dpll_get_freq(dev_priv, pipe_config->shared_dpll);
+
+ ddi_dotclock_get(pipe_config);
}
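intel_dpll_get_freq() itself is not part of this hunk; given the ->get_freq hooks the DPLL managers gain below, it is presumably a thin dispatcher along these lines (a sketch, not the verbatim implementation):

int intel_dpll_get_freq(struct drm_i915_private *i915,
			const struct intel_shared_dpll *pll)
{
	if (drm_WARN_ON(&i915->drm, !pll->info->funcs->get_freq))
		return 0;

	return pll->info->funcs->get_freq(i915, pll);
}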
void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state,
@@ -3049,7 +2627,7 @@ static void icl_map_plls_to_ports(struct intel_encoder *encoder,
enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
u32 val;
- mutex_lock(&dev_priv->dpll_lock);
+ mutex_lock(&dev_priv->dpll.lock);
val = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0);
drm_WARN_ON(&dev_priv->drm,
@@ -3075,7 +2653,7 @@ static void icl_map_plls_to_ports(struct intel_encoder *encoder,
val &= ~icl_dpclka_cfgcr0_clk_off(dev_priv, phy);
intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, val);
- mutex_unlock(&dev_priv->dpll_lock);
+ mutex_unlock(&dev_priv->dpll.lock);
}
static void icl_unmap_plls_to_ports(struct intel_encoder *encoder)
@@ -3084,13 +2662,13 @@ static void icl_unmap_plls_to_ports(struct intel_encoder *encoder)
enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
u32 val;
- mutex_lock(&dev_priv->dpll_lock);
+ mutex_lock(&dev_priv->dpll.lock);
val = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0);
val |= icl_dpclka_cfgcr0_clk_off(dev_priv, phy);
intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, val);
- mutex_unlock(&dev_priv->dpll_lock);
+ mutex_unlock(&dev_priv->dpll.lock);
}
static void icl_sanitize_port_clk_off(struct drm_i915_private *dev_priv,
@@ -3189,7 +2767,7 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
if (drm_WARN_ON(&dev_priv->drm, !pll))
return;
- mutex_lock(&dev_priv->dpll_lock);
+ mutex_lock(&dev_priv->dpll.lock);
if (INTEL_GEN(dev_priv) >= 11) {
if (!intel_phy_is_combo(dev_priv, phy))
@@ -3233,7 +2811,7 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
hsw_pll_to_ddi_pll_sel(pll));
}
- mutex_unlock(&dev_priv->dpll_lock);
+ mutex_unlock(&dev_priv->dpll.lock);
}
static void intel_ddi_clk_disable(struct intel_encoder *encoder)
@@ -3987,8 +3565,9 @@ static void intel_enable_ddi_hdmi(struct intel_encoder *encoder,
if (!intel_hdmi_handle_sink_scrambling(encoder, connector,
crtc_state->hdmi_high_tmds_clock_ratio,
crtc_state->hdmi_scrambling))
- DRM_ERROR("[CONNECTOR:%d:%s] Failed to configure sink scrambling/TMDS bit clock ratio\n",
- connector->base.id, connector->name);
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Failed to configure sink "
+ "scrambling/TMDS bit clock ratio\n",
+ connector->base.id, connector->name);
/* Display WA #1143: skl,kbl,cfl */
if (IS_GEN9_BC(dev_priv)) {
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.h b/drivers/gpu/drm/i915/display/intel_ddi.h
index 167c6579d972..55fd72b901fe 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.h
+++ b/drivers/gpu/drm/i915/display/intel_ddi.h
@@ -6,8 +6,6 @@
#ifndef __INTEL_DDI_H__
#define __INTEL_DDI_H__
-#include <drm/i915_drm.h>
-
#include "intel_display.h"
struct drm_connector_state;
@@ -47,7 +45,5 @@ u8 intel_ddi_dp_pre_emphasis_max(struct intel_encoder *encoder,
int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder,
bool enable);
void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder);
-int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv,
- struct intel_dpll_hw_state *state);
#endif /* __INTEL_DDI_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 3031e64ee518..8f23c4d51c33 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -41,7 +41,6 @@
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>
-#include <drm/i915_drm.h>
#include "display/intel_crt.h"
#include "display/intel_ddi.h"
@@ -2720,9 +2719,10 @@ u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
/*
* We assume the primary plane for pipe A has
- * the highest stride limits of them all.
+ * the highest stride limits of them all;
+ * if pipe A is disabled, use the first pipe from pipe_mask instead.
*/
- crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A);
+ crtc = intel_get_first_crtc(dev_priv);
if (!crtc)
return 0;
@@ -9542,7 +9542,7 @@ static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
}
/* Check if any DPLLs are using the SSC source */
- for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+ for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
u32 temp = intel_de_read(dev_priv, PCH_DPLL(i));
if (!(temp & DPLL_VCO_ENABLE))
@@ -10129,6 +10129,9 @@ static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
BIT(PLANE_CURSOR))) == 0)
val |= PIPEMISC_HDR_MODE_PRECISION;
+ if (INTEL_GEN(dev_priv) >= 12)
+ val |= PIPEMISC_PIXEL_ROUNDING_TRUNC;
+
intel_de_write(dev_priv, PIPEMISC(crtc->pipe), val);
}
@@ -14299,11 +14302,11 @@ verify_single_dpll_state(struct drm_i915_private *dev_priv,
if (new_crtc_state->hw.active)
I915_STATE_WARN(!(pll->active_mask & crtc_mask),
"pll active mismatch (expected pipe %c in active mask 0x%02x)\n",
- pipe_name(drm_crtc_index(&crtc->base)), pll->active_mask);
+ pipe_name(crtc->pipe), pll->active_mask);
else
I915_STATE_WARN(pll->active_mask & crtc_mask,
"pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n",
- pipe_name(drm_crtc_index(&crtc->base)), pll->active_mask);
+ pipe_name(crtc->pipe), pll->active_mask);
I915_STATE_WARN(!(pll->state.crtc_mask & crtc_mask),
"pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n",
@@ -14332,10 +14335,10 @@ verify_shared_dpll_state(struct intel_crtc *crtc,
I915_STATE_WARN(pll->active_mask & crtc_mask,
"pll active mismatch (didn't expect pipe %c in active mask)\n",
- pipe_name(drm_crtc_index(&crtc->base)));
+ pipe_name(crtc->pipe));
I915_STATE_WARN(pll->state.crtc_mask & crtc_mask,
"pll enabled crtcs mismatch (found %x in enabled mask)\n",
- pipe_name(drm_crtc_index(&crtc->base)));
+ pipe_name(crtc->pipe));
}
}
@@ -14359,8 +14362,10 @@ verify_disabled_dpll_state(struct drm_i915_private *dev_priv)
{
int i;
- for (i = 0; i < dev_priv->num_shared_dpll; i++)
- verify_single_dpll_state(dev_priv, &dev_priv->shared_dplls[i], NULL, NULL);
+ for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++)
+ verify_single_dpll_state(dev_priv,
+ &dev_priv->dpll.shared_dplls[i],
+ NULL, NULL);
}
static void
@@ -15318,7 +15323,6 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state)
struct intel_crtc *crtc;
struct intel_crtc_state *old_crtc_state, *new_crtc_state;
struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
- const u8 num_pipes = INTEL_NUM_PIPES(dev_priv);
u8 update_pipes = 0, modeset_pipes = 0;
int i;
@@ -15355,7 +15359,7 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state)
continue;
if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
- entries, num_pipes, pipe))
+ entries, I915_MAX_PIPES, pipe))
continue;
entries[pipe] = new_crtc_state->wm.skl.ddb;
@@ -15393,7 +15397,7 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state)
continue;
drm_WARN_ON(&dev_priv->drm, skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
- entries, num_pipes, pipe));
+ entries, I915_MAX_PIPES, pipe));
entries[pipe] = new_crtc_state->wm.skl.ddb;
modeset_pipes &= ~BIT(pipe);
@@ -15428,7 +15432,7 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state)
continue;
drm_WARN_ON(&dev_priv->drm, skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
- entries, num_pipes, pipe));
+ entries, I915_MAX_PIPES, pipe));
entries[pipe] = new_crtc_state->wm.skl.ddb;
modeset_pipes &= ~BIT(pipe);
@@ -16320,7 +16324,6 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
struct intel_plane *plane;
const struct drm_plane_funcs *plane_funcs;
unsigned int supported_rotations;
- unsigned int possible_crtcs;
const u32 *formats;
int num_formats;
int ret, zpos;
@@ -16401,18 +16404,16 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
plane->get_hw_state = i9xx_plane_get_hw_state;
plane->check_plane = i9xx_plane_check;
- possible_crtcs = BIT(pipe);
-
if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
- possible_crtcs, plane_funcs,
+ 0, plane_funcs,
formats, num_formats,
i9xx_format_modifiers,
DRM_PLANE_TYPE_PRIMARY,
"primary %c", pipe_name(pipe));
else
ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
- possible_crtcs, plane_funcs,
+ 0, plane_funcs,
formats, num_formats,
i9xx_format_modifiers,
DRM_PLANE_TYPE_PRIMARY,
@@ -16454,7 +16455,6 @@ static struct intel_plane *
intel_cursor_plane_create(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
- unsigned int possible_crtcs;
struct intel_plane *cursor;
int ret, zpos;
@@ -16487,10 +16487,8 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv,
if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv))
cursor->cursor.size = ~0;
- possible_crtcs = BIT(pipe);
-
ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base,
- possible_crtcs, &intel_cursor_plane_funcs,
+ 0, &intel_cursor_plane_funcs,
intel_cursor_formats,
ARRAY_SIZE(intel_cursor_formats),
cursor_format_modifiers,
@@ -16619,6 +16617,18 @@ static void intel_crtc_free(struct intel_crtc *crtc)
kfree(crtc);
}
+static void intel_plane_possible_crtcs_init(struct drm_i915_private *dev_priv)
+{
+ struct intel_plane *plane;
+
+ for_each_intel_plane(&dev_priv->drm, plane) {
+ struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv,
+ plane->pipe);
+
+ plane->base.possible_crtcs = drm_crtc_mask(&crtc->base);
+ }
+}
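For reference, drm_crtc_mask() in drm_crtc.h is simply BIT(drm_crtc_index(crtc)), so the pass above binds each plane to exactly the one CRTC created for its pipe:

	/* equivalent to: plane->base.possible_crtcs = BIT(drm_crtc_index(&crtc->base)); */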
+
static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
{
struct intel_plane *primary, *cursor;
@@ -16697,6 +16707,8 @@ static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
intel_color_init(crtc);
+ intel_crtc_crc_init(crtc);
+
drm_WARN_ON(&dev_priv->drm, drm_crtc_index(&crtc->base) != crtc->pipe);
return 0;
@@ -17785,11 +17797,9 @@ static void plane_config_fini(struct intel_initial_plane_config *plane_config)
i915_vma_put(plane_config->vma);
}
-int intel_modeset_init(struct drm_i915_private *i915)
+/* part #1: call before irq install */
+int intel_modeset_init_noirq(struct drm_i915_private *i915)
{
- struct drm_device *dev = &i915->drm;
- enum pipe pipe;
- struct intel_crtc *crtc;
int ret;
i915->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
@@ -17814,6 +17824,17 @@ int intel_modeset_init(struct drm_i915_private *i915)
intel_fbc_init(i915);
+ return 0;
+}
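Per the "part #1"/"part #2" comments, the probe path is expected to interleave interrupt setup between the two halves. A sketch of the intended call order (the surrounding probe code is not part of this patch; intel_irq_install() is the driver's existing irq setup):

	ret = intel_modeset_init_noirq(i915);	/* part #1: before irqs */
	if (ret)
		return ret;

	ret = intel_irq_install(i915);		/* irqs come online here */
	if (ret)
		return ret;

	ret = intel_modeset_init(i915);		/* part #2: may rely on irqs */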
+
+/* part #2: call after irq install */
+int intel_modeset_init(struct drm_i915_private *i915)
+{
+ struct drm_device *dev = &i915->drm;
+ enum pipe pipe;
+ struct intel_crtc *crtc;
+ int ret;
+
intel_init_pm(i915);
intel_panel_sanitize_ssc(i915);
@@ -17834,6 +17855,7 @@ int intel_modeset_init(struct drm_i915_private *i915)
}
}
+ intel_plane_possible_crtcs_init(i915);
intel_shared_dpll_init(dev);
intel_update_fdi_pll_freq(i915);
@@ -18311,7 +18333,6 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
struct intel_connector *connector;
struct drm_connector_list_iter conn_iter;
u8 active_pipes = 0;
- int i;
for_each_intel_crtc(dev, crtc) {
struct intel_crtc_state *crtc_state =
@@ -18340,33 +18361,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
readout_plane_state(dev_priv);
- for (i = 0; i < dev_priv->num_shared_dpll; i++) {
- struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
-
- pll->on = pll->info->funcs->get_hw_state(dev_priv, pll,
- &pll->state.hw_state);
-
- if (IS_ELKHARTLAKE(dev_priv) && pll->on &&
- pll->info->id == DPLL_ID_EHL_DPLL4) {
- pll->wakeref = intel_display_power_get(dev_priv,
- POWER_DOMAIN_DPLL_DC_OFF);
- }
-
- pll->state.crtc_mask = 0;
- for_each_intel_crtc(dev, crtc) {
- struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
-
- if (crtc_state->hw.active &&
- crtc_state->shared_dpll == pll)
- pll->state.crtc_mask |= 1 << crtc->pipe;
- }
- pll->active_mask = pll->state.crtc_mask;
-
- drm_dbg_kms(&dev_priv->drm,
- "%s hw state readout: crtc_mask 0x%08x, on %i\n",
- pll->info->name, pll->state.crtc_mask, pll->on);
- }
+ intel_dpll_readout_hw_state(dev_priv);
for_each_intel_encoder(dev, encoder) {
pipe = 0;
@@ -18623,7 +18618,6 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
struct intel_encoder *encoder;
struct intel_crtc *crtc;
intel_wakeref_t wakeref;
- int i;
wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
@@ -18676,19 +18670,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
intel_modeset_update_connector_atomic_state(dev);
- for (i = 0; i < dev_priv->num_shared_dpll; i++) {
- struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
-
- if (!pll->on || pll->active_mask)
- continue;
-
- drm_dbg_kms(&dev_priv->drm,
- "%s enabled but not in use, disabling\n",
- pll->info->name);
-
- pll->info->funcs->disable(dev_priv, pll);
- pll->on = false;
- }
+ intel_dpll_sanitize_state(dev_priv);
if (IS_G4X(dev_priv)) {
g4x_wm_get_hw_state(dev_priv);
@@ -18820,6 +18802,15 @@ void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915)
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
+static bool
+has_transcoder(struct drm_i915_private *dev_priv, enum transcoder cpu_transcoder)
+{
+ if (cpu_transcoder == TRANSCODER_EDP)
+ return HAS_TRANSCODER_EDP(dev_priv);
+ else
+ return INTEL_INFO(dev_priv)->pipe_mask & BIT(cpu_transcoder);
+}
+
struct intel_display_error_state {
u32 power_well_driver;
@@ -18928,7 +18919,7 @@ intel_display_capture_error_state(struct drm_i915_private *dev_priv)
for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
enum transcoder cpu_transcoder = transcoders[i];
- if (!INTEL_INFO(dev_priv)->trans_offsets[cpu_transcoder])
+ if (!has_transcoder(dev_priv, cpu_transcoder))
continue;
error->transcoder[i].available = true;
diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
index f92efbbec838..adb1225a3480 100644
--- a/drivers/gpu/drm/i915/display/intel_display.h
+++ b/drivers/gpu/drm/i915/display/intel_display.h
@@ -26,7 +26,6 @@
#define _INTEL_DISPLAY_H_
#include <drm/drm_util.h>
-#include <drm/i915_drm.h>
enum link_m_n_set;
struct dpll;
@@ -40,6 +39,7 @@ struct drm_framebuffer;
struct drm_i915_error_state_buf;
struct drm_i915_gem_object;
struct drm_i915_private;
+struct drm_mode_fb_cmd2;
struct drm_modeset_acquire_ctx;
struct drm_plane;
struct drm_plane_state;
@@ -47,6 +47,7 @@ struct i915_ggtt_view;
struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
+struct intel_crtc_state;
struct intel_digital_port;
struct intel_dp;
struct intel_encoder;
@@ -55,7 +56,6 @@ struct intel_plane;
struct intel_plane_state;
struct intel_remapped_info;
struct intel_rotation_info;
-struct intel_crtc_state;
enum i915_gpio {
GPIOA,
@@ -313,10 +313,11 @@ enum phy_fia {
};
#define for_each_pipe(__dev_priv, __p) \
- for ((__p) = 0; (__p) < INTEL_NUM_PIPES(__dev_priv); (__p)++)
+ for ((__p) = 0; (__p) < I915_MAX_PIPES; (__p)++) \
+ for_each_if(INTEL_INFO(__dev_priv)->pipe_mask & BIT(__p))
#define for_each_pipe_masked(__dev_priv, __p, __mask) \
- for ((__p) = 0; (__p) < INTEL_NUM_PIPES(__dev_priv); (__p)++) \
+ for_each_pipe(__dev_priv, __p) \
for_each_if((__mask) & BIT(__p))
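Since the iterator now consults pipe_mask directly, holes in the middle of the pipe range are skipped instead of being assumed impossible. A small illustration, for a hypothetical part with pipe B fused off:

	/* INTEL_INFO(i915)->pipe_mask == BIT(PIPE_A) | BIT(PIPE_C) */
	for_each_pipe(i915, pipe)
		drm_dbg_kms(&i915->drm, "pipe %c\n", pipe_name(pipe));
	/* visits A and C; the old loop assumed the first INTEL_NUM_PIPES() pipes are contiguous */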
#define for_each_cpu_transcoder_masked(__dev_priv, __t, __mask) \
@@ -614,6 +615,7 @@ intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info,
/* modesetting */
void intel_modeset_init_hw(struct drm_i915_private *i915);
+int intel_modeset_init_noirq(struct drm_i915_private *i915);
int intel_modeset_init(struct drm_i915_private *i915);
void intel_modeset_driver_remove(struct drm_i915_private *i915);
void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915);
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
index 46954cc7b6c0..1e6eb7f2f72d 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
@@ -920,8 +920,13 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused)
int i;
drm_modeset_lock_all(dev);
- for (i = 0; i < dev_priv->num_shared_dpll; i++) {
- struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
+
+ seq_printf(m, "PLL refclks: non-SSC: %d kHz, SSC: %d kHz\n",
+ dev_priv->dpll.ref_clks.nssc,
+ dev_priv->dpll.ref_clks.ssc);
+
+ for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
+ struct intel_shared_dpll *pll = &dev_priv->dpll.shared_dplls[i];
seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
pll->info->id);
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index 6e25a1317161..246e406bb385 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -939,11 +939,17 @@ unlock:
static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
{
- bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
- SKL_DISP_PW_2);
+ enum i915_power_well_id high_pg;
- drm_WARN_ONCE(&dev_priv->drm, pg2_enabled,
- "PG2 not disabled to enable DC5.\n");
+ /* Power wells at this level and above must be disabled for DC5 entry */
+ if (INTEL_GEN(dev_priv) >= 12)
+ high_pg = TGL_DISP_PW_3;
+ else
+ high_pg = SKL_DISP_PW_2;
+
+ drm_WARN_ONCE(&dev_priv->drm,
+ intel_display_power_well_is_enabled(dev_priv, high_pg),
+ "Power wells above platform's DC5 limit still enabled.\n");
drm_WARN_ONCE(&dev_priv->drm,
(intel_de_read(dev_priv, DC_STATE_EN) &
@@ -2740,7 +2746,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
BIT_ULL(POWER_DOMAIN_INIT))
#define TGL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
- TGL_PW_2_POWER_DOMAINS | \
+ TGL_PW_3_POWER_DOMAINS | \
BIT_ULL(POWER_DOMAIN_MODESET) | \
BIT_ULL(POWER_DOMAIN_AUX_A) | \
BIT_ULL(POWER_DOMAIN_AUX_B) | \
@@ -3936,7 +3942,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
.name = "power well 3",
.domains = TGL_PW_3_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
+ .id = TGL_DISP_PW_3,
{
.hsw.regs = &hsw_power_well_regs,
.hsw.idx = ICL_PW_CTL_IDX_PW_3,
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h
index 601e000ffd0d..da64a5edae7a 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.h
+++ b/drivers/gpu/drm/i915/display/intel_display_power.h
@@ -100,6 +100,7 @@ enum i915_power_well_id {
SKL_DISP_PW_MISC_IO,
SKL_DISP_PW_1,
SKL_DISP_PW_2,
+ TGL_DISP_PW_3,
SKL_DISP_DC_OFF,
};
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index 0d8a64305464..5e00e611f077 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -39,7 +39,6 @@
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>
#include <drm/drm_vblank.h>
-#include <drm/i915_drm.h>
#include <drm/i915_mei_hdcp_interface.h>
#include <media/cec-notifier.h>
@@ -642,6 +641,14 @@ struct intel_crtc_scaler_state {
/* Flag to use the scanline counter instead of the pixel counter */
#define I915_MODE_FLAG_USE_SCANLINE_COUNTER (1<<2)
+struct intel_wm_level {
+ bool enable;
+ u32 pri_val;
+ u32 spr_val;
+ u32 cur_val;
+ u32 fbc_val;
+};
+
struct intel_pipe_wm {
struct intel_wm_level wm[5];
bool fbc_wm_enabled;
@@ -650,6 +657,14 @@ struct intel_pipe_wm {
bool sprites_scaled;
};
+struct skl_wm_level {
+ u16 min_ddb_alloc;
+ u16 plane_res_b;
+ u8 plane_res_l;
+ bool plane_en;
+ bool ignore_lines;
+};
+
struct skl_plane_wm {
struct skl_wm_level wm[8];
struct skl_wm_level uv_wm[8];
@@ -1046,6 +1061,32 @@ struct intel_crtc_state {
enum transcoder mst_master_transcoder;
};
+enum intel_pipe_crc_source {
+ INTEL_PIPE_CRC_SOURCE_NONE,
+ INTEL_PIPE_CRC_SOURCE_PLANE1,
+ INTEL_PIPE_CRC_SOURCE_PLANE2,
+ INTEL_PIPE_CRC_SOURCE_PLANE3,
+ INTEL_PIPE_CRC_SOURCE_PLANE4,
+ INTEL_PIPE_CRC_SOURCE_PLANE5,
+ INTEL_PIPE_CRC_SOURCE_PLANE6,
+ INTEL_PIPE_CRC_SOURCE_PLANE7,
+ INTEL_PIPE_CRC_SOURCE_PIPE,
+ /* TV/DP on pre-gen5/vlv can't use the pipe source. */
+ INTEL_PIPE_CRC_SOURCE_TV,
+ INTEL_PIPE_CRC_SOURCE_DP_B,
+ INTEL_PIPE_CRC_SOURCE_DP_C,
+ INTEL_PIPE_CRC_SOURCE_DP_D,
+ INTEL_PIPE_CRC_SOURCE_AUTO,
+ INTEL_PIPE_CRC_SOURCE_MAX,
+};
+
+#define INTEL_PIPE_CRC_ENTRIES_NR 128
+struct intel_pipe_crc {
+ spinlock_t lock;
+ int skipped;
+ enum intel_pipe_crc_source source;
+};
+
struct intel_crtc {
struct drm_crtc base;
enum pipe pipe;
@@ -1089,6 +1130,10 @@ struct intel_crtc {
/* per pipe DSB related info */
struct intel_dsb dsb;
+
+#ifdef CONFIG_DEBUG_FS
+ struct intel_pipe_crc pipe_crc;
+#endif
};
struct intel_plane {
@@ -1235,6 +1280,7 @@ struct intel_dp {
int max_link_rate;
/* sink or branch descriptor */
struct drm_dp_desc desc;
+ u32 edid_quirks;
struct drm_dp_aux aux;
u32 aux_busy_last_status;
u8 train_set[4];
@@ -1407,8 +1453,17 @@ vlv_pipe_to_channel(enum pipe pipe)
}
static inline struct intel_crtc *
+intel_get_first_crtc(struct drm_i915_private *dev_priv)
+{
+ return to_intel_crtc(drm_crtc_from_index(&dev_priv->drm, 0));
+}
+
+static inline struct intel_crtc *
intel_get_crtc_for_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
+ /* pipe_to_crtc_mapping may have holes when some pipes are fused off */
+ drm_WARN_ON(&dev_priv->drm,
+ !(INTEL_INFO(dev_priv)->pipe_mask & BIT(pipe)));
return dev_priv->pipe_to_crtc_mapping[pipe];
}
@@ -1598,11 +1653,15 @@ intel_crtc_has_dp_encoder(const struct intel_crtc_state *crtc_state)
(1 << INTEL_OUTPUT_DP_MST) |
(1 << INTEL_OUTPUT_EDP));
}
+
static inline void
intel_wait_for_vblank(struct drm_i915_private *dev_priv, enum pipe pipe)
{
- drm_wait_one_vblank(&dev_priv->drm, pipe);
+ struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+
+ drm_crtc_wait_one_vblank(&crtc->base);
}
+
static inline void
intel_wait_for_vblank_if_active(struct drm_i915_private *dev_priv, enum pipe pipe)
{
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 0a5a9197f8f5..0a417cd2af2b 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -40,7 +40,6 @@
#include <drm/drm_edid.h>
#include <drm/drm_hdcp.h>
#include <drm/drm_probe_helper.h>
-#include <drm/i915_drm.h>
#include "i915_debugfs.h"
#include "i915_drv.h"
@@ -2399,7 +2398,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_connector *intel_connector = intel_dp->attached_connector;
struct intel_digital_connector_state *intel_conn_state =
to_intel_digital_connector_state(conn_state);
- bool constant_n = drm_dp_has_quirk(&intel_dp->desc,
+ bool constant_n = drm_dp_has_quirk(&intel_dp->desc, 0,
DP_DPCD_QUIRK_CONSTANT_N);
int ret = 0, output_bpp;
@@ -4515,7 +4514,8 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
* it don't care about read it here and in intel_edp_init_dpcd().
*/
if (!intel_dp_is_edp(intel_dp) &&
- !drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_SINK_COUNT)) {
+ !drm_dp_has_quirk(&intel_dp->desc, 0,
+ DP_DPCD_QUIRK_NO_SINK_COUNT)) {
u8 count;
ssize_t r;
@@ -5682,6 +5682,7 @@ intel_dp_set_edid(struct intel_dp *intel_dp)
intel_dp->has_audio = drm_detect_monitor_audio(edid);
drm_dp_cec_set_edid(&intel_dp->aux, edid);
+ intel_dp->edid_quirks = drm_dp_get_edid_quirks(edid);
}
static void
@@ -5694,6 +5695,7 @@ intel_dp_unset_edid(struct intel_dp *intel_dp)
intel_connector->detect_edid = NULL;
intel_dp->has_audio = false;
+ intel_dp->edid_quirks = 0;
}
static int
@@ -6449,6 +6451,7 @@ static
int intel_dp_hdcp2_config_stream_type(struct intel_digital_port *intel_dig_port,
bool is_repeater, u8 content_type)
{
+ int ret;
struct hdcp2_dp_errata_stream_type stream_type_msg;
if (is_repeater)
@@ -6464,8 +6467,11 @@ int intel_dp_hdcp2_config_stream_type(struct intel_digital_port *intel_dig_port,
stream_type_msg.msg_id = HDCP_2_2_ERRATA_DP_STREAM_TYPE;
stream_type_msg.stream_type = content_type;
- return intel_dp_hdcp2_write_msg(intel_dig_port, &stream_type_msg,
+ ret = intel_dp_hdcp2_write_msg(intel_dig_port, &stream_type_msg,
sizeof(stream_type_msg));
+
+ return ret < 0 ? ret : 0;
}
static
@@ -7562,8 +7568,8 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
edid = drm_get_edid(connector, &intel_dp->aux.ddc);
if (edid) {
if (drm_add_edid_modes(connector, edid)) {
- drm_connector_update_edid_property(connector,
- edid);
+ drm_connector_update_edid_property(connector, edid);
+ intel_dp->edid_quirks = drm_dp_get_edid_quirks(edid);
} else {
kfree(edid);
edid = ERR_PTR(-EINVAL);
@@ -7609,9 +7615,8 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
intel_panel_setup_backlight(connector, pipe);
if (fixed_mode) {
- /* We do not know the orientation, but their might be a quirk */
drm_connector_set_panel_orientation_with_quirk(connector,
- DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
+ dev_priv->vbt.orientation,
fixed_mode->hdisplay, fixed_mode->vdisplay);
}
diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h
index 3da166054788..0c7be8ed1423 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.h
+++ b/drivers/gpu/drm/i915/display/intel_dp.h
@@ -8,8 +8,6 @@
#include <linux/types.h>
-#include <drm/i915_drm.h>
-
#include "i915_reg.h"
enum pipe;
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
index 48276237b362..3e706bb850a8 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
@@ -328,15 +328,31 @@ intel_dp_aux_display_control_capable(struct intel_connector *connector)
int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector)
{
struct intel_panel *panel = &intel_connector->panel;
- struct drm_i915_private *dev_priv = to_i915(intel_connector->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(intel_connector->encoder);
+ struct drm_device *dev = intel_connector->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
if (i915_modparams.enable_dpcd_backlight == 0 ||
- (i915_modparams.enable_dpcd_backlight == -1 &&
- dev_priv->vbt.backlight.type != INTEL_BACKLIGHT_VESA_EDP_AUX_INTERFACE))
+ !intel_dp_aux_display_control_capable(intel_connector))
return -ENODEV;
- if (!intel_dp_aux_display_control_capable(intel_connector))
+ /*
+ * A lot of machines don't properly advertise the backlight
+ * control interface to use in their VBIOS. :\
+ */
+ if (dev_priv->vbt.backlight.type !=
+ INTEL_BACKLIGHT_VESA_EDP_AUX_INTERFACE &&
+ !drm_dp_has_quirk(&intel_dp->desc, intel_dp->edid_quirks,
+ DP_QUIRK_FORCE_DPCD_BACKLIGHT)) {
+ DRM_DEV_INFO(dev->dev,
+ "Panel advertises DPCD backlight support, but "
+ "VBT disagrees. If your backlight controls "
+ "don't work try booting with "
+ "i915.enable_dpcd_backlight=1. If your machine "
+ "needs this, please file a _new_ bug report on "
+ "drm/i915, see " FDO_BUG_URL " for details.\n");
return -ENODEV;
+ }
panel->backlight.setup = intel_dp_aux_setup_backlight;
panel->backlight.enable = intel_dp_aux_enable_backlight;
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index b15404a3b1ca..e08caca658c6 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -50,7 +50,7 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
void *port = connector->port;
- bool constant_n = drm_dp_has_quirk(&intel_dp->desc,
+ bool constant_n = drm_dp_has_quirk(&intel_dp->desc, 0,
DP_DPCD_QUIRK_CONSTANT_N);
int bpp, slots = -EINVAL;
@@ -548,12 +548,41 @@ static int intel_dp_mst_get_ddc_modes(struct drm_connector *connector)
return ret;
}
+static int
+intel_dp_mst_connector_late_register(struct drm_connector *connector)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ int ret;
+
+ ret = drm_dp_mst_connector_late_register(connector,
+ intel_connector->port);
+ if (ret < 0)
+ return ret;
+
+ ret = intel_connector_register(connector);
+ if (ret < 0)
+ drm_dp_mst_connector_early_unregister(connector,
+ intel_connector->port);
+
+ return ret;
+}
+
+static void
+intel_dp_mst_connector_early_unregister(struct drm_connector *connector)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+
+ intel_connector_unregister(connector);
+ drm_dp_mst_connector_early_unregister(connector,
+ intel_connector->port);
+}
+
static const struct drm_connector_funcs intel_dp_mst_connector_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
.atomic_get_property = intel_digital_connector_atomic_get_property,
.atomic_set_property = intel_digital_connector_atomic_set_property,
- .late_register = intel_connector_register,
- .early_unregister = intel_connector_unregister,
+ .late_register = intel_dp_mst_connector_late_register,
+ .early_unregister = intel_dp_mst_connector_early_unregister,
.destroy = intel_connector_destroy,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.atomic_duplicate_state = intel_digital_connector_duplicate_state,
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
index e5bfe5245276..2d47f1f756a2 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
@@ -45,6 +45,22 @@
* commit phase.
*/
+struct intel_dpll_mgr {
+ const struct dpll_info *dpll_info;
+
+ bool (*get_dplls)(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder);
+ void (*put_dplls)(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+ void (*update_active_dpll)(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder);
+ void (*update_ref_clks)(struct drm_i915_private *i915);
+ void (*dump_hw_state)(struct drm_i915_private *dev_priv,
+ const struct intel_dpll_hw_state *hw_state);
+};
+
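Each platform selects exactly one of these managers at init time; intel_shared_dpll_init() (outside this hunk) presumably picks it with the usual platform ladder. A sketch, showing only the two managers defined nearby:

	const struct intel_dpll_mgr *dpll_mgr = NULL;

	if (HAS_DDI(dev_priv))
		dpll_mgr = &hsw_pll_mgr;	/* newer gens pick their own mgr first */
	else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
		dpll_mgr = &pch_pll_mgr;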
static void
intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
struct intel_shared_dpll_state *shared_dpll)
@@ -52,8 +68,8 @@ intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
enum intel_dpll_id i;
/* Copy shared dpll state */
- for (i = 0; i < dev_priv->num_shared_dpll; i++) {
- struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
+ for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
+ struct intel_shared_dpll *pll = &dev_priv->dpll.shared_dplls[i];
shared_dpll[i] = pll->state;
}
@@ -88,7 +104,7 @@ struct intel_shared_dpll *
intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv,
enum intel_dpll_id id)
{
- return &dev_priv->shared_dplls[id];
+ return &dev_priv->dpll.shared_dplls[id];
}
/**
@@ -103,11 +119,14 @@ enum intel_dpll_id
intel_get_shared_dpll_id(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
- if (drm_WARN_ON(&dev_priv->drm, pll < dev_priv->shared_dplls ||
- pll > &dev_priv->shared_dplls[dev_priv->num_shared_dpll]))
+ long pll_idx = pll - dev_priv->dpll.shared_dplls;
+
+ if (drm_WARN_ON(&dev_priv->drm,
+ pll_idx < 0 ||
+ pll_idx >= dev_priv->dpll.num_shared_dpll))
return -1;
- return (enum intel_dpll_id) (pll - dev_priv->shared_dplls);
+ return pll_idx;
}
/* For ILK+ */
@@ -144,7 +163,7 @@ void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state)
if (drm_WARN_ON(&dev_priv->drm, pll == NULL))
return;
- mutex_lock(&dev_priv->dpll_lock);
+ mutex_lock(&dev_priv->dpll.lock);
drm_WARN_ON(&dev_priv->drm, !pll->state.crtc_mask);
if (!pll->active_mask) {
drm_dbg(&dev_priv->drm, "setting up %s\n", pll->info->name);
@@ -153,7 +172,7 @@ void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state)
pll->info->funcs->prepare(dev_priv, pll);
}
- mutex_unlock(&dev_priv->dpll_lock);
+ mutex_unlock(&dev_priv->dpll.lock);
}
/**
@@ -173,7 +192,7 @@ void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
if (drm_WARN_ON(&dev_priv->drm, pll == NULL))
return;
- mutex_lock(&dev_priv->dpll_lock);
+ mutex_lock(&dev_priv->dpll.lock);
old_mask = pll->active_mask;
if (drm_WARN_ON(&dev_priv->drm, !(pll->state.crtc_mask & crtc_mask)) ||
@@ -199,7 +218,7 @@ void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
pll->on = true;
out:
- mutex_unlock(&dev_priv->dpll_lock);
+ mutex_unlock(&dev_priv->dpll.lock);
}
/**
@@ -222,7 +241,7 @@ void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
if (pll == NULL)
return;
- mutex_lock(&dev_priv->dpll_lock);
+ mutex_lock(&dev_priv->dpll.lock);
if (drm_WARN_ON(&dev_priv->drm, !(pll->active_mask & crtc_mask)))
goto out;
@@ -243,7 +262,7 @@ void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
pll->on = false;
out:
- mutex_unlock(&dev_priv->dpll_lock);
+ mutex_unlock(&dev_priv->dpll.lock);
}
static struct intel_shared_dpll *
@@ -262,7 +281,7 @@ intel_find_shared_dpll(struct intel_atomic_state *state,
drm_WARN_ON(&dev_priv->drm, dpll_mask & ~(BIT(I915_NUM_PLLS) - 1));
for_each_set_bit(i, &dpll_mask, I915_NUM_PLLS) {
- pll = &dev_priv->shared_dplls[i];
+ pll = &dev_priv->dpll.shared_dplls[i];
/* Only want to check enabled timings first */
if (shared_dpll[i].crtc_mask == 0) {
@@ -362,9 +381,9 @@ void intel_shared_dpll_swap_state(struct intel_atomic_state *state)
if (!state->dpll_set)
return;
- for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+ for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
struct intel_shared_dpll *pll =
- &dev_priv->shared_dplls[i];
+ &dev_priv->dpll.shared_dplls[i];
swap(pll->state, shared_dpll[i]);
}
@@ -462,7 +481,7 @@ static bool ibx_get_dpll(struct intel_atomic_state *state,
if (HAS_PCH_IBX(dev_priv)) {
/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
i = (enum intel_dpll_id) crtc->pipe;
- pll = &dev_priv->shared_dplls[i];
+ pll = &dev_priv->dpll.shared_dplls[i];
drm_dbg_kms(&dev_priv->drm,
"[CRTC:%d:%s] using pre-allocated %s\n",
@@ -506,6 +525,19 @@ static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
.get_hw_state = ibx_pch_dpll_get_hw_state,
};
+static const struct dpll_info pch_plls[] = {
+ { "PCH DPLL A", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_A, 0 },
+ { "PCH DPLL B", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_B, 0 },
+ { },
+};
+
+static const struct intel_dpll_mgr pch_pll_mgr = {
+ .dpll_info = pch_plls,
+ .get_dplls = ibx_get_dpll,
+ .put_dplls = intel_put_dpll,
+ .dump_hw_state = ibx_dump_hw_state,
+};
+
static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
@@ -818,8 +850,8 @@ hsw_ddi_calculate_wrpll(int clock /* in Hz */,
}
static struct intel_shared_dpll *
-hsw_ddi_hdmi_get_dpll(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
+hsw_ddi_wrpll_get_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
@@ -846,8 +878,47 @@ hsw_ddi_hdmi_get_dpll(struct intel_atomic_state *state,
return pll;
}
+static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv,
+ const struct intel_shared_dpll *pll)
+{
+ int refclk;
+ int n, p, r;
+ u32 wrpll = pll->state.hw_state.wrpll;
+
+ switch (wrpll & WRPLL_REF_MASK) {
+ case WRPLL_REF_SPECIAL_HSW:
+ /* Muxed-SSC for BDW, non-SSC for non-ULT HSW. */
+ if (IS_HASWELL(dev_priv) && !IS_HSW_ULT(dev_priv)) {
+ refclk = dev_priv->dpll.ref_clks.nssc;
+ break;
+ }
+ /* fall through */
+ case WRPLL_REF_PCH_SSC:
+ /*
+ * We could calculate spread here, but our checking
+ * code only cares about 5% accuracy, and spread is a max of
+ * 0.5% downspread.
+ */
+ refclk = dev_priv->dpll.ref_clks.ssc;
+ break;
+ case WRPLL_REF_LCPLL:
+ refclk = 2700000;
+ break;
+ default:
+ MISSING_CASE(wrpll);
+ return 0;
+ }
+
+ r = wrpll & WRPLL_DIVIDER_REF_MASK;
+ p = (wrpll & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT;
+ n = (wrpll & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT;
+
+ /* Convert to kHz; p and r have a fixed-point portion */
+ return (refclk * n / 10) / (p * r) * 2;
+}
+
static struct intel_shared_dpll *
-hsw_ddi_dp_get_dpll(struct intel_crtc_state *crtc_state)
+hsw_ddi_lcpll_get_dpll(struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
struct intel_shared_dpll *pll;
@@ -878,6 +949,69 @@ hsw_ddi_dp_get_dpll(struct intel_crtc_state *crtc_state)
return pll;
}
+static int hsw_ddi_lcpll_get_freq(struct drm_i915_private *i915,
+ const struct intel_shared_dpll *pll)
+{
+ int link_clock = 0;
+
+ switch (pll->info->id) {
+ case DPLL_ID_LCPLL_810:
+ link_clock = 81000;
+ break;
+ case DPLL_ID_LCPLL_1350:
+ link_clock = 135000;
+ break;
+ case DPLL_ID_LCPLL_2700:
+ link_clock = 270000;
+ break;
+ default:
+ drm_WARN(&i915->drm, 1, "bad port clock sel\n");
+ break;
+ }
+
+ return link_clock * 2;
+}
+
+static struct intel_shared_dpll *
+hsw_ddi_spll_get_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+
+ if (WARN_ON(crtc_state->port_clock / 2 != 135000))
+ return NULL;
+
+ crtc_state->dpll_hw_state.spll = SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz |
+ SPLL_REF_MUXED_SSC;
+
+ return intel_find_shared_dpll(state, crtc, &crtc_state->dpll_hw_state,
+ BIT(DPLL_ID_SPLL));
+}
+
+static int hsw_ddi_spll_get_freq(struct drm_i915_private *i915,
+ const struct intel_shared_dpll *pll)
+{
+ int link_clock = 0;
+
+ switch (pll->state.hw_state.spll & SPLL_FREQ_MASK) {
+ case SPLL_FREQ_810MHz:
+ link_clock = 81000;
+ break;
+ case SPLL_FREQ_1350MHz:
+ link_clock = 135000;
+ break;
+ case SPLL_FREQ_2700MHz:
+ link_clock = 270000;
+ break;
+ default:
+ drm_WARN(&i915->drm, 1, "bad spll freq\n");
+ break;
+ }
+
+ return link_clock * 2;
+}
+
static bool hsw_get_dpll(struct intel_atomic_state *state,
struct intel_crtc *crtc,
struct intel_encoder *encoder)
@@ -889,23 +1023,14 @@ static bool hsw_get_dpll(struct intel_atomic_state *state,
memset(&crtc_state->dpll_hw_state, 0,
sizeof(crtc_state->dpll_hw_state));
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
- pll = hsw_ddi_hdmi_get_dpll(state, crtc);
- } else if (intel_crtc_has_dp_encoder(crtc_state)) {
- pll = hsw_ddi_dp_get_dpll(crtc_state);
- } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
- if (WARN_ON(crtc_state->port_clock / 2 != 135000))
- return false;
-
- crtc_state->dpll_hw_state.spll =
- SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz | SPLL_REF_MUXED_SSC;
-
- pll = intel_find_shared_dpll(state, crtc,
- &crtc_state->dpll_hw_state,
- BIT(DPLL_ID_SPLL));
- } else {
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ pll = hsw_ddi_wrpll_get_dpll(state, crtc);
+ else if (intel_crtc_has_dp_encoder(crtc_state))
+ pll = hsw_ddi_lcpll_get_dpll(crtc_state);
+ else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
+ pll = hsw_ddi_spll_get_dpll(state, crtc);
+ else
return false;
- }
if (!pll)
return false;
@@ -918,6 +1043,16 @@ static bool hsw_get_dpll(struct intel_atomic_state *state,
return true;
}
+static void hsw_update_dpll_ref_clks(struct drm_i915_private *i915)
+{
+ i915->dpll.ref_clks.ssc = 135000;
+ /* Non-SSC is only used on non-ULT HSW. */
+ if (intel_de_read(i915, FUSE_STRAP3) & HSW_REF_CLK_SELECT)
+ i915->dpll.ref_clks.nssc = 24000;
+ else
+ i915->dpll.ref_clks.nssc = 135000;
+}
+
static void hsw_dump_hw_state(struct drm_i915_private *dev_priv,
const struct intel_dpll_hw_state *hw_state)
{
@@ -929,12 +1064,14 @@ static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
.enable = hsw_ddi_wrpll_enable,
.disable = hsw_ddi_wrpll_disable,
.get_hw_state = hsw_ddi_wrpll_get_hw_state,
+ .get_freq = hsw_ddi_wrpll_get_freq,
};
static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = {
.enable = hsw_ddi_spll_enable,
.disable = hsw_ddi_spll_disable,
.get_hw_state = hsw_ddi_spll_get_hw_state,
+ .get_freq = hsw_ddi_spll_get_freq,
};
static void hsw_ddi_lcpll_enable(struct drm_i915_private *dev_priv,
@@ -958,6 +1095,25 @@ static const struct intel_shared_dpll_funcs hsw_ddi_lcpll_funcs = {
.enable = hsw_ddi_lcpll_enable,
.disable = hsw_ddi_lcpll_disable,
.get_hw_state = hsw_ddi_lcpll_get_hw_state,
+ .get_freq = hsw_ddi_lcpll_get_freq,
+};
+
+static const struct dpll_info hsw_plls[] = {
+ { "WRPLL 1", &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL1, 0 },
+ { "WRPLL 2", &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL2, 0 },
+ { "SPLL", &hsw_ddi_spll_funcs, DPLL_ID_SPLL, 0 },
+ { "LCPLL 810", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_810, INTEL_DPLL_ALWAYS_ON },
+ { "LCPLL 1350", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_1350, INTEL_DPLL_ALWAYS_ON },
+ { "LCPLL 2700", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_2700, INTEL_DPLL_ALWAYS_ON },
+ { },
+};
+
+static const struct intel_dpll_mgr hsw_pll_mgr = {
+ .dpll_info = hsw_plls,
+ .get_dplls = hsw_get_dpll,
+ .put_dplls = intel_put_dpll,
+ .update_ref_clks = hsw_update_dpll_ref_clks,
+ .dump_hw_state = hsw_dump_hw_state,
};
struct skl_dpll_regs {
@@ -1230,6 +1386,7 @@ struct skl_wrpll_params {
static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
u64 afe_clock,
+ int ref_clock,
u64 central_freq,
u32 p0, u32 p1, u32 p2)
{
@@ -1289,14 +1446,15 @@ static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
* Intermediate values are in Hz.
* Divide by MHz to match bspec
*/
- params->dco_integer = div_u64(dco_freq, 24 * MHz(1));
+ params->dco_integer = div_u64(dco_freq, ref_clock * KHz(1));
params->dco_fraction =
- div_u64((div_u64(dco_freq, 24) -
+ div_u64((div_u64(dco_freq, ref_clock / KHz(1)) -
params->dco_integer * MHz(1)) * 0x8000, MHz(1));
}
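A worked example of the populate direction, with made-up but self-consistent numbers: dco_freq = 8,100,000,000 Hz and ref_clock = 24000 kHz give

	/*
	 * dco_integer  = 8100000000 / (24000 * 1000) = 337	(337.5 truncated)
	 * dco_fraction = (8100000000 / 24 - 337 * 1000000) * 0x8000 / 1000000
	 *              = 500000 * 0x8000 / 1000000 = 16384 = 0x4000
	 */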
static bool
skl_ddi_calculate_wrpll(int clock /* in Hz */,
+ int ref_clock,
struct skl_wrpll_params *wrpll_params)
{
u64 afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
@@ -1362,14 +1520,15 @@ skip_remaining_dividers:
*/
p0 = p1 = p2 = 0;
skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
- skl_wrpll_params_populate(wrpll_params, afe_clock, ctx.central_freq,
- p0, p1, p2);
+ skl_wrpll_params_populate(wrpll_params, afe_clock, ref_clock,
+ ctx.central_freq, p0, p1, p2);
return true;
}
static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
u32 ctrl1, cfgcr1, cfgcr2;
struct skl_wrpll_params wrpll_params = { 0, };
@@ -1382,6 +1541,7 @@ static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
if (!skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000,
+ i915->dpll.ref_clks.nssc,
&wrpll_params))
return false;
@@ -1404,6 +1564,64 @@ static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
return true;
}
+static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
+ const struct intel_shared_dpll *pll)
+{
+ const struct intel_dpll_hw_state *pll_state = &pll->state.hw_state;
+ int ref_clock = i915->dpll.ref_clks.nssc;
+ u32 p0, p1, p2, dco_freq;
+
+ p0 = pll_state->cfgcr2 & DPLL_CFGCR2_PDIV_MASK;
+ p2 = pll_state->cfgcr2 & DPLL_CFGCR2_KDIV_MASK;
+
+ if (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_MODE(1))
+ p1 = (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8;
+ else
+ p1 = 1;
+
+ switch (p0) {
+ case DPLL_CFGCR2_PDIV_1:
+ p0 = 1;
+ break;
+ case DPLL_CFGCR2_PDIV_2:
+ p0 = 2;
+ break;
+ case DPLL_CFGCR2_PDIV_3:
+ p0 = 3;
+ break;
+ case DPLL_CFGCR2_PDIV_7:
+ p0 = 7;
+ break;
+ }
+
+ switch (p2) {
+ case DPLL_CFGCR2_KDIV_5:
+ p2 = 5;
+ break;
+ case DPLL_CFGCR2_KDIV_2:
+ p2 = 2;
+ break;
+ case DPLL_CFGCR2_KDIV_3:
+ p2 = 3;
+ break;
+ case DPLL_CFGCR2_KDIV_1:
+ p2 = 1;
+ break;
+ }
+
+ dco_freq = (pll_state->cfgcr1 & DPLL_CFGCR1_DCO_INTEGER_MASK) *
+ ref_clock;
+
+ dco_freq += ((pll_state->cfgcr1 & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) *
+ ref_clock / 0x8000;
+
+ if (WARN_ON(p0 == 0 || p1 == 0 || p2 == 0))
+ return 0;
+
+ return dco_freq / (p0 * p1 * p2 * 5);
+}
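Feeding the same made-up register values back through the readout above recovers the frequency:

	/*
	 * dco_freq = 337 * 24000 + (0x4000 * 24000) / 0x8000
	 *          = 8088000 + 12000 = 8100000 kHz
	 * and with p0 = 2, p1 = 1, p2 = 5:
	 * 8100000 / (2 * 1 * 5 * 5) = 162000 kHz port clock
	 */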
+
static bool
skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
{
@@ -1444,6 +1662,40 @@ skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
return true;
}
+static int skl_ddi_lcpll_get_freq(struct drm_i915_private *i915,
+ const struct intel_shared_dpll *pll)
+{
+ int link_clock = 0;
+
+ switch ((pll->state.hw_state.ctrl1 &
+ DPLL_CTRL1_LINK_RATE_MASK(0)) >>
+ DPLL_CTRL1_LINK_RATE_SHIFT(0)) {
+ case DPLL_CTRL1_LINK_RATE_810:
+ link_clock = 81000;
+ break;
+ case DPLL_CTRL1_LINK_RATE_1080:
+ link_clock = 108000;
+ break;
+ case DPLL_CTRL1_LINK_RATE_1350:
+ link_clock = 135000;
+ break;
+ case DPLL_CTRL1_LINK_RATE_1620:
+ link_clock = 162000;
+ break;
+ case DPLL_CTRL1_LINK_RATE_2160:
+ link_clock = 216000;
+ break;
+ case DPLL_CTRL1_LINK_RATE_2700:
+ link_clock = 270000;
+ break;
+ default:
+ drm_WARN(&i915->drm, 1, "Unsupported link rate\n");
+ break;
+ }
+
+ return link_clock * 2;
+}
+
static bool skl_get_dpll(struct intel_atomic_state *state,
struct intel_crtc *crtc,
struct intel_encoder *encoder)
@@ -1493,6 +1745,25 @@ static bool skl_get_dpll(struct intel_atomic_state *state,
return true;
}
+static int skl_ddi_pll_get_freq(struct drm_i915_private *i915,
+ const struct intel_shared_dpll *pll)
+{
+ /*
+ * ctrl1 register is already shifted for each pll, just use 0 to get
+ * the internal shift for each field
+ */
+ if (pll->state.hw_state.ctrl1 & DPLL_CTRL1_HDMI_MODE(0))
+ return skl_ddi_wrpll_get_freq(i915, pll);
+ else
+ return skl_ddi_lcpll_get_freq(i915, pll);
+}
+
+static void skl_update_dpll_ref_clks(struct drm_i915_private *i915)
+{
+ /* No SSC ref */
+ i915->dpll.ref_clks.nssc = i915->cdclk.hw.ref;
+}
+
static void skl_dump_hw_state(struct drm_i915_private *dev_priv,
const struct intel_dpll_hw_state *hw_state)
{
@@ -1507,12 +1778,30 @@ static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = {
.enable = skl_ddi_pll_enable,
.disable = skl_ddi_pll_disable,
.get_hw_state = skl_ddi_pll_get_hw_state,
+ .get_freq = skl_ddi_pll_get_freq,
};
static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
.enable = skl_ddi_dpll0_enable,
.disable = skl_ddi_dpll0_disable,
.get_hw_state = skl_ddi_dpll0_get_hw_state,
+ .get_freq = skl_ddi_pll_get_freq,
+};
+
+static const struct dpll_info skl_plls[] = {
+ { "DPLL 0", &skl_ddi_dpll0_funcs, DPLL_ID_SKL_DPLL0, INTEL_DPLL_ALWAYS_ON },
+ { "DPLL 1", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
+ { "DPLL 2", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
+ { "DPLL 3", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL3, 0 },
+ { },
+};
+
+static const struct intel_dpll_mgr skl_pll_mgr = {
+ .dpll_info = skl_plls,
+ .get_dplls = skl_get_dpll,
+ .put_dplls = intel_put_dpll,
+ .update_ref_clks = skl_update_dpll_ref_clks,
+ .dump_hw_state = skl_dump_hw_state,
};
static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
@@ -1903,6 +2192,23 @@ bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
}
+static int bxt_ddi_pll_get_freq(struct drm_i915_private *i915,
+ const struct intel_shared_dpll *pll)
+{
+ const struct intel_dpll_hw_state *pll_state = &pll->state.hw_state;
+ struct dpll clock;
+
+ clock.m1 = 2;
+ clock.m2 = (pll_state->pll0 & PORT_PLL_M2_MASK) << 22;
+ if (pll_state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
+ clock.m2 |= pll_state->pll2 & PORT_PLL_M2_FRAC_MASK;
+ clock.n = (pll_state->pll1 & PORT_PLL_N_MASK) >> PORT_PLL_N_SHIFT;
+ clock.p1 = (pll_state->ebb0 & PORT_PLL_P1_MASK) >> PORT_PLL_P1_SHIFT;
+ clock.p2 = (pll_state->ebb0 & PORT_PLL_P2_MASK) >> PORT_PLL_P2_SHIFT;
+
+ return chv_calc_dpll_params(i915->dpll.ref_clks.nssc, &clock);
+}
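A sanity check with BXT's canned DP dividers (m1 = 2, m2 = 27.0 in 22.22 fixed point, n = 1, p1 = 4, p2 = 1 — values assumed from the driver's DP tables), and assuming chv_calc_dpll_params() folds in the final /5 as it does on CHV:

	/*
	 * vco = 100000 * (2 * 27) = 5400000 kHz
	 * dot = vco / (4 * 1)     = 1350000 kHz
	 * dot / 5 = 270000 kHz, i.e. the DP 2.7 GHz port clock
	 */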
+
static bool bxt_get_dpll(struct intel_atomic_state *state,
struct intel_crtc *crtc,
struct intel_encoder *encoder)
@@ -1936,6 +2242,13 @@ static bool bxt_get_dpll(struct intel_atomic_state *state,
return true;
}
+static void bxt_update_dpll_ref_clks(struct drm_i915_private *i915)
+{
+ i915->dpll.ref_clks.ssc = 100000;
+ i915->dpll.ref_clks.nssc = 100000;
+ /* DSI non-SSC ref is 19.2 MHz */
+}
+
static void bxt_dump_hw_state(struct drm_i915_private *dev_priv,
const struct intel_dpll_hw_state *hw_state)
{
@@ -1959,66 +2272,7 @@ static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
.enable = bxt_ddi_pll_enable,
.disable = bxt_ddi_pll_disable,
.get_hw_state = bxt_ddi_pll_get_hw_state,
-};
-
-struct intel_dpll_mgr {
- const struct dpll_info *dpll_info;
-
- bool (*get_dplls)(struct intel_atomic_state *state,
- struct intel_crtc *crtc,
- struct intel_encoder *encoder);
- void (*put_dplls)(struct intel_atomic_state *state,
- struct intel_crtc *crtc);
- void (*update_active_dpll)(struct intel_atomic_state *state,
- struct intel_crtc *crtc,
- struct intel_encoder *encoder);
- void (*dump_hw_state)(struct drm_i915_private *dev_priv,
- const struct intel_dpll_hw_state *hw_state);
-};
-
-static const struct dpll_info pch_plls[] = {
- { "PCH DPLL A", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_A, 0 },
- { "PCH DPLL B", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_B, 0 },
- { },
-};
-
-static const struct intel_dpll_mgr pch_pll_mgr = {
- .dpll_info = pch_plls,
- .get_dplls = ibx_get_dpll,
- .put_dplls = intel_put_dpll,
- .dump_hw_state = ibx_dump_hw_state,
-};
-
-static const struct dpll_info hsw_plls[] = {
- { "WRPLL 1", &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL1, 0 },
- { "WRPLL 2", &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL2, 0 },
- { "SPLL", &hsw_ddi_spll_funcs, DPLL_ID_SPLL, 0 },
- { "LCPLL 810", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_810, INTEL_DPLL_ALWAYS_ON },
- { "LCPLL 1350", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_1350, INTEL_DPLL_ALWAYS_ON },
- { "LCPLL 2700", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_2700, INTEL_DPLL_ALWAYS_ON },
- { },
-};
-
-static const struct intel_dpll_mgr hsw_pll_mgr = {
- .dpll_info = hsw_plls,
- .get_dplls = hsw_get_dpll,
- .put_dplls = intel_put_dpll,
- .dump_hw_state = hsw_dump_hw_state,
-};
-
-static const struct dpll_info skl_plls[] = {
- { "DPLL 0", &skl_ddi_dpll0_funcs, DPLL_ID_SKL_DPLL0, INTEL_DPLL_ALWAYS_ON },
- { "DPLL 1", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
- { "DPLL 2", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
- { "DPLL 3", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL3, 0 },
- { },
-};
-
-static const struct intel_dpll_mgr skl_pll_mgr = {
- .dpll_info = skl_plls,
- .get_dplls = skl_get_dpll,
- .put_dplls = intel_put_dpll,
- .dump_hw_state = skl_dump_hw_state,
+ .get_freq = bxt_ddi_pll_get_freq,
};
static const struct dpll_info bxt_plls[] = {
@@ -2032,6 +2286,7 @@ static const struct intel_dpll_mgr bxt_pll_mgr = {
.dpll_info = bxt_plls,
.get_dplls = bxt_get_dpll,
.put_dplls = intel_put_dpll,
+ .update_ref_clks = bxt_update_dpll_ref_clks,
.dump_hw_state = bxt_dump_hw_state,
};
@@ -2275,27 +2530,12 @@ static void cnl_wrpll_params_populate(struct skl_wrpll_params *params,
params->dco_fraction = dco & 0x7fff;
}
-int cnl_hdmi_pll_ref_clock(struct drm_i915_private *dev_priv)
-{
- int ref_clock = dev_priv->cdclk.hw.ref;
-
- /*
- * For ICL+, the spec states: if reference frequency is 38.4,
- * use 19.2 because the DPLL automatically divides that by 2.
- */
- if (INTEL_GEN(dev_priv) >= 11 && ref_clock == 38400)
- ref_clock = 19200;
-
- return ref_clock;
-}
-
static bool
-cnl_ddi_calculate_wrpll(struct intel_crtc_state *crtc_state,
- struct skl_wrpll_params *wrpll_params)
+__cnl_ddi_calculate_wrpll(struct intel_crtc_state *crtc_state,
+ struct skl_wrpll_params *wrpll_params,
+ int ref_clock)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
u32 afe_clock = crtc_state->port_clock * 5;
- u32 ref_clock;
u32 dco_min = 7998000;
u32 dco_max = 10000000;
u32 dco_mid = (dco_min + dco_max) / 2;
@@ -2327,15 +2567,22 @@ cnl_ddi_calculate_wrpll(struct intel_crtc_state *crtc_state,
return false;
cnl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv);
-
- ref_clock = cnl_hdmi_pll_ref_clock(dev_priv);
-
cnl_wrpll_params_populate(wrpll_params, best_dco, ref_clock,
pdiv, qdiv, kdiv);
return true;
}
+static bool
+cnl_ddi_calculate_wrpll(struct intel_crtc_state *crtc_state,
+ struct skl_wrpll_params *wrpll_params)
+{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+
+ return __cnl_ddi_calculate_wrpll(crtc_state, wrpll_params,
+ i915->dpll.ref_clks.nssc);
+}
+
static bool cnl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
{
u32 cfgcr0, cfgcr1;
@@ -2363,6 +2610,68 @@ static bool cnl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
return true;
}
+static int __cnl_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv,
+ const struct intel_shared_dpll *pll,
+ int ref_clock)
+{
+ const struct intel_dpll_hw_state *pll_state = &pll->state.hw_state;
+ u32 p0, p1, p2, dco_freq;
+
+ p0 = pll_state->cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
+ p2 = pll_state->cfgcr1 & DPLL_CFGCR1_KDIV_MASK;
+
+ if (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1))
+ p1 = (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
+ DPLL_CFGCR1_QDIV_RATIO_SHIFT;
+ else
+ p1 = 1;
+
+ switch (p0) {
+ case DPLL_CFGCR1_PDIV_2:
+ p0 = 2;
+ break;
+ case DPLL_CFGCR1_PDIV_3:
+ p0 = 3;
+ break;
+ case DPLL_CFGCR1_PDIV_5:
+ p0 = 5;
+ break;
+ case DPLL_CFGCR1_PDIV_7:
+ p0 = 7;
+ break;
+ }
+
+ switch (p2) {
+ case DPLL_CFGCR1_KDIV_1:
+ p2 = 1;
+ break;
+ case DPLL_CFGCR1_KDIV_2:
+ p2 = 2;
+ break;
+ case DPLL_CFGCR1_KDIV_3:
+ p2 = 3;
+ break;
+ }
+
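+ /*
+ * DCO frequency = ref * (DCO integer + DCO fraction / 2^15);
+ * the port clock is DCO / (p0 * p1 * p2 * 5).
+ */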
+ dco_freq = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) *
+ ref_clock;
+
+ dco_freq += (((pll_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
+ DPLL_CFGCR0_DCO_FRACTION_SHIFT) * ref_clock) / 0x8000;
+
+ if (drm_WARN_ON(&dev_priv->drm, p0 == 0 || p1 == 0 || p2 == 0))
+ return 0;
+
+ return dco_freq / (p0 * p1 * p2 * 5);
+}
+
+static int cnl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
+ const struct intel_shared_dpll *pll)
+{
+ return __cnl_ddi_wrpll_get_freq(i915, pll, i915->dpll.ref_clks.nssc);
+}
+
static bool
cnl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
{
@@ -2408,6 +2717,44 @@ cnl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
return true;
}
+static int cnl_ddi_lcpll_get_freq(struct drm_i915_private *i915,
+ const struct intel_shared_dpll *pll)
+{
+ int link_clock = 0;
+
+ switch (pll->state.hw_state.cfgcr0 & DPLL_CFGCR0_LINK_RATE_MASK) {
+ case DPLL_CFGCR0_LINK_RATE_810:
+ link_clock = 81000;
+ break;
+ case DPLL_CFGCR0_LINK_RATE_1080:
+ link_clock = 108000;
+ break;
+ case DPLL_CFGCR0_LINK_RATE_1350:
+ link_clock = 135000;
+ break;
+ case DPLL_CFGCR0_LINK_RATE_1620:
+ link_clock = 162000;
+ break;
+ case DPLL_CFGCR0_LINK_RATE_2160:
+ link_clock = 216000;
+ break;
+ case DPLL_CFGCR0_LINK_RATE_2700:
+ link_clock = 270000;
+ break;
+ case DPLL_CFGCR0_LINK_RATE_3240:
+ link_clock = 324000;
+ break;
+ case DPLL_CFGCR0_LINK_RATE_4050:
+ link_clock = 405000;
+ break;
+ default:
+ drm_WARN(&i915->drm, 1, "Unsupported link rate\n");
+ break;
+ }
+
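+ /* the rates above are half the port clock, hence the doubling */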
+ return link_clock * 2;
+}
+
static bool cnl_get_dpll(struct intel_atomic_state *state,
struct intel_crtc *crtc,
struct intel_encoder *encoder)
@@ -2457,6 +2804,21 @@ static bool cnl_get_dpll(struct intel_atomic_state *state,
return true;
}
+static int cnl_ddi_pll_get_freq(struct drm_i915_private *i915,
+ const struct intel_shared_dpll *pll)
+{
+ if (pll->state.hw_state.cfgcr0 & DPLL_CFGCR0_HDMI_MODE)
+ return cnl_ddi_wrpll_get_freq(i915, pll);
+ else
+ return cnl_ddi_lcpll_get_freq(i915, pll);
+}
+
+static void cnl_update_dpll_ref_clks(struct drm_i915_private *i915)
+{
+ /* No SSC reference */
+ i915->dpll.ref_clks.nssc = i915->cdclk.hw.ref;
+}
+
static void cnl_dump_hw_state(struct drm_i915_private *dev_priv,
const struct intel_dpll_hw_state *hw_state)
{
@@ -2470,6 +2832,7 @@ static const struct intel_shared_dpll_funcs cnl_ddi_pll_funcs = {
.enable = cnl_ddi_pll_enable,
.disable = cnl_ddi_pll_disable,
.get_hw_state = cnl_ddi_pll_get_hw_state,
+ .get_freq = cnl_ddi_pll_get_freq,
};
static const struct dpll_info cnl_plls[] = {
@@ -2483,6 +2846,7 @@ static const struct intel_dpll_mgr cnl_pll_mgr = {
.dpll_info = cnl_plls,
.get_dplls = cnl_get_dpll,
.put_dplls = intel_put_dpll,
+ .update_ref_clks = cnl_update_dpll_ref_clks,
.dump_hw_state = cnl_dump_hw_state,
};
@@ -2578,7 +2942,7 @@ static bool icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
const struct icl_combo_pll_params *params =
- dev_priv->cdclk.hw.ref == 24000 ?
+ dev_priv->dpll.ref_clks.nssc == 24000 ?
icl_dp_combo_pll_24MHz_values :
icl_dp_combo_pll_19_2MHz_values;
int clock = crtc_state->port_clock;
@@ -2601,9 +2965,9 @@ static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
if (INTEL_GEN(dev_priv) >= 12) {
- switch (dev_priv->cdclk.hw.ref) {
+ switch (dev_priv->dpll.ref_clks.nssc) {
default:
- MISSING_CASE(dev_priv->cdclk.hw.ref);
+ MISSING_CASE(dev_priv->dpll.ref_clks.nssc);
/* fall-through */
case 19200:
case 38400:
@@ -2614,9 +2978,9 @@ static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
break;
}
} else {
- switch (dev_priv->cdclk.hw.ref) {
+ switch (dev_priv->dpll.ref_clks.nssc) {
default:
- MISSING_CASE(dev_priv->cdclk.hw.ref);
+ MISSING_CASE(dev_priv->dpll.ref_clks.nssc);
/* fall-through */
case 19200:
case 38400:
@@ -2631,6 +2995,49 @@ static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
return true;
}
+static int icl_ddi_tbt_pll_get_freq(struct drm_i915_private *i915,
+ const struct intel_shared_dpll *pll)
+{
+ /*
+ * The PLL outputs multiple frequencies at the same time; the selection
+ * is made at the DDI clock mux level.
+ */
+ drm_WARN_ON(&i915->drm, 1);
+
+ return 0;
+}
+
+static int icl_wrpll_ref_clock(struct drm_i915_private *i915)
+{
+ int ref_clock = i915->dpll.ref_clks.nssc;
+
+ /*
+ * For ICL+, the spec states: if reference frequency is 38.4,
+ * use 19.2 because the DPLL automatically divides that by 2.
+ */
+ if (ref_clock == 38400)
+ ref_clock = 19200;
+
+ return ref_clock;
+}
+
+static bool
+icl_calc_wrpll(struct intel_crtc_state *crtc_state,
+ struct skl_wrpll_params *wrpll_params)
+{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+
+ return __cnl_ddi_calculate_wrpll(crtc_state, wrpll_params,
+ icl_wrpll_ref_clock(i915));
+}
+
+static int icl_ddi_combo_pll_get_freq(struct drm_i915_private *i915,
+ const struct intel_shared_dpll *pll)
+{
+ return __cnl_ddi_wrpll_get_freq(i915, pll,
+ icl_wrpll_ref_clock(i915));
+}
+
static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state,
struct intel_encoder *encoder,
struct intel_dpll_hw_state *pll_state)
@@ -2645,7 +3052,7 @@ static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state,
ret = icl_calc_tbt_pll(crtc_state, &pll_params);
else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
- ret = cnl_ddi_calculate_wrpll(crtc_state, &pll_params);
+ ret = icl_calc_wrpll(crtc_state, &pll_params);
else
ret = icl_calc_dp_combo_pll(crtc_state, &pll_params);
@@ -2768,7 +3175,7 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
struct intel_dpll_hw_state *pll_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- int refclk_khz = dev_priv->cdclk.hw.ref;
+ int refclk_khz = dev_priv->dpll.ref_clks.nssc;
int clock = crtc_state->port_clock;
u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
u32 iref_ndiv, iref_trim, iref_pulse_w;
@@ -2969,6 +3376,78 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
return true;
}
+static int icl_ddi_mg_pll_get_freq(struct drm_i915_private *dev_priv,
+ const struct intel_shared_dpll *pll)
+{
+ const struct intel_dpll_hw_state *pll_state = &pll->state.hw_state;
+ u32 m1, m2_int, m2_frac, div1, div2, ref_clock;
+ u64 tmp;
+
+ ref_clock = dev_priv->dpll.ref_clks.nssc;
+
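+ /* gen12+ uses the Dekel (DKL) PHY register layout, gen11 the MG PHY layout */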
+ if (INTEL_GEN(dev_priv) >= 12) {
+ m1 = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBPREDIV_MASK;
+ m1 = m1 >> DKL_PLL_DIV0_FBPREDIV_SHIFT;
+ m2_int = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBDIV_INT_MASK;
+
+ if (pll_state->mg_pll_bias & DKL_PLL_BIAS_FRAC_EN_H) {
+ m2_frac = pll_state->mg_pll_bias &
+ DKL_PLL_BIAS_FBDIV_FRAC_MASK;
+ m2_frac = m2_frac >> DKL_PLL_BIAS_FBDIV_SHIFT;
+ } else {
+ m2_frac = 0;
+ }
+ } else {
+ m1 = pll_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK;
+ m2_int = pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;
+
+ if (pll_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) {
+ m2_frac = pll_state->mg_pll_div0 &
+ MG_PLL_DIV0_FBDIV_FRAC_MASK;
+ m2_frac = m2_frac >> MG_PLL_DIV0_FBDIV_FRAC_SHIFT;
+ } else {
+ m2_frac = 0;
+ }
+ }
+
+ switch (pll_state->mg_clktop2_hsclkctl &
+ MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) {
+ case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2:
+ div1 = 2;
+ break;
+ case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3:
+ div1 = 3;
+ break;
+ case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5:
+ div1 = 5;
+ break;
+ case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7:
+ div1 = 7;
+ break;
+ default:
+ MISSING_CASE(pll_state->mg_clktop2_hsclkctl);
+ return 0;
+ }
+
+ div2 = (pll_state->mg_clktop2_hsclkctl &
+ MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK) >>
+ MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT;
+
+ /* a div2 value of 0 is the same as 1, i.e. no divider */
+ if (div2 == 0)
+ div2 = 1;
+
+ /*
+ * Adjust the original formula to delay the division by 2^22 in order to
+ * minimize possible rounding errors.
+ */
+ tmp = (u64)m1 * m2_int * ref_clock +
+ (((u64)m1 * m2_frac * ref_clock) >> 22);
+ tmp = div_u64(tmp, 5 * div1 * div2);
+
+ return tmp;
+}
+
/**
* icl_set_active_port_dpll - select the active port DPLL for a given CRTC
* @crtc_state: state for the CRTC to select the DPLL for
@@ -3201,7 +3680,7 @@ static bool mg_pll_get_hw_state(struct drm_i915_private *dev_priv,
hw_state->mg_pll_tdc_coldst_bias =
intel_de_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
- if (dev_priv->cdclk.hw.ref == 38400) {
+ if (dev_priv->dpll.ref_clks.nssc == 38400) {
hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
hw_state->mg_pll_bias_mask = 0;
} else {
@@ -3682,6 +4161,12 @@ static void mg_pll_disable(struct drm_i915_private *dev_priv,
icl_pll_disable(dev_priv, pll, enable_reg);
}
+static void icl_update_dpll_ref_clks(struct drm_i915_private *i915)
+{
+ /* No SSC ref */
+ i915->dpll.ref_clks.nssc = i915->cdclk.hw.ref;
+}
+
static void icl_dump_hw_state(struct drm_i915_private *dev_priv,
const struct intel_dpll_hw_state *hw_state)
{
@@ -3709,18 +4194,21 @@ static const struct intel_shared_dpll_funcs combo_pll_funcs = {
.enable = combo_pll_enable,
.disable = combo_pll_disable,
.get_hw_state = combo_pll_get_hw_state,
+ .get_freq = icl_ddi_combo_pll_get_freq,
};
static const struct intel_shared_dpll_funcs tbt_pll_funcs = {
.enable = tbt_pll_enable,
.disable = tbt_pll_disable,
.get_hw_state = tbt_pll_get_hw_state,
+ .get_freq = icl_ddi_tbt_pll_get_freq,
};
static const struct intel_shared_dpll_funcs mg_pll_funcs = {
.enable = mg_pll_enable,
.disable = mg_pll_disable,
.get_hw_state = mg_pll_get_hw_state,
+ .get_freq = icl_ddi_mg_pll_get_freq,
};
static const struct dpll_info icl_plls[] = {
@@ -3739,6 +4227,7 @@ static const struct intel_dpll_mgr icl_pll_mgr = {
.get_dplls = icl_get_dplls,
.put_dplls = icl_put_dplls,
.update_active_dpll = icl_update_active_dpll,
+ .update_ref_clks = icl_update_dpll_ref_clks,
.dump_hw_state = icl_dump_hw_state,
};
@@ -3753,6 +4242,7 @@ static const struct intel_dpll_mgr ehl_pll_mgr = {
.dpll_info = ehl_plls,
.get_dplls = icl_get_dplls,
.put_dplls = icl_put_dplls,
+ .update_ref_clks = icl_update_dpll_ref_clks,
.dump_hw_state = icl_dump_hw_state,
};
@@ -3760,6 +4250,7 @@ static const struct intel_shared_dpll_funcs dkl_pll_funcs = {
.enable = mg_pll_enable,
.disable = mg_pll_disable,
.get_hw_state = dkl_pll_get_hw_state,
+ .get_freq = icl_ddi_mg_pll_get_freq,
};
static const struct dpll_info tgl_plls[] = {
@@ -3780,6 +4271,7 @@ static const struct intel_dpll_mgr tgl_pll_mgr = {
.get_dplls = icl_get_dplls,
.put_dplls = icl_put_dplls,
.update_active_dpll = icl_update_active_dpll,
+ .update_ref_clks = icl_update_dpll_ref_clks,
.dump_hw_state = icl_dump_hw_state,
};
@@ -3814,7 +4306,7 @@ void intel_shared_dpll_init(struct drm_device *dev)
dpll_mgr = &pch_pll_mgr;
if (!dpll_mgr) {
- dev_priv->num_shared_dpll = 0;
+ dev_priv->dpll.num_shared_dpll = 0;
return;
}
@@ -3822,14 +4314,14 @@ void intel_shared_dpll_init(struct drm_device *dev)
for (i = 0; dpll_info[i].name; i++) {
drm_WARN_ON(dev, i != dpll_info[i].id);
- dev_priv->shared_dplls[i].info = &dpll_info[i];
+ dev_priv->dpll.shared_dplls[i].info = &dpll_info[i];
}
- dev_priv->dpll_mgr = dpll_mgr;
- dev_priv->num_shared_dpll = i;
- mutex_init(&dev_priv->dpll_lock);
+ dev_priv->dpll.mgr = dpll_mgr;
+ dev_priv->dpll.num_shared_dpll = i;
+ mutex_init(&dev_priv->dpll.lock);
- BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
+ BUG_ON(dev_priv->dpll.num_shared_dpll > I915_NUM_PLLS);
}
/**
@@ -3856,7 +4348,7 @@ bool intel_reserve_shared_dplls(struct intel_atomic_state *state,
struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr;
+ const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;
if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
return false;
@@ -3879,7 +4371,7 @@ void intel_release_shared_dplls(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr;
+ const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;
/*
* FIXME: this function is called for every platform having a
@@ -3908,7 +4400,7 @@ void intel_update_active_dpll(struct intel_atomic_state *state,
struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr;
+ const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;
if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
return;
@@ -3917,6 +4409,84 @@ void intel_update_active_dpll(struct intel_atomic_state *state,
}
/**
+ * intel_dpll_get_freq - calculate the DPLL's output frequency
+ * @i915: i915 device
+ * @pll: DPLL for which to calculate the output frequency
+ *
+ * Return the output frequency (in kHz) corresponding to @pll's current
+ * state, or 0 if the PLL does not implement the get_freq hook.
+ */
+int intel_dpll_get_freq(struct drm_i915_private *i915,
+ const struct intel_shared_dpll *pll)
+{
+ if (drm_WARN_ON(&i915->drm, !pll->info->funcs->get_freq))
+ return 0;
+
+ return pll->info->funcs->get_freq(i915, pll);
+}
+
+static void readout_dpll_hw_state(struct drm_i915_private *i915,
+ struct intel_shared_dpll *pll)
+{
+ struct intel_crtc *crtc;
+
+ pll->on = pll->info->funcs->get_hw_state(i915, pll,
+ &pll->state.hw_state);
+
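+ /* an enabled EHL DPLL4 holds a reference on the DC-off power domain */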
+ if (IS_ELKHARTLAKE(i915) && pll->on &&
+ pll->info->id == DPLL_ID_EHL_DPLL4) {
+ pll->wakeref = intel_display_power_get(i915,
+ POWER_DOMAIN_DPLL_DC_OFF);
+ }
+
+ pll->state.crtc_mask = 0;
+ for_each_intel_crtc(&i915->drm, crtc) {
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+
+ if (crtc_state->hw.active && crtc_state->shared_dpll == pll)
+ pll->state.crtc_mask |= 1 << crtc->pipe;
+ }
+ pll->active_mask = pll->state.crtc_mask;
+
+ drm_dbg_kms(&i915->drm,
+ "%s hw state readout: crtc_mask 0x%08x, on %i\n",
+ pll->info->name, pll->state.crtc_mask, pll->on);
+}
+
+void intel_dpll_readout_hw_state(struct drm_i915_private *i915)
+{
+ int i;
+
+ if (i915->dpll.mgr && i915->dpll.mgr->update_ref_clks)
+ i915->dpll.mgr->update_ref_clks(i915);
+
+ for (i = 0; i < i915->dpll.num_shared_dpll; i++)
+ readout_dpll_hw_state(i915, &i915->dpll.shared_dplls[i]);
+}
+
+static void sanitize_dpll_state(struct drm_i915_private *i915,
+ struct intel_shared_dpll *pll)
+{
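+ /* disable PLLs that are enabled in hardware but not in use by any CRTC */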
+ if (!pll->on || pll->active_mask)
+ return;
+
+ drm_dbg_kms(&i915->drm,
+ "%s enabled but not in use, disabling\n",
+ pll->info->name);
+
+ pll->info->funcs->disable(i915, pll);
+ pll->on = false;
+}
+
+void intel_dpll_sanitize_state(struct drm_i915_private *i915)
+{
+ int i;
+
+ for (i = 0; i < i915->dpll.num_shared_dpll; i++)
+ sanitize_dpll_state(i915, &i915->dpll.shared_dplls[i]);
+}
+
+/**
 * intel_dpll_dump_hw_state - write hw_state to dmesg
* @dev_priv: i915 drm device
* @hw_state: hw state to be written to the log
@@ -3926,8 +4496,8 @@ void intel_update_active_dpll(struct intel_atomic_state *state,
void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
const struct intel_dpll_hw_state *hw_state)
{
- if (dev_priv->dpll_mgr) {
- dev_priv->dpll_mgr->dump_hw_state(dev_priv, hw_state);
+ if (dev_priv->dpll.mgr) {
+ dev_priv->dpll.mgr->dump_hw_state(dev_priv, hw_state);
} else {
/* fallback for platforms that don't use the shared dpll
* infrastructure
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
index 2a104c64291d..5d9a2bc371e7 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
@@ -278,6 +278,15 @@ struct intel_shared_dpll_funcs {
bool (*get_hw_state)(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll,
struct intel_dpll_hw_state *hw_state);
+
+ /**
+ * @get_freq:
+ *
+ * Hook for calculating the PLL's output frequency (in kHz) based on
+ * its current state.
+ */
+ int (*get_freq)(struct drm_i915_private *i915,
+ const struct intel_shared_dpll *pll);
};
/**
@@ -372,15 +381,18 @@ void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state,
void intel_update_active_dpll(struct intel_atomic_state *state,
struct intel_crtc *crtc,
struct intel_encoder *encoder);
+int intel_dpll_get_freq(struct drm_i915_private *i915,
+ const struct intel_shared_dpll *pll);
void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state);
void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state);
void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state);
void intel_shared_dpll_swap_state(struct intel_atomic_state *state);
void intel_shared_dpll_init(struct drm_device *dev);
+void intel_dpll_readout_hw_state(struct drm_i915_private *dev_priv);
+void intel_dpll_sanitize_state(struct drm_i915_private *dev_priv);
void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
const struct intel_dpll_hw_state *hw_state);
-int cnl_hdmi_pll_ref_clock(struct drm_i915_private *dev_priv);
enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port);
bool intel_dpll_is_combophy(enum intel_dpll_id id);
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c
index 76ae01277fd6..d7a6bf2277df 100644
--- a/drivers/gpu/drm/i915/display/intel_dsb.c
+++ b/drivers/gpu/drm/i915/display/intel_dsb.c
@@ -52,7 +52,7 @@ static inline bool intel_dsb_enable_engine(struct intel_dsb *dsb)
dsb_ctrl = intel_de_read(dev_priv, DSB_CTRL(pipe, dsb->id));
if (DSB_STATUS & dsb_ctrl) {
- DRM_DEBUG_KMS("DSB engine is busy.\n");
+ drm_dbg_kms(&dev_priv->drm, "DSB engine is busy.\n");
return false;
}
@@ -72,7 +72,7 @@ static inline bool intel_dsb_disable_engine(struct intel_dsb *dsb)
dsb_ctrl = intel_de_read(dev_priv, DSB_CTRL(pipe, dsb->id));
if (DSB_STATUS & dsb_ctrl) {
- DRM_DEBUG_KMS("DSB engine is busy.\n");
+ drm_dbg_kms(&dev_priv->drm, "DSB engine is busy.\n");
return false;
}
@@ -115,20 +115,20 @@ intel_dsb_get(struct intel_crtc *crtc)
obj = i915_gem_object_create_internal(i915, DSB_BUF_SIZE);
if (IS_ERR(obj)) {
- DRM_ERROR("Gem object creation failed\n");
+ drm_err(&i915->drm, "Gem object creation failed\n");
goto out;
}
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
if (IS_ERR(vma)) {
- DRM_ERROR("Vma creation failed\n");
+ drm_err(&i915->drm, "Vma creation failed\n");
i915_gem_object_put(obj);
goto out;
}
buf = i915_gem_object_pin_map(vma->obj, I915_MAP_WC);
if (IS_ERR(buf)) {
- DRM_ERROR("Command buffer creation failed\n");
+ drm_err(&i915->drm, "Command buffer creation failed\n");
goto out;
}
@@ -203,7 +203,7 @@ void intel_dsb_indexed_reg_write(struct intel_dsb *dsb, i915_reg_t reg,
}
if (drm_WARN_ON(&dev_priv->drm, dsb->free_pos >= DSB_BUF_SIZE)) {
- DRM_DEBUG_KMS("DSB buffer overflow\n");
+ drm_dbg_kms(&dev_priv->drm, "DSB buffer overflow\n");
return;
}
@@ -277,7 +277,7 @@ void intel_dsb_reg_write(struct intel_dsb *dsb, i915_reg_t reg, u32 val)
}
if (drm_WARN_ON(&dev_priv->drm, dsb->free_pos >= DSB_BUF_SIZE)) {
- DRM_DEBUG_KMS("DSB buffer overflow\n");
+ drm_dbg_kms(&dev_priv->drm, "DSB buffer overflow\n");
return;
}
@@ -310,7 +310,8 @@ void intel_dsb_commit(struct intel_dsb *dsb)
goto reset;
if (is_dsb_busy(dsb)) {
- DRM_ERROR("HEAD_PTR write failed - dsb engine is busy.\n");
+ drm_err(&dev_priv->drm,
+ "HEAD_PTR write failed - dsb engine is busy.\n");
goto reset;
}
intel_de_write(dev_priv, DSB_HEAD(pipe, dsb->id),
@@ -322,15 +323,18 @@ void intel_dsb_commit(struct intel_dsb *dsb)
(tail - dsb->free_pos * 4));
if (is_dsb_busy(dsb)) {
- DRM_ERROR("TAIL_PTR write failed - dsb engine is busy.\n");
+ drm_err(&dev_priv->drm,
+ "TAIL_PTR write failed - dsb engine is busy.\n");
goto reset;
}
- DRM_DEBUG_KMS("DSB execution started - head 0x%x, tail 0x%x\n",
- i915_ggtt_offset(dsb->vma), tail);
+ drm_dbg_kms(&dev_priv->drm,
+ "DSB execution started - head 0x%x, tail 0x%x\n",
+ i915_ggtt_offset(dsb->vma), tail);
intel_de_write(dev_priv, DSB_TAIL(pipe, dsb->id),
i915_ggtt_offset(dsb->vma) + tail);
if (wait_for(!is_dsb_busy(dsb), 1)) {
- DRM_ERROR("Timed out waiting for DSB workload completion.\n");
+ drm_err(&dev_priv->drm,
+ "Timed out waiting for DSB workload completion.\n");
goto reset;
}
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
index 694498f4b719..574dcfec9577 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
@@ -36,7 +36,6 @@
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
-#include <drm/i915_drm.h>
#include <video/mipi_display.h>
diff --git a/drivers/gpu/drm/i915/display/intel_dvo.c b/drivers/gpu/drm/i915/display/intel_dvo.c
index 77f3d083b7a1..341d5ce8b062 100644
--- a/drivers/gpu/drm/i915/display/intel_dvo.c
+++ b/drivers/gpu/drm/i915/display/intel_dvo.c
@@ -30,7 +30,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
-#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "intel_connector.h"
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index ddf8d3bb7a7d..2e5d835a9eaa 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -42,6 +42,7 @@
#include "i915_drv.h"
#include "i915_trace.h"
+#include "i915_vgpu.h"
#include "intel_display_types.h"
#include "intel_fbc.h"
#include "intel_frontbuffer.h"
@@ -320,7 +321,7 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
SNB_CPU_FENCE_ENABLE | params->fence_id);
intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET,
params->crtc.fence_y_offset);
- } else {
+ } else if (dev_priv->ggtt.num_fences) {
intel_de_write(dev_priv, SNB_DPFC_CTL_SA, 0);
intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET, 0);
}
@@ -508,12 +509,12 @@ static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv,
fbc->compressed_llb = compressed_llb;
- GEM_BUG_ON(range_overflows_t(u64, dev_priv->dsm.start,
- fbc->compressed_fb.start,
- U32_MAX));
- GEM_BUG_ON(range_overflows_t(u64, dev_priv->dsm.start,
- fbc->compressed_llb->start,
- U32_MAX));
+ GEM_BUG_ON(range_overflows_end_t(u64, dev_priv->dsm.start,
+ fbc->compressed_fb.start,
+ U32_MAX));
+ GEM_BUG_ON(range_overflows_end_t(u64, dev_priv->dsm.start,
+ fbc->compressed_llb->start,
+ U32_MAX));
intel_de_write(dev_priv, FBC_CFB_BASE,
dev_priv->dsm.start + fbc->compressed_fb.start);
intel_de_write(dev_priv, FBC_LL_BASE,
@@ -691,12 +692,37 @@ static bool intel_fbc_cfb_size_changed(struct drm_i915_private *dev_priv)
fbc->compressed_fb.size * fbc->threshold;
}
+static bool intel_fbc_can_enable(struct drm_i915_private *dev_priv)
+{
+ struct intel_fbc *fbc = &dev_priv->fbc;
+
+ if (intel_vgpu_active(dev_priv)) {
+ fbc->no_fbc_reason = "VGPU is active";
+ return false;
+ }
+
+ if (!i915_modparams.enable_fbc) {
+ fbc->no_fbc_reason = "disabled per module param or by default";
+ return false;
+ }
+
+ if (fbc->underrun_detected) {
+ fbc->no_fbc_reason = "underrun detected";
+ return false;
+ }
+
+ return true;
+}
+
static bool intel_fbc_can_activate(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_fbc *fbc = &dev_priv->fbc;
struct intel_fbc_state_cache *cache = &fbc->state_cache;
+ if (!intel_fbc_can_enable(dev_priv))
+ return false;
+
if (!cache->plane.visible) {
fbc->no_fbc_reason = "primary plane not visible";
return false;
@@ -795,28 +821,6 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
return true;
}
-static bool intel_fbc_can_enable(struct drm_i915_private *dev_priv)
-{
- struct intel_fbc *fbc = &dev_priv->fbc;
-
- if (intel_vgpu_active(dev_priv)) {
- fbc->no_fbc_reason = "VGPU is active";
- return false;
- }
-
- if (!i915_modparams.enable_fbc) {
- fbc->no_fbc_reason = "disabled per module param or by default";
- return false;
- }
-
- if (fbc->underrun_detected) {
- fbc->no_fbc_reason = "underrun detected";
- return false;
- }
-
- return true;
-}
-
static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
struct intel_fbc_reg_params *params)
{
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c
index 876264fc6560..3bc804212a99 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.c
@@ -40,7 +40,6 @@
#include <drm/drm_crtc.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
-#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "intel_display_types.h"
diff --git a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
index 470b3b0b9bdb..813a4f7033e1 100644
--- a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
+++ b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
@@ -103,7 +103,7 @@ static void i9xx_check_fifo_underruns(struct intel_crtc *crtc)
intel_de_posting_read(dev_priv, reg);
trace_intel_cpu_fifo_underrun(dev_priv, crtc->pipe);
- DRM_ERROR("pipe %c underrun\n", pipe_name(crtc->pipe));
+ drm_err(&dev_priv->drm, "pipe %c underrun\n", pipe_name(crtc->pipe));
}
static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,
@@ -123,7 +123,8 @@ static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,
intel_de_posting_read(dev_priv, reg);
} else {
if (old && intel_de_read(dev_priv, reg) & PIPE_FIFO_UNDERRUN_STATUS)
- DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
+ drm_err(&dev_priv->drm, "pipe %c underrun\n",
+ pipe_name(pipe));
}
}
@@ -155,7 +156,7 @@ static void ivb_check_fifo_underruns(struct intel_crtc *crtc)
intel_de_posting_read(dev_priv, GEN7_ERR_INT);
trace_intel_cpu_fifo_underrun(dev_priv, pipe);
- DRM_ERROR("fifo underrun on pipe %c\n", pipe_name(pipe));
+ drm_err(&dev_priv->drm, "fifo underrun on pipe %c\n", pipe_name(pipe));
}
static void ivb_set_fifo_underrun_reporting(struct drm_device *dev,
@@ -176,8 +177,9 @@ static void ivb_set_fifo_underrun_reporting(struct drm_device *dev,
if (old &&
intel_de_read(dev_priv, GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe)) {
- DRM_ERROR("uncleared fifo underrun on pipe %c\n",
- pipe_name(pipe));
+ drm_err(&dev_priv->drm,
+ "uncleared fifo underrun on pipe %c\n",
+ pipe_name(pipe));
}
}
}
@@ -223,8 +225,8 @@ static void cpt_check_pch_fifo_underruns(struct intel_crtc *crtc)
intel_de_posting_read(dev_priv, SERR_INT);
trace_intel_pch_fifo_underrun(dev_priv, pch_transcoder);
- DRM_ERROR("pch fifo underrun on pch transcoder %c\n",
- pipe_name(pch_transcoder));
+ drm_err(&dev_priv->drm, "pch fifo underrun on pch transcoder %c\n",
+ pipe_name(pch_transcoder));
}
static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
@@ -246,8 +248,9 @@ static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
if (old && intel_de_read(dev_priv, SERR_INT) &
SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) {
- DRM_ERROR("uncleared pch fifo underrun on pch transcoder %c\n",
- pipe_name(pch_transcoder));
+ drm_err(&dev_priv->drm,
+ "uncleared pch fifo underrun on pch transcoder %c\n",
+ pipe_name(pch_transcoder));
}
}
}
@@ -381,8 +384,8 @@ void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
if (intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false)) {
trace_intel_cpu_fifo_underrun(dev_priv, pipe);
- DRM_ERROR("CPU pipe %c FIFO underrun\n",
- pipe_name(pipe));
+ drm_err(&dev_priv->drm, "CPU pipe %c FIFO underrun\n",
+ pipe_name(pipe));
}
intel_fbc_handle_fifo_underrun_irq(dev_priv);
@@ -403,8 +406,8 @@ void intel_pch_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
if (intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder,
false)) {
trace_intel_pch_fifo_underrun(dev_priv, pch_transcoder);
- DRM_ERROR("PCH transcoder %c FIFO underrun\n",
- pipe_name(pch_transcoder));
+ drm_err(&dev_priv->drm, "PCH transcoder %c FIFO underrun\n",
+ pipe_name(pch_transcoder));
}
}
diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.c b/drivers/gpu/drm/i915/display/intel_gmbus.c
index 4ef8a81ae0ad..1fd3a5a6296b 100644
--- a/drivers/gpu/drm/i915/display/intel_gmbus.c
+++ b/drivers/gpu/drm/i915/display/intel_gmbus.c
@@ -32,7 +32,6 @@
#include <linux/i2c.h>
#include <drm/drm_hdcp.h>
-#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "intel_display_types.h"
@@ -632,8 +631,9 @@ retry:
* till then let it sleep.
*/
if (gmbus_wait_idle(dev_priv)) {
- DRM_DEBUG_KMS("GMBUS [%s] timed out waiting for idle\n",
- adapter->name);
+ drm_dbg_kms(&dev_priv->drm,
+ "GMBUS [%s] timed out waiting for idle\n",
+ adapter->name);
ret = -ETIMEDOUT;
}
intel_de_write_fw(dev_priv, GMBUS0, 0);
@@ -656,8 +656,9 @@ clear_err:
*/
ret = -ENXIO;
if (gmbus_wait_idle(dev_priv)) {
- DRM_DEBUG_KMS("GMBUS [%s] timed out after NAK\n",
- adapter->name);
+ drm_dbg_kms(&dev_priv->drm,
+ "GMBUS [%s] timed out after NAK\n",
+ adapter->name);
ret = -ETIMEDOUT;
}
@@ -669,9 +670,9 @@ clear_err:
intel_de_write_fw(dev_priv, GMBUS1, 0);
intel_de_write_fw(dev_priv, GMBUS0, 0);
- DRM_DEBUG_KMS("GMBUS [%s] NAK for addr: %04x %c(%d)\n",
- adapter->name, msgs[i].addr,
- (msgs[i].flags & I2C_M_RD) ? 'r' : 'w', msgs[i].len);
+ drm_dbg_kms(&dev_priv->drm, "GMBUS [%s] NAK for addr: %04x %c(%d)\n",
+ adapter->name, msgs[i].addr,
+ (msgs[i].flags & I2C_M_RD) ? 'r' : 'w', msgs[i].len);
/*
* Passive adapters sometimes NAK the first probe. Retry the first
@@ -680,16 +681,18 @@ clear_err:
* drm_do_probe_ddc_edid, which bails out on the first -ENXIO.
*/
if (ret == -ENXIO && i == 0 && try++ == 0) {
- DRM_DEBUG_KMS("GMBUS [%s] NAK on first message, retry\n",
- adapter->name);
+ drm_dbg_kms(&dev_priv->drm,
+ "GMBUS [%s] NAK on first message, retry\n",
+ adapter->name);
goto retry;
}
goto out;
timeout:
- DRM_DEBUG_KMS("GMBUS [%s] timed out, falling back to bit banging on pin %d\n",
- bus->adapter.name, bus->reg0 & 0xff);
+ drm_dbg_kms(&dev_priv->drm,
+ "GMBUS [%s] timed out, falling back to bit banging on pin %d\n",
+ bus->adapter.name, bus->reg0 & 0xff);
intel_de_write_fw(dev_priv, GMBUS0, 0);
/*
@@ -926,9 +929,10 @@ void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit)
mutex_lock(&dev_priv->gmbus_mutex);
bus->force_bit += force_bit ? 1 : -1;
- DRM_DEBUG_KMS("%sabling bit-banging on %s. force bit now %d\n",
- force_bit ? "en" : "dis", adapter->name,
- bus->force_bit);
+ drm_dbg_kms(&dev_priv->drm,
+ "%sabling bit-banging on %s. force bit now %d\n",
+ force_bit ? "en" : "dis", adapter->name,
+ bus->force_bit);
mutex_unlock(&dev_priv->gmbus_mutex);
}
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index 229b4e329864..ee0f27ea2810 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -43,6 +43,7 @@ static
int intel_hdcp_read_valid_bksv(struct intel_digital_port *intel_dig_port,
const struct intel_hdcp_shim *shim, u8 *bksv)
{
+ struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
int ret, i, tries = 2;
/* HDCP spec states that we must retry the bksv if it is invalid */
@@ -54,7 +55,7 @@ int intel_hdcp_read_valid_bksv(struct intel_digital_port *intel_dig_port,
break;
}
if (i == tries) {
- DRM_DEBUG_KMS("Bksv is invalid\n");
+ drm_dbg_kms(&i915->drm, "Bksv is invalid\n");
return -ENODEV;
}
@@ -485,8 +486,8 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector,
return ret;
sha_idx += sizeof(sha_text);
} else {
- DRM_DEBUG_KMS("Invalid number of leftovers %d\n",
- sha_leftovers);
+ drm_dbg_kms(&dev_priv->drm, "Invalid number of leftovers %d\n",
+ sha_leftovers);
return -EINVAL;
}
@@ -514,11 +515,11 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector,
rep_ctl | HDCP_SHA1_COMPLETE_HASH);
if (intel_de_wait_for_set(dev_priv, HDCP_REP_CTL,
HDCP_SHA1_COMPLETE, 1)) {
- DRM_ERROR("Timed out waiting for SHA1 complete\n");
+ drm_err(&dev_priv->drm, "Timed out waiting for SHA1 complete\n");
return -ETIMEDOUT;
}
if (!(intel_de_read(dev_priv, HDCP_REP_CTL) & HDCP_SHA1_V_MATCH)) {
- DRM_DEBUG_KMS("SHA-1 mismatch, HDCP failed\n");
+ drm_dbg_kms(&dev_priv->drm, "SHA-1 mismatch, HDCP failed\n");
return -ENXIO;
}
@@ -537,7 +538,8 @@ int intel_hdcp_auth_downstream(struct intel_connector *connector)
ret = intel_hdcp_poll_ksv_fifo(intel_dig_port, shim);
if (ret) {
- DRM_DEBUG_KMS("KSV list failed to become ready (%d)\n", ret);
+ drm_dbg_kms(&dev_priv->drm,
+ "KSV list failed to become ready (%d)\n", ret);
return ret;
}
@@ -547,7 +549,7 @@ int intel_hdcp_auth_downstream(struct intel_connector *connector)
if (DRM_HDCP_MAX_DEVICE_EXCEEDED(bstatus[0]) ||
DRM_HDCP_MAX_CASCADE_EXCEEDED(bstatus[1])) {
- DRM_DEBUG_KMS("Max Topology Limit Exceeded\n");
+ drm_dbg_kms(&dev_priv->drm, "Max Topology Limit Exceeded\n");
return -EPERM;
}
@@ -560,13 +562,14 @@ int intel_hdcp_auth_downstream(struct intel_connector *connector)
*/
num_downstream = DRM_HDCP_NUM_DOWNSTREAM(bstatus[0]);
if (num_downstream == 0) {
- DRM_DEBUG_KMS("Repeater with zero downstream devices\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Repeater with zero downstream devices\n");
return -EINVAL;
}
ksv_fifo = kcalloc(DRM_HDCP_KSV_LEN, num_downstream, GFP_KERNEL);
if (!ksv_fifo) {
- DRM_DEBUG_KMS("Out of mem: ksv_fifo\n");
+ drm_dbg_kms(&dev_priv->drm, "Out of mem: ksv_fifo\n");
return -ENOMEM;
}
@@ -576,7 +579,7 @@ int intel_hdcp_auth_downstream(struct intel_connector *connector)
if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm, ksv_fifo,
num_downstream)) {
- DRM_ERROR("Revoked Ksv(s) in ksv_fifo\n");
+ drm_err(&dev_priv->drm, "Revoked Ksv(s) in ksv_fifo\n");
ret = -EPERM;
goto err;
}
@@ -594,12 +597,13 @@ int intel_hdcp_auth_downstream(struct intel_connector *connector)
}
if (i == tries) {
- DRM_DEBUG_KMS("V Prime validation failed.(%d)\n", ret);
+ drm_dbg_kms(&dev_priv->drm,
+ "V Prime validation failed.(%d)\n", ret);
goto err;
}
- DRM_DEBUG_KMS("HDCP is enabled (%d downstream devices)\n",
- num_downstream);
+ drm_dbg_kms(&dev_priv->drm, "HDCP is enabled (%d downstream devices)\n",
+ num_downstream);
ret = 0;
err:
kfree(ksv_fifo);
@@ -642,7 +646,8 @@ static int intel_hdcp_auth(struct intel_connector *connector)
if (ret)
return ret;
if (!hdcp_capable) {
- DRM_DEBUG_KMS("Panel is not HDCP capable\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Panel is not HDCP capable\n");
return -EINVAL;
}
}
@@ -659,7 +664,7 @@ static int intel_hdcp_auth(struct intel_connector *connector)
if (intel_de_wait_for_set(dev_priv,
HDCP_STATUS(dev_priv, cpu_transcoder, port),
HDCP_STATUS_AN_READY, 1)) {
- DRM_ERROR("Timed out waiting for An\n");
+ drm_err(&dev_priv->drm, "Timed out waiting for An\n");
return -ETIMEDOUT;
}
@@ -680,7 +685,7 @@ static int intel_hdcp_auth(struct intel_connector *connector)
return ret;
if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm, bksv.shim, 1)) {
- DRM_ERROR("BKSV is revoked\n");
+ drm_err(&dev_priv->drm, "BKSV is revoked\n");
return -EPERM;
}
@@ -706,7 +711,7 @@ static int intel_hdcp_auth(struct intel_connector *connector)
/* Wait for R0 ready */
if (wait_for(intel_de_read(dev_priv, HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
(HDCP_STATUS_R0_READY | HDCP_STATUS_ENC), 1)) {
- DRM_ERROR("Timed out waiting for R0 ready\n");
+ drm_err(&dev_priv->drm, "Timed out waiting for R0 ready\n");
return -ETIMEDOUT;
}
@@ -743,8 +748,10 @@ static int intel_hdcp_auth(struct intel_connector *connector)
}
if (i == tries) {
- DRM_DEBUG_KMS("Timed out waiting for Ri prime match (%x)\n",
- intel_de_read(dev_priv, HDCP_STATUS(dev_priv, cpu_transcoder, port)));
+ drm_dbg_kms(&dev_priv->drm,
+ "Timed out waiting for Ri prime match (%x)\n",
+ intel_de_read(dev_priv, HDCP_STATUS(dev_priv,
+ cpu_transcoder, port)));
return -ETIMEDOUT;
}
@@ -753,7 +760,7 @@ static int intel_hdcp_auth(struct intel_connector *connector)
HDCP_STATUS(dev_priv, cpu_transcoder, port),
HDCP_STATUS_ENC,
ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
- DRM_ERROR("Timed out waiting for encryption\n");
+ drm_err(&dev_priv->drm, "Timed out waiting for encryption\n");
return -ETIMEDOUT;
}
@@ -765,7 +772,7 @@ static int intel_hdcp_auth(struct intel_connector *connector)
if (repeater_present)
return intel_hdcp_auth_downstream(connector);
- DRM_DEBUG_KMS("HDCP is enabled (no repeater present)\n");
+ drm_dbg_kms(&dev_priv->drm, "HDCP is enabled (no repeater present)\n");
return 0;
}
@@ -1271,7 +1278,7 @@ static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
return ret;
if (msgs.send_cert.rx_caps[0] != HDCP_2_2_RX_CAPS_VERSION_VAL) {
- DRM_DEBUG_KMS("cert.rx_caps dont claim HDCP2.2\n");
+ drm_dbg_kms(&dev_priv->drm, "cert.rx_caps dont claim HDCP2.2\n");
return -EINVAL;
}
@@ -1280,7 +1287,7 @@ static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm,
msgs.send_cert.cert_rx.receiver_id,
1)) {
- DRM_ERROR("Receiver ID is revoked\n");
+ drm_err(&dev_priv->drm, "Receiver ID is revoked\n");
return -EPERM;
}
@@ -1455,7 +1462,7 @@ int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
if (HDCP_2_2_MAX_CASCADE_EXCEEDED(rx_info[1]) ||
HDCP_2_2_MAX_DEVS_EXCEEDED(rx_info[1])) {
- DRM_DEBUG_KMS("Topology Max Size Exceeded\n");
+ drm_dbg_kms(&dev_priv->drm, "Topology Max Size Exceeded\n");
return -EINVAL;
}
@@ -1463,9 +1470,15 @@ int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
seq_num_v =
drm_hdcp_be24_to_cpu((const u8 *)msgs.recvid_list.seq_num_v);
+ if (!hdcp->hdcp2_encrypted && seq_num_v) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Non zero Seq_num_v at first RecvId_List msg\n");
+ return -EINVAL;
+ }
+
if (seq_num_v < hdcp->seq_num_v) {
/* Roll over of the seq_num_v from repeater. Reauthenticate. */
- DRM_DEBUG_KMS("Seq_num_v roll over.\n");
+ drm_dbg_kms(&dev_priv->drm, "Seq_num_v roll over.\n");
return -EINVAL;
}
@@ -1474,7 +1487,7 @@ int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm,
msgs.recvid_list.receiver_ids,
device_cnt)) {
- DRM_ERROR("Revoked receiver ID(s) is in list\n");
+ drm_err(&dev_priv->drm, "Revoked receiver ID(s) is in list\n");
return -EPERM;
}
@@ -1507,25 +1520,27 @@ static int hdcp2_authenticate_repeater(struct intel_connector *connector)
static int hdcp2_authenticate_sink(struct intel_connector *connector)
{
struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
const struct intel_hdcp_shim *shim = hdcp->shim;
int ret;
ret = hdcp2_authentication_key_exchange(connector);
if (ret < 0) {
- DRM_DEBUG_KMS("AKE Failed. Err : %d\n", ret);
+ drm_dbg_kms(&i915->drm, "AKE Failed. Err : %d\n", ret);
return ret;
}
ret = hdcp2_locality_check(connector);
if (ret < 0) {
- DRM_DEBUG_KMS("Locality Check failed. Err : %d\n", ret);
+ drm_dbg_kms(&i915->drm,
+ "Locality Check failed. Err : %d\n", ret);
return ret;
}
ret = hdcp2_session_key_exchange(connector);
if (ret < 0) {
- DRM_DEBUG_KMS("SKE Failed. Err : %d\n", ret);
+ drm_dbg_kms(&i915->drm, "SKE Failed. Err : %d\n", ret);
return ret;
}
@@ -1540,7 +1555,8 @@ static int hdcp2_authenticate_sink(struct intel_connector *connector)
if (hdcp->is_repeater) {
ret = hdcp2_authenticate_repeater(connector);
if (ret < 0) {
- DRM_DEBUG_KMS("Repeater Auth Failed. Err: %d\n", ret);
+ drm_dbg_kms(&i915->drm,
+ "Repeater Auth Failed. Err: %d\n", ret);
return ret;
}
}
@@ -1630,6 +1646,7 @@ static int hdcp2_disable_encryption(struct intel_connector *connector)
static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector)
{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
int ret, i, tries = 3;
for (i = 0; i < tries; i++) {
@@ -1638,10 +1655,10 @@ static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector)
break;
/* Clearing the mei hdcp session */
- DRM_DEBUG_KMS("HDCP2.2 Auth %d of %d Failed.(%d)\n",
- i + 1, tries, ret);
+ drm_dbg_kms(&i915->drm, "HDCP2.2 Auth %d of %d Failed.(%d)\n",
+ i + 1, tries, ret);
if (hdcp2_deauthenticate_port(connector) < 0)
- DRM_DEBUG_KMS("Port deauth failed.\n");
+ drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
}
if (i != tries) {
@@ -1652,9 +1669,10 @@ static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector)
msleep(HDCP_2_2_DELAY_BEFORE_ENCRYPTION_EN);
ret = hdcp2_enable_encryption(connector);
if (ret < 0) {
- DRM_DEBUG_KMS("Encryption Enable Failed.(%d)\n", ret);
+ drm_dbg_kms(&i915->drm,
+ "Encryption Enable Failed.(%d)\n", ret);
if (hdcp2_deauthenticate_port(connector) < 0)
- DRM_DEBUG_KMS("Port deauth failed.\n");
+ drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
}
}
@@ -1663,23 +1681,24 @@ static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector)
static int _intel_hdcp2_enable(struct intel_connector *connector)
{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
int ret;
- DRM_DEBUG_KMS("[%s:%d] HDCP2.2 is being enabled. Type: %d\n",
- connector->base.name, connector->base.base.id,
- hdcp->content_type);
+ drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is being enabled. Type: %d\n",
+ connector->base.name, connector->base.base.id,
+ hdcp->content_type);
ret = hdcp2_authenticate_and_encrypt(connector);
if (ret) {
- DRM_DEBUG_KMS("HDCP2 Type%d Enabling Failed. (%d)\n",
- hdcp->content_type, ret);
+ drm_dbg_kms(&i915->drm, "HDCP2 Type%d Enabling Failed. (%d)\n",
+ hdcp->content_type, ret);
return ret;
}
- DRM_DEBUG_KMS("[%s:%d] HDCP2.2 is enabled. Type %d\n",
- connector->base.name, connector->base.base.id,
- hdcp->content_type);
+ drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is enabled. Type %d\n",
+ connector->base.name, connector->base.base.id,
+ hdcp->content_type);
hdcp->hdcp2_encrypted = true;
return 0;
@@ -1687,15 +1706,16 @@ static int _intel_hdcp2_enable(struct intel_connector *connector)
static int _intel_hdcp2_disable(struct intel_connector *connector)
{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
int ret;
- DRM_DEBUG_KMS("[%s:%d] HDCP2.2 is being Disabled\n",
- connector->base.name, connector->base.base.id);
+ drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is being Disabled\n",
+ connector->base.name, connector->base.base.id);
ret = hdcp2_disable_encryption(connector);
if (hdcp2_deauthenticate_port(connector) < 0)
- DRM_DEBUG_KMS("Port deauth failed.\n");
+ drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
connector->hdcp.hdcp2_encrypted = false;
@@ -1938,12 +1958,13 @@ void intel_hdcp_component_init(struct drm_i915_private *dev_priv)
static void intel_hdcp2_init(struct intel_connector *connector,
const struct intel_hdcp_shim *shim)
{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
int ret;
ret = initialize_hdcp_port_data(connector, shim);
if (ret) {
- DRM_DEBUG_KMS("Mei hdcp data init failed\n");
+ drm_dbg_kms(&i915->drm, "Mei hdcp data init failed\n");
return;
}
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.h b/drivers/gpu/drm/i915/display/intel_hdcp.h
index 1b2eacaf8949..7c12ad609b1f 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.h
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.h
@@ -8,8 +8,6 @@
#include <linux/types.h>
-#include <drm/i915_drm.h>
-
struct drm_connector;
struct drm_connector_state;
struct drm_i915_private;
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index bdbb5ce3fa81..39930232b253 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -36,7 +36,6 @@
#include <drm/drm_edid.h>
#include <drm/drm_hdcp.h>
#include <drm/drm_scdc_helper.h>
-#include <drm/i915_drm.h>
#include <drm/intel_lpe_audio.h>
#include "i915_debugfs.h"
@@ -2276,14 +2275,9 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
}
}
- /* Display WA #1139: glk */
- if (bpc == 12 && IS_GLK_REVID(dev_priv, 0, GLK_REVID_A1) &&
- adjusted_mode->htotal > 5460)
- return false;
-
- /* Display Wa_1405510057:icl */
+ /* Display Wa_1405510057:icl,ehl */
if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 &&
- bpc == 10 && INTEL_GEN(dev_priv) >= 11 &&
+ bpc == 10 && IS_GEN(dev_priv, 11) &&
(adjusted_mode->crtc_hblank_end -
adjusted_mode->crtc_hblank_start) % 8 == 2)
return false;
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.h b/drivers/gpu/drm/i915/display/intel_hdmi.h
index d3659d0b408b..8ff1f76a63df 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.h
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.h
@@ -9,8 +9,6 @@
#include <linux/hdmi.h>
#include <linux/types.h>
-#include <drm/i915_drm.h>
-
#include "i915_reg.h"
struct drm_connector;
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c
index 8af0ae61e1bb..a091442efba4 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug.c
@@ -23,8 +23,6 @@
#include <linux/kernel.h>
-#include <drm/i915_drm.h>
-
#include "i915_drv.h"
#include "intel_display_types.h"
#include "intel_hotplug.h"
@@ -89,29 +87,16 @@
enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv,
enum port port)
{
- switch (port) {
- case PORT_A:
- return HPD_PORT_A;
- case PORT_B:
- return HPD_PORT_B;
- case PORT_C:
- return HPD_PORT_C;
- case PORT_D:
- return HPD_PORT_D;
- case PORT_E:
- return HPD_PORT_E;
- case PORT_F:
- if (IS_CNL_WITH_PORT_F(dev_priv))
- return HPD_PORT_E;
- return HPD_PORT_F;
- case PORT_G:
- return HPD_PORT_G;
- case PORT_H:
- return HPD_PORT_H;
- case PORT_I:
- return HPD_PORT_I;
+ enum phy phy = intel_port_to_phy(dev_priv, port);
+
+ switch (phy) {
+ case PHY_F:
+ return IS_CNL_WITH_PORT_F(dev_priv) ? HPD_PORT_E : HPD_PORT_F;
+ case PHY_A ... PHY_E:
+ case PHY_G ... PHY_I:
+ return HPD_PORT_A + phy - PHY_A;
default:
- MISSING_CASE(port);
+ MISSING_CASE(phy);
return HPD_NONE;
}
}
@@ -185,10 +170,13 @@ static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv,
hpd->stats[pin].count += increment;
if (hpd->stats[pin].count > threshold) {
hpd->stats[pin].state = HPD_MARK_DISABLED;
- DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", pin);
+ drm_dbg_kms(&dev_priv->drm,
+ "HPD interrupt storm detected on PIN %d\n", pin);
storm = true;
} else {
- DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", pin,
+ drm_dbg_kms(&dev_priv->drm,
+ "Received HPD interrupt on PIN %d - cnt: %d\n",
+ pin,
hpd->stats[pin].count);
}
@@ -217,7 +205,8 @@ intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv)
dev_priv->hotplug.stats[pin].state != HPD_MARK_DISABLED)
continue;
- DRM_INFO("HPD interrupt storm detected on connector %s: "
+ drm_info(&dev_priv->drm,
+ "HPD interrupt storm detected on connector %s: "
"switching from hotplug detection to polling\n",
connector->base.name);
@@ -259,8 +248,9 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
continue;
if (connector->base.polled != connector->polled)
- DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
- connector->base.name);
+ drm_dbg(&dev_priv->drm,
+ "Reenabling HPD on connector %s\n",
+ connector->base.name);
connector->base.polled = connector->polled;
}
drm_connector_list_iter_end(&conn_iter);
@@ -295,11 +285,12 @@ intel_encoder_hotplug(struct intel_encoder *encoder,
if (old_status == connector->base.status)
return INTEL_HOTPLUG_UNCHANGED;
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
- connector->base.base.id,
- connector->base.name,
- drm_get_connector_status_name(old_status),
- drm_get_connector_status_name(connector->base.status));
+ drm_dbg_kms(&to_i915(dev)->drm,
+ "[CONNECTOR:%d:%s] status updated from %s to %s\n",
+ connector->base.base.id,
+ connector->base.name,
+ drm_get_connector_status_name(old_status),
+ drm_get_connector_status_name(connector->base.status));
return INTEL_HOTPLUG_CHANGED;
}
@@ -373,7 +364,7 @@ static void i915_hotplug_work_func(struct work_struct *work)
u32 hpd_retry_bits;
mutex_lock(&dev->mode_config.mutex);
- DRM_DEBUG_KMS("running encoder hotplug functions\n");
+ drm_dbg_kms(&dev_priv->drm, "running encoder hotplug functions\n");
spin_lock_irq(&dev_priv->irq_lock);
@@ -401,8 +392,9 @@ static void i915_hotplug_work_func(struct work_struct *work)
struct intel_encoder *encoder =
intel_attached_encoder(connector);
- DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
- connector->base.name, pin);
+ drm_dbg_kms(&dev_priv->drm,
+ "Connector %s (pin %i) received hotplug event.\n",
+ connector->base.name, pin);
switch (encoder->hotplug(encoder, connector,
hpd_event_bits & hpd_bit)) {
@@ -487,9 +479,10 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
long_hpd = long_mask & BIT(pin);
- DRM_DEBUG_DRIVER("digital hpd on [ENCODER:%d:%s] - %s\n",
- encoder->base.base.id, encoder->base.name,
- long_hpd ? "long" : "short");
+ drm_dbg(&dev_priv->drm,
+ "digital hpd on [ENCODER:%d:%s] - %s\n",
+ encoder->base.base.id, encoder->base.name,
+ long_hpd ? "long" : "short");
queue_dig = true;
if (long_hpd) {
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.h b/drivers/gpu/drm/i915/display/intel_hotplug.h
index 087b5f57b321..1e6b4fda2900 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug.h
+++ b/drivers/gpu/drm/i915/display/intel_hotplug.h
@@ -8,8 +8,6 @@
#include <linux/types.h>
-#include <drm/i915_drm.h>
-
struct drm_i915_private;
struct intel_connector;
struct intel_encoder;
diff --git a/drivers/gpu/drm/i915/display/intel_lpe_audio.c b/drivers/gpu/drm/i915/display/intel_lpe_audio.c
index 516e7179a5a4..ad5cc13037ae 100644
--- a/drivers/gpu/drm/i915/display/intel_lpe_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_lpe_audio.c
@@ -127,7 +127,8 @@ lpe_audio_platdev_create(struct drm_i915_private *dev_priv)
kfree(pdata);
if (IS_ERR(platdev)) {
- DRM_ERROR("Failed to allocate LPE audio platform device\n");
+ drm_err(&dev_priv->drm,
+ "Failed to allocate LPE audio platform device\n");
return platdev;
}
@@ -190,7 +191,8 @@ static bool lpe_audio_detect(struct drm_i915_private *dev_priv)
};
if (!pci_dev_present(atom_hdaudio_ids)) {
- DRM_INFO("HDaudio controller not detected, using LPE audio instead\n");
+ drm_info(&dev_priv->drm,
+ "HDaudio controller not detected, using LPE audio instead\n");
lpe_present = true;
}
}
@@ -203,18 +205,19 @@ static int lpe_audio_setup(struct drm_i915_private *dev_priv)
dev_priv->lpe_audio.irq = irq_alloc_desc(0);
if (dev_priv->lpe_audio.irq < 0) {
- DRM_ERROR("Failed to allocate IRQ desc: %d\n",
+ drm_err(&dev_priv->drm, "Failed to allocate IRQ desc: %d\n",
dev_priv->lpe_audio.irq);
ret = dev_priv->lpe_audio.irq;
goto err;
}
- DRM_DEBUG("irq = %d\n", dev_priv->lpe_audio.irq);
+ drm_dbg(&dev_priv->drm, "irq = %d\n", dev_priv->lpe_audio.irq);
ret = lpe_audio_irq_init(dev_priv);
if (ret) {
- DRM_ERROR("Failed to initialize irqchip for lpe audio: %d\n",
+ drm_err(&dev_priv->drm,
+ "Failed to initialize irqchip for lpe audio: %d\n",
ret);
goto err_free_irq;
}
@@ -223,7 +226,8 @@ static int lpe_audio_setup(struct drm_i915_private *dev_priv)
if (IS_ERR(dev_priv->lpe_audio.platdev)) {
ret = PTR_ERR(dev_priv->lpe_audio.platdev);
- DRM_ERROR("Failed to create lpe audio platform device: %d\n",
+ drm_err(&dev_priv->drm,
+ "Failed to create lpe audio platform device: %d\n",
ret);
goto err_free_irq;
}
@@ -259,8 +263,8 @@ void intel_lpe_audio_irq_handler(struct drm_i915_private *dev_priv)
ret = generic_handle_irq(dev_priv->lpe_audio.irq);
if (ret)
- DRM_ERROR_RATELIMITED("error handling LPE audio irq: %d\n",
- ret);
+ drm_err_ratelimited(&dev_priv->drm,
+ "error handling LPE audio irq: %d\n", ret);
}
/**
@@ -278,7 +282,8 @@ int intel_lpe_audio_init(struct drm_i915_private *dev_priv)
if (lpe_audio_detect(dev_priv)) {
ret = lpe_audio_setup(dev_priv);
if (ret < 0)
- DRM_ERROR("failed to setup LPE Audio bridge\n");
+ drm_err(&dev_priv->drm,
+ "failed to setup LPE Audio bridge\n");
}
return ret;
}
diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c
index b7ad0b534790..9a067effcfa0 100644
--- a/drivers/gpu/drm/i915/display/intel_lvds.c
+++ b/drivers/gpu/drm/i915/display/intel_lvds.c
@@ -37,7 +37,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
-#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "intel_atomic.h"
@@ -182,8 +181,9 @@ static void intel_lvds_pps_get_hw_state(struct drm_i915_private *dev_priv,
if (INTEL_GEN(dev_priv) <= 4 &&
pps->t1_t2 == 0 && pps->t5 == 0 && pps->t3 == 0 && pps->tx == 0) {
- DRM_DEBUG_KMS("Panel power timings uninitialized, "
- "setting defaults\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Panel power timings uninitialized, "
+ "setting defaults\n");
/* Set T2 to 40ms and T5 to 200ms in 100 usec units */
pps->t1_t2 = 40 * 10;
pps->t5 = 200 * 10;
@@ -192,10 +192,10 @@ static void intel_lvds_pps_get_hw_state(struct drm_i915_private *dev_priv,
pps->tx = 200 * 10;
}
- DRM_DEBUG_DRIVER("LVDS PPS:t1+t2 %d t3 %d t4 %d t5 %d tx %d "
- "divider %d port %d powerdown_on_reset %d\n",
- pps->t1_t2, pps->t3, pps->t4, pps->t5, pps->tx,
- pps->divider, pps->port, pps->powerdown_on_reset);
+ drm_dbg(&dev_priv->drm, "LVDS PPS:t1+t2 %d t3 %d t4 %d t5 %d tx %d "
+ "divider %d port %d powerdown_on_reset %d\n",
+ pps->t1_t2, pps->t3, pps->t4, pps->t5, pps->tx,
+ pps->divider, pps->port, pps->powerdown_on_reset);
}
static void intel_lvds_pps_init_hw(struct drm_i915_private *dev_priv,
@@ -317,7 +317,8 @@ static void intel_enable_lvds(struct intel_encoder *encoder,
intel_de_posting_read(dev_priv, lvds_encoder->reg);
if (intel_de_wait_for_set(dev_priv, PP_STATUS(0), PP_ON, 5000))
- DRM_ERROR("timed out waiting for panel to power on\n");
+ drm_err(&dev_priv->drm,
+ "timed out waiting for panel to power on\n");
intel_panel_enable_backlight(pipe_config, conn_state);
}
@@ -332,7 +333,8 @@ static void intel_disable_lvds(struct intel_encoder *encoder,
intel_de_write(dev_priv, PP_CONTROL(0),
intel_de_read(dev_priv, PP_CONTROL(0)) & ~PANEL_POWER_ON);
if (intel_de_wait_for_clear(dev_priv, PP_STATUS(0), PP_ON, 1000))
- DRM_ERROR("timed out waiting for panel to power off\n");
+ drm_err(&dev_priv->drm,
+ "timed out waiting for panel to power off\n");
intel_de_write(dev_priv, lvds_encoder->reg,
intel_de_read(dev_priv, lvds_encoder->reg) & ~LVDS_PORT_EN);
@@ -398,7 +400,7 @@ static int intel_lvds_compute_config(struct intel_encoder *intel_encoder,
/* Should never happen!! */
if (INTEL_GEN(dev_priv) < 4 && intel_crtc->pipe == 0) {
- DRM_ERROR("Can't support LVDS on pipe A\n");
+ drm_err(&dev_priv->drm, "Can't support LVDS on pipe A\n");
return -EINVAL;
}
@@ -408,8 +410,9 @@ static int intel_lvds_compute_config(struct intel_encoder *intel_encoder,
lvds_bpp = 6*3;
if (lvds_bpp != pipe_config->pipe_bpp && !pipe_config->bw_constrained) {
- DRM_DEBUG_KMS("forcing display bpp (was %d) to LVDS (%d)\n",
- pipe_config->pipe_bpp, lvds_bpp);
+ drm_dbg_kms(&dev_priv->drm,
+ "forcing display bpp (was %d) to LVDS (%d)\n",
+ pipe_config->pipe_bpp, lvds_bpp);
pipe_config->pipe_bpp = lvds_bpp;
}
@@ -833,7 +836,8 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
}
if (!dev_priv->vbt.int_lvds_support) {
- DRM_DEBUG_KMS("Internal LVDS support disabled by VBT\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Internal LVDS support disabled by VBT\n");
return;
}
@@ -852,10 +856,12 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
pin = GMBUS_PIN_PANEL;
if (!intel_bios_is_lvds_present(dev_priv, &pin)) {
if ((lvds & LVDS_PORT_EN) == 0) {
- DRM_DEBUG_KMS("LVDS is not present in VBT\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "LVDS is not present in VBT\n");
return;
}
- DRM_DEBUG_KMS("LVDS is not present in VBT, but enabled anyway\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "LVDS is not present in VBT, but enabled anyway\n");
}
lvds_encoder = kzalloc(sizeof(*lvds_encoder), GFP_KERNEL);
@@ -969,7 +975,7 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
*/
fixed_mode = intel_encoder_current_mode(intel_encoder);
if (fixed_mode) {
- DRM_DEBUG_KMS("using current (BIOS) mode: ");
+ drm_dbg_kms(&dev_priv->drm, "using current (BIOS) mode: ");
drm_mode_debug_printmodeline(fixed_mode);
fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
}
@@ -985,8 +991,8 @@ out:
intel_panel_setup_backlight(connector, INVALID_PIPE);
lvds_encoder->is_dual_link = compute_is_dual_link_lvds(lvds_encoder);
- DRM_DEBUG_KMS("detected %s-link lvds configuration\n",
- lvds_encoder->is_dual_link ? "dual" : "single");
+ drm_dbg_kms(&dev_priv->drm, "detected %s-link lvds configuration\n",
+ lvds_encoder->is_dual_link ? "dual" : "single");
lvds_encoder->a3_power = lvds & LVDS_A3_POWER_MASK;
@@ -995,7 +1001,7 @@ out:
failed:
mutex_unlock(&dev->mode_config.mutex);
- DRM_DEBUG_KMS("No LVDS modes found, disabling.\n");
+ drm_dbg_kms(&dev_priv->drm, "No LVDS modes found, disabling.\n");
drm_connector_cleanup(connector);
drm_encoder_cleanup(encoder);
kfree(lvds_encoder);
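The PPS fields programmed above count in 100 usec units, which is why the 40 ms and 200 ms defaults appear as 40 * 10 and 200 * 10. A small standalone check of that unit math (values taken from the hunk):

#include <stdio.h>

int main(void)
{
	unsigned int t1_t2 = 40 * 10;	/* T2 default: 40 ms in 100 usec units */
	unsigned int t5 = 200 * 10;	/* T5 default: 200 ms in 100 usec units */

	printf("t1_t2 = %u units = %u ms\n", t1_t2, t1_t2 / 10);
	printf("t5    = %u units = %u ms\n", t5, t5 / 10);
	return 0;
}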
diff --git a/drivers/gpu/drm/i915/display/intel_opregion.c b/drivers/gpu/drm/i915/display/intel_opregion.c
index dfd78fccd456..cc6b00959586 100644
--- a/drivers/gpu/drm/i915/display/intel_opregion.c
+++ b/drivers/gpu/drm/i915/display/intel_opregion.c
@@ -30,8 +30,6 @@
#include <linux/firmware.h>
#include <acpi/video.h>
-#include <drm/i915_drm.h>
-
#include "display/intel_panel.h"
#include "i915_drv.h"
diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
index 5f1207dec10e..481187223101 100644
--- a/drivers/gpu/drm/i915/display/intel_overlay.c
+++ b/drivers/gpu/drm/i915/display/intel_overlay.c
@@ -27,7 +27,6 @@
*/
#include <drm/drm_fourcc.h>
-#include <drm/i915_drm.h>
#include "gem/i915_gem_pm.h"
#include "gt/intel_ring.h"
@@ -324,7 +323,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
/* check for underruns */
tmp = intel_de_read(dev_priv, DOVSTA);
if (tmp & (1 << 17))
- DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
+ drm_dbg(&dev_priv->drm, "overlay underrun, DOVSTA: %x\n", tmp);
rq = alloc_request(overlay, NULL);
if (IS_ERR(rq))
@@ -1069,7 +1068,7 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
overlay = dev_priv->overlay;
if (!overlay) {
- DRM_DEBUG("userspace bug: no overlay\n");
+ drm_dbg(&dev_priv->drm, "userspace bug: no overlay\n");
return -ENODEV;
}
@@ -1093,7 +1092,8 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
drm_modeset_lock_all(dev);
if (i915_gem_object_is_tiled(new_bo)) {
- DRM_DEBUG_KMS("buffer used for overlay image can not be tiled\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "buffer used for overlay image can not be tiled\n");
ret = -EINVAL;
goto out_unlock;
}
@@ -1228,7 +1228,7 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
overlay = dev_priv->overlay;
if (!overlay) {
- DRM_DEBUG("userspace bug: no overlay\n");
+ drm_dbg(&dev_priv->drm, "userspace bug: no overlay\n");
return -ENODEV;
}
@@ -1372,7 +1372,7 @@ void intel_overlay_setup(struct drm_i915_private *dev_priv)
update_reg_attrs(overlay, overlay->regs);
dev_priv->overlay = overlay;
- DRM_INFO("Initialized overlay support.\n");
+ drm_info(&dev_priv->drm, "Initialized overlay support.\n");
return;
out_free:
diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c
index 5aead622019c..276f43870802 100644
--- a/drivers/gpu/drm/i915/display/intel_panel.c
+++ b/drivers/gpu/drm/i915/display/intel_panel.c
@@ -1882,6 +1882,7 @@ static int pwm_setup_backlight(struct intel_connector *connector,
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_panel *panel = &connector->panel;
const char *desc;
+ u32 level, ns;
int retval;
/* Get the right PWM chip for DSI backlight according to VBT */
@@ -1906,8 +1907,12 @@ static int pwm_setup_backlight(struct intel_connector *connector,
*/
pwm_apply_args(panel->backlight.pwm);
- retval = pwm_config(panel->backlight.pwm, CRC_PMIC_PWM_PERIOD_NS,
- CRC_PMIC_PWM_PERIOD_NS);
+ panel->backlight.min = 0; /* 0% */
+ panel->backlight.max = 100; /* 100% */
+ level = intel_panel_compute_brightness(connector, 100);
+ ns = DIV_ROUND_UP(level * CRC_PMIC_PWM_PERIOD_NS, 100);
+
+ retval = pwm_config(panel->backlight.pwm, ns, CRC_PMIC_PWM_PERIOD_NS);
if (retval < 0) {
drm_err(&dev_priv->drm, "Failed to configure the pwm chip\n");
pwm_put(panel->backlight.pwm);
@@ -1915,11 +1920,10 @@ static int pwm_setup_backlight(struct intel_connector *connector,
return retval;
}
- panel->backlight.min = 0; /* 0% */
- panel->backlight.max = 100; /* 100% */
- panel->backlight.level = DIV_ROUND_UP(
- pwm_get_duty_cycle(panel->backlight.pwm) * 100,
- CRC_PMIC_PWM_PERIOD_NS);
+ level = DIV_ROUND_UP(pwm_get_duty_cycle(panel->backlight.pwm) * 100,
+ CRC_PMIC_PWM_PERIOD_NS);
+ panel->backlight.level =
+ intel_panel_compute_brightness(connector, level);
panel->backlight.enabled = panel->backlight.level != 0;
drm_info(&dev_priv->drm, "Using %s PWM for LCD backlight control\n",
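The reworked pwm_setup_backlight() maps a 0-100 brightness level to a duty cycle with DIV_ROUND_UP(level * PERIOD, 100) and reads it back the same way, routing both directions through intel_panel_compute_brightness() so inverted-brightness panels are handled on read-back too. A standalone sketch of that round trip (the period value and the inversion helper here are illustrative, not the driver's):

#include <stdio.h>

#define PWM_PERIOD_NS 21333	/* illustrative period, not necessarily the driver's */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Hypothetical stand-in for intel_panel_compute_brightness(). */
static unsigned int compute_brightness(unsigned int level, int inverted)
{
	return inverted ? 100 - level : level;
}

int main(void)
{
	unsigned int level = compute_brightness(100, 0);
	unsigned long long ns =
		DIV_ROUND_UP((unsigned long long)level * PWM_PERIOD_NS, 100);
	unsigned int readback = DIV_ROUND_UP(ns * 100, PWM_PERIOD_NS);

	printf("%u%% -> %llu ns -> %u%%\n",
	       level, ns, compute_brightness(readback, 0));
	return 0;
}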
diff --git a/drivers/gpu/drm/i915/display/intel_pipe_crc.c b/drivers/gpu/drm/i915/display/intel_pipe_crc.c
index 59d7e3cb3445..a9a5df2fee4d 100644
--- a/drivers/gpu/drm/i915/display/intel_pipe_crc.c
+++ b/drivers/gpu/drm/i915/display/intel_pipe_crc.c
@@ -441,15 +441,11 @@ display_crc_ctl_parse_source(const char *buf, enum intel_pipe_crc_source *s)
return 0;
}
-void intel_display_crc_init(struct drm_i915_private *dev_priv)
+void intel_crtc_crc_init(struct intel_crtc *crtc)
{
- enum pipe pipe;
+ struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc;
- for_each_pipe(dev_priv, pipe) {
- struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
-
- spin_lock_init(&pipe_crc->lock);
- }
+ spin_lock_init(&pipe_crc->lock);
}
static int i8xx_crc_source_valid(struct drm_i915_private *dev_priv,
@@ -587,7 +583,8 @@ int intel_crtc_verify_crc_source(struct drm_crtc *crtc, const char *source_name,
int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name)
{
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[crtc->index];
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_pipe_crc *pipe_crc = &intel_crtc->pipe_crc;
enum intel_display_power_domain power_domain;
enum intel_pipe_crc_source source;
intel_wakeref_t wakeref;
@@ -640,7 +637,7 @@ void intel_crtc_enable_pipe_crc(struct intel_crtc *intel_crtc)
{
struct drm_crtc *crtc = &intel_crtc->base;
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[crtc->index];
+ struct intel_pipe_crc *pipe_crc = &intel_crtc->pipe_crc;
u32 val = 0;
if (!crtc->crc.opened)
@@ -660,7 +657,7 @@ void intel_crtc_disable_pipe_crc(struct intel_crtc *intel_crtc)
{
struct drm_crtc *crtc = &intel_crtc->base;
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[crtc->index];
+ struct intel_pipe_crc *pipe_crc = &intel_crtc->pipe_crc;
/* Swallow crc's until we stop generating them. */
spin_lock_irq(&pipe_crc->lock);
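This refactor moves the CRC state from a pipe-indexed array in dev_priv into each intel_crtc, so initialisation happens per CRTC object instead of in a device-wide loop. A sketch of the embedded-state shape (userspace locks standing in for spinlock_t):

#include <pthread.h>
#include <stdio.h>

struct pipe_crc {
	pthread_spinlock_t lock;	/* stand-in for the kernel spinlock_t */
};

struct crtc {
	int pipe;
	struct pipe_crc pipe_crc;	/* embedded, initialised per CRTC */
};

static void crtc_crc_init(struct crtc *crtc)
{
	pthread_spin_init(&crtc->pipe_crc.lock, PTHREAD_PROCESS_PRIVATE);
}

int main(void)
{
	struct crtc crtcs[3];
	int i;

	for (i = 0; i < 3; i++) {
		crtcs[i].pipe = i;
		crtc_crc_init(&crtcs[i]);
	}
	printf("initialised %d per-CRTC CRC locks\n", i);
	return 0;
}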
diff --git a/drivers/gpu/drm/i915/display/intel_pipe_crc.h b/drivers/gpu/drm/i915/display/intel_pipe_crc.h
index db258a756fc6..43012b189415 100644
--- a/drivers/gpu/drm/i915/display/intel_pipe_crc.h
+++ b/drivers/gpu/drm/i915/display/intel_pipe_crc.h
@@ -13,7 +13,7 @@ struct drm_i915_private;
struct intel_crtc;
#ifdef CONFIG_DEBUG_FS
-void intel_display_crc_init(struct drm_i915_private *dev_priv);
+void intel_crtc_crc_init(struct intel_crtc *crtc);
int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name);
int intel_crtc_verify_crc_source(struct drm_crtc *crtc,
const char *source_name, size_t *values_cnt);
@@ -22,7 +22,7 @@ const char *const *intel_crtc_get_crc_sources(struct drm_crtc *crtc,
void intel_crtc_disable_pipe_crc(struct intel_crtc *crtc);
void intel_crtc_enable_pipe_crc(struct intel_crtc *crtc);
#else
-static inline void intel_display_crc_init(struct drm_i915_private *dev_priv) {}
+static inline void intel_crtc_crc_init(struct intel_crtc *crtc) {}
#define intel_crtc_set_crc_source NULL
#define intel_crtc_verify_crc_source NULL
#define intel_crtc_get_crc_sources NULL
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index 7e754201f54d..fd9b146e3aba 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -304,7 +304,7 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
drm_dbg_kms(&dev_priv->drm, "eDP panel supports PSR version %x\n",
intel_dp->psr_dpcd[0]);
- if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
+ if (drm_dp_has_quirk(&intel_dp->desc, 0, DP_DPCD_QUIRK_NO_PSR)) {
drm_dbg_kms(&dev_priv->drm,
"PSR support not currently available for this panel\n");
return;
diff --git a/drivers/gpu/drm/i915/display/intel_quirks.c b/drivers/gpu/drm/i915/display/intel_quirks.c
index 9d235d270dac..46beb155d835 100644
--- a/drivers/gpu/drm/i915/display/intel_quirks.c
+++ b/drivers/gpu/drm/i915/display/intel_quirks.c
@@ -82,6 +82,16 @@ static const struct intel_dmi_quirk intel_dmi_quirks[] = {
DMI_MATCH(DMI_PRODUCT_NAME, ""),
},
},
+ {
+ .callback = intel_dmi_reverse_brightness,
+ .ident = "Thundersoft TST178 tablet",
+ /* DMI strings are too generic, also match on BIOS date */
+ .matches = {DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "Aptio CRB"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "To be filled by O.E.M."),
+ DMI_EXACT_MATCH(DMI_BIOS_DATE, "04/15/2014"),
+ },
+ },
{ } /* terminating entry */
},
.hook = quirk_invert_brightness,
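The new quirk entry works around boards whose DMI strings are too generic ("Aptio CRB", "To be filled by O.E.M.") by also matching the BIOS date. A simplified matcher showing why every field must hit exactly (table layout simplified from the kernel's dmi_system_id):

#include <stdio.h>
#include <string.h>

struct dmi_ids {
	const char *board_vendor;
	const char *board_name;
	const char *product_name;
	const char *bios_date;
};

/* All fields must match exactly, mirroring DMI_EXACT_MATCH above. */
static int dmi_exact_match(const struct dmi_ids *sys, const struct dmi_ids *quirk)
{
	return !strcmp(sys->board_vendor, quirk->board_vendor) &&
	       !strcmp(sys->board_name, quirk->board_name) &&
	       !strcmp(sys->product_name, quirk->product_name) &&
	       !strcmp(sys->bios_date, quirk->bios_date);
}

int main(void)
{
	struct dmi_ids tst178 = {
		"AMI Corporation", "Aptio CRB",
		"To be filled by O.E.M.", "04/15/2014",
	};

	printf("match: %d\n", dmi_exact_match(&tst178, &tst178));
	return 0;
}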
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
index b0588150752c..637d8fe2f8c2 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
@@ -34,7 +34,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
-#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "intel_atomic.h"
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.h b/drivers/gpu/drm/i915/display/intel_sdvo.h
index a66f224aa17d..72065e4360d5 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo.h
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.h
@@ -8,8 +8,6 @@
#include <linux/types.h>
-#include <drm/i915_drm.h>
-
#include "i915_reg.h"
struct drm_i915_private;
diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c
index 7abeefe8dce5..deda351719db 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite.c
+++ b/drivers/gpu/drm/i915/display/intel_sprite.c
@@ -37,10 +37,10 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_rect.h>
-#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
+#include "i915_vgpu.h"
#include "intel_atomic_plane.h"
#include "intel_display_types.h"
#include "intel_frontbuffer.h"
@@ -284,6 +284,16 @@ int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state)
bool rotated = drm_rotation_90_or_270(plane_state->hw.rotation);
/*
+ * FIXME hsub/vsub vs. block size is a mess. Pre-tgl CCS
+ * abuses hsub/vsub so we can't use them here. But as they
+ * are limited to 32bpp RGB formats we don't actually need
+ * to check anything.
+ */
+ if (fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
+ fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS)
+ return 0;
+
+ /*
* Hardware doesn't handle subpixel coordinates.
* Adjust to (macro)pixel boundary, but be careful not to
* increase the source viewport size, because that could
@@ -297,26 +307,26 @@ int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state)
drm_rect_init(src, src_x << 16, src_y << 16,
src_w << 16, src_h << 16);
- if (!fb->format->is_yuv)
- return 0;
-
- /* YUV specific checks */
- if (!rotated) {
+ if (fb->format->format == DRM_FORMAT_RGB565 && rotated) {
+ hsub = 2;
+ vsub = 2;
+ } else {
hsub = fb->format->hsub;
vsub = fb->format->vsub;
- } else {
- hsub = vsub = max(fb->format->hsub, fb->format->vsub);
}
+ if (rotated)
+ hsub = vsub = max(hsub, vsub);
+
if (src_x % hsub || src_w % hsub) {
- DRM_DEBUG_KMS("src x/w (%u, %u) must be a multiple of %u for %sYUV planes\n",
- src_x, src_w, hsub, rotated ? "rotated " : "");
+ DRM_DEBUG_KMS("src x/w (%u, %u) must be a multiple of %u (rotated: %s)\n",
+ src_x, src_w, hsub, yesno(rotated));
return -EINVAL;
}
if (src_y % vsub || src_h % vsub) {
- DRM_DEBUG_KMS("src y/h (%u, %u) must be a multiple of %u for %sYUV planes\n",
- src_y, src_h, vsub, rotated ? "rotated " : "");
+ DRM_DEBUG_KMS("src y/h (%u, %u) must be a multiple of %u (rotated: %s)\n",
+ src_y, src_h, vsub, yesno(rotated));
return -EINVAL;
}
@@ -355,9 +365,8 @@ static int skl_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv = to_i915(plane_state->uapi.plane->dev);
- unsigned int pixel_rate = crtc_state->pixel_rate;
- unsigned int src_w, src_h, dst_w, dst_h;
unsigned int num, den;
+ unsigned int pixel_rate = intel_plane_pixel_rate(crtc_state, plane_state);
skl_plane_ratio(crtc_state, plane_state, &num, &den);
@@ -365,17 +374,7 @@ static int skl_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
den *= 2;
- src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
- src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
- dst_w = drm_rect_width(&plane_state->uapi.dst);
- dst_h = drm_rect_height(&plane_state->uapi.dst);
-
- /* Downscaling limits the maximum pixel rate */
- dst_w = min(src_w, dst_w);
- dst_h = min(src_h, dst_h);
-
- return DIV64_U64_ROUND_UP(mul_u32_u32(pixel_rate * num, src_w * src_h),
- mul_u32_u32(den, dst_w * dst_h));
+ return DIV_ROUND_UP(pixel_rate * num, den);
}
static unsigned int
@@ -2077,6 +2076,18 @@ vlv_sprite_check(struct intel_crtc_state *crtc_state,
return 0;
}
+static bool intel_format_is_p01x(u32 format)
+{
+ switch (format) {
+ case DRM_FORMAT_P010:
+ case DRM_FORMAT_P012:
+ case DRM_FORMAT_P016:
+ return true;
+ default:
+ return false;
+ }
+}
+
static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
@@ -2155,6 +2166,15 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
return -EINVAL;
}
+ /* Wa_1606054188:tgl */
+ if (IS_TIGERLAKE(dev_priv) &&
+ plane_state->ckey.flags & I915_SET_COLORKEY_SOURCE &&
+ intel_format_is_p01x(fb->format->format)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Source color keying not supported with P01x formats\n");
+ return -EINVAL;
+ }
+
return 0;
}
@@ -3011,7 +3031,6 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
struct intel_plane *plane;
enum drm_plane_type plane_type;
unsigned int supported_rotations;
- unsigned int possible_crtcs;
const u64 *modifiers;
const u32 *formats;
int num_formats;
@@ -3066,10 +3085,8 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
else
plane_type = DRM_PLANE_TYPE_OVERLAY;
- possible_crtcs = BIT(pipe);
-
ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
- possible_crtcs, plane_funcs,
+ 0, plane_funcs,
formats, num_formats, modifiers,
plane_type,
"plane %d%c", plane_id + 1,
@@ -3120,7 +3137,6 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
{
struct intel_plane *plane;
const struct drm_plane_funcs *plane_funcs;
- unsigned long possible_crtcs;
unsigned int supported_rotations;
const u64 *modifiers;
const u32 *formats;
@@ -3205,10 +3221,8 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
plane->id = PLANE_SPRITE0 + sprite;
plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id);
- possible_crtcs = BIT(pipe);
-
ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
- possible_crtcs, plane_funcs,
+ 0, plane_funcs,
formats, num_formats, modifiers,
DRM_PLANE_TYPE_OVERLAY,
"sprite %c", sprite_name(pipe, sprite));
diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c
index 4f81ee26b7ab..d2e3a3a323e9 100644
--- a/drivers/gpu/drm/i915/display/intel_tv.c
+++ b/drivers/gpu/drm/i915/display/intel_tv.c
@@ -33,7 +33,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
-#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "intel_connector.h"
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c
index 7cba57ae72fe..95ad87d4ccb3 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.c
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.c
@@ -6,8 +6,6 @@
* Manasi Navare <manasi.d.navare@intel.com>
*/
-#include <drm/i915_drm.h>
-
#include "i915_drv.h"
#include "intel_display_types.h"
#include "intel_dsi.h"
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
index d07cfad8ce6f..f4c362dc6e15 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
@@ -1591,59 +1591,6 @@ static const struct drm_connector_funcs intel_dsi_connector_funcs = {
.atomic_duplicate_state = intel_digital_connector_duplicate_state,
};
-static enum drm_panel_orientation
-vlv_dsi_get_hw_panel_orientation(struct intel_connector *connector)
-{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct intel_encoder *encoder = intel_attached_encoder(connector);
- enum intel_display_power_domain power_domain;
- enum drm_panel_orientation orientation;
- struct intel_plane *plane;
- struct intel_crtc *crtc;
- intel_wakeref_t wakeref;
- enum pipe pipe;
- u32 val;
-
- if (!encoder->get_hw_state(encoder, &pipe))
- return DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
-
- crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
- plane = to_intel_plane(crtc->base.primary);
-
- power_domain = POWER_DOMAIN_PIPE(pipe);
- wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
- if (!wakeref)
- return DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
-
- val = intel_de_read(dev_priv, DSPCNTR(plane->i9xx_plane));
-
- if (!(val & DISPLAY_PLANE_ENABLE))
- orientation = DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
- else if (val & DISPPLANE_ROTATE_180)
- orientation = DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP;
- else
- orientation = DRM_MODE_PANEL_ORIENTATION_NORMAL;
-
- intel_display_power_put(dev_priv, power_domain, wakeref);
-
- return orientation;
-}
-
-static enum drm_panel_orientation
-vlv_dsi_get_panel_orientation(struct intel_connector *connector)
-{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- enum drm_panel_orientation orientation;
-
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- orientation = vlv_dsi_get_hw_panel_orientation(connector);
- if (orientation != DRM_MODE_PANEL_ORIENTATION_UNKNOWN)
- return orientation;
- }
-
- return intel_dsi_get_panel_orientation(connector);
-}
-
static void vlv_dsi_add_properties(struct intel_connector *connector)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
@@ -1662,7 +1609,7 @@ static void vlv_dsi_add_properties(struct intel_connector *connector)
drm_connector_set_panel_orientation_with_quirk(
&connector->base,
- vlv_dsi_get_panel_orientation(connector),
+ intel_dsi_get_panel_orientation(connector),
connector->panel.fixed_mode->hdisplay,
connector->panel.fixed_mode->vdisplay);
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
index 81366aa4812b..0598e5382a1d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
@@ -217,7 +217,7 @@ static void clear_pages_worker(struct work_struct *work)
0);
out_request:
if (unlikely(err)) {
- i915_request_skip(rq, err);
+ i915_request_set_error_once(rq, err);
err = 0;
}
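i915_request_skip() becomes i915_request_set_error_once(); judging by the name and the call sites, only the first error recorded on a request sticks. The shape of that semantics, sketched with C11 atomics (the kernel's actual mechanism on the fence error may differ):

#include <stdatomic.h>
#include <stdio.h>

/* Record an error only if none has been recorded yet. */
static void set_error_once(atomic_int *error, int err)
{
	int expected = 0;

	atomic_compare_exchange_strong(error, &expected, err);
}

int main(void)
{
	atomic_int err = 0;

	set_error_once(&err, -5);	/* -EIO: first error wins */
	set_error_once(&err, -22);	/* -EINVAL: ignored */
	printf("recorded error: %d\n", atomic_load(&err));
	return 0;
}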
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index adcebf22a3d3..026999b34abd 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -67,10 +67,9 @@
#include <linux/log2.h>
#include <linux/nospec.h>
-#include <drm/i915_drm.h>
-
#include "gt/gen6_ppgtt.h"
#include "gt/intel_context.h"
+#include "gt/intel_context_param.h"
#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_ring.h"
@@ -243,7 +242,6 @@ static void __free_engines(struct i915_gem_engines *e, unsigned int count)
if (!e->engines[count])
continue;
- RCU_INIT_POINTER(e->engines[count]->gem_context, NULL);
intel_context_put(e->engines[count]);
}
kfree(e);
@@ -256,7 +254,51 @@ static void free_engines(struct i915_gem_engines *e)
static void free_engines_rcu(struct rcu_head *rcu)
{
- free_engines(container_of(rcu, struct i915_gem_engines, rcu));
+ struct i915_gem_engines *engines =
+ container_of(rcu, struct i915_gem_engines, rcu);
+
+ i915_sw_fence_fini(&engines->fence);
+ free_engines(engines);
+}
+
+static int __i915_sw_fence_call
+engines_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
+{
+ struct i915_gem_engines *engines =
+ container_of(fence, typeof(*engines), fence);
+
+ switch (state) {
+ case FENCE_COMPLETE:
+ if (!list_empty(&engines->link)) {
+ struct i915_gem_context *ctx = engines->ctx;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctx->stale.lock, flags);
+ list_del(&engines->link);
+ spin_unlock_irqrestore(&ctx->stale.lock, flags);
+ }
+ i915_gem_context_put(engines->ctx);
+ break;
+
+ case FENCE_FREE:
+ init_rcu_head(&engines->rcu);
+ call_rcu(&engines->rcu, free_engines_rcu);
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct i915_gem_engines *alloc_engines(unsigned int count)
+{
+ struct i915_gem_engines *e;
+
+ e = kzalloc(struct_size(e, engines, count), GFP_KERNEL);
+ if (!e)
+ return NULL;
+
+ i915_sw_fence_init(&e->fence, engines_notify);
+ return e;
}
static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx)
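alloc_engines() centralises the struct_size()-based allocation the old call sites open-coded. struct_size(e, engines, count) is header-plus-flexible-array sizing, overflow-checked in the kernel; the plain-arithmetic equivalent:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct engines {
	unsigned int num_engines;
	void *engines[];	/* flexible array member */
};

int main(void)
{
	unsigned int count = 8;
	/* What struct_size(e, engines, count) computes, minus the
	 * kernel's overflow checking. */
	size_t sz = offsetof(struct engines, engines) +
		    count * sizeof(void *);
	struct engines *e = calloc(1, sz);

	if (!e)
		return 1;
	e->num_engines = count;
	printf("allocated %zu bytes for %u engines\n", sz, e->num_engines);
	free(e);
	return 0;
}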
@@ -266,12 +308,10 @@ static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx)
struct i915_gem_engines *e;
enum intel_engine_id id;
- e = kzalloc(struct_size(e, engines, I915_NUM_ENGINES), GFP_KERNEL);
+ e = alloc_engines(I915_NUM_ENGINES);
if (!e)
return ERR_PTR(-ENOMEM);
- e->ctx = ctx;
-
for_each_engine(engine, gt, id) {
struct intel_context *ce;
@@ -305,7 +345,6 @@ static void i915_gem_context_free(struct i915_gem_context *ctx)
list_del(&ctx->link);
spin_unlock(&ctx->i915->gem.contexts.lock);
- free_engines(rcu_access_pointer(ctx->engines));
mutex_destroy(&ctx->engines_mutex);
if (ctx->timeline)
@@ -492,30 +531,75 @@ static void kill_engines(struct i915_gem_engines *engines)
static void kill_stale_engines(struct i915_gem_context *ctx)
{
struct i915_gem_engines *pos, *next;
- unsigned long flags;
- spin_lock_irqsave(&ctx->stale.lock, flags);
+ spin_lock_irq(&ctx->stale.lock);
+ GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
list_for_each_entry_safe(pos, next, &ctx->stale.engines, link) {
- if (!i915_sw_fence_await(&pos->fence))
+ if (!i915_sw_fence_await(&pos->fence)) {
+ list_del_init(&pos->link);
continue;
+ }
- spin_unlock_irqrestore(&ctx->stale.lock, flags);
+ spin_unlock_irq(&ctx->stale.lock);
kill_engines(pos);
- spin_lock_irqsave(&ctx->stale.lock, flags);
+ spin_lock_irq(&ctx->stale.lock);
+ GEM_BUG_ON(i915_sw_fence_signaled(&pos->fence));
list_safe_reset_next(pos, next, link);
list_del_init(&pos->link); /* decouple from FENCE_COMPLETE */
i915_sw_fence_complete(&pos->fence);
}
- spin_unlock_irqrestore(&ctx->stale.lock, flags);
+ spin_unlock_irq(&ctx->stale.lock);
}
static void kill_context(struct i915_gem_context *ctx)
{
kill_stale_engines(ctx);
- kill_engines(__context_engines_static(ctx));
+}
+
+static void engines_idle_release(struct i915_gem_context *ctx,
+ struct i915_gem_engines *engines)
+{
+ struct i915_gem_engines_iter it;
+ struct intel_context *ce;
+
+ INIT_LIST_HEAD(&engines->link);
+
+ engines->ctx = i915_gem_context_get(ctx);
+
+ for_each_gem_engine(ce, engines, it) {
+ struct dma_fence *fence;
+ int err = 0;
+
+ /* serialises with execbuf */
+ RCU_INIT_POINTER(ce->gem_context, NULL);
+ if (!intel_context_pin_if_active(ce))
+ continue;
+
+ fence = i915_active_fence_get(&ce->timeline->last_request);
+ if (fence) {
+ err = i915_sw_fence_await_dma_fence(&engines->fence,
+ fence, 0,
+ GFP_KERNEL);
+ dma_fence_put(fence);
+ }
+ intel_context_unpin(ce);
+ if (err < 0)
+ goto kill;
+ }
+
+ spin_lock_irq(&ctx->stale.lock);
+ if (!i915_gem_context_is_closed(ctx))
+ list_add_tail(&engines->link, &ctx->stale.engines);
+ spin_unlock_irq(&ctx->stale.lock);
+
+kill:
+ if (list_empty(&engines->link)) /* raced, already closed */
+ kill_engines(engines);
+
+ i915_sw_fence_commit(&engines->fence);
}
static void set_closed_name(struct i915_gem_context *ctx)
@@ -539,11 +623,16 @@ static void context_close(struct i915_gem_context *ctx)
{
struct i915_address_space *vm;
+ /* Flush any concurrent set_engines() */
+ mutex_lock(&ctx->engines_mutex);
+ engines_idle_release(ctx, rcu_replace_pointer(ctx->engines, NULL, 1));
i915_gem_context_set_closed(ctx);
- set_closed_name(ctx);
+ mutex_unlock(&ctx->engines_mutex);
mutex_lock(&ctx->mutex);
+ set_closed_name(ctx);
+
vm = i915_gem_context_vm(ctx);
if (vm)
i915_vm_close(vm);
@@ -668,23 +757,30 @@ err_free:
return ERR_PTR(err);
}
-static void
+static int
context_apply_all(struct i915_gem_context *ctx,
- void (*fn)(struct intel_context *ce, void *data),
+ int (*fn)(struct intel_context *ce, void *data),
void *data)
{
struct i915_gem_engines_iter it;
struct intel_context *ce;
+ int err = 0;
- for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it)
- fn(ce, data);
+ for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+ err = fn(ce, data);
+ if (err)
+ break;
+ }
i915_gem_context_unlock_engines(ctx);
+
+ return err;
}
-static void __apply_ppgtt(struct intel_context *ce, void *vm)
+static int __apply_ppgtt(struct intel_context *ce, void *vm)
{
i915_vm_put(ce->vm);
ce->vm = i915_vm_get(vm);
+ return 0;
}
static struct i915_address_space *
@@ -722,9 +818,10 @@ static void __set_timeline(struct intel_timeline **dst,
intel_timeline_put(old);
}
-static void __apply_timeline(struct intel_context *ce, void *timeline)
+static int __apply_timeline(struct intel_context *ce, void *timeline)
{
__set_timeline(&ce->timeline, timeline);
+ return 0;
}
static void __assign_timeline(struct i915_gem_context *ctx,
@@ -806,6 +903,7 @@ void i915_gem_init__contexts(struct drm_i915_private *i915)
void i915_gem_driver_release__contexts(struct drm_i915_private *i915)
{
flush_work(&i915->gem.contexts.free_work);
+ rcu_barrier(); /* and flush the left over RCU frees */
}
static int gem_context_register(struct i915_gem_context *ctx,
@@ -971,6 +1069,30 @@ static void cb_retire(struct i915_active *base)
kfree(cb);
}
+static inline struct i915_gem_engines *
+__context_engines_await(const struct i915_gem_context *ctx)
+{
+ struct i915_gem_engines *engines;
+
+ rcu_read_lock();
+ do {
+ engines = rcu_dereference(ctx->engines);
+ if (unlikely(!engines))
+ break;
+
+ if (unlikely(!i915_sw_fence_await(&engines->fence)))
+ continue;
+
+ if (likely(engines == rcu_access_pointer(ctx->engines)))
+ break;
+
+ i915_sw_fence_complete(&engines->fence);
+ } while (1);
+ rcu_read_unlock();
+
+ return engines;
+}
+
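__context_engines_await() is the classic acquire-then-recheck loop: snapshot the pointer, take a reference (the sw-fence await), then confirm the pointer is still current, undoing and retrying on a race. A userspace sketch with a plain refcount (the kernel version additionally runs under rcu_read_lock(), omitted here):

#include <stdatomic.h>
#include <stdio.h>

struct engines {
	atomic_int ref;
};

static struct engines *engines_await(struct engines *_Atomic *slot)
{
	struct engines *e;

	do {
		e = atomic_load(slot);
		if (!e)
			break;

		atomic_fetch_add(&e->ref, 1);	/* "await": take a reference */

		if (e == atomic_load(slot))	/* still the current set? */
			break;

		atomic_fetch_sub(&e->ref, 1);	/* raced with replace: retry */
	} while (1);

	return e;
}

int main(void)
{
	struct engines stable = { .ref = 1 };
	struct engines *_Atomic slot = &stable;
	struct engines *e = engines_await(&slot);

	printf("ref after await: %d\n", e ? atomic_load(&e->ref) : 0);
	return 0;
}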
I915_SELFTEST_DECLARE(static intel_engine_mask_t context_barrier_inject_fault);
static int context_barrier_task(struct i915_gem_context *ctx,
intel_engine_mask_t engines,
@@ -981,6 +1103,7 @@ static int context_barrier_task(struct i915_gem_context *ctx,
{
struct context_barrier_task *cb;
struct i915_gem_engines_iter it;
+ struct i915_gem_engines *e;
struct intel_context *ce;
int err = 0;
@@ -997,7 +1120,13 @@ static int context_barrier_task(struct i915_gem_context *ctx,
return err;
}
- for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+ e = __context_engines_await(ctx);
+ if (!e) {
+ i915_active_release(&cb->base);
+ return -ENOENT;
+ }
+
+ for_each_gem_engine(ce, e, it) {
struct i915_request *rq;
if (I915_SELFTEST_ONLY(context_barrier_inject_fault &
@@ -1028,7 +1157,7 @@ static int context_barrier_task(struct i915_gem_context *ctx,
if (err)
break;
}
- i915_gem_context_unlock_engines(ctx);
+ i915_sw_fence_complete(&e->fence);
cb->task = err ? NULL : task; /* caller needs to unwind instead */
cb->data = data;
@@ -1215,6 +1344,63 @@ out:
return err;
}
+static int __apply_ringsize(struct intel_context *ce, void *sz)
+{
+ return intel_context_set_ring_size(ce, (unsigned long)sz);
+}
+
+static int set_ringsize(struct i915_gem_context *ctx,
+ struct drm_i915_gem_context_param *args)
+{
+ if (!HAS_LOGICAL_RING_CONTEXTS(ctx->i915))
+ return -ENODEV;
+
+ if (args->size)
+ return -EINVAL;
+
+ if (!IS_ALIGNED(args->value, I915_GTT_PAGE_SIZE))
+ return -EINVAL;
+
+ if (args->value < I915_GTT_PAGE_SIZE)
+ return -EINVAL;
+
+ if (args->value > 128 * I915_GTT_PAGE_SIZE)
+ return -EINVAL;
+
+ return context_apply_all(ctx,
+ __apply_ringsize,
+ __intel_context_ring_size(args->value));
+}
+
+static int __get_ringsize(struct intel_context *ce, void *arg)
+{
+ long sz;
+
+ sz = intel_context_get_ring_size(ce);
+ GEM_BUG_ON(sz > INT_MAX);
+
+ return sz; /* stop on first engine */
+}
+
+static int get_ringsize(struct i915_gem_context *ctx,
+ struct drm_i915_gem_context_param *args)
+{
+ int sz;
+
+ if (!HAS_LOGICAL_RING_CONTEXTS(ctx->i915))
+ return -ENODEV;
+
+ if (args->size)
+ return -EINVAL;
+
+ sz = context_apply_all(ctx, __get_ringsize, NULL);
+ if (sz < 0)
+ return sz;
+
+ args->value = sz;
+ return 0;
+}
+
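set_ringsize() gates the new I915_CONTEXT_PARAM_RINGSIZE value before applying it: it must be page-aligned and between one page (4 KiB) and 128 pages (512 KiB). The same checks in standalone form:

#include <stdint.h>
#include <stdio.h>

#define I915_GTT_PAGE_SIZE 4096u
#define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

/* Mirror of the bounds checks in set_ringsize() above. */
static int validate_ringsize(uint64_t value)
{
	if (!IS_ALIGNED(value, I915_GTT_PAGE_SIZE))
		return -22;	/* -EINVAL */
	if (value < I915_GTT_PAGE_SIZE)
		return -22;
	if (value > 128 * I915_GTT_PAGE_SIZE)
		return -22;
	return 0;
}

int main(void)
{
	printf("64 KiB : %d\n", validate_ringsize(64 * 1024));		/* ok */
	printf("1 MiB  : %d\n", validate_ringsize(1024 * 1024));	/* too big */
	printf("6000 B : %d\n", validate_ringsize(6000));		/* unaligned */
	return 0;
}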
static int
user_to_context_sseu(struct drm_i915_private *i915,
const struct drm_i915_gem_context_param_sseu *user,
@@ -1562,77 +1748,6 @@ static const i915_user_extension_fn set_engines__extensions[] = {
[I915_CONTEXT_ENGINES_EXT_BOND] = set_engines__bond,
};
-static int engines_notify(struct i915_sw_fence *fence,
- enum i915_sw_fence_notify state)
-{
- struct i915_gem_engines *engines =
- container_of(fence, typeof(*engines), fence);
-
- switch (state) {
- case FENCE_COMPLETE:
- if (!list_empty(&engines->link)) {
- struct i915_gem_context *ctx = engines->ctx;
- unsigned long flags;
-
- spin_lock_irqsave(&ctx->stale.lock, flags);
- list_del(&engines->link);
- spin_unlock_irqrestore(&ctx->stale.lock, flags);
- }
- break;
-
- case FENCE_FREE:
- init_rcu_head(&engines->rcu);
- call_rcu(&engines->rcu, free_engines_rcu);
- break;
- }
-
- return NOTIFY_DONE;
-}
-
-static void engines_idle_release(struct i915_gem_engines *engines)
-{
- struct i915_gem_engines_iter it;
- struct intel_context *ce;
- unsigned long flags;
-
- GEM_BUG_ON(!engines);
- i915_sw_fence_init(&engines->fence, engines_notify);
-
- INIT_LIST_HEAD(&engines->link);
- spin_lock_irqsave(&engines->ctx->stale.lock, flags);
- if (!i915_gem_context_is_closed(engines->ctx))
- list_add(&engines->link, &engines->ctx->stale.engines);
- spin_unlock_irqrestore(&engines->ctx->stale.lock, flags);
- if (list_empty(&engines->link)) /* raced, already closed */
- goto kill;
-
- for_each_gem_engine(ce, engines, it) {
- struct dma_fence *fence;
- int err;
-
- if (!ce->timeline)
- continue;
-
- fence = i915_active_fence_get(&ce->timeline->last_request);
- if (!fence)
- continue;
-
- err = i915_sw_fence_await_dma_fence(&engines->fence,
- fence, 0,
- GFP_KERNEL);
-
- dma_fence_put(fence);
- if (err < 0)
- goto kill;
- }
- goto out;
-
-kill:
- kill_engines(engines);
-out:
- i915_sw_fence_commit(&engines->fence);
-}
-
static int
set_engines(struct i915_gem_context *ctx,
const struct drm_i915_gem_context_param *args)
@@ -1669,14 +1784,10 @@ set_engines(struct i915_gem_context *ctx,
* first 64 engines defined here.
*/
num_engines = (args->size - sizeof(*user)) / sizeof(*user->engines);
-
- set.engines = kmalloc(struct_size(set.engines, engines, num_engines),
- GFP_KERNEL);
+ set.engines = alloc_engines(num_engines);
if (!set.engines)
return -ENOMEM;
- set.engines->ctx = ctx;
-
for (n = 0; n < num_engines; n++) {
struct i915_engine_class_instance ci;
struct intel_engine_cs *engine;
@@ -1729,6 +1840,11 @@ set_engines(struct i915_gem_context *ctx,
replace:
mutex_lock(&ctx->engines_mutex);
+ if (i915_gem_context_is_closed(ctx)) {
+ mutex_unlock(&ctx->engines_mutex);
+ free_engines(set.engines);
+ return -ENOENT;
+ }
if (args->size)
i915_gem_context_set_user_engines(ctx);
else
@@ -1737,7 +1853,7 @@ replace:
mutex_unlock(&ctx->engines_mutex);
/* Keep track of old engine sets for kill_context() */
- engines_idle_release(set.engines);
+ engines_idle_release(ctx, set.engines);
return 0;
}
@@ -1748,7 +1864,7 @@ __copy_engines(struct i915_gem_engines *e)
struct i915_gem_engines *copy;
unsigned int n;
- copy = kmalloc(struct_size(e, engines, e->num_engines), GFP_KERNEL);
+ copy = alloc_engines(e->num_engines);
if (!copy)
return ERR_PTR(-ENOMEM);
@@ -1852,17 +1968,19 @@ set_persistence(struct i915_gem_context *ctx,
return __context_set_persistence(ctx, args->value);
}
-static void __apply_priority(struct intel_context *ce, void *arg)
+static int __apply_priority(struct intel_context *ce, void *arg)
{
struct i915_gem_context *ctx = arg;
if (!intel_engine_has_semaphores(ce->engine))
- return;
+ return 0;
if (ctx->sched.priority >= I915_PRIORITY_NORMAL)
intel_context_set_use_semaphores(ce);
else
intel_context_clear_use_semaphores(ce);
+
+ return 0;
}
static int set_priority(struct i915_gem_context *ctx,
@@ -1955,6 +2073,10 @@ static int ctx_setparam(struct drm_i915_file_private *fpriv,
ret = set_persistence(ctx, args);
break;
+ case I915_CONTEXT_PARAM_RINGSIZE:
+ ret = set_ringsize(ctx, args);
+ break;
+
case I915_CONTEXT_PARAM_BAN_PERIOD:
default:
ret = -EINVAL;
@@ -1983,6 +2105,18 @@ static int create_setparam(struct i915_user_extension __user *ext, void *data)
return ctx_setparam(arg->fpriv, arg->ctx, &local.param);
}
+static int copy_ring_size(struct intel_context *dst,
+ struct intel_context *src)
+{
+ long sz;
+
+ sz = intel_context_get_ring_size(src);
+ if (sz < 0)
+ return sz;
+
+ return intel_context_set_ring_size(dst, sz);
+}
+
static int clone_engines(struct i915_gem_context *dst,
struct i915_gem_context *src)
{
@@ -1991,12 +2125,10 @@ static int clone_engines(struct i915_gem_context *dst,
bool user_engines;
unsigned long n;
- clone = kmalloc(struct_size(e, engines, e->num_engines), GFP_KERNEL);
+ clone = alloc_engines(e->num_engines);
if (!clone)
goto err_unlock;
- clone->ctx = dst;
-
for (n = 0; n < e->num_engines; n++) {
struct intel_engine_cs *engine;
@@ -2026,6 +2158,12 @@ static int clone_engines(struct i915_gem_context *dst,
}
intel_context_set_gem(clone->engines[n], dst);
+
+ /* Copy across the preferred ringsize */
+ if (copy_ring_size(clone->engines[n], e->engines[n])) {
+ __free_engines(clone, n + 1);
+ goto err_unlock;
+ }
}
clone->num_engines = n;
@@ -2033,8 +2171,7 @@ static int clone_engines(struct i915_gem_context *dst,
i915_gem_context_unlock_engines(src);
/* Serialised by constructor */
- free_engines(__context_engines_static(dst));
- RCU_INIT_POINTER(dst->engines, clone);
+ engines_idle_release(dst, rcu_replace_pointer(dst->engines, clone, 1));
if (user_engines)
i915_gem_context_set_user_engines(dst);
else
@@ -2388,6 +2525,10 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
args->value = i915_gem_context_is_persistent(ctx);
break;
+ case I915_CONTEXT_PARAM_RINGSIZE:
+ ret = get_ringsize(ctx, args);
+ break;
+
case I915_CONTEXT_PARAM_BAN_PERIOD:
default:
ret = -EINVAL;
@@ -2461,6 +2602,9 @@ i915_gem_engines_iter_next(struct i915_gem_engines_iter *it)
const struct i915_gem_engines *e = it->engines;
struct intel_context *ctx;
+ if (unlikely(!e))
+ return NULL;
+
do {
if (it->idx >= e->num_engines)
return NULL;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.h b/drivers/gpu/drm/i915/gem/i915_gem_context.h
index 3ae61a355d87..57b7ae2893e1 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.h
@@ -207,7 +207,6 @@ static inline void
i915_gem_engines_iter_init(struct i915_gem_engines_iter *it,
struct i915_gem_engines *engines)
{
- GEM_BUG_ON(!engines);
it->engines = engines;
it->idx = 0;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 4f9c1f5a4ded..d3f4f28e9468 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -10,7 +10,6 @@
#include <linux/uaccess.h>
#include <drm/drm_syncobj.h>
-#include <drm/i915_drm.h>
#include "display/intel_frontbuffer.h"
@@ -28,6 +27,19 @@
#include "i915_sw_fence_work.h"
#include "i915_trace.h"
+struct eb_vma {
+ struct i915_vma *vma;
+ unsigned int flags;
+
+ /** This vma's place in the execbuf reservation list */
+ struct drm_i915_gem_exec_object2 *exec;
+ struct list_head bind_link;
+ struct list_head reloc_link;
+
+ struct hlist_node node;
+ u32 handle;
+};
+
enum {
FORCE_CPU_RELOC = 1,
FORCE_GTT_RELOC,
@@ -35,17 +47,15 @@ enum {
#define DBG_FORCE_RELOC 0 /* choose one of the above! */
};
-#define __EXEC_OBJECT_HAS_REF BIT(31)
-#define __EXEC_OBJECT_HAS_PIN BIT(30)
-#define __EXEC_OBJECT_HAS_FENCE BIT(29)
-#define __EXEC_OBJECT_NEEDS_MAP BIT(28)
-#define __EXEC_OBJECT_NEEDS_BIAS BIT(27)
-#define __EXEC_OBJECT_INTERNAL_FLAGS (~0u << 27) /* all of the above */
+#define __EXEC_OBJECT_HAS_PIN BIT(31)
+#define __EXEC_OBJECT_HAS_FENCE BIT(30)
+#define __EXEC_OBJECT_NEEDS_MAP BIT(29)
+#define __EXEC_OBJECT_NEEDS_BIAS BIT(28)
+#define __EXEC_OBJECT_INTERNAL_FLAGS (~0u << 28) /* all of the above */
#define __EXEC_OBJECT_RESERVED (__EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_FENCE)
#define __EXEC_HAS_RELOC BIT(31)
-#define __EXEC_VALIDATED BIT(30)
-#define __EXEC_INTERNAL_FLAGS (~0u << 30)
+#define __EXEC_INTERNAL_FLAGS (~0u << 31)
#define UPDATE PIN_OFFSET_FIXED
#define BATCH_OFFSET_BIAS (256*1024)
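With __EXEC_OBJECT_HAS_REF gone, the remaining internal per-object flags shift up to bits 28-31, and the catch-all mask becomes ~0u << 28. A quick check that the mask covers exactly those four bits:

#include <stdio.h>

#define BIT(n) (1u << (n))

#define __EXEC_OBJECT_HAS_PIN		BIT(31)
#define __EXEC_OBJECT_HAS_FENCE		BIT(30)
#define __EXEC_OBJECT_NEEDS_MAP		BIT(29)
#define __EXEC_OBJECT_NEEDS_BIAS	BIT(28)
#define __EXEC_OBJECT_INTERNAL_FLAGS	(~0u << 28)

int main(void)
{
	unsigned int all = __EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_FENCE |
			   __EXEC_OBJECT_NEEDS_MAP | __EXEC_OBJECT_NEEDS_BIAS;

	/* Prints 0xf0000000 both ways: the mask is exactly the four flags. */
	printf("flags 0x%08x mask 0x%08x\n",
	       all, __EXEC_OBJECT_INTERNAL_FLAGS);
	return 0;
}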
@@ -220,15 +230,14 @@ struct i915_execbuffer {
struct drm_file *file; /** per-file lookup tables and limits */
struct drm_i915_gem_execbuffer2 *args; /** ioctl parameters */
struct drm_i915_gem_exec_object2 *exec; /** ioctl execobj[] */
- struct i915_vma **vma;
- unsigned int *flags;
+ struct eb_vma *vma;
struct intel_engine_cs *engine; /** engine to queue the request to */
struct intel_context *context; /* logical state for the request */
struct i915_gem_context *gem_context; /** caller's context */
struct i915_request *request; /** our request to build */
- struct i915_vma *batch; /** identity of the batch obj/vma */
+ struct eb_vma *batch; /** identity of the batch obj/vma */
struct i915_vma *trampoline; /** trampoline used for chaining */
/** actual size of execobj[] as we may extend it for the cmdparser */
@@ -276,8 +285,6 @@ struct i915_execbuffer {
struct hlist_head *buckets; /** ht for relocation handles */
};
-#define exec_entry(EB, VMA) (&(EB)->exec[(VMA)->exec_flags - (EB)->flags])
-
static inline bool eb_use_cmdparser(const struct i915_execbuffer *eb)
{
return intel_engine_requires_cmd_parser(eb->engine) ||
@@ -364,9 +371,9 @@ eb_vma_misplaced(const struct drm_i915_gem_exec_object2 *entry,
static inline bool
eb_pin_vma(struct i915_execbuffer *eb,
const struct drm_i915_gem_exec_object2 *entry,
- struct i915_vma *vma)
+ struct eb_vma *ev)
{
- unsigned int exec_flags = *vma->exec_flags;
+ struct i915_vma *vma = ev->vma;
u64 pin_flags;
if (vma->node.size)
@@ -375,24 +382,24 @@ eb_pin_vma(struct i915_execbuffer *eb,
pin_flags = entry->offset & PIN_OFFSET_MASK;
pin_flags |= PIN_USER | PIN_NOEVICT | PIN_OFFSET_FIXED;
- if (unlikely(exec_flags & EXEC_OBJECT_NEEDS_GTT))
+ if (unlikely(ev->flags & EXEC_OBJECT_NEEDS_GTT))
pin_flags |= PIN_GLOBAL;
if (unlikely(i915_vma_pin(vma, 0, 0, pin_flags)))
return false;
- if (unlikely(exec_flags & EXEC_OBJECT_NEEDS_FENCE)) {
+ if (unlikely(ev->flags & EXEC_OBJECT_NEEDS_FENCE)) {
if (unlikely(i915_vma_pin_fence(vma))) {
i915_vma_unpin(vma);
return false;
}
if (vma->fence)
- exec_flags |= __EXEC_OBJECT_HAS_FENCE;
+ ev->flags |= __EXEC_OBJECT_HAS_FENCE;
}
- *vma->exec_flags = exec_flags | __EXEC_OBJECT_HAS_PIN;
- return !eb_vma_misplaced(entry, vma, exec_flags);
+ ev->flags |= __EXEC_OBJECT_HAS_PIN;
+ return !eb_vma_misplaced(entry, vma, ev->flags);
}
static inline void __eb_unreserve_vma(struct i915_vma *vma, unsigned int flags)
@@ -406,13 +413,13 @@ static inline void __eb_unreserve_vma(struct i915_vma *vma, unsigned int flags)
}
static inline void
-eb_unreserve_vma(struct i915_vma *vma, unsigned int *flags)
+eb_unreserve_vma(struct eb_vma *ev)
{
- if (!(*flags & __EXEC_OBJECT_HAS_PIN))
+ if (!(ev->flags & __EXEC_OBJECT_HAS_PIN))
return;
- __eb_unreserve_vma(vma, *flags);
- *flags &= ~__EXEC_OBJECT_RESERVED;
+ __eb_unreserve_vma(ev->vma, ev->flags);
+ ev->flags &= ~__EXEC_OBJECT_RESERVED;
}
static int
@@ -420,11 +427,11 @@ eb_validate_vma(struct i915_execbuffer *eb,
struct drm_i915_gem_exec_object2 *entry,
struct i915_vma *vma)
{
- struct drm_i915_private *i915 = eb->i915;
if (unlikely(entry->flags & eb->invalid_flags))
return -EINVAL;
- if (unlikely(entry->alignment && !is_power_of_2(entry->alignment)))
+ if (unlikely(entry->alignment &&
+ !is_power_of_2_u64(entry->alignment)))
return -EINVAL;
/*
@@ -442,14 +449,6 @@ eb_validate_vma(struct i915_execbuffer *eb,
} else {
entry->pad_to_size = 0;
}
-
- if (unlikely(vma->exec_flags)) {
- drm_dbg(&i915->drm,
- "Object [handle %d, index %d] appears more than once in object list\n",
- entry->handle, (int)(entry - eb->exec));
- return -EINVAL;
- }
-
/*
* From drm_mm perspective address space is continuous,
* so from this point we're always using non-canonical
@@ -472,41 +471,29 @@ eb_validate_vma(struct i915_execbuffer *eb,
return 0;
}
-static int
+static void
eb_add_vma(struct i915_execbuffer *eb,
unsigned int i, unsigned batch_idx,
struct i915_vma *vma)
{
struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
- int err;
+ struct eb_vma *ev = &eb->vma[i];
GEM_BUG_ON(i915_vma_is_closed(vma));
- if (!(eb->args->flags & __EXEC_VALIDATED)) {
- err = eb_validate_vma(eb, entry, vma);
- if (unlikely(err))
- return err;
- }
+ ev->vma = i915_vma_get(vma);
+ ev->exec = entry;
+ ev->flags = entry->flags;
if (eb->lut_size > 0) {
- vma->exec_handle = entry->handle;
- hlist_add_head(&vma->exec_node,
+ ev->handle = entry->handle;
+ hlist_add_head(&ev->node,
&eb->buckets[hash_32(entry->handle,
eb->lut_size)]);
}
if (entry->relocation_count)
- list_add_tail(&vma->reloc_link, &eb->relocs);
-
- /*
- * Stash a pointer from the vma to execobj, so we can query its flags,
- * size, alignment etc as provided by the user. Also we stash a pointer
- * to the vma inside the execobj so that we can use a direct lookup
- * to find the right target VMA when doing relocations.
- */
- eb->vma[i] = vma;
- eb->flags[i] = entry->flags;
- vma->exec_flags = &eb->flags[i];
+ list_add_tail(&ev->reloc_link, &eb->relocs);
/*
* SNA is doing fancy tricks with compressing batch buffers, which leads
@@ -519,30 +506,23 @@ eb_add_vma(struct i915_execbuffer *eb,
*/
if (i == batch_idx) {
if (entry->relocation_count &&
- !(eb->flags[i] & EXEC_OBJECT_PINNED))
- eb->flags[i] |= __EXEC_OBJECT_NEEDS_BIAS;
+ !(ev->flags & EXEC_OBJECT_PINNED))
+ ev->flags |= __EXEC_OBJECT_NEEDS_BIAS;
if (eb->reloc_cache.has_fence)
- eb->flags[i] |= EXEC_OBJECT_NEEDS_FENCE;
+ ev->flags |= EXEC_OBJECT_NEEDS_FENCE;
- eb->batch = vma;
+ eb->batch = ev;
}
- err = 0;
- if (eb_pin_vma(eb, entry, vma)) {
+ if (eb_pin_vma(eb, entry, ev)) {
if (entry->offset != vma->node.start) {
entry->offset = vma->node.start | UPDATE;
eb->args->flags |= __EXEC_HAS_RELOC;
}
} else {
- eb_unreserve_vma(vma, vma->exec_flags);
-
- list_add_tail(&vma->exec_link, &eb->unbound);
- if (drm_mm_node_allocated(&vma->node))
- err = i915_vma_unbind(vma);
- if (unlikely(err))
- vma->exec_flags = NULL;
+ eb_unreserve_vma(ev);
+ list_add_tail(&ev->bind_link, &eb->unbound);
}
- return err;
}
static inline int use_cpu_reloc(const struct reloc_cache *cache,
@@ -563,14 +543,14 @@ static inline int use_cpu_reloc(const struct reloc_cache *cache,
}
static int eb_reserve_vma(const struct i915_execbuffer *eb,
- struct i915_vma *vma)
+ struct eb_vma *ev,
+ u64 pin_flags)
{
- struct drm_i915_gem_exec_object2 *entry = exec_entry(eb, vma);
- unsigned int exec_flags = *vma->exec_flags;
- u64 pin_flags;
+ struct drm_i915_gem_exec_object2 *entry = ev->exec;
+ unsigned int exec_flags = ev->flags;
+ struct i915_vma *vma = ev->vma;
int err;
- pin_flags = PIN_USER | PIN_NONBLOCK;
if (exec_flags & EXEC_OBJECT_NEEDS_GTT)
pin_flags |= PIN_GLOBAL;
@@ -584,11 +564,16 @@ static int eb_reserve_vma(const struct i915_execbuffer *eb,
if (exec_flags & __EXEC_OBJECT_NEEDS_MAP)
pin_flags |= PIN_MAPPABLE;
- if (exec_flags & EXEC_OBJECT_PINNED) {
+ if (exec_flags & EXEC_OBJECT_PINNED)
pin_flags |= entry->offset | PIN_OFFSET_FIXED;
- pin_flags &= ~PIN_NONBLOCK; /* force overlapping checks */
- } else if (exec_flags & __EXEC_OBJECT_NEEDS_BIAS) {
+ else if (exec_flags & __EXEC_OBJECT_NEEDS_BIAS)
pin_flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
+
+ if (drm_mm_node_allocated(&vma->node) &&
+ eb_vma_misplaced(entry, vma, ev->flags)) {
+ err = i915_vma_unbind(vma);
+ if (err)
+ return err;
}
err = i915_vma_pin(vma,
@@ -613,8 +598,8 @@ static int eb_reserve_vma(const struct i915_execbuffer *eb,
exec_flags |= __EXEC_OBJECT_HAS_FENCE;
}
- *vma->exec_flags = exec_flags | __EXEC_OBJECT_HAS_PIN;
- GEM_BUG_ON(eb_vma_misplaced(entry, vma, exec_flags));
+ ev->flags = exec_flags | __EXEC_OBJECT_HAS_PIN;
+ GEM_BUG_ON(eb_vma_misplaced(entry, vma, ev->flags));
return 0;
}
@@ -622,10 +607,11 @@ static int eb_reserve_vma(const struct i915_execbuffer *eb,
static int eb_reserve(struct i915_execbuffer *eb)
{
const unsigned int count = eb->buffer_count;
+ unsigned int pin_flags = PIN_USER | PIN_NONBLOCK;
struct list_head last;
- struct i915_vma *vma;
+ struct eb_vma *ev;
unsigned int i, pass;
- int err;
+ int err = 0;
/*
* Attempt to pin all of the buffers into the GTT.
@@ -641,44 +627,54 @@ static int eb_reserve(struct i915_execbuffer *eb)
* room for the earlier objects *unless* we need to defragment.
*/
+ if (mutex_lock_interruptible(&eb->i915->drm.struct_mutex))
+ return -EINTR;
+
pass = 0;
- err = 0;
do {
- list_for_each_entry(vma, &eb->unbound, exec_link) {
- err = eb_reserve_vma(eb, vma);
+ list_for_each_entry(ev, &eb->unbound, bind_link) {
+ err = eb_reserve_vma(eb, ev, pin_flags);
if (err)
break;
}
- if (err != -ENOSPC)
- return err;
+ if (!(err == -ENOSPC || err == -EAGAIN))
+ break;
/* Resort *all* the objects into priority order */
INIT_LIST_HEAD(&eb->unbound);
INIT_LIST_HEAD(&last);
for (i = 0; i < count; i++) {
- unsigned int flags = eb->flags[i];
- struct i915_vma *vma = eb->vma[i];
+ unsigned int flags;
+ ev = &eb->vma[i];
+ flags = ev->flags;
if (flags & EXEC_OBJECT_PINNED &&
flags & __EXEC_OBJECT_HAS_PIN)
continue;
- eb_unreserve_vma(vma, &eb->flags[i]);
+ eb_unreserve_vma(ev);
if (flags & EXEC_OBJECT_PINNED)
/* Pinned must have their slot */
- list_add(&vma->exec_link, &eb->unbound);
+ list_add(&ev->bind_link, &eb->unbound);
else if (flags & __EXEC_OBJECT_NEEDS_MAP)
/* Map require the lowest 256MiB (aperture) */
- list_add_tail(&vma->exec_link, &eb->unbound);
+ list_add_tail(&ev->bind_link, &eb->unbound);
else if (!(flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS))
/* Prioritise 4GiB region for restricted bo */
- list_add(&vma->exec_link, &last);
+ list_add(&ev->bind_link, &last);
else
- list_add_tail(&vma->exec_link, &last);
+ list_add_tail(&ev->bind_link, &last);
}
list_splice_tail(&last, &eb->unbound);
+ if (err == -EAGAIN) {
+ mutex_unlock(&eb->i915->drm.struct_mutex);
+ flush_workqueue(eb->i915->mm.userptr_wq);
+ mutex_lock(&eb->i915->drm.struct_mutex);
+ continue;
+ }
+
switch (pass++) {
case 0:
break;
@@ -689,13 +685,20 @@ static int eb_reserve(struct i915_execbuffer *eb)
err = i915_gem_evict_vm(eb->context->vm);
mutex_unlock(&eb->context->vm->mutex);
if (err)
- return err;
+ goto unlock;
break;
default:
- return -ENOSPC;
+ err = -ENOSPC;
+ goto unlock;
}
+
+ pin_flags = PIN_USER;
} while (1);
+
+unlock:
+ mutex_unlock(&eb->i915->drm.struct_mutex);
+ return err;
}
static unsigned int eb_batch_index(const struct i915_execbuffer *eb)
@@ -732,17 +735,14 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
unsigned int i, batch;
int err;
+ if (unlikely(i915_gem_context_is_closed(eb->gem_context)))
+ return -ENOENT;
+
INIT_LIST_HEAD(&eb->relocs);
INIT_LIST_HEAD(&eb->unbound);
batch = eb_batch_index(eb);
- mutex_lock(&eb->gem_context->mutex);
- if (unlikely(i915_gem_context_is_closed(eb->gem_context))) {
- err = -ENOENT;
- goto err_ctx;
- }
-
for (i = 0; i < eb->buffer_count; i++) {
u32 handle = eb->exec[i].handle;
struct i915_lut_handle *lut;
@@ -787,45 +787,37 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
i915_gem_object_unlock(obj);
add_vma:
- err = eb_add_vma(eb, i, batch, vma);
+ err = eb_validate_vma(eb, &eb->exec[i], vma);
if (unlikely(err))
goto err_vma;
- GEM_BUG_ON(vma != eb->vma[i]);
- GEM_BUG_ON(vma->exec_flags != &eb->flags[i]);
- GEM_BUG_ON(drm_mm_node_allocated(&vma->node) &&
- eb_vma_misplaced(&eb->exec[i], vma, eb->flags[i]));
+ eb_add_vma(eb, i, batch, vma);
}
- mutex_unlock(&eb->gem_context->mutex);
-
- eb->args->flags |= __EXEC_VALIDATED;
- return eb_reserve(eb);
+ return 0;
err_obj:
i915_gem_object_put(obj);
err_vma:
- eb->vma[i] = NULL;
-err_ctx:
- mutex_unlock(&eb->gem_context->mutex);
+ eb->vma[i].vma = NULL;
return err;
}
-static struct i915_vma *
+static struct eb_vma *
eb_get_vma(const struct i915_execbuffer *eb, unsigned long handle)
{
if (eb->lut_size < 0) {
if (handle >= -eb->lut_size)
return NULL;
- return eb->vma[handle];
+ return &eb->vma[handle];
} else {
struct hlist_head *head;
- struct i915_vma *vma;
+ struct eb_vma *ev;
head = &eb->buckets[hash_32(handle, eb->lut_size)];
- hlist_for_each_entry(vma, head, exec_node) {
- if (vma->exec_handle == handle)
- return vma;
+ hlist_for_each_entry(ev, head, node) {
+ if (ev->handle == handle)
+ return ev;
}
return NULL;
}
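eb_get_vma() keeps its two lookup strategies, now returning struct eb_vma: a negative lut_size means handles index the array directly, otherwise handles are hashed into 2^lut_size buckets. The bucket selection uses the kernel's hash_32(); a userspace equivalent (constant from include/linux/hash.h):

#include <stdint.h>
#include <stdio.h>

#define GOLDEN_RATIO_32 0x61C88647u

/* Multiplicative golden-ratio hash, as in the kernel's hash_32(). */
static uint32_t hash_32(uint32_t val, unsigned int bits)
{
	return (val * GOLDEN_RATIO_32) >> (32 - bits);
}

int main(void)
{
	unsigned int lut_bits = 4;	/* 16 buckets */
	uint32_t handle = 42;

	printf("handle %u -> bucket %u of %u\n",
	       handle, hash_32(handle, lut_bits), 1u << lut_bits);
	return 0;
}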
@@ -837,32 +829,21 @@ static void eb_release_vmas(const struct i915_execbuffer *eb)
unsigned int i;
for (i = 0; i < count; i++) {
- struct i915_vma *vma = eb->vma[i];
- unsigned int flags = eb->flags[i];
+ struct eb_vma *ev = &eb->vma[i];
+ struct i915_vma *vma = ev->vma;
if (!vma)
break;
- GEM_BUG_ON(vma->exec_flags != &eb->flags[i]);
- vma->exec_flags = NULL;
- eb->vma[i] = NULL;
+ eb->vma[i].vma = NULL;
- if (flags & __EXEC_OBJECT_HAS_PIN)
- __eb_unreserve_vma(vma, flags);
+ if (ev->flags & __EXEC_OBJECT_HAS_PIN)
+ __eb_unreserve_vma(vma, ev->flags);
- if (flags & __EXEC_OBJECT_HAS_REF)
- i915_vma_put(vma);
+ i915_vma_put(vma);
}
}
-static void eb_reset_vmas(const struct i915_execbuffer *eb)
-{
- eb_release_vmas(eb);
- if (eb->lut_size > 0)
- memset(eb->buckets, 0,
- sizeof(struct hlist_head) << eb->lut_size);
-}
-
static void eb_destroy(const struct i915_execbuffer *eb)
{
GEM_BUG_ON(eb->reloc_cache.rq);
@@ -1198,7 +1179,7 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
goto out_pool;
skip_request:
- i915_request_skip(rq, err);
+ i915_request_set_error_once(rq, err);
err_request:
i915_request_add(rq);
err_unpin:
@@ -1329,11 +1310,11 @@ out:
static u64
eb_relocate_entry(struct i915_execbuffer *eb,
- struct i915_vma *vma,
+ struct eb_vma *ev,
const struct drm_i915_gem_relocation_entry *reloc)
{
struct drm_i915_private *i915 = eb->i915;
- struct i915_vma *target;
+ struct eb_vma *target;
int err;
/* we've already hold a reference to all valid objects */
@@ -1365,7 +1346,7 @@ eb_relocate_entry(struct i915_execbuffer *eb,
}
if (reloc->write_domain) {
- *target->exec_flags |= EXEC_OBJECT_WRITE;
+ target->flags |= EXEC_OBJECT_WRITE;
/*
* Sandybridge PPGTT errata: We need a global gtt mapping
@@ -1375,7 +1356,8 @@ eb_relocate_entry(struct i915_execbuffer *eb,
*/
if (reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
IS_GEN(eb->i915, 6)) {
- err = i915_vma_bind(target, target->obj->cache_level,
+ err = i915_vma_bind(target->vma,
+ target->vma->obj->cache_level,
PIN_GLOBAL, NULL);
if (WARN_ONCE(err,
"Unexpected failure to bind target VMA!"))
@@ -1388,17 +1370,17 @@ eb_relocate_entry(struct i915_execbuffer *eb,
* more work needs to be done.
*/
if (!DBG_FORCE_RELOC &&
- gen8_canonical_addr(target->node.start) == reloc->presumed_offset)
+ gen8_canonical_addr(target->vma->node.start) == reloc->presumed_offset)
return 0;
/* Check that the relocation address is valid... */
if (unlikely(reloc->offset >
- vma->size - (eb->reloc_cache.use_64bit_reloc ? 8 : 4))) {
+ ev->vma->size - (eb->reloc_cache.use_64bit_reloc ? 8 : 4))) {
drm_dbg(&i915->drm, "Relocation beyond object bounds: "
"target %d offset %d size %d.\n",
reloc->target_handle,
(int)reloc->offset,
- (int)vma->size);
+ (int)ev->vma->size);
return -EINVAL;
}
if (unlikely(reloc->offset & 3)) {
@@ -1417,18 +1399,18 @@ eb_relocate_entry(struct i915_execbuffer *eb,
* do relocations we are already stalling, disable the user's opt
* out of our synchronisation.
*/
- *vma->exec_flags &= ~EXEC_OBJECT_ASYNC;
+ ev->flags &= ~EXEC_OBJECT_ASYNC;
/* and update the user's relocation entry */
- return relocate_entry(vma, reloc, eb, target);
+ return relocate_entry(ev->vma, reloc, eb, target->vma);
}
-static int eb_relocate_vma(struct i915_execbuffer *eb, struct i915_vma *vma)
+static int eb_relocate_vma(struct i915_execbuffer *eb, struct eb_vma *ev)
{
#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
struct drm_i915_gem_relocation_entry stack[N_RELOC(512)];
struct drm_i915_gem_relocation_entry __user *urelocs;
- const struct drm_i915_gem_exec_object2 *entry = exec_entry(eb, vma);
+ const struct drm_i915_gem_exec_object2 *entry = ev->exec;
unsigned int remain;
urelocs = u64_to_user_ptr(entry->relocs_ptr);
@@ -1458,9 +1440,7 @@ static int eb_relocate_vma(struct i915_execbuffer *eb, struct i915_vma *vma)
* we would try to acquire the struct mutex again. Obviously
* this is bad and so lockdep complains vehemently.
*/
- pagefault_disable();
- copied = __copy_from_user_inatomic(r, urelocs, count * sizeof(r[0]));
- pagefault_enable();
+ copied = __copy_from_user(r, urelocs, count * sizeof(r[0]));
if (unlikely(copied)) {
remain = -EFAULT;
goto out;
@@ -1468,7 +1448,7 @@ static int eb_relocate_vma(struct i915_execbuffer *eb, struct i915_vma *vma)
remain -= count;
do {
- u64 offset = eb_relocate_entry(eb, vma, r);
+ u64 offset = eb_relocate_entry(eb, ev, r);
if (likely(offset == 0)) {
} else if ((s64)offset < 0) {
@@ -1510,278 +1490,34 @@ out:
return remain;
}
-static int
-eb_relocate_vma_slow(struct i915_execbuffer *eb, struct i915_vma *vma)
-{
- const struct drm_i915_gem_exec_object2 *entry = exec_entry(eb, vma);
- struct drm_i915_gem_relocation_entry *relocs =
- u64_to_ptr(typeof(*relocs), entry->relocs_ptr);
- unsigned int i;
- int err;
-
- for (i = 0; i < entry->relocation_count; i++) {
- u64 offset = eb_relocate_entry(eb, vma, &relocs[i]);
-
- if ((s64)offset < 0) {
- err = (int)offset;
- goto err;
- }
- }
- err = 0;
-err:
- reloc_cache_reset(&eb->reloc_cache);
- return err;
-}
-
-static int check_relocations(const struct drm_i915_gem_exec_object2 *entry)
-{
- const char __user *addr, *end;
- unsigned long size;
- char __maybe_unused c;
-
- size = entry->relocation_count;
- if (size == 0)
- return 0;
-
- if (size > N_RELOC(ULONG_MAX))
- return -EINVAL;
-
- addr = u64_to_user_ptr(entry->relocs_ptr);
- size *= sizeof(struct drm_i915_gem_relocation_entry);
- if (!access_ok(addr, size))
- return -EFAULT;
-
- end = addr + size;
- for (; addr < end; addr += PAGE_SIZE) {
- int err = __get_user(c, addr);
- if (err)
- return err;
- }
- return __get_user(c, end - 1);
-}
-
-static int eb_copy_relocations(const struct i915_execbuffer *eb)
+static int eb_relocate(struct i915_execbuffer *eb)
{
- struct drm_i915_gem_relocation_entry *relocs;
- const unsigned int count = eb->buffer_count;
- unsigned int i;
int err;
- for (i = 0; i < count; i++) {
- const unsigned int nreloc = eb->exec[i].relocation_count;
- struct drm_i915_gem_relocation_entry __user *urelocs;
- unsigned long size;
- unsigned long copied;
-
- if (nreloc == 0)
- continue;
-
- err = check_relocations(&eb->exec[i]);
- if (err)
- goto err;
-
- urelocs = u64_to_user_ptr(eb->exec[i].relocs_ptr);
- size = nreloc * sizeof(*relocs);
-
- relocs = kvmalloc_array(size, 1, GFP_KERNEL);
- if (!relocs) {
- err = -ENOMEM;
- goto err;
- }
-
- /* copy_from_user is limited to < 4GiB */
- copied = 0;
- do {
- unsigned int len =
- min_t(u64, BIT_ULL(31), size - copied);
-
- if (__copy_from_user((char *)relocs + copied,
- (char __user *)urelocs + copied,
- len))
- goto end;
-
- copied += len;
- } while (copied < size);
-
- /*
- * As we do not update the known relocation offsets after
- * relocating (due to the complexities in lock handling),
- * we need to mark them as invalid now so that we force the
- * relocation processing next time. Just in case the target
- * object is evicted and then rebound into its old
- * presumed_offset before the next execbuffer - if that
- * happened we would make the mistake of assuming that the
- * relocations were valid.
- */
- if (!user_access_begin(urelocs, size))
- goto end;
-
- for (copied = 0; copied < nreloc; copied++)
- unsafe_put_user(-1,
- &urelocs[copied].presumed_offset,
- end_user);
- user_access_end();
-
- eb->exec[i].relocs_ptr = (uintptr_t)relocs;
- }
-
- return 0;
-
-end_user:
- user_access_end();
-end:
- kvfree(relocs);
- err = -EFAULT;
-err:
- while (i--) {
- relocs = u64_to_ptr(typeof(*relocs), eb->exec[i].relocs_ptr);
- if (eb->exec[i].relocation_count)
- kvfree(relocs);
- }
- return err;
-}
-
-static int eb_prefault_relocations(const struct i915_execbuffer *eb)
-{
- const unsigned int count = eb->buffer_count;
- unsigned int i;
-
- for (i = 0; i < count; i++) {
- int err;
-
- err = check_relocations(&eb->exec[i]);
- if (err)
- return err;
- }
-
- return 0;
-}
-
-static noinline int eb_relocate_slow(struct i915_execbuffer *eb)
-{
- struct drm_device *dev = &eb->i915->drm;
- bool have_copy = false;
- struct i915_vma *vma;
- int err = 0;
-
-repeat:
- if (signal_pending(current)) {
- err = -ERESTARTSYS;
- goto out;
- }
-
- /* We may process another execbuffer during the unlock... */
- eb_reset_vmas(eb);
- mutex_unlock(&dev->struct_mutex);
-
- /*
- * We take 3 passes through the slowpath.
- *
- * 1 - we try to just prefault all the user relocation entries and
- * then attempt to reuse the atomic pagefault disabled fast path again.
- *
- * 2 - we copy the user entries to a local buffer here outside of the
- * local and allow ourselves to wait upon any rendering before
- * relocations
- *
- * 3 - we already have a local copy of the relocation entries, but
- * were interrupted (EAGAIN) whilst waiting for the objects, try again.
- */
- if (!err) {
- err = eb_prefault_relocations(eb);
- } else if (!have_copy) {
- err = eb_copy_relocations(eb);
- have_copy = err == 0;
- } else {
- cond_resched();
- err = 0;
- }
- if (err) {
- mutex_lock(&dev->struct_mutex);
- goto out;
- }
-
- /* A frequent cause for EAGAIN are currently unavailable client pages */
- flush_workqueue(eb->i915->mm.userptr_wq);
-
- err = i915_mutex_lock_interruptible(dev);
- if (err) {
- mutex_lock(&dev->struct_mutex);
- goto out;
- }
-
- /* reacquire the objects */
+ mutex_lock(&eb->gem_context->mutex);
err = eb_lookup_vmas(eb);
+ mutex_unlock(&eb->gem_context->mutex);
if (err)
- goto err;
-
- GEM_BUG_ON(!eb->batch);
-
- list_for_each_entry(vma, &eb->relocs, reloc_link) {
- if (!have_copy) {
- pagefault_disable();
- err = eb_relocate_vma(eb, vma);
- pagefault_enable();
- if (err)
- goto repeat;
- } else {
- err = eb_relocate_vma_slow(eb, vma);
- if (err)
- goto err;
- }
- }
-
- /*
- * Leave the user relocations as are, this is the painfully slow path,
- * and we want to avoid the complication of dropping the lock whilst
- * having buffers reserved in the aperture and so causing spurious
- * ENOSPC for random operations.
- */
-
-err:
- if (err == -EAGAIN)
- goto repeat;
-
-out:
- if (have_copy) {
- const unsigned int count = eb->buffer_count;
- unsigned int i;
-
- for (i = 0; i < count; i++) {
- const struct drm_i915_gem_exec_object2 *entry =
- &eb->exec[i];
- struct drm_i915_gem_relocation_entry *relocs;
-
- if (!entry->relocation_count)
- continue;
+ return err;
- relocs = u64_to_ptr(typeof(*relocs), entry->relocs_ptr);
- kvfree(relocs);
- }
+ if (!list_empty(&eb->unbound)) {
+ err = eb_reserve(eb);
+ if (err)
+ return err;
}
- return err;
-}
-
-static int eb_relocate(struct i915_execbuffer *eb)
-{
- if (eb_lookup_vmas(eb))
- goto slow;
-
/* The objects are in their final locations, apply the relocations. */
if (eb->args->flags & __EXEC_HAS_RELOC) {
- struct i915_vma *vma;
+ struct eb_vma *ev;
- list_for_each_entry(vma, &eb->relocs, reloc_link) {
- if (eb_relocate_vma(eb, vma))
- goto slow;
+ list_for_each_entry(ev, &eb->relocs, reloc_link) {
+ err = eb_relocate_vma(eb, ev);
+ if (err)
+ return err;
}
}
return 0;
-
-slow:
- return eb_relocate_slow(eb);
}
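
The hunks above complete the conversion from the old parallel vma[]/flags[] arrays to a single per-object container threaded through lookup, reservation and relocation alike. For reference, struct eb_vma is defined earlier in this patch (outside this excerpt); it is shaped roughly like the sketch below, carrying the fields these hunks rely on (vma, flags, exec, reloc_link) plus the lookup/bind bookkeeping:

	/* Rough shape only; see the definition earlier in the patch. */
	struct eb_vma {
		struct i915_vma *vma;
		unsigned int flags;

		/* This vma's place in the execbuf reservation list */
		struct drm_i915_gem_exec_object2 *exec;
		struct list_head bind_link;
		struct list_head reloc_link;

		struct hlist_node node;
		u32 handle;
	};
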
static int eb_move_to_gpu(struct i915_execbuffer *eb)
@@ -1794,27 +1530,19 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
ww_acquire_init(&acquire, &reservation_ww_class);
for (i = 0; i < count; i++) {
- struct i915_vma *vma = eb->vma[i];
+ struct eb_vma *ev = &eb->vma[i];
+ struct i915_vma *vma = ev->vma;
err = ww_mutex_lock_interruptible(&vma->resv->lock, &acquire);
- if (!err)
- continue;
-
- GEM_BUG_ON(err == -EALREADY); /* No duplicate vma */
-
if (err == -EDEADLK) {
GEM_BUG_ON(i == 0);
do {
int j = i - 1;
- ww_mutex_unlock(&eb->vma[j]->resv->lock);
+ ww_mutex_unlock(&eb->vma[j].vma->resv->lock);
- swap(eb->flags[i], eb->flags[j]);
swap(eb->vma[i], eb->vma[j]);
- eb->vma[i]->exec_flags = &eb->flags[i];
} while (--i);
- GEM_BUG_ON(vma != eb->vma[0]);
- vma->exec_flags = &eb->flags[0];
err = ww_mutex_lock_slow_interruptible(&vma->resv->lock,
&acquire);
@@ -1825,8 +1553,9 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
ww_acquire_done(&acquire);
while (i--) {
- unsigned int flags = eb->flags[i];
- struct i915_vma *vma = eb->vma[i];
+ struct eb_vma *ev = &eb->vma[i];
+ struct i915_vma *vma = ev->vma;
+ unsigned int flags = ev->flags;
struct drm_i915_gem_object *obj = vma->obj;
assert_vma_held(vma);
@@ -1870,10 +1599,9 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
i915_vma_unlock(vma);
__eb_unreserve_vma(vma, flags);
- vma->exec_flags = NULL;
+ i915_vma_put(vma);
- if (unlikely(flags & __EXEC_OBJECT_HAS_REF))
- i915_vma_put(vma);
+ ev->vma = NULL;
}
ww_acquire_fini(&acquire);
@@ -1887,7 +1615,7 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
return 0;
err_skip:
- i915_request_skip(eb->request, err);
+ i915_request_set_error_once(eb->request, err);
return err;
}
@@ -2008,7 +1736,7 @@ static int eb_parse_pipeline(struct i915_execbuffer *eb,
if (!pw)
return -ENOMEM;
- err = i915_active_acquire(&eb->batch->active);
+ err = i915_active_acquire(&eb->batch->vma->active);
if (err)
goto err_free;
@@ -2025,7 +1753,7 @@ static int eb_parse_pipeline(struct i915_execbuffer *eb,
dma_fence_work_init(&pw->base, &eb_parse_ops);
pw->engine = eb->engine;
- pw->batch = eb->batch;
+ pw->batch = eb->batch->vma;
pw->batch_offset = eb->batch_start_offset;
pw->batch_length = eb->batch_len;
pw->shadow = shadow;
@@ -2067,7 +1795,7 @@ err_trampoline:
err_shadow:
i915_active_release(&shadow->active);
err_batch:
- i915_active_release(&eb->batch->active);
+ i915_active_release(&eb->batch->vma->active);
err_free:
kfree(pw);
return err;
@@ -2130,15 +1858,12 @@ static int eb_parse(struct i915_execbuffer *eb)
if (err)
goto err_trampoline;
- eb->vma[eb->buffer_count] = i915_vma_get(shadow);
- eb->flags[eb->buffer_count] =
- __EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_REF;
- shadow->exec_flags = &eb->flags[eb->buffer_count];
- eb->buffer_count++;
+ eb->vma[eb->buffer_count].vma = i915_vma_get(shadow);
+ eb->vma[eb->buffer_count].flags = __EXEC_OBJECT_HAS_PIN;
+ eb->batch = &eb->vma[eb->buffer_count++];
eb->trampoline = trampoline;
eb->batch_start_offset = 0;
- eb->batch = shadow;
shadow->private = pool;
return 0;
@@ -2165,7 +1890,7 @@ add_to_client(struct i915_request *rq, struct drm_file *file)
spin_unlock(&file_priv->mm.lock);
}
-static int eb_submit(struct i915_execbuffer *eb)
+static int eb_submit(struct i915_execbuffer *eb, struct i915_vma *batch)
{
int err;
@@ -2192,7 +1917,7 @@ static int eb_submit(struct i915_execbuffer *eb)
}
err = eb->engine->emit_bb_start(eb->request,
- eb->batch->node.start +
+ batch->node.start +
eb->batch_start_offset,
eb->batch_len,
eb->batch_flags);
@@ -2327,15 +2052,22 @@ static int __eb_pin_engine(struct i915_execbuffer *eb, struct intel_context *ce)
intel_context_timeline_unlock(tl);
if (rq) {
- if (i915_request_wait(rq,
- I915_WAIT_INTERRUPTIBLE,
- MAX_SCHEDULE_TIMEOUT) < 0) {
- i915_request_put(rq);
- err = -EINTR;
- goto err_exit;
- }
+ bool nonblock = eb->file->filp->f_flags & O_NONBLOCK;
+ long timeout;
+ timeout = MAX_SCHEDULE_TIMEOUT;
+ if (nonblock)
+ timeout = 0;
+
+ timeout = i915_request_wait(rq,
+ I915_WAIT_INTERRUPTIBLE,
+ timeout);
i915_request_put(rq);
+
+ if (timeout < 0) {
+ err = nonblock ? -EWOULDBLOCK : timeout;
+ goto err_exit;
+ }
}
eb->engine = ce->engine;
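
The upshot is that a client which opened the DRM device with O_NONBLOCK no longer sleeps while the engine's timeline is saturated during pinning; it gets -EWOULDBLOCK back immediately. A hedged userspace sketch (device path and setup illustrative only; note plain ioctl() is used, since libdrm's drmIoctl() retries EAGAIN/EWOULDBLOCK internally):

	int fd = open("/dev/dri/renderD128", O_RDWR | O_NONBLOCK);
	struct drm_i915_gem_execbuffer2 execbuf = { 0 };

	/* ... fill execbuf ... */
	if (ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf) < 0 &&
	    errno == EWOULDBLOCK) {
		/* ring full: back off and resubmit once earlier work drains */
	}
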
@@ -2560,6 +2292,73 @@ signal_fence_array(struct i915_execbuffer *eb,
}
}
+static void retire_requests(struct intel_timeline *tl, struct i915_request *end)
+{
+ struct i915_request *rq, *rn;
+
+ list_for_each_entry_safe(rq, rn, &tl->requests, link)
+ if (rq == end || !i915_request_retire(rq))
+ break;
+}
+
+static void eb_request_add(struct i915_execbuffer *eb)
+{
+ struct i915_request *rq = eb->request;
+ struct intel_timeline * const tl = i915_request_timeline(rq);
+ struct i915_sched_attr attr = {};
+ struct i915_request *prev;
+
+ lockdep_assert_held(&tl->mutex);
+ lockdep_unpin_lock(&tl->mutex, rq->cookie);
+
+ trace_i915_request_add(rq);
+
+ prev = __i915_request_commit(rq);
+
+ /* Check that the context wasn't destroyed before submission */
+ if (likely(rcu_access_pointer(eb->context->gem_context))) {
+ attr = eb->gem_context->sched;
+
+ /*
+ * Boost actual workloads past semaphores!
+ *
+ * With semaphores we spin on one engine waiting for another,
+ * simply to reduce the latency of starting our work when
+ * the signaler completes. However, if there is any other
+ * work that we could be doing on this engine instead, that
+ * is better utilisation and will reduce the overall duration
+ * of the current work. To avoid PI boosting a semaphore
+ * far in the distant past ahead of useful work, we keep a history
+ * of any semaphore use along our dependency chain.
+ */
+ if (!(rq->sched.flags & I915_SCHED_HAS_SEMAPHORE_CHAIN))
+ attr.priority |= I915_PRIORITY_NOSEMAPHORE;
+
+ /*
+ * Boost priorities to new clients (new request flows).
+ *
+ * Allow interactive/synchronous clients to jump ahead of
+ * the bulk clients. (FQ_CODEL)
+ */
+ if (list_empty(&rq->sched.signalers_list))
+ attr.priority |= I915_PRIORITY_WAIT;
+ } else {
+ /* Serialise with context_close via the add_to_timeline */
+ i915_request_set_error_once(rq, -ENOENT);
+ __i915_request_skip(rq);
+ }
+
+ local_bh_disable();
+ __i915_request_queue(rq, &attr);
+ local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
+
+ /* Try to clean up the client's timeline after submitting the request */
+ if (prev)
+ retire_requests(tl, prev);
+
+ mutex_unlock(&tl->mutex);
+}
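
Both boosts are low-order flag bits OR'd into the priority value, below the user-visible priority range, so they can apply to the same request. A minimal sketch, assuming the priolist layout of this era (I915_USER_PRIORITY() shifting the context priority above the internal bits is an assumption, not shown in this patch):

	struct i915_sched_attr attr = {
		.priority = I915_USER_PRIORITY(ctx_prio),	/* assumed macro */
	};

	if (!(rq->sched.flags & I915_SCHED_HAS_SEMAPHORE_CHAIN))
		attr.priority |= I915_PRIORITY_NOSEMAPHORE;	/* no semaphore history */
	if (list_empty(&rq->sched.signalers_list))
		attr.priority |= I915_PRIORITY_WAIT;		/* first request of a new flow */
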
+
static int
i915_gem_do_execbuffer(struct drm_device *dev,
struct drm_file *file,
@@ -2572,6 +2371,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
struct dma_fence *in_fence = NULL;
struct dma_fence *exec_fence = NULL;
struct sync_file *out_fence = NULL;
+ struct i915_vma *batch;
int out_fence_fd = -1;
int err;
@@ -2586,9 +2386,8 @@ i915_gem_do_execbuffer(struct drm_device *dev,
args->flags |= __EXEC_HAS_RELOC;
eb.exec = exec;
- eb.vma = (struct i915_vma **)(exec + args->buffer_count + 1);
- eb.vma[0] = NULL;
- eb.flags = (unsigned int *)(eb.vma + args->buffer_count + 1);
+ eb.vma = (struct eb_vma *)(exec + args->buffer_count + 1);
+ eb.vma[0].vma = NULL;
eb.invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
reloc_cache_init(&eb.reloc_cache, eb.i915);
@@ -2656,10 +2455,6 @@ i915_gem_do_execbuffer(struct drm_device *dev,
if (unlikely(err))
goto err_context;
- err = i915_mutex_lock_interruptible(dev);
- if (err)
- goto err_engine;
-
err = eb_relocate(&eb);
if (err) {
/*
@@ -2673,21 +2468,23 @@ i915_gem_do_execbuffer(struct drm_device *dev,
goto err_vma;
}
- if (unlikely(*eb.batch->exec_flags & EXEC_OBJECT_WRITE)) {
+ if (unlikely(eb.batch->flags & EXEC_OBJECT_WRITE)) {
drm_dbg(&i915->drm,
"Attempting to use self-modifying batch buffer\n");
err = -EINVAL;
goto err_vma;
}
- if (eb.batch_start_offset > eb.batch->size ||
- eb.batch_len > eb.batch->size - eb.batch_start_offset) {
+
+ if (range_overflows_t(u64,
+ eb.batch_start_offset, eb.batch_len,
+ eb.batch->vma->size)) {
drm_dbg(&i915->drm, "Attempting to use out-of-bounds batch\n");
err = -EINVAL;
goto err_vma;
}
if (eb.batch_len == 0)
- eb.batch_len = eb.batch->size - eb.batch_start_offset;
+ eb.batch_len = eb.batch->vma->size - eb.batch_start_offset;
err = eb_parse(&eb);
if (err)
@@ -2697,6 +2494,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
* batch" bit. Hence we need to pin secure batches into the global gtt.
* hsw should have this fixed, but bdw mucks it up again. */
+ batch = eb.batch->vma;
if (eb.batch_flags & I915_DISPATCH_SECURE) {
struct i915_vma *vma;
@@ -2710,13 +2508,13 @@ i915_gem_do_execbuffer(struct drm_device *dev,
* fitting due to fragmentation.
* So this is actually safe.
*/
- vma = i915_gem_object_ggtt_pin(eb.batch->obj, NULL, 0, 0, 0);
+ vma = i915_gem_object_ggtt_pin(batch->obj, NULL, 0, 0, 0);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err_parse;
}
- eb.batch = vma;
+ batch = vma;
}
/* All GPU relocation batches must be submitted prior to the user rq */
@@ -2763,16 +2561,16 @@ i915_gem_do_execbuffer(struct drm_device *dev,
* inactive_list and lose its active reference. Hence we do not need
* to explicitly hold another reference here.
*/
- eb.request->batch = eb.batch;
- if (eb.batch->private)
- intel_engine_pool_mark_active(eb.batch->private, eb.request);
+ eb.request->batch = batch;
+ if (batch->private)
+ intel_engine_pool_mark_active(batch->private, eb.request);
trace_i915_request_queue(eb.request, eb.batch_flags);
- err = eb_submit(&eb);
+ err = eb_submit(&eb, batch);
err_request:
add_to_client(eb.request, file);
i915_request_get(eb.request);
- i915_request_add(eb.request);
+ eb_request_add(&eb);
if (fences)
signal_fence_array(&eb, fences);
@@ -2791,17 +2589,15 @@ err_request:
err_batch_unpin:
if (eb.batch_flags & I915_DISPATCH_SECURE)
- i915_vma_unpin(eb.batch);
+ i915_vma_unpin(batch);
err_parse:
- if (eb.batch->private)
- intel_engine_pool_put(eb.batch->private);
+ if (batch->private)
+ intel_engine_pool_put(batch->private);
err_vma:
if (eb.exec)
eb_release_vmas(&eb);
if (eb.trampoline)
i915_vma_unpin(eb.trampoline);
- mutex_unlock(&dev->struct_mutex);
-err_engine:
eb_unpin_engine(&eb);
err_context:
i915_gem_context_put(eb.gem_context);
@@ -2819,9 +2615,7 @@ err_in_fence:
static size_t eb_element_size(void)
{
- return (sizeof(struct drm_i915_gem_exec_object2) +
- sizeof(struct i915_vma *) +
- sizeof(unsigned int));
+ return sizeof(struct drm_i915_gem_exec_object2) + sizeof(struct eb_vma);
}
static bool check_buffer_count(size_t count)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_internal.c b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
index 9cfb0e41ff06..cbbff81aa0af 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_internal.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
@@ -8,8 +8,6 @@
#include <linux/slab.h>
#include <linux/swiotlb.h>
-#include <drm/i915_drm.h>
-
#include "i915_drv.h"
#include "i915_gem.h"
#include "i915_gem_object.h"
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index e8cccc131c40..b39c24dae64e 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -775,7 +775,7 @@ static struct file *mmap_singleton(struct drm_i915_private *i915)
struct file *file;
rcu_read_lock();
- file = i915->gem.mmap_singleton;
+ file = READ_ONCE(i915->gem.mmap_singleton);
if (file && !get_file_rcu(file))
file = NULL;
rcu_read_unlock();
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index e44a2f40b520..2faa481cc18f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -11,8 +11,6 @@
#include <drm/drm_file.h>
#include <drm/drm_device.h>
-#include <drm/i915_drm.h>
-
#include "display/intel_frontbuffer.h"
#include "i915_gem_object_types.h"
#include "i915_gem_gtt.h"
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
index 70809d8897cd..e00792158f13 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
@@ -186,7 +186,7 @@ int i915_gem_object_fill_blt(struct drm_i915_gem_object *obj,
0);
out_request:
if (unlikely(err))
- i915_request_skip(rq, err);
+ i915_request_set_error_once(rq, err);
i915_request_add(rq);
out_batch:
@@ -196,6 +196,17 @@ out_unpin:
return err;
}
+/* Wa_1209644611:icl,ehl */
+static bool wa_1209644611_applies(struct drm_i915_private *i915, u32 size)
+{
+ u32 height = size >> PAGE_SHIFT;
+
+ if (!IS_GEN(i915, 11))
+ return false;
+
+ return height % 4 == 3 && height <= 8;
+}
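
Worked through: height = size >> PAGE_SHIFT, and height % 4 == 3 && height <= 8 holds only for heights 3 and 7, so with 4KiB pages it is the 12KiB and 28KiB copy chunks on gen11 parts that skip GEN9_XY_FAST_COPY_BLT_CMD and fall back to the older copy command in the (elided) else branch.
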
+
struct i915_vma *intel_emit_vma_copy_blt(struct intel_context *ce,
struct i915_vma *src,
struct i915_vma *dst)
@@ -237,7 +248,8 @@ struct i915_vma *intel_emit_vma_copy_blt(struct intel_context *ce,
size = min_t(u64, rem, block_size);
GEM_BUG_ON(size >> PAGE_SHIFT > S16_MAX);
- if (INTEL_GEN(i915) >= 9) {
+ if (INTEL_GEN(i915) >= 9 &&
+ !wa_1209644611_applies(i915, size)) {
*cmd++ = GEN9_XY_FAST_COPY_BLT_CMD | (10 - 2);
*cmd++ = BLT_DEPTH_32 | PAGE_SIZE;
*cmd++ = 0;
@@ -385,7 +397,7 @@ out_unlock:
drm_gem_unlock_reservations(objs, ARRAY_SIZE(objs), &acquire);
out_request:
if (unlikely(err))
- i915_request_skip(rq, err);
+ i915_request_set_error_once(rq, err);
i915_request_add(rq);
out_batch:
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
index b07bb40edd5a..698e22420dc5 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
@@ -194,10 +194,11 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
/* Perma-pin (until release) the physical set of pages */
__i915_gem_object_pin_pages(obj);
- if (!IS_ERR_OR_NULL(pages)) {
+ if (!IS_ERR_OR_NULL(pages))
i915_gem_shmem_ops.put_pages(obj, pages);
- i915_gem_object_release_memory_region(obj);
- }
+
+ i915_gem_object_release_memory_region(obj);
+
mutex_unlock(&obj->mm.lock);
return 0;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
index 830d3f96e1f6..03e5eb4c99d1 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
@@ -12,7 +12,6 @@
#include <linux/pci.h>
#include <linux/dma-buf.h>
#include <linux/vmalloc.h>
-#include <drm/i915_drm.h>
#include "i915_trace.h"
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index 491cfbaaa330..5557dfa83a7b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -13,6 +13,7 @@
#include "gem/i915_gem_region.h"
#include "i915_drv.h"
#include "i915_gem_stolen.h"
+#include "i915_vgpu.h"
/*
* The BIOS typically reserves some of the system's memory for the exclusive
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
index 6c7825a2dc2a..37f77aee1212 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
@@ -6,7 +6,6 @@
#include <linux/string.h>
#include <linux/bitops.h>
-#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_gem.h"
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index 63ead7a2b64a..7ffd7afeb7a5 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -10,8 +10,6 @@
#include <linux/swap.h>
#include <linux/sched/mm.h>
-#include <drm/i915_drm.h>
-
#include "i915_drv.h"
#include "i915_gem_ioctls.h"
#include "i915_gem_object.h"
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
index 375d864736f3..54b86cf7f5d2 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
@@ -1004,7 +1004,7 @@ emit_rpcs_query(struct drm_i915_gem_object *obj,
return 0;
skip_request:
- i915_request_skip(rq, err);
+ i915_request_set_error_once(rq, err);
err_request:
i915_request_add(rq);
err_batch:
@@ -1559,7 +1559,7 @@ static int write_to_scratch(struct i915_gem_context *ctx,
goto out_vm;
skip_request:
- i915_request_skip(rq, err);
+ i915_request_set_error_once(rq, err);
err_request:
i915_request_add(rq);
err_unpin:
@@ -1708,7 +1708,7 @@ static int read_from_scratch(struct i915_gem_context *ctx,
goto out_vm;
skip_request:
- i915_request_skip(rq, err);
+ i915_request_set_error_once(rq, err);
err_request:
i915_request_add(rq);
err_unpin:
@@ -1809,7 +1809,6 @@ static int igt_vm_isolation(void *arg)
vm_total = ctx_vm(ctx_a)->total;
GEM_BUG_ON(ctx_vm(ctx_b)->total != vm_total);
- vm_total -= I915_GTT_PAGE_SIZE;
count = 0;
num_engines = 0;
@@ -1828,10 +1827,10 @@ static int igt_vm_isolation(void *arg)
u32 value = 0xc5c5c5c5;
u64 offset;
- div64_u64_rem(i915_prandom_u64_state(&prng),
- vm_total, &offset);
- offset = round_down(offset, alignof_dword);
- offset += I915_GTT_PAGE_SIZE;
+ /* Leave enough space at offset 0 for the batch */
+ offset = igt_random_offset(&prng,
+ I915_GTT_PAGE_SIZE, vm_total,
+ sizeof(u32), alignof_dword);
err = write_to_scratch(ctx_a, engine,
offset, 0xdeadbeef);
diff --git a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
index 6718da20f35d..772d8cba7da9 100644
--- a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
+++ b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
@@ -159,7 +159,7 @@ int igt_gpu_fill_dw(struct intel_context *ce,
return 0;
skip_request:
- i915_request_skip(rq, err);
+ i915_request_set_error_once(rq, err);
err_request:
i915_request_add(rq);
err_batch:
diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_context.c b/drivers/gpu/drm/i915/gem/selftests/mock_context.c
index b12ea1daa29d..e7e3c620f542 100644
--- a/drivers/gpu/drm/i915/gem/selftests/mock_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/mock_context.c
@@ -23,6 +23,9 @@ mock_context(struct drm_i915_private *i915,
INIT_LIST_HEAD(&ctx->link);
ctx->i915 = i915;
+ spin_lock_init(&ctx->stale.lock);
+ INIT_LIST_HEAD(&ctx->stale.engines);
+
i915_gem_context_set_persistence(ctx);
mutex_init(&ctx->engines_mutex);
diff --git a/drivers/gpu/drm/i915/gt/gen7_renderclear.c b/drivers/gpu/drm/i915/gt/gen7_renderclear.c
new file mode 100644
index 000000000000..de595b66a746
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/gen7_renderclear.c
@@ -0,0 +1,402 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "gen7_renderclear.h"
+#include "i915_drv.h"
+#include "intel_gpu_commands.h"
+
+#define MAX_URB_ENTRIES 64
+#define STATE_SIZE (4 * 1024)
+#define GT3_INLINE_DATA_DELAYS 0x1E00
+#define batch_advance(Y, CS) GEM_BUG_ON((Y)->end != (CS))
+
+struct cb_kernel {
+ const void *data;
+ u32 size;
+};
+
+#define CB_KERNEL(name) { .data = (name), .size = sizeof(name) }
+
+#include "ivb_clear_kernel.c"
+static const struct cb_kernel cb_kernel_ivb = CB_KERNEL(ivb_clear_kernel);
+
+#include "hsw_clear_kernel.c"
+static const struct cb_kernel cb_kernel_hsw = CB_KERNEL(hsw_clear_kernel);
+
+struct batch_chunk {
+ struct i915_vma *vma;
+ u32 offset;
+ u32 *start;
+ u32 *end;
+ u32 max_items;
+};
+
+struct batch_vals {
+ u32 max_primitives;
+ u32 max_urb_entries;
+ u32 cmd_size;
+ u32 state_size;
+ u32 state_start;
+ u32 batch_size;
+ u32 surface_height;
+ u32 surface_width;
+ u32 scratch_size;
+ u32 max_size;
+};
+
+static void
+batch_get_defaults(struct drm_i915_private *i915, struct batch_vals *bv)
+{
+ if (IS_HASWELL(i915)) {
+ bv->max_primitives = 280;
+ bv->max_urb_entries = MAX_URB_ENTRIES;
+ bv->surface_height = 16 * 16;
+ bv->surface_width = 32 * 2 * 16;
+ } else {
+ bv->max_primitives = 128;
+ bv->max_urb_entries = MAX_URB_ENTRIES / 2;
+ bv->surface_height = 16 * 8;
+ bv->surface_width = 32 * 16;
+ }
+ bv->cmd_size = bv->max_primitives * 4096;
+ bv->state_size = STATE_SIZE;
+ bv->state_start = bv->cmd_size;
+ bv->batch_size = bv->cmd_size + bv->state_size;
+ bv->scratch_size = bv->surface_height * bv->surface_width;
+ bv->max_size = bv->batch_size + bv->scratch_size;
+}
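
Worked through, the totals are fixed per platform: on Haswell the command section is 280 x 4KiB = 1120KiB, plus 4KiB of state and a 256 x 1024 = 256KiB scratch surface, for a 1380KiB allocation; the IVB/VLV branch halves the primitive count and the surface dimensions, landing at 580KiB.
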
+
+static void batch_init(struct batch_chunk *bc,
+ struct i915_vma *vma,
+ u32 *start, u32 offset, u32 max_bytes)
+{
+ bc->vma = vma;
+ bc->offset = offset;
+ bc->start = start + bc->offset / sizeof(*bc->start);
+ bc->end = bc->start;
+ bc->max_items = max_bytes / sizeof(*bc->start);
+}
+
+static u32 batch_offset(const struct batch_chunk *bc, u32 *cs)
+{
+ return (cs - bc->start) * sizeof(*bc->start) + bc->offset;
+}
+
+static u32 batch_addr(const struct batch_chunk *bc)
+{
+ return bc->vma->node.start;
+}
+
+static void batch_add(struct batch_chunk *bc, const u32 d)
+{
+ GEM_BUG_ON((bc->end - bc->start) >= bc->max_items);
+ *bc->end++ = d;
+}
+
+static u32 *batch_alloc_items(struct batch_chunk *bc, u32 align, u32 items)
+{
+ u32 *map;
+
+ if (align) {
+ u32 *end = PTR_ALIGN(bc->end, align);
+
+ memset32(bc->end, 0, end - bc->end);
+ bc->end = end;
+ }
+
+ map = bc->end;
+ bc->end += items;
+
+ return map;
+}
+
+static u32 *batch_alloc_bytes(struct batch_chunk *bc, u32 align, u32 bytes)
+{
+ GEM_BUG_ON(!IS_ALIGNED(bytes, sizeof(*bc->start)));
+ return batch_alloc_items(bc, align, bytes / sizeof(*bc->start));
+}
+
+static u32
+gen7_fill_surface_state(struct batch_chunk *state,
+ const u32 dst_offset,
+ const struct batch_vals *bv)
+{
+ u32 surface_h = bv->surface_height;
+ u32 surface_w = bv->surface_width;
+ u32 *cs = batch_alloc_items(state, 32, 8);
+ u32 offset = batch_offset(state, cs);
+
+#define SURFACE_2D 1
+#define SURFACEFORMAT_B8G8R8A8_UNORM 0x0C0
+#define RENDER_CACHE_READ_WRITE 1
+
+ *cs++ = SURFACE_2D << 29 |
+ (SURFACEFORMAT_B8G8R8A8_UNORM << 18) |
+ (RENDER_CACHE_READ_WRITE << 8);
+
+ *cs++ = batch_addr(state) + dst_offset;
+
+ *cs++ = ((surface_h / 4 - 1) << 16) | (surface_w / 4 - 1);
+ *cs++ = surface_w;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+#define SHADER_CHANNELS(r, g, b, a) \
+ (((r) << 25) | ((g) << 22) | ((b) << 19) | ((a) << 16))
+ *cs++ = SHADER_CHANNELS(4, 5, 6, 7);
+ batch_advance(state, cs);
+
+ return offset;
+}
+
+static u32
+gen7_fill_binding_table(struct batch_chunk *state,
+ const struct batch_vals *bv)
+{
+ u32 surface_start = gen7_fill_surface_state(state, bv->batch_size, bv);
+ u32 *cs = batch_alloc_items(state, 32, 8);
+ u32 offset = batch_offset(state, cs);
+
+ *cs++ = surface_start - state->offset;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+ batch_advance(state, cs);
+
+ return offset;
+}
+
+static u32
+gen7_fill_kernel_data(struct batch_chunk *state,
+ const u32 *data,
+ const u32 size)
+{
+ return batch_offset(state,
+ memcpy(batch_alloc_bytes(state, 64, size),
+ data, size));
+}
+
+static u32
+gen7_fill_interface_descriptor(struct batch_chunk *state,
+ const struct batch_vals *bv,
+ const struct cb_kernel *kernel,
+ unsigned int count)
+{
+ u32 kernel_offset =
+ gen7_fill_kernel_data(state, kernel->data, kernel->size);
+ u32 binding_table = gen7_fill_binding_table(state, bv);
+ u32 *cs = batch_alloc_items(state, 32, 8 * count);
+ u32 offset = batch_offset(state, cs);
+
+ *cs++ = kernel_offset;
+ *cs++ = (1 << 7) | (1 << 13);
+ *cs++ = 0;
+ *cs++ = (binding_table - state->offset) | 1;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+
+ /* 1 - 63: dummy idds (interface descriptors) */
+ memset32(cs, 0x00, (count - 1) * 8);
+ batch_advance(state, cs + (count - 1) * 8);
+
+ return offset;
+}
+
+static void
+gen7_emit_state_base_address(struct batch_chunk *batch,
+ u32 surface_state_base)
+{
+ u32 *cs = batch_alloc_items(batch, 0, 12);
+
+ *cs++ = STATE_BASE_ADDRESS | (12 - 2);
+ /* general */
+ *cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY;
+ /* surface */
+ *cs++ = batch_addr(batch) | surface_state_base | BASE_ADDRESS_MODIFY;
+ /* dynamic */
+ *cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY;
+ /* indirect */
+ *cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY;
+ /* instruction */
+ *cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY;
+
+ /* general/dynamic/indirect/instruction access upper bounds */
+ *cs++ = 0;
+ *cs++ = BASE_ADDRESS_MODIFY;
+ *cs++ = 0;
+ *cs++ = BASE_ADDRESS_MODIFY;
+ *cs++ = 0;
+ *cs++ = 0;
+ batch_advance(batch, cs);
+}
+
+static void
+gen7_emit_vfe_state(struct batch_chunk *batch,
+ const struct batch_vals *bv,
+ u32 urb_size, u32 curbe_size,
+ u32 mode)
+{
+ u32 urb_entries = bv->max_urb_entries;
+ u32 threads = bv->max_primitives - 1;
+ u32 *cs = batch_alloc_items(batch, 32, 8);
+
+ *cs++ = MEDIA_VFE_STATE | (8 - 2);
+
+ /* scratch buffer */
+ *cs++ = 0;
+
+ /* number of threads & urb entries for GPGPU vs Media Mode */
+ *cs++ = threads << 16 | urb_entries << 8 | mode << 2;
+
+ *cs++ = 0;
+
+ /* urb entry size & curbe size in 256-bit units */
+ *cs++ = urb_size << 16 | curbe_size;
+
+ /* scoreboard */
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+ batch_advance(batch, cs);
+}
+
+static void
+gen7_emit_interface_descriptor_load(struct batch_chunk *batch,
+ const u32 interface_descriptor,
+ unsigned int count)
+{
+ u32 *cs = batch_alloc_items(batch, 8, 4);
+
+ *cs++ = MEDIA_INTERFACE_DESCRIPTOR_LOAD | (4 - 2);
+ *cs++ = 0;
+ *cs++ = count * 8 * sizeof(*cs);
+
+ /*
+ * interface descriptor address - relative to the dynamic state
+ * base address
+ */
+ *cs++ = interface_descriptor;
+ batch_advance(batch, cs);
+}
+
+static void
+gen7_emit_media_object(struct batch_chunk *batch,
+ unsigned int media_object_index)
+{
+ unsigned int x_offset = (media_object_index % 16) * 64;
+ unsigned int y_offset = (media_object_index / 16) * 16;
+ unsigned int inline_data_size;
+ unsigned int media_batch_size;
+ unsigned int i;
+ u32 *cs;
+
+ inline_data_size = 112 * 8;
+ media_batch_size = inline_data_size + 6;
+
+ cs = batch_alloc_items(batch, 8, media_batch_size);
+
+ *cs++ = MEDIA_OBJECT | (media_batch_size - 2);
+
+ /* interface descriptor offset */
+ *cs++ = 0;
+
+ /* without indirect data */
+ *cs++ = 0;
+ *cs++ = 0;
+
+ /* scoreboard */
+ *cs++ = 0;
+ *cs++ = 0;
+
+ /* inline */
+ *cs++ = (y_offset << 16) | (x_offset);
+ *cs++ = 0;
+ *cs++ = GT3_INLINE_DATA_DELAYS;
+ for (i = 3; i < inline_data_size; i++)
+ *cs++ = 0;
+
+ batch_advance(batch, cs);
+}
+
+static void gen7_emit_pipeline_flush(struct batch_chunk *batch)
+{
+ u32 *cs = batch_alloc_items(batch, 0, 5);
+
+ *cs++ = GFX_OP_PIPE_CONTROL(5);
+ *cs++ = PIPE_CONTROL_STATE_CACHE_INVALIDATE |
+ PIPE_CONTROL_GLOBAL_GTT_IVB;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+ batch_advance(batch, cs);
+}
+
+static void emit_batch(struct i915_vma * const vma,
+ u32 *start,
+ const struct batch_vals *bv)
+{
+ struct drm_i915_private *i915 = vma->vm->i915;
+ unsigned int desc_count = 64;
+ const u32 urb_size = 112;
+ struct batch_chunk cmds, state;
+ u32 interface_descriptor;
+ unsigned int i;
+
+ batch_init(&cmds, vma, start, 0, bv->cmd_size);
+ batch_init(&state, vma, start, bv->state_start, bv->state_size);
+
+ interface_descriptor =
+ gen7_fill_interface_descriptor(&state, bv,
+ IS_HASWELL(i915) ?
+ &cb_kernel_hsw :
+ &cb_kernel_ivb,
+ desc_count);
+ gen7_emit_pipeline_flush(&cmds);
+ batch_add(&cmds, PIPELINE_SELECT | PIPELINE_SELECT_MEDIA);
+ batch_add(&cmds, MI_NOOP);
+ gen7_emit_state_base_address(&cmds, interface_descriptor);
+ gen7_emit_pipeline_flush(&cmds);
+
+ gen7_emit_vfe_state(&cmds, bv, urb_size - 1, 0, 0);
+
+ gen7_emit_interface_descriptor_load(&cmds,
+ interface_descriptor,
+ desc_count);
+
+ for (i = 0; i < bv->max_primitives; i++)
+ gen7_emit_media_object(&cmds, i);
+
+ batch_add(&cmds, MI_BATCH_BUFFER_END);
+}
+
+int gen7_setup_clear_gpr_bb(struct intel_engine_cs * const engine,
+ struct i915_vma * const vma)
+{
+ struct batch_vals bv;
+ u32 *batch;
+
+ batch_get_defaults(engine->i915, &bv);
+ if (!vma)
+ return bv.max_size;
+
+ GEM_BUG_ON(vma->obj->base.size < bv.max_size);
+
+ batch = i915_gem_object_pin_map(vma->obj, I915_MAP_WC);
+ if (IS_ERR(batch))
+ return PTR_ERR(batch);
+
+ emit_batch(vma, memset(batch, 0, bv.max_size), &bv);
+
+ i915_gem_object_flush_map(vma->obj);
+ i915_gem_object_unpin_map(vma->obj);
+
+ return 0;
+}
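
The NULL-vma convention makes this a two-call API: probe for the required size, allocate, then emit. A minimal usage sketch (the allocation helper is hypothetical, not part of this patch):

	struct i915_vma *vma;
	int size;

	size = gen7_setup_clear_gpr_bb(engine, NULL);	/* size query only */
	vma = alloc_clear_bb_vma(engine->gt, size);	/* hypothetical helper */
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	return gen7_setup_clear_gpr_bb(engine, vma);	/* emit the clear batch */
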
diff --git a/drivers/gpu/drm/i915/gt/gen7_renderclear.h b/drivers/gpu/drm/i915/gt/gen7_renderclear.h
new file mode 100644
index 000000000000..bb100748e2c6
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/gen7_renderclear.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __GEN7_RENDERCLEAR_H__
+#define __GEN7_RENDERCLEAR_H__
+
+struct intel_engine_cs;
+struct i915_vma;
+
+int gen7_setup_clear_gpr_bb(struct intel_engine_cs * const engine,
+ struct i915_vma * const vma);
+
+#endif /* __GEN7_RENDERCLEAR_H__ */
diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
index 4d1de2d97d5c..94e746af8926 100644
--- a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
@@ -8,6 +8,7 @@
#include "gen8_ppgtt.h"
#include "i915_scatterlist.h"
#include "i915_trace.h"
+#include "i915_pvinfo.h"
#include "i915_vgpu.h"
#include "intel_gt.h"
#include "intel_gtt.h"
@@ -25,6 +26,30 @@ static u64 gen8_pde_encode(const dma_addr_t addr,
return pde;
}
+static u64 gen8_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ gen8_pte_t pte = addr | _PAGE_PRESENT | _PAGE_RW;
+
+ if (unlikely(flags & PTE_READ_ONLY))
+ pte &= ~_PAGE_RW;
+
+ switch (level) {
+ case I915_CACHE_NONE:
+ pte |= PPAT_UNCACHED;
+ break;
+ case I915_CACHE_WT:
+ pte |= PPAT_DISPLAY_ELLC;
+ break;
+ default:
+ pte |= PPAT_CACHED;
+ break;
+ }
+
+ return pte;
+}
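
A worked example of the encoding, read straight off the switch above: a read-only, uncached page keeps the present bit, drops the RW bit and gains the uncached PPAT index:

	u64 pte = gen8_pte_encode(0x2000, I915_CACHE_NONE, PTE_READ_ONLY);
	/* pte == 0x2000 | _PAGE_PRESENT | PPAT_UNCACHED, with _PAGE_RW clear */
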
+
static void gen8_ppgtt_notify_vgt(struct i915_ppgtt *ppgtt, bool create)
{
struct drm_i915_private *i915 = ppgtt->vm.i915;
@@ -706,6 +731,8 @@ struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt)
ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc;
ppgtt->vm.clear_range = gen8_ppgtt_clear;
+ ppgtt->vm.pte_encode = gen8_pte_encode;
+
if (intel_vgpu_active(gt->i915))
gen8_ppgtt_notify_vgt(ppgtt, true);
diff --git a/drivers/gpu/drm/i915/gt/hsw_clear_kernel.c b/drivers/gpu/drm/i915/gt/hsw_clear_kernel.c
new file mode 100644
index 000000000000..b47f9d4a0848
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/hsw_clear_kernel.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ *
+ * Generated by: IGT Gpu Tools on Fri 21 Feb 2020 05:30:13 AM UTC
+ */
+
+static const u32 hsw_clear_kernel[] = {
+ 0x00000001, 0x26020128, 0x00000024, 0x00000000,
+ 0x00000040, 0x20280c21, 0x00000028, 0x00000001,
+ 0x01000010, 0x20000c20, 0x0000002c, 0x00000000,
+ 0x00010220, 0x34001c00, 0x00001400, 0x00000160,
+ 0x00600001, 0x20600061, 0x00000000, 0x00000000,
+ 0x00000008, 0x20601c85, 0x00000e00, 0x0000000c,
+ 0x00000005, 0x20601ca5, 0x00000060, 0x00000001,
+ 0x00000008, 0x20641c85, 0x00000e00, 0x0000000d,
+ 0x00000005, 0x20641ca5, 0x00000064, 0x00000003,
+ 0x00000041, 0x207424a5, 0x00000064, 0x00000034,
+ 0x00000040, 0x206014a5, 0x00000060, 0x00000074,
+ 0x00000008, 0x20681c85, 0x00000e00, 0x00000008,
+ 0x00000005, 0x20681ca5, 0x00000068, 0x0000000f,
+ 0x00000041, 0x20701ca5, 0x00000060, 0x00000010,
+ 0x00000040, 0x206814a5, 0x00000068, 0x00000070,
+ 0x00600001, 0x20a00061, 0x00000000, 0x00000000,
+ 0x00000005, 0x206c1c85, 0x00000e00, 0x00000007,
+ 0x00000041, 0x206c1ca5, 0x0000006c, 0x00000004,
+ 0x00600001, 0x20800021, 0x008d0000, 0x00000000,
+ 0x00000001, 0x20800021, 0x0000006c, 0x00000000,
+ 0x00000001, 0x20840021, 0x00000068, 0x00000000,
+ 0x00000001, 0x20880061, 0x00000000, 0x00000003,
+ 0x00000005, 0x208c0d21, 0x00000086, 0xffffffff,
+ 0x05600032, 0x20a00fa1, 0x008d0080, 0x02190001,
+ 0x00000040, 0x20a01ca5, 0x000000a0, 0x00000001,
+ 0x05600032, 0x20a00fa1, 0x008d0080, 0x040a8001,
+ 0x02000040, 0x20281c21, 0x00000028, 0xffffffff,
+ 0x00010220, 0x34001c00, 0x00001400, 0xffffffe0,
+ 0x00000001, 0x26020128, 0x00000024, 0x00000000,
+ 0x00000001, 0x220010e4, 0x00000000, 0x00000000,
+ 0x00000001, 0x220831ec, 0x00000000, 0x007f007f,
+ 0x00600001, 0x20400021, 0x008d0000, 0x00000000,
+ 0x00600001, 0x2fe00021, 0x008d0000, 0x00000000,
+ 0x00200001, 0x20400121, 0x00450020, 0x00000000,
+ 0x00000001, 0x20480061, 0x00000000, 0x000f000f,
+ 0x00000005, 0x204c0d21, 0x00000046, 0xffffffef,
+ 0x00800001, 0x20600061, 0x00000000, 0x00000000,
+ 0x00800001, 0x20800061, 0x00000000, 0x00000000,
+ 0x00800001, 0x20a00061, 0x00000000, 0x00000000,
+ 0x00800001, 0x20c00061, 0x00000000, 0x00000000,
+ 0x00800001, 0x20e00061, 0x00000000, 0x00000000,
+ 0x00800001, 0x21000061, 0x00000000, 0x00000000,
+ 0x00800001, 0x21200061, 0x00000000, 0x00000000,
+ 0x00800001, 0x21400061, 0x00000000, 0x00000000,
+ 0x05600032, 0x20000fa0, 0x008d0040, 0x120a8000,
+ 0x00000040, 0x20402d21, 0x00000020, 0x00100010,
+ 0x05600032, 0x20000fa0, 0x008d0040, 0x120a8000,
+ 0x02000040, 0x22083d8c, 0x00000208, 0xffffffff,
+ 0x00800001, 0xa0000109, 0x00000602, 0x00000000,
+ 0x00000040, 0x22001c84, 0x00000200, 0x00000020,
+ 0x00010220, 0x34001c00, 0x00001400, 0xffffffc0,
+ 0x07600032, 0x20000fa0, 0x008d0fe0, 0x82000010,
+};
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index 8bb444cda14f..01474d3a558b 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -51,6 +51,11 @@ int intel_context_alloc_state(struct intel_context *ce)
return -EINTR;
if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
+ if (intel_context_is_banned(ce)) {
+ err = -EIO;
+ goto unlock;
+ }
+
err = ce->ops->alloc(ce);
if (unlikely(err))
goto unlock;
diff --git a/drivers/gpu/drm/i915/gt/intel_context_param.c b/drivers/gpu/drm/i915/gt/intel_context_param.c
new file mode 100644
index 000000000000..65dcd090245d
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_context_param.c
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "i915_active.h"
+#include "intel_context.h"
+#include "intel_context_param.h"
+#include "intel_ring.h"
+
+int intel_context_set_ring_size(struct intel_context *ce, long sz)
+{
+ int err;
+
+ if (intel_context_lock_pinned(ce))
+ return -EINTR;
+
+ err = i915_active_wait(&ce->active);
+ if (err < 0)
+ goto unlock;
+
+ if (intel_context_is_pinned(ce)) {
+ err = -EBUSY; /* In active use, come back later! */
+ goto unlock;
+ }
+
+ if (test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
+ struct intel_ring *ring;
+
+ /* Replace the existing ringbuffer */
+ ring = intel_engine_create_ring(ce->engine, sz);
+ if (IS_ERR(ring)) {
+ err = PTR_ERR(ring);
+ goto unlock;
+ }
+
+ intel_ring_put(ce->ring);
+ ce->ring = ring;
+
+ /* Context image will be updated on next pin */
+ } else {
+ ce->ring = __intel_context_ring_size(sz);
+ }
+
+unlock:
+ intel_context_unlock_pinned(ce);
+ return err;
+}
+
+long intel_context_get_ring_size(struct intel_context *ce)
+{
+ long sz = (unsigned long)READ_ONCE(ce->ring);
+
+ if (test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
+ if (intel_context_lock_pinned(ce))
+ return -EINTR;
+
+ sz = ce->ring->size;
+ intel_context_unlock_pinned(ce);
+ }
+
+ return sz;
+}
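
Together the pair supports a read-modify-write resize; a minimal sketch (error handling abridged):

	long sz = intel_context_get_ring_size(ce);

	if (sz > 0)
		err = intel_context_set_ring_size(ce, 2 * sz);
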
diff --git a/drivers/gpu/drm/i915/gt/intel_context_param.h b/drivers/gpu/drm/i915/gt/intel_context_param.h
new file mode 100644
index 000000000000..f053d8633fe2
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_context_param.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_CONTEXT_PARAM_H
+#define INTEL_CONTEXT_PARAM_H
+
+struct intel_context;
+
+int intel_context_set_ring_size(struct intel_context *ce, long sz);
+long intel_context_get_ring_size(struct intel_context *ce);
+
+#endif /* INTEL_CONTEXT_PARAM_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h
index 11278343b9b5..0f3b68b95c56 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
@@ -45,8 +45,8 @@ struct intel_context {
struct intel_engine_cs *engine;
struct intel_engine_cs *inflight;
-#define intel_context_inflight(ce) ptr_mask_bits((ce)->inflight, 2)
-#define intel_context_inflight_count(ce) ptr_unmask_bits((ce)->inflight, 2)
+#define intel_context_inflight(ce) ptr_mask_bits(READ_ONCE((ce)->inflight), 2)
+#define intel_context_inflight_count(ce) ptr_unmask_bits(READ_ONCE((ce)->inflight), 2)
struct i915_address_space *vm;
struct i915_gem_context __rcu *gem_context;
diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
index 29c8c03c5caa..b469de0dd9b6 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine.h
@@ -107,7 +107,20 @@ execlists_num_ports(const struct intel_engine_execlists * const execlists)
static inline struct i915_request *
execlists_active(const struct intel_engine_execlists *execlists)
{
- return *READ_ONCE(execlists->active);
+ struct i915_request * const *cur, * const *old, *active;
+
+ cur = READ_ONCE(execlists->active);
+ smp_rmb(); /* pairs with overwrite protection in process_csb() */
+ do {
+ old = cur;
+
+ active = READ_ONCE(*cur);
+ cur = READ_ONCE(execlists->active);
+
+ smp_rmb(); /* and complete the seqlock retry */
+ } while (unlikely(cur != old));
+
+ return active;
}
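
The retry loop is the read side of a pointer-based seqlock: process_csb() may republish execlists->active while the array slot is being dereferenced, so the reader loops until the pointer is observed stable across the read. This only works because the writer (process_csb(), per the barrier pairing noted in the comments) publishes the new array contents before flipping the pointer; the reader therefore never sees the new pointer with stale slot contents, and a pointer change mid-dereference simply forces a retry.
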
static inline void
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 119c9cb24fd4..3aa8a652c16d 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -275,6 +275,7 @@ static void intel_engine_sanitize_mmio(struct intel_engine_cs *engine)
static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
{
const struct engine_info *info = &intel_engines[id];
+ struct drm_i915_private *i915 = gt->i915;
struct intel_engine_cs *engine;
BUILD_BUG_ON(MAX_ENGINE_CLASS >= BIT(GEN11_ENGINE_CLASS_WIDTH));
@@ -301,11 +302,11 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
engine->id = id;
engine->legacy_idx = INVALID_ENGINE;
engine->mask = BIT(id);
- engine->i915 = gt->i915;
+ engine->i915 = i915;
engine->gt = gt;
engine->uncore = gt->uncore;
engine->hw_id = engine->guc_id = info->hw_id;
- engine->mmio_base = __engine_mmio_base(gt->i915, info->mmio_bases);
+ engine->mmio_base = __engine_mmio_base(i915, info->mmio_bases);
engine->class = info->class;
engine->instance = info->instance;
@@ -313,6 +314,8 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
engine->props.heartbeat_interval_ms =
CONFIG_DRM_I915_HEARTBEAT_INTERVAL;
+ engine->props.max_busywait_duration_ns =
+ CONFIG_DRM_I915_MAX_REQUEST_BUSYWAIT;
engine->props.preempt_timeout_ms =
CONFIG_DRM_I915_PREEMPT_TIMEOUT;
engine->props.stop_timeout_ms =
@@ -320,11 +323,15 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
engine->props.timeslice_duration_ms =
CONFIG_DRM_I915_TIMESLICE_DURATION;
+ /* Override to uninterruptible for OpenCL workloads. */
+ if (INTEL_GEN(i915) == 12 && engine->class == RENDER_CLASS)
+ engine->props.preempt_timeout_ms = 0;
+
engine->context_size = intel_engine_context_size(gt, engine->class);
if (WARN_ON(engine->context_size > BIT(20)))
engine->context_size = 0;
if (engine->context_size)
- DRIVER_CAPS(gt->i915)->has_logical_contexts = true;
+ DRIVER_CAPS(i915)->has_logical_contexts = true;
/* Nothing to do here, execute in order of dependencies */
engine->schedule = NULL;
@@ -340,7 +347,7 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
gt->engine_class[info->class][info->instance] = engine;
gt->engine[id] = engine;
- gt->i915->engine[id] = engine;
+ i915->engine[id] = engine;
return 0;
}
@@ -1379,24 +1386,24 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
char hdr[160];
int len;
- len = snprintf(hdr, sizeof(hdr),
- "\t\tActive[%d]: ",
- (int)(port - execlists->active));
+ len = scnprintf(hdr, sizeof(hdr),
+ "\t\tActive[%d]: ",
+ (int)(port - execlists->active));
if (!i915_request_signaled(rq)) {
struct intel_timeline *tl = get_timeline(rq);
- len += snprintf(hdr + len, sizeof(hdr) - len,
- "ring:{start:%08x, hwsp:%08x, seqno:%08x, runtime:%llums}, ",
- i915_ggtt_offset(rq->ring->vma),
- tl ? tl->hwsp_offset : 0,
- hwsp_seqno(rq),
- DIV_ROUND_CLOSEST_ULL(intel_context_get_total_runtime_ns(rq->context),
- 1000 * 1000));
+ len += scnprintf(hdr + len, sizeof(hdr) - len,
+ "ring:{start:%08x, hwsp:%08x, seqno:%08x, runtime:%llums}, ",
+ i915_ggtt_offset(rq->ring->vma),
+ tl ? tl->hwsp_offset : 0,
+ hwsp_seqno(rq),
+ DIV_ROUND_CLOSEST_ULL(intel_context_get_total_runtime_ns(rq->context),
+ 1000 * 1000));
if (tl)
intel_timeline_put(tl);
}
- snprintf(hdr + len, sizeof(hdr) - len, "rq: ");
+ scnprintf(hdr + len, sizeof(hdr) - len, "rq: ");
print_request(m, rq, hdr);
}
for (port = execlists->pending; (rq = *port); port++) {
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index b23366a81048..80cdde712842 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -547,6 +547,7 @@ struct intel_engine_cs {
struct {
unsigned long heartbeat_interval_ms;
+ unsigned long max_busywait_duration_ns;
unsigned long preempt_timeout_ms;
unsigned long stop_timeout_ms;
unsigned long timeslice_duration_ms;
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
index 41a00281f364..aed498a0d032 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
@@ -8,6 +8,8 @@
#include <asm/set_memory.h>
#include <asm/smp.h>
+#include <drm/i915_drm.h>
+
#include "intel_gt.h"
#include "i915_drv.h"
#include "i915_scatterlist.h"
@@ -157,6 +159,13 @@ static void gmch_ggtt_invalidate(struct i915_ggtt *ggtt)
intel_gtt_chipset_flush();
}
+static u64 gen8_ggtt_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ return addr | _PAGE_PRESENT;
+}
+
static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
{
writeq(pte, addr);
@@ -172,7 +181,7 @@ static void gen8_ggtt_insert_page(struct i915_address_space *vm,
gen8_pte_t __iomem *pte =
(gen8_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
- gen8_set_pte(pte, gen8_pte_encode(addr, level, 0));
+ gen8_set_pte(pte, gen8_ggtt_pte_encode(addr, level, 0));
ggtt->invalidate(ggtt);
}
@@ -185,7 +194,7 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
struct sgt_iter sgt_iter;
gen8_pte_t __iomem *gtt_entries;
- const gen8_pte_t pte_encode = gen8_pte_encode(0, level, 0);
+ const gen8_pte_t pte_encode = gen8_ggtt_pte_encode(0, level, 0);
dma_addr_t addr;
/*
@@ -857,7 +866,7 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
ggtt->vm.vma_ops.clear_pages = clear_pages;
- ggtt->vm.pte_encode = gen8_pte_encode;
+ ggtt->vm.pte_encode = gen8_ggtt_pte_encode;
setup_private_pat(ggtt->vm.gt->uncore);
diff --git a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
index 51b8718513bc..f04214a54f75 100644
--- a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
+++ b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
@@ -292,10 +292,21 @@
#define MI_STORE_URB_MEM MI_INSTR(0x2D, 0)
#define MI_CONDITIONAL_BATCH_BUFFER_END MI_INSTR(0x36, 0)
-#define PIPELINE_SELECT ((0x3<<29)|(0x1<<27)|(0x1<<24)|(0x4<<16))
-#define GFX_OP_3DSTATE_VF_STATISTICS ((0x3<<29)|(0x1<<27)|(0x0<<24)|(0xB<<16))
-#define MEDIA_VFE_STATE ((0x3<<29)|(0x2<<27)|(0x0<<24)|(0x0<<16))
+#define STATE_BASE_ADDRESS \
+ ((0x3 << 29) | (0x0 << 27) | (0x1 << 24) | (0x1 << 16))
+#define BASE_ADDRESS_MODIFY REG_BIT(0)
+#define PIPELINE_SELECT \
+ ((0x3 << 29) | (0x1 << 27) | (0x1 << 24) | (0x4 << 16))
+#define PIPELINE_SELECT_MEDIA REG_BIT(0)
+#define GFX_OP_3DSTATE_VF_STATISTICS \
+ ((0x3 << 29) | (0x1 << 27) | (0x0 << 24) | (0xB << 16))
+#define MEDIA_VFE_STATE \
+ ((0x3 << 29) | (0x2 << 27) | (0x0 << 24) | (0x0 << 16))
#define MEDIA_VFE_STATE_MMIO_ACCESS_MASK (0x18)
+#define MEDIA_INTERFACE_DESCRIPTOR_LOAD \
+ ((0x3 << 29) | (0x2 << 27) | (0x0 << 24) | (0x2 << 16))
+#define MEDIA_OBJECT \
+ ((0x3 << 29) | (0x2 << 27) | (0x1 << 24) | (0x0 << 16))
#define GPGPU_OBJECT ((0x3<<29)|(0x2<<27)|(0x1<<24)|(0x4<<16))
#define GPGPU_WALKER ((0x3<<29)|(0x2<<27)|(0x1<<24)|(0x5<<16))
#define GFX_OP_3DSTATE_DX9_CONSTANTF_VS \
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index 3dea8881e915..d09f7596cb98 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -667,6 +667,9 @@ void intel_gt_driver_release(struct intel_gt *gt)
void intel_gt_driver_late_release(struct intel_gt *gt)
{
+ /* We need to wait for inflight RCU frees to release their grip */
+ rcu_barrier();
+
intel_uc_driver_late_release(&gt->uc);
intel_gt_fini_requests(gt);
intel_gt_fini_reset(gt);
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c
index bb9a6e638175..2a72cce63fd9 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.c
@@ -171,7 +171,9 @@ void __i915_vm_close(struct i915_address_space *vm)
{
struct i915_vma *vma, *vn;
- mutex_lock(&vm->mutex);
+ if (!atomic_dec_and_mutex_lock(&vm->open, &vm->mutex))
+ return;
+
list_for_each_entry_safe(vma, vn, &vm->bound_list, vm_link) {
struct drm_i915_gem_object *obj = vma->obj;
@@ -186,6 +188,7 @@ void __i915_vm_close(struct i915_address_space *vm)
i915_gem_object_put(obj);
}
GEM_BUG_ON(!list_empty(&vm->bound_list));
+
mutex_unlock(&vm->mutex);
}
@@ -484,30 +487,6 @@ void gtt_write_workarounds(struct intel_gt *gt)
}
}
-u64 gen8_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- u32 flags)
-{
- gen8_pte_t pte = addr | _PAGE_PRESENT | _PAGE_RW;
-
- if (unlikely(flags & PTE_READ_ONLY))
- pte &= ~_PAGE_RW;
-
- switch (level) {
- case I915_CACHE_NONE:
- pte |= PPAT_UNCACHED;
- break;
- case I915_CACHE_WT:
- pte |= PPAT_DISPLAY_ELLC;
- break;
- default:
- pte |= PPAT_CACHED;
- break;
- }
-
- return pte;
-}
-
static void tgl_setup_private_ppat(struct intel_uncore *uncore)
{
/* TGL doesn't support LLC or AGE settings */
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h
index 23004445806a..b3116fe8d180 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.h
@@ -429,8 +429,7 @@ static inline void
i915_vm_close(struct i915_address_space *vm)
{
GEM_BUG_ON(!atomic_read(&vm->open));
- if (atomic_dec_and_test(&vm->open))
- __i915_vm_close(vm);
+ __i915_vm_close(vm);
i915_vm_put(vm);
}
@@ -515,10 +514,6 @@ struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt);
void i915_ggtt_suspend(struct i915_ggtt *gtt);
void i915_ggtt_resume(struct i915_ggtt *ggtt);
-u64 gen8_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- u32 flags);
-
int setup_page_dma(struct i915_address_space *vm, struct i915_page_dma *p);
void cleanup_page_dma(struct i915_address_space *vm, struct i915_page_dma *p);
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 47561dc29304..112531b29f59 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -245,7 +245,7 @@ static void mark_eio(struct i915_request *rq)
GEM_BUG_ON(i915_request_signaled(rq));
- dma_fence_set_error(&rq->fence, -EIO);
+ i915_request_set_error_once(rq, -EIO);
i915_request_mark_complete(rq);
}
@@ -293,7 +293,7 @@ static inline struct i915_priolist *to_priolist(struct rb_node *rb)
static inline int rq_prio(const struct i915_request *rq)
{
- return rq->sched.attr.priority;
+ return READ_ONCE(rq->sched.attr.priority);
}
static int effective_prio(const struct i915_request *rq)
@@ -1004,7 +1004,7 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
i915_request_cancel_breadcrumb(rq);
spin_unlock(&rq->lock);
}
- rq->engine = owner;
+ WRITE_ONCE(rq->engine, owner);
owner->submit_request(rq);
active = NULL;
}
@@ -1316,7 +1316,7 @@ __execlists_schedule_out(struct i915_request *rq,
* If we have just completed this context, the engine may now be
* idle and we want to re-enter powersaving.
*/
- if (list_is_last(&rq->link, &ce->timeline->requests) &&
+ if (list_is_last_rcu(&rq->link, &ce->timeline->requests) &&
i915_request_completed(rq))
intel_engine_add_retire(engine, ce->timeline);
@@ -1448,6 +1448,7 @@ assert_pending_valid(const struct intel_engine_execlists *execlists,
{
struct i915_request * const *port, *rq;
struct intel_context *ce = NULL;
+ bool sentinel = false;
trace_ports(execlists, msg, execlists->pending);
@@ -1481,6 +1482,26 @@ assert_pending_valid(const struct intel_engine_execlists *execlists,
}
ce = rq->context;
+ /*
+ * Sentinels are supposed to be lonely so they flush the
+ * current execution off the HW. Check that they are the
+ * only request in the pending submission.
+ */
+ if (sentinel) {
+ GEM_TRACE_ERR("context:%llx after sentinel in pending[%zd]\n",
+ ce->timeline->fence_context,
+ port - execlists->pending);
+ return false;
+ }
+
+ sentinel = i915_request_has_sentinel(rq);
+ if (sentinel && port != execlists->pending) {
+ GEM_TRACE_ERR("sentinel context:%llx not in prime position[%zd]\n",
+ ce->timeline->fence_context,
+ port - execlists->pending);
+ return false;
+ }
+
/* Hold tightly onto the lock to prevent concurrent retires! */
if (!spin_trylock_irqsave(&rq->lock, flags))
continue;
@@ -1576,6 +1597,11 @@ static bool can_merge_ctx(const struct intel_context *prev,
return true;
}
+static unsigned long i915_request_flags(const struct i915_request *rq)
+{
+ return READ_ONCE(rq->fence.flags);
+}
+
static bool can_merge_rq(const struct i915_request *prev,
const struct i915_request *next)
{
@@ -1593,7 +1619,7 @@ static bool can_merge_rq(const struct i915_request *prev,
if (i915_request_completed(next))
return true;
- if (unlikely((prev->fence.flags ^ next->fence.flags) &
+ if (unlikely((i915_request_flags(prev) ^ i915_request_flags(next)) &
(BIT(I915_FENCE_FLAG_NOPREEMPT) |
BIT(I915_FENCE_FLAG_SENTINEL))))
return false;
@@ -1601,6 +1627,7 @@ static bool can_merge_rq(const struct i915_request *prev,
if (!can_merge_ctx(prev->context, next->context))
return false;
+ GEM_BUG_ON(i915_seqno_passed(prev->fence.seqno, next->fence.seqno));
return true;
}
@@ -1651,17 +1678,6 @@ static void virtual_xfer_breadcrumbs(struct virtual_engine *ve,
spin_unlock(&old->breadcrumbs.irq_lock);
}
-static struct i915_request *
-last_active(const struct intel_engine_execlists *execlists)
-{
- struct i915_request * const *last = READ_ONCE(execlists->active);
-
- while (*last && i915_request_completed(*last))
- last++;
-
- return *last;
-}
-
#define for_each_waiter(p__, rq__) \
list_for_each_entry_lockless(p__, \
&(rq__)->sched.waiters_list, \
@@ -1735,11 +1751,9 @@ need_timeslice(struct intel_engine_cs *engine, const struct i915_request *rq)
if (!intel_engine_has_timeslices(engine))
return false;
- if (list_is_last(&rq->sched.link, &engine->active.requests))
- return false;
-
- hint = max(rq_prio(list_next_entry(rq, sched.link)),
- engine->execlists.queue_priority_hint);
+ hint = engine->execlists.queue_priority_hint;
+ if (!list_is_last(&rq->sched.link, &engine->active.requests))
+ hint = max(hint, rq_prio(list_next_entry(rq, sched.link)));
return hint >= effective_prio(rq);
}
@@ -1762,12 +1776,13 @@ timeslice(const struct intel_engine_cs *engine)
static unsigned long
active_timeslice(const struct intel_engine_cs *engine)
{
- const struct i915_request *rq = *engine->execlists.active;
+ const struct intel_engine_execlists *execlists = &engine->execlists;
+ const struct i915_request *rq = *execlists->active;
if (!rq || i915_request_completed(rq))
return 0;
- if (engine->execlists.switch_priority_hint < effective_prio(rq))
+ if (READ_ONCE(execlists->switch_priority_hint) < effective_prio(rq))
return 0;
return timeslice(engine);
@@ -1781,16 +1796,29 @@ static void set_timeslice(struct intel_engine_cs *engine)
set_timer_ms(&engine->execlists.timer, active_timeslice(engine));
}
+static void start_timeslice(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists *execlists = &engine->execlists;
+ int prio = queue_prio(execlists);
+
+ WRITE_ONCE(execlists->switch_priority_hint, prio);
+ if (prio == INT_MIN)
+ return;
+
+ if (timer_pending(&execlists->timer))
+ return;
+
+ set_timer_ms(&execlists->timer, timeslice(engine));
+}
+
static void record_preemption(struct intel_engine_execlists *execlists)
{
(void)I915_SELFTEST_ONLY(execlists->preempt_hang.count++);
}
-static unsigned long active_preempt_timeout(struct intel_engine_cs *engine)
+static unsigned long active_preempt_timeout(struct intel_engine_cs *engine,
+ const struct i915_request *rq)
{
- struct i915_request *rq;
-
- rq = last_active(&engine->execlists);
if (!rq)
return 0;
@@ -1801,13 +1829,14 @@ static unsigned long active_preempt_timeout(struct intel_engine_cs *engine)
return READ_ONCE(engine->props.preempt_timeout_ms);
}
-static void set_preempt_timeout(struct intel_engine_cs *engine)
+static void set_preempt_timeout(struct intel_engine_cs *engine,
+ const struct i915_request *rq)
{
if (!intel_engine_has_preempt_reset(engine))
return;
set_timer_ms(&engine->execlists.preempt,
- active_preempt_timeout(engine));
+ active_preempt_timeout(engine, rq));
}
static inline void clear_ports(struct i915_request **ports, int count)
@@ -1820,6 +1849,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
struct intel_engine_execlists * const execlists = &engine->execlists;
struct i915_request **port = execlists->pending;
struct i915_request ** const last_port = port + execlists->port_mask;
+ struct i915_request * const *active;
struct i915_request *last;
struct rb_node *rb;
bool submit = false;
@@ -1874,7 +1904,10 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* i.e. we will retrigger preemption following the ack in case
* of trouble.
*/
- last = last_active(execlists);
+ active = READ_ONCE(execlists->active);
+ while ((last = *active) && i915_request_completed(last))
+ active++;
+
if (last) {
if (need_preempt(engine, last, rb)) {
ENGINE_TRACE(engine,
@@ -1944,11 +1977,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* Even if ELSP[1] is occupied and not worthy
* of timeslices, our queue might be.
*/
- if (!execlists->timer.expires &&
- need_timeslice(engine, last))
- set_timer_ms(&execlists->timer,
- timeslice(engine));
-
+ start_timeslice(engine);
return;
}
}
@@ -1983,7 +2012,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
if (last && !can_merge_rq(last, rq)) {
spin_unlock(&ve->base.active.lock);
- return; /* leave this for another */
+ start_timeslice(engine);
+ return; /* leave this for another sibling */
}
ENGINE_TRACE(engine,
@@ -1995,13 +2025,14 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
"",
yesno(engine != ve->siblings[0]));
- ve->request = NULL;
- ve->base.execlists.queue_priority_hint = INT_MIN;
+ WRITE_ONCE(ve->request, NULL);
+ WRITE_ONCE(ve->base.execlists.queue_priority_hint,
+ INT_MIN);
rb_erase_cached(rb, &execlists->virtual);
RB_CLEAR_NODE(rb);
GEM_BUG_ON(!(rq->execution_mask & engine->mask));
- rq->engine = engine;
+ WRITE_ONCE(rq->engine, engine);
if (engine != ve->siblings[0]) {
u32 *regs = ve->context.lrc_reg_state;
@@ -2121,6 +2152,9 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
GEM_BUG_ON(last &&
!can_merge_ctx(last->context,
rq->context));
+ GEM_BUG_ON(last &&
+ i915_seqno_passed(last->fence.seqno,
+ rq->fence.seqno));
submit = true;
last = rq;
@@ -2159,7 +2193,7 @@ done:
* Skip if we ended up with exactly the same set of requests,
* e.g. trying to timeslice a pair of ordered contexts
*/
- if (!memcmp(execlists->active, execlists->pending,
+ if (!memcmp(active, execlists->pending,
(port - execlists->pending + 1) * sizeof(*port))) {
do
execlists_schedule_out(fetch_and_zero(port));
@@ -2170,7 +2204,7 @@ done:
clear_ports(port + 1, last_port - port);
execlists_submit_ports(engine);
- set_preempt_timeout(engine);
+ set_preempt_timeout(engine, *active);
} else {
skip_submit:
ring_set_paused(engine, 0);
@@ -2191,6 +2225,7 @@ cancel_port_requests(struct intel_engine_execlists * const execlists)
execlists_schedule_out(*port);
clear_ports(execlists->inflight, ARRAY_SIZE(execlists->inflight));
+ smp_wmb(); /* complete the seqlock for execlists_active() */
WRITE_ONCE(execlists->active, execlists->inflight);
}
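
The smp_wmb() here is the writer half of the makeshift seqlock named in
the comment; a sketch of the matching reader (the body below is an
assumption based on the comments, not code taken from this patch):

	static struct i915_request *
	sketch_first_active(const struct intel_engine_execlists *el)
	{
		struct i915_request * const *active;
		struct i915_request *rq;

		do {
			active = READ_ONCE(el->active);
			smp_rmb(); /* pairs with the writer's smp_wmb() */
			rq = *active;
		} while (active != READ_ONCE(el->active));

		return rq;
	}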
@@ -2345,6 +2380,7 @@ static void process_csb(struct intel_engine_cs *engine)
/* Point active to the new ELSP; prevent overwriting */
WRITE_ONCE(execlists->active, execlists->pending);
+ smp_wmb(); /* notify execlists_active() */
/* cancel old inflight, prepare for switch */
trace_ports(execlists, "preempted", old);
@@ -2352,11 +2388,12 @@ static void process_csb(struct intel_engine_cs *engine)
execlists_schedule_out(*old++);
/* switch pending to inflight */
- WRITE_ONCE(execlists->active,
- memcpy(execlists->inflight,
- execlists->pending,
- execlists_num_ports(execlists) *
- sizeof(*execlists->pending)));
+ memcpy(execlists->inflight,
+ execlists->pending,
+ execlists_num_ports(execlists) *
+ sizeof(*execlists->pending));
+ smp_wmb(); /* complete the seqlock */
+ WRITE_ONCE(execlists->active, execlists->inflight);
WRITE_ONCE(execlists->pending[0], NULL);
} else {
@@ -2579,6 +2616,10 @@ static void __execlists_unhold(struct i915_request *rq)
struct i915_request *w =
container_of(p->waiter, typeof(*w), sched);
+ /* Propagate any change in error status */
+ if (rq->fence.error)
+ i915_request_set_error_once(w, rq->fence.error);
+
if (w->engine != rq->engine)
continue;
@@ -2966,6 +3007,7 @@ __execlists_update_reg_state(const struct intel_context *ce,
regs[CTX_RING_START] = i915_ggtt_offset(ring->vma);
regs[CTX_RING_HEAD] = head;
regs[CTX_RING_TAIL] = ring->tail;
+ regs[CTX_RING_CTL] = RING_CTL_SIZE(ring->size) | RING_VALID;
/* RPCS */
if (engine->class == RENDER_CLASS) {
@@ -3636,9 +3678,6 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
if (!rq)
goto unwind;
- /* We still have requests in-flight; the engine should be active */
- GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
-
ce = rq->context;
GEM_BUG_ON(!i915_vma_is_pinned(ce->state));
@@ -3648,8 +3687,12 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
goto out_replay;
}
+ /* We still have requests in-flight; the engine should be active */
+ GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
+
/* Context has requests still in-flight; it should not be idle! */
GEM_BUG_ON(i915_active_is_idle(&ce->active));
+
rq = active_request(ce->timeline, rq);
head = intel_ring_wrap(ce->ring, rq->head);
GEM_BUG_ON(head == ce->ring->tail);
@@ -3723,7 +3766,10 @@ static void execlists_reset_rewind(struct intel_engine_cs *engine, bool stalled)
static void nop_submission_tasklet(unsigned long data)
{
+ struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
+
/* The driver is wedged; don't process any more events. */
+ WRITE_ONCE(engine->execlists.queue_priority_hint, INT_MIN);
}
static void execlists_reset_cancel(struct intel_engine_cs *engine)
@@ -4119,26 +4165,6 @@ static int gen12_emit_flush_render(struct i915_request *request,
*cs++ = preparser_disable(false);
intel_ring_advance(request, cs);
-
- /*
- * Wa_1604544889:tgl
- */
- if (IS_TGL_REVID(request->i915, TGL_REVID_A0, TGL_REVID_A0)) {
- flags = 0;
- flags |= PIPE_CONTROL_CS_STALL;
- flags |= PIPE_CONTROL_HDC_PIPELINE_FLUSH;
-
- flags |= PIPE_CONTROL_STORE_DATA_INDEX;
- flags |= PIPE_CONTROL_QW_WRITE;
-
- cs = intel_ring_begin(request, 6);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- cs = gen8_emit_pipe_control(cs, flags,
- LRC_PPHWSP_SCRATCH_ADDR);
- intel_ring_advance(request, cs);
- }
}
return 0;
@@ -4877,7 +4903,7 @@ static intel_engine_mask_t virtual_submission_mask(struct virtual_engine *ve)
mask = rq->execution_mask;
if (unlikely(!mask)) {
/* Invalid selection, submit to a random engine in error */
- i915_request_skip(rq, -ENODEV);
+ i915_request_set_error_once(rq, -ENODEV);
mask = ve->siblings[0]->mask;
}
@@ -4891,7 +4917,7 @@ static intel_engine_mask_t virtual_submission_mask(struct virtual_engine *ve)
static void virtual_submission_tasklet(unsigned long data)
{
struct virtual_engine * const ve = (struct virtual_engine *)data;
- const int prio = ve->base.execlists.queue_priority_hint;
+ const int prio = READ_ONCE(ve->base.execlists.queue_priority_hint);
intel_engine_mask_t mask;
unsigned int n;
@@ -5287,11 +5313,15 @@ void intel_execlists_show_requests(struct intel_engine_cs *engine,
show_request(m, last, "\t\tE ");
}
- last = NULL;
- count = 0;
+ if (execlists->switch_priority_hint != INT_MIN)
+ drm_printf(m, "\t\tSwitch priority hint: %d\n",
+ READ_ONCE(execlists->switch_priority_hint));
if (execlists->queue_priority_hint != INT_MIN)
drm_printf(m, "\t\tQueue priority hint: %d\n",
- execlists->queue_priority_hint);
+ READ_ONCE(execlists->queue_priority_hint));
+
+ last = NULL;
+ count = 0;
for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
int i;
diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c
index bef132709854..66c07c32745c 100644
--- a/drivers/gpu/drm/i915/gt/intel_rc6.c
+++ b/drivers/gpu/drm/i915/gt/intel_rc6.c
@@ -7,6 +7,7 @@
#include <linux/pm_runtime.h>
#include "i915_drv.h"
+#include "i915_vgpu.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
#include "intel_rc6.h"
@@ -319,10 +320,10 @@ static int vlv_rc6_init(struct intel_rc6 *rc6)
return PTR_ERR(pctx);
}
- GEM_BUG_ON(range_overflows_t(u64,
- i915->dsm.start,
- pctx->stolen->start,
- U32_MAX));
+ GEM_BUG_ON(range_overflows_end_t(u64,
+ i915->dsm.start,
+ pctx->stolen->start,
+ U32_MAX));
pctx_paddr = i915->dsm.start + pctx->stolen->start;
intel_uncore_write(uncore, VLV_PCBR, pctx_paddr);
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index aef6ab58d7d9..8b170c1876b3 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -48,8 +48,10 @@ static void engine_skip_context(struct i915_request *rq)
lockdep_assert_held(&engine->active.lock);
list_for_each_entry_continue(rq, &engine->active.requests, sched.link)
- if (rq->context == hung_ctx)
- i915_request_skip(rq, -EIO);
+ if (rq->context == hung_ctx) {
+ i915_request_set_error_once(rq, -EIO);
+ __i915_request_skip(rq);
+ }
}
static void client_mark_guilty(struct i915_gem_context *ctx, bool banned)
@@ -92,13 +94,7 @@ static bool mark_guilty(struct i915_request *rq)
ctx = NULL;
rcu_read_unlock();
if (!ctx)
- return false;
-
- if (i915_gem_context_is_closed(ctx)) {
- intel_context_set_banned(rq->context);
- banned = true;
- goto out;
- }
+ return intel_context_is_banned(rq->context);
atomic_inc(&ctx->guilty_count);
@@ -154,11 +150,12 @@ void __i915_request_reset(struct i915_request *rq, bool guilty)
rcu_read_lock(); /* protect the GEM context */
if (guilty) {
- i915_request_skip(rq, -EIO);
+ i915_request_set_error_once(rq, -EIO);
+ __i915_request_skip(rq);
if (mark_guilty(rq))
engine_skip_context(rq);
} else {
- dma_fence_set_error(&rq->fence, -EAGAIN);
+ i915_request_set_error_once(rq, -EAGAIN);
mark_innocent(rq);
}
rcu_read_unlock();
@@ -785,7 +782,7 @@ static void nop_submit_request(struct i915_request *request)
unsigned long flags;
RQ_TRACE(request, "-EIO\n");
- dma_fence_set_error(&request->fence, -EIO);
+ i915_request_set_error_once(request, -EIO);
spin_lock_irqsave(&engine->active.lock, flags);
__i915_request_submit(request);
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index f70b903a98bc..1424582e4a9b 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -29,11 +29,10 @@
#include <linux/log2.h>
-#include <drm/i915_drm.h>
-
#include "gem/i915_gem_context.h"
#include "gen6_ppgtt.h"
+#include "gen7_renderclear.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_context.h"
@@ -897,9 +896,7 @@ static void reset_cancel(struct intel_engine_cs *engine)
/* Mark all submitted requests as skipped. */
list_for_each_entry(request, &engine->active.requests, sched.link) {
- if (!i915_request_signaled(request))
- dma_fence_set_error(&request->fence, -EIO);
-
+ i915_request_set_error_once(request, -EIO);
i915_request_mark_complete(request);
}
@@ -1360,7 +1357,9 @@ static int load_pd_dir(struct i915_request *rq,
return rq->engine->emit_flush(rq, EMIT_FLUSH);
}
-static inline int mi_set_context(struct i915_request *rq, u32 flags)
+static inline int mi_set_context(struct i915_request *rq,
+ struct intel_context *ce,
+ u32 flags)
{
struct drm_i915_private *i915 = rq->i915;
struct intel_engine_cs *engine = rq->engine;
@@ -1435,7 +1434,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
*cs++ = MI_NOOP;
*cs++ = MI_SET_CONTEXT;
- *cs++ = i915_ggtt_offset(rq->context->state) | flags;
+ *cs++ = i915_ggtt_offset(ce->state) | flags;
/*
* w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
* WaMiSetContext_Hang:snb,ivb,vlv
@@ -1550,13 +1549,56 @@ static int switch_mm(struct i915_request *rq, struct i915_address_space *vm)
return rq->engine->emit_flush(rq, EMIT_INVALIDATE);
}
+static int clear_residuals(struct i915_request *rq)
+{
+ struct intel_engine_cs *engine = rq->engine;
+ int ret;
+
+ ret = switch_mm(rq, vm_alias(engine->kernel_context->vm));
+ if (ret)
+ return ret;
+
+ if (engine->kernel_context->state) {
+ ret = mi_set_context(rq,
+ engine->kernel_context,
+ MI_MM_SPACE_GTT | MI_RESTORE_INHIBIT);
+ if (ret)
+ return ret;
+ }
+
+ ret = engine->emit_bb_start(rq,
+ engine->wa_ctx.vma->node.start, 0,
+ 0);
+ if (ret)
+ return ret;
+
+ ret = engine->emit_flush(rq, EMIT_FLUSH);
+ if (ret)
+ return ret;
+
+ /* Always invalidate before the next switch_mm() */
+ return engine->emit_flush(rq, EMIT_INVALIDATE);
+}
+
static int switch_context(struct i915_request *rq)
{
+ struct intel_engine_cs *engine = rq->engine;
struct intel_context *ce = rq->context;
+ void **residuals = NULL;
int ret;
GEM_BUG_ON(HAS_EXECLISTS(rq->i915));
+ if (engine->wa_ctx.vma && ce != engine->kernel_context) {
+ if (engine->wa_ctx.vma->private != ce) {
+ ret = clear_residuals(rq);
+ if (ret)
+ return ret;
+
+ residuals = &engine->wa_ctx.vma->private;
+ }
+ }
+
ret = switch_mm(rq, vm_alias(ce->vm));
if (ret)
return ret;
@@ -1564,7 +1606,7 @@ static int switch_context(struct i915_request *rq)
if (ce->state) {
u32 flags;
- GEM_BUG_ON(rq->engine->id != RCS0);
+ GEM_BUG_ON(engine->id != RCS0);
/* For resource streamer on HSW+ and power context elsewhere */
BUILD_BUG_ON(HSW_MI_RS_SAVE_STATE_EN != MI_SAVE_EXT_STATE_EN);
@@ -1576,7 +1618,7 @@ static int switch_context(struct i915_request *rq)
else
flags |= MI_RESTORE_INHIBIT;
- ret = mi_set_context(rq, flags);
+ ret = mi_set_context(rq, ce, flags);
if (ret)
return ret;
}
@@ -1585,6 +1627,20 @@ static int switch_context(struct i915_request *rq)
if (ret)
return ret;
+ /*
+ * Now past the point of no return, this request _will_ be emitted.
+ *
+ * Or at least this preamble will be emitted; the request may be
+ * interrupted prior to submitting the user payload. If so, we
+ * still submit the "empty" request in order to preserve global
+ * state tracking such as this, our tracking of the current
+ * dirty context.
+ */
+ if (residuals) {
+ intel_context_put(*residuals);
+ *residuals = intel_context_get(ce);
+ }
+
return 0;
}
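
Taken together, the wa_ctx hunks implement a last-owner protocol; a
condensed sketch (in the patch the ownership update is deferred until
the request is past the point of no return):

	/*
	 * vma->private pins the last user context whose state may
	 * still linger in the EU residuals; only an ownership change
	 * emits the clearing batch (via the kernel context).
	 */
	if (engine->wa_ctx.vma && ce != engine->kernel_context &&
	    engine->wa_ctx.vma->private != ce) {
		ret = clear_residuals(rq); /* scrub stale EU state */
		if (ret)
			return ret;

		/* later, once the request cannot fail: */
		intel_context_put(engine->wa_ctx.vma->private);
		engine->wa_ctx.vma->private = intel_context_get(ce);
	}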
@@ -1769,6 +1825,11 @@ static void ring_release(struct intel_engine_cs *engine)
intel_engine_cleanup_common(engine);
+ if (engine->wa_ctx.vma) {
+ intel_context_put(engine->wa_ctx.vma->private);
+ i915_vma_unpin_and_release(&engine->wa_ctx.vma, 0);
+ }
+
intel_ring_unpin(engine->legacy.ring);
intel_ring_put(engine->legacy.ring);
@@ -1916,6 +1977,64 @@ static void setup_vecs(struct intel_engine_cs *engine)
engine->emit_fini_breadcrumb = gen7_xcs_emit_breadcrumb;
}
+static int gen7_ctx_switch_bb_setup(struct intel_engine_cs * const engine,
+ struct i915_vma * const vma)
+{
+ return gen7_setup_clear_gpr_bb(engine, vma);
+}
+
+static int gen7_ctx_switch_bb_init(struct intel_engine_cs *engine)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ int size;
+ int err;
+
+ size = gen7_ctx_switch_bb_setup(engine, NULL /* probe size */);
+ if (size <= 0)
+ return size;
+
+ size = ALIGN(size, PAGE_SIZE);
+ obj = i915_gem_object_create_internal(engine->i915, size);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ vma = i915_vma_instance(obj, engine->gt->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_obj;
+ }
+
+ vma->private = intel_context_create(engine); /* dummy residuals */
+ if (IS_ERR(vma->private)) {
+ err = PTR_ERR(vma->private);
+ goto err_obj;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_HIGH);
+ if (err)
+ goto err_private;
+
+ err = i915_vma_sync(vma);
+ if (err)
+ goto err_unpin;
+
+ err = gen7_ctx_switch_bb_setup(engine, vma);
+ if (err)
+ goto err_unpin;
+
+ engine->wa_ctx.vma = vma;
+ return 0;
+
+err_unpin:
+ i915_vma_unpin(vma);
+err_private:
+ intel_context_put(vma->private);
+err_obj:
+ i915_gem_object_put(obj);
+ return err;
+}
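
gen7_ctx_switch_bb_setup() is called twice above under a probe-then-emit
convention; the contract, as inferred from this hunk (the NULL-vma probe
returning a byte count is an inference, it is not documented elsewhere
in the diff):

	/* Phase 1: probe -- returns the batch size in bytes, or <= 0. */
	size = gen7_ctx_switch_bb_setup(engine, NULL /* probe size */);

	/* Phase 2: emit -- write the batch into the pinned vma. */
	if (size > 0)
		err = gen7_ctx_switch_bb_setup(engine, vma);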
+
int intel_ring_submission_setup(struct intel_engine_cs *engine)
{
struct intel_timeline *timeline;
@@ -1969,11 +2088,19 @@ int intel_ring_submission_setup(struct intel_engine_cs *engine)
GEM_BUG_ON(timeline->hwsp_ggtt != engine->status_page.vma);
+ if (IS_GEN(engine->i915, 7) && engine->class == RENDER_CLASS) {
+ err = gen7_ctx_switch_bb_init(engine);
+ if (err)
+ goto err_ring_unpin;
+ }
+
/* Finally, take ownership and responsibility for cleanup! */
engine->release = ring_release;
return 0;
+err_ring_unpin:
+ intel_ring_unpin(ring);
err_ring:
intel_ring_put(ring);
err_timeline_unpin:
@@ -1984,3 +2111,7 @@ err:
intel_engine_cleanup_common(engine);
return err;
}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftest_ring_submission.c"
+#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
index 30ae29b30f11..87f9638d2cbf 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.c
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -4,6 +4,8 @@
* Copyright © 2019 Intel Corporation
*/
+#include <drm/i915_drm.h>
+
#include "i915_drv.h"
#include "intel_gt.h"
#include "intel_gt_irq.h"
@@ -55,7 +57,7 @@ static u32 rps_pm_mask(struct intel_rps *rps, u8 val)
if (val < rps->max_freq_softlimit)
mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;
- mask &= rps->pm_events;
+ mask &= READ_ONCE(rps->pm_events);
return rps_pm_sanitize_mask(rps, ~mask);
}
@@ -68,17 +70,19 @@ static void rps_reset_ei(struct intel_rps *rps)
static void rps_enable_interrupts(struct intel_rps *rps)
{
struct intel_gt *gt = rps_to_gt(rps);
+ u32 events;
rps_reset_ei(rps);
if (IS_VALLEYVIEW(gt->i915))
/* WaGsvRC0ResidencyMethod:vlv */
- rps->pm_events = GEN6_PM_RP_UP_EI_EXPIRED;
+ events = GEN6_PM_RP_UP_EI_EXPIRED;
else
- rps->pm_events = (GEN6_PM_RP_UP_THRESHOLD |
- GEN6_PM_RP_DOWN_THRESHOLD |
- GEN6_PM_RP_DOWN_TIMEOUT);
+ events = (GEN6_PM_RP_UP_THRESHOLD |
+ GEN6_PM_RP_DOWN_THRESHOLD |
+ GEN6_PM_RP_DOWN_TIMEOUT);
+ WRITE_ONCE(rps->pm_events, events);
spin_lock_irq(&gt->irq_lock);
gen6_gt_pm_enable_irq(gt, rps->pm_events);
spin_unlock_irq(&gt->irq_lock);
@@ -115,8 +119,7 @@ static void rps_disable_interrupts(struct intel_rps *rps)
{
struct intel_gt *gt = rps_to_gt(rps);
- rps->pm_events = 0;
-
+ WRITE_ONCE(rps->pm_events, 0);
set(gt->uncore, GEN6_PMINTRMSK, rps_pm_sanitize_mask(rps, ~0u));
spin_lock_irq(&gt->irq_lock);
@@ -642,7 +645,7 @@ void intel_rps_mark_interactive(struct intel_rps *rps, bool interactive)
{
mutex_lock(&rps->power.mutex);
if (interactive) {
- if (!rps->power.interactive++ && rps->active)
+ if (!rps->power.interactive++ && READ_ONCE(rps->active))
rps_set_power(rps, HIGH_POWER);
} else {
GEM_BUG_ON(!rps->power.interactive);
@@ -719,11 +722,15 @@ void intel_rps_unpark(struct intel_rps *rps)
* performance, jump directly to RPe as our starting frequency.
*/
mutex_lock(&rps->lock);
- rps->active = true;
+
+ WRITE_ONCE(rps->active, true);
+
freq = max(rps->cur_freq, rps->efficient_freq),
freq = clamp(freq, rps->min_freq_softlimit, rps->max_freq_softlimit);
intel_rps_set(rps, freq);
+
rps->last_adj = 0;
+
mutex_unlock(&rps->lock);
if (INTEL_GEN(rps_to_i915(rps)) >= 6)
@@ -743,7 +750,7 @@ void intel_rps_park(struct intel_rps *rps)
if (INTEL_GEN(i915) >= 6)
rps_disable_interrupts(rps);
- rps->active = false;
+ WRITE_ONCE(rps->active, false);
if (rps->last_freq <= rps->idle_freq)
return;
@@ -767,10 +774,10 @@ void intel_rps_park(struct intel_rps *rps)
void intel_rps_boost(struct i915_request *rq)
{
- struct intel_rps *rps = &rq->engine->gt->rps;
+ struct intel_rps *rps = &READ_ONCE(rq->engine)->gt->rps;
unsigned long flags;
- if (i915_request_signaled(rq) || !rps->active)
+ if (i915_request_signaled(rq) || !READ_ONCE(rps->active))
return;
/* Serializes with i915_request_retire() */
@@ -1453,12 +1460,12 @@ static void rps_work(struct work_struct *work)
u32 pm_iir = 0;
spin_lock_irq(&gt->irq_lock);
- pm_iir = fetch_and_zero(&rps->pm_iir);
+ pm_iir = fetch_and_zero(&rps->pm_iir) & READ_ONCE(rps->pm_events);
client_boost = atomic_read(&rps->num_waiters);
spin_unlock_irq(&gt->irq_lock);
/* Make sure we didn't queue anything we're not going to process. */
- if ((pm_iir & rps->pm_events) == 0 && !client_boost)
+ if (!pm_iir && !client_boost)
goto out;
mutex_lock(&rps->lock);
@@ -1554,11 +1561,15 @@ void gen11_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
{
struct intel_gt *gt = rps_to_gt(rps);
+ u32 events;
- if (pm_iir & rps->pm_events) {
+ events = pm_iir & READ_ONCE(rps->pm_events);
+ if (events) {
spin_lock(&gt->irq_lock);
- gen6_gt_pm_mask_irq(gt, pm_iir & rps->pm_events);
- rps->pm_iir |= pm_iir & rps->pm_events;
+
+ gen6_gt_pm_mask_irq(gt, events);
+ rps->pm_iir |= events;
+
schedule_work(&rps->work);
spin_unlock(&gt->irq_lock);
}
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c
index 54e1e55f3c81..91debbc97c9a 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline.c
+++ b/drivers/gpu/drm/i915/gt/intel_timeline.c
@@ -192,11 +192,15 @@ static void cacheline_release(struct intel_timeline_cacheline *cl)
static void cacheline_free(struct intel_timeline_cacheline *cl)
{
+ if (!i915_active_acquire_if_busy(&cl->active)) {
+ __idle_cacheline_free(cl);
+ return;
+ }
+
GEM_BUG_ON(ptr_test_bit(cl->vaddr, CACHELINE_FREE));
cl->vaddr = ptr_set_bit(cl->vaddr, CACHELINE_FREE);
- if (i915_active_is_idle(&cl->active))
- __idle_cacheline_free(cl);
+ i915_active_release(&cl->active);
}
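
The rewrite follows the acquire-if-busy idiom: either we obtain a
reference and may safely defer the free to the retirement path, or the
object is provably idle and is freed on the spot, with no window in
between. A generic sketch of the idiom (the marker step below is a
hypothetical placeholder; only the i915_active calls come from the
hunk):

	if (!i915_active_acquire_if_busy(&obj->active)) {
		free_object(obj);	/* idle: free immediately */
		return;
	}

	mark_for_deferred_free(obj);	/* hypothetical marker step */
	i915_active_release(&obj->active); /* retirement sees the mark */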
int intel_timeline_init(struct intel_timeline *timeline,
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index 06cef3c18f26..5176ad1a3976 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -575,12 +575,29 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
/* allow headerless messages for preemptible GPGPU context */
WA_SET_BIT_MASKED(GEN10_SAMPLER_MODE,
GEN11_SAMPLER_ENABLE_HEADLESS_MSG);
+
+ /* Wa_1604278689:icl,ehl */
+ wa_write(wal, IVB_FBC_RT_BASE, 0xFFFFFFFF & ~ILK_FBC_RT_VALID);
+ wa_write_masked_or(wal, IVB_FBC_RT_BASE_UPPER,
+ 0, /* write-only register; skip validation */
+ 0xFFFFFFFF);
+
+ /* Wa_1406306137:icl,ehl */
+ wa_masked_en(wal, GEN9_ROW_CHICKEN4, GEN11_DIS_PICK_2ND_EU);
}
static void tgl_ctx_workarounds_init(struct intel_engine_cs *engine,
struct i915_wa_list *wal)
{
- /* Wa_1409142259:tgl */
+ /*
+ * Wa_1409142259:tgl
+ * Wa_1409347922:tgl
+ * Wa_1409252684:tgl
+ * Wa_1409217633:tgl
+ * Wa_1409207793:tgl
+ * Wa_1409178076:tgl
+ * Wa_1408979724:tgl
+ */
WA_SET_BIT_MASKED(GEN11_COMMON_SLICE_CHICKEN3,
GEN12_DISABLE_CPS_AWARE_COLOR_PIPE);
@@ -593,6 +610,11 @@ static void tgl_ctx_workarounds_init(struct intel_engine_cs *engine,
*/
wa_add(wal, FF_MODE2, FF_MODE2_TDS_TIMER_MASK,
FF_MODE2_TDS_TIMER_128, 0);
+
+ /* WaDisableGPGPUMidThreadPreemption:tgl */
+ WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1,
+ GEN9_PREEMPT_GPGPU_LEVEL_MASK,
+ GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL);
}
static void
@@ -898,11 +920,6 @@ icl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
SLICE_UNIT_LEVEL_CLKGATE,
MSCUNIT_CLKGATE_DIS);
- /* Wa_1406680159:icl */
- wa_write_or(wal,
- SUBSLICE_UNIT_LEVEL_CLKGATE,
- GWUNIT_CLKGATE_DIS);
-
/* Wa_1406838659:icl (pre-prod) */
if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0))
wa_write_or(wal,
@@ -931,7 +948,7 @@ tgl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
SUBSLICE_UNIT_LEVEL_CLKGATE2,
CPSSUNIT_CLKGATE_DIS);
- /* Wa_1409180338:tgl */
+ /* Wa_1607087056:tgl also known as BUG:1409180338 */
if (IS_TGL_REVID(i915, TGL_REVID_A0, TGL_REVID_A0))
wa_write_or(wal,
SLICE_UNIT_LEVEL_CLKGATE,
@@ -1246,6 +1263,7 @@ static void tgl_whitelist_build(struct intel_engine_cs *engine)
case RENDER_CLASS:
/*
* WaAllowPMDepthAndInvocationCountAccessFromUMD:tgl
+ * Wa_1408556865:tgl
*
* This covers 4 registers which are next to one another :
* - PS_INVOCATION_COUNT
@@ -1259,6 +1277,9 @@ static void tgl_whitelist_build(struct intel_engine_cs *engine)
/* Wa_1808121037:tgl */
whitelist_reg(w, GEN7_COMMON_SLICE_CHICKEN1);
+
+ /* Wa_1806527549:tgl */
+ whitelist_reg(w, HIZ_CHICKEN);
break;
default:
break;
@@ -1325,19 +1346,21 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
struct drm_i915_private *i915 = engine->i915;
if (IS_TGL_REVID(i915, TGL_REVID_A0, TGL_REVID_A0)) {
- /* Wa_1606700617:tgl */
- wa_masked_en(wal,
- GEN9_CS_DEBUG_MODE1,
- FF_DOP_CLOCK_GATE_DISABLE);
-
- /* Wa_1607138336:tgl */
+ /*
+ * Wa_1607138336:tgl
+ * Wa_1607063988:tgl
+ */
wa_write_or(wal,
GEN9_CTX_PREEMPT_REG,
GEN12_DISABLE_POSH_BUSY_FF_DOP_CG);
- /* Wa_1607030317:tgl */
- /* Wa_1607186500:tgl */
- /* Wa_1607297627:tgl */
+ /*
+ * Wa_1607030317:tgl
+ * Wa_1607186500:tgl
+ * Wa_1607297627:tgl there are 3 entries for this WA in BSpec, 2
+ * of them say it is fixed in B0 and the other one says it is
+ * permanent
+ */
wa_masked_en(wal,
GEN6_RC_SLEEP_PSMI_CONTROL,
GEN12_WAIT_FOR_EVENT_POWER_DOWN_DISABLE |
@@ -1356,10 +1379,29 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
GEN7_FF_THREAD_MODE,
GEN12_FF_TESSELATION_DOP_GATE_DISABLE);
+ /*
+ * Wa_1409085225:tgl
+ * Wa_14010229206:tgl
+ */
+ wa_masked_en(wal, GEN9_ROW_CHICKEN4, GEN12_DISABLE_TDL_PUSH);
+
+ /* Wa_1408615072:tgl */
+ wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
+ VSUNIT_CLKGATE_DIS_TGL);
+ }
+
+ if (IS_TIGERLAKE(i915)) {
/* Wa_1606931601:tgl */
+ wa_masked_en(wal, GEN7_ROW_CHICKEN2, GEN12_DISABLE_EARLY_READ);
+
+ /* Wa_1409804808:tgl */
+ wa_masked_en(wal, GEN7_ROW_CHICKEN2,
+ GEN12_PUSH_CONST_DEREF_HOLD_DIS);
+
+ /* Wa_1606700617:tgl */
wa_masked_en(wal,
- GEN7_ROW_CHICKEN2,
- GEN12_DISABLE_EARLY_READ);
+ GEN9_CS_DEBUG_MODE1,
+ FF_DOP_CLOCK_GATE_DISABLE);
}
if (IS_GEN(i915, 11)) {
@@ -1425,10 +1467,38 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
GEN11_SCRATCH2,
GEN11_COHERENT_PARTIAL_WRITE_MERGE_ENABLE,
0);
+
+ /* WaEnable32PlaneMode:icl */
+ wa_masked_en(wal, GEN9_CSFE_CHICKEN1_RCS,
+ GEN11_ENABLE_32_PLANE_MODE);
+
+ /*
+ * Wa_1408615072:icl,ehl (vsunit)
+ * Wa_1407596294:icl,ehl (hsunit)
+ */
+ wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE,
+ VSUNIT_CLKGATE_DIS | HSUNIT_CLKGATE_DIS);
+
+ /* Wa_1407352427:icl,ehl */
+ wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
+ PSDUNIT_CLKGATE_DIS);
+
+ /* Wa_1406680159:icl,ehl */
+ wa_write_or(wal,
+ SUBSLICE_UNIT_LEVEL_CLKGATE,
+ GWUNIT_CLKGATE_DIS);
+
+ /*
+ * Wa_1408767742:icl[a2..forever],ehl[all]
+ * Wa_1605460711:icl[a0..c0]
+ */
+ wa_write_or(wal,
+ GEN7_FF_THREAD_MODE,
+ GEN12_FF_TESSELATION_DOP_GATE_DISABLE);
}
- if (IS_GEN_RANGE(i915, 9, 11)) {
- /* FtrPerCtxtPreemptionGranularityControl:skl,bxt,kbl,cfl,cnl,icl */
+ if (IS_GEN_RANGE(i915, 9, 12)) {
+ /* FtrPerCtxtPreemptionGranularityControl:skl,bxt,kbl,cfl,cnl,icl,tgl */
wa_masked_en(wal,
GEN7_FF_SLICE_CS_CHICKEN1,
GEN9_FFSC_PERCTX_PREEMPT_CTRL);
@@ -1595,15 +1665,34 @@ err_obj:
return ERR_PTR(err);
}
+static const struct {
+ u32 start;
+ u32 end;
+} mcr_ranges_gen8[] = {
+ { .start = 0x5500, .end = 0x55ff },
+ { .start = 0x7000, .end = 0x7fff },
+ { .start = 0x9400, .end = 0x97ff },
+ { .start = 0xb000, .end = 0xb3ff },
+ { .start = 0xe000, .end = 0xe7ff },
+ {},
+};
+
static bool mcr_range(struct drm_i915_private *i915, u32 offset)
{
+ int i;
+
+ if (INTEL_GEN(i915) < 8)
+ return false;
+
/*
- * Registers in this range are affected by the MCR selector
+ * Registers in these ranges are affected by the MCR selector
* which only controls CPU initiated MMIO. Routing does not
* work for CS access so we cannot verify them on this path.
*/
- if (INTEL_GEN(i915) >= 8 && (offset >= 0xb000 && offset <= 0xb4ff))
- return true;
+ for (i = 0; mcr_ranges_gen8[i].start; i++)
+ if (offset >= mcr_ranges_gen8[i].start &&
+ offset <= mcr_ranges_gen8[i].end)
+ return true;
return false;
}
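
A usage sketch for the widened lookup, assuming a hypothetical wa
readback-verification loop (the wal->list/wal->count iteration mirrors
how wa lists are walked elsewhere in this file):

	for (wa = wal->list; wa < wal->list + wal->count; wa++) {
		if (mcr_range(i915, i915_mmio_reg_offset(wa->reg)))
			continue; /* CS reads are not MCR-steered */
		/* ... read the register back and compare ... */
	}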
diff --git a/drivers/gpu/drm/i915/gt/ivb_clear_kernel.c b/drivers/gpu/drm/i915/gt/ivb_clear_kernel.c
new file mode 100644
index 000000000000..610ca7687735
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/ivb_clear_kernel.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ *
+ * Generated by: IGT Gpu Tools on Fri 21 Feb 2020 05:29:32 AM UTC
+ */
+
+static const u32 ivb_clear_kernel[] = {
+ 0x00000001, 0x26020128, 0x00000024, 0x00000000,
+ 0x00000040, 0x20280c21, 0x00000028, 0x00000001,
+ 0x01000010, 0x20000c20, 0x0000002c, 0x00000000,
+ 0x00010220, 0x34001c00, 0x00001400, 0x0000002c,
+ 0x00600001, 0x20600061, 0x00000000, 0x00000000,
+ 0x00000008, 0x20601c85, 0x00000e00, 0x0000000c,
+ 0x00000005, 0x20601ca5, 0x00000060, 0x00000001,
+ 0x00000008, 0x20641c85, 0x00000e00, 0x0000000d,
+ 0x00000005, 0x20641ca5, 0x00000064, 0x00000003,
+ 0x00000041, 0x207424a5, 0x00000064, 0x00000034,
+ 0x00000040, 0x206014a5, 0x00000060, 0x00000074,
+ 0x00000008, 0x20681c85, 0x00000e00, 0x00000008,
+ 0x00000005, 0x20681ca5, 0x00000068, 0x0000000f,
+ 0x00000041, 0x20701ca5, 0x00000060, 0x00000010,
+ 0x00000040, 0x206814a5, 0x00000068, 0x00000070,
+ 0x00600001, 0x20a00061, 0x00000000, 0x00000000,
+ 0x00000005, 0x206c1c85, 0x00000e00, 0x00000007,
+ 0x00000041, 0x206c1ca5, 0x0000006c, 0x00000004,
+ 0x00600001, 0x20800021, 0x008d0000, 0x00000000,
+ 0x00000001, 0x20800021, 0x0000006c, 0x00000000,
+ 0x00000001, 0x20840021, 0x00000068, 0x00000000,
+ 0x00000001, 0x20880061, 0x00000000, 0x00000003,
+ 0x00000005, 0x208c0d21, 0x00000086, 0xffffffff,
+ 0x05600032, 0x20a00fa1, 0x008d0080, 0x02190001,
+ 0x00000040, 0x20a01ca5, 0x000000a0, 0x00000001,
+ 0x05600032, 0x20a00fa1, 0x008d0080, 0x040a8001,
+ 0x02000040, 0x20281c21, 0x00000028, 0xffffffff,
+ 0x00010220, 0x34001c00, 0x00001400, 0xfffffffc,
+ 0x00000001, 0x26020128, 0x00000024, 0x00000000,
+ 0x00000001, 0x220010e4, 0x00000000, 0x00000000,
+ 0x00000001, 0x220831ec, 0x00000000, 0x007f007f,
+ 0x00600001, 0x20400021, 0x008d0000, 0x00000000,
+ 0x00600001, 0x2fe00021, 0x008d0000, 0x00000000,
+ 0x00200001, 0x20400121, 0x00450020, 0x00000000,
+ 0x00000001, 0x20480061, 0x00000000, 0x000f000f,
+ 0x00000005, 0x204c0d21, 0x00000046, 0xffffffef,
+ 0x00800001, 0x20600061, 0x00000000, 0x00000000,
+ 0x00800001, 0x20800061, 0x00000000, 0x00000000,
+ 0x00800001, 0x20a00061, 0x00000000, 0x00000000,
+ 0x00800001, 0x20c00061, 0x00000000, 0x00000000,
+ 0x00800001, 0x20e00061, 0x00000000, 0x00000000,
+ 0x00800001, 0x21000061, 0x00000000, 0x00000000,
+ 0x00800001, 0x21200061, 0x00000000, 0x00000000,
+ 0x00800001, 0x21400061, 0x00000000, 0x00000000,
+ 0x05600032, 0x20000fa0, 0x008d0040, 0x120a8000,
+ 0x00000040, 0x20402d21, 0x00000020, 0x00100010,
+ 0x05600032, 0x20000fa0, 0x008d0040, 0x120a8000,
+ 0x02000040, 0x22083d8c, 0x00000208, 0xffffffff,
+ 0x00800001, 0xa0000109, 0x00000602, 0x00000000,
+ 0x00000040, 0x22001c84, 0x00000200, 0x00000020,
+ 0x00010220, 0x34001c00, 0x00001400, 0xfffffff8,
+ 0x07600032, 0x20000fa0, 0x008d0fe0, 0x82000010,
+};
diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c
index 5633515c12e9..4a53ded7c2dd 100644
--- a/drivers/gpu/drm/i915/gt/mock_engine.c
+++ b/drivers/gpu/drm/i915/gt/mock_engine.c
@@ -244,9 +244,7 @@ static void mock_reset_cancel(struct intel_engine_cs *engine)
/* Mark all submitted requests as skipped. */
list_for_each_entry(request, &engine->active.requests, sched.link) {
- if (!i915_request_signaled(request))
- dma_fence_set_error(&request->fence, -EIO);
-
+ i915_request_set_error_once(request, -EIO);
i915_request_mark_complete(request);
}
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
index 43d4d589749f..697114dd1f47 100644
--- a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
@@ -142,6 +142,24 @@ out:
return err;
}
+static void engine_heartbeat_disable(struct intel_engine_cs *engine,
+ unsigned long *saved)
+{
+ *saved = engine->props.heartbeat_interval_ms;
+ engine->props.heartbeat_interval_ms = 0;
+
+ intel_engine_pm_get(engine);
+ intel_engine_park_heartbeat(engine);
+}
+
+static void engine_heartbeat_enable(struct intel_engine_cs *engine,
+ unsigned long saved)
+{
+ intel_engine_pm_put(engine);
+
+ engine->props.heartbeat_interval_ms = saved;
+}
+
static int live_idle_flush(void *arg)
{
struct intel_gt *gt = arg;
@@ -152,9 +170,11 @@ static int live_idle_flush(void *arg)
/* Check that we can flush the idle barriers */
for_each_engine(engine, gt, id) {
- intel_engine_pm_get(engine);
+ unsigned long heartbeat;
+
+ engine_heartbeat_disable(engine, &heartbeat);
err = __live_idle_pulse(engine, intel_engine_flush_barriers);
- intel_engine_pm_put(engine);
+ engine_heartbeat_enable(engine, heartbeat);
if (err)
break;
}
@@ -172,9 +192,11 @@ static int live_idle_pulse(void *arg)
/* Check that heartbeat pulses flush the idle barriers */
for_each_engine(engine, gt, id) {
- intel_engine_pm_get(engine);
+ unsigned long heartbeat;
+
+ engine_heartbeat_disable(engine, &heartbeat);
err = __live_idle_pulse(engine, intel_engine_pulse);
- intel_engine_pm_put(engine);
+ engine_heartbeat_enable(engine, heartbeat);
if (err && err != -ENODEV)
break;
diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
index c3514ec7b8db..2b2efff6e19d 100644
--- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
@@ -268,7 +268,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
cancel_rq:
if (err) {
- i915_request_skip(rq, err);
+ i915_request_set_error_once(rq, err);
i915_request_add(rq);
}
unpin_hws:
diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
index febd608c23a7..6f06ba750a0a 100644
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -90,6 +90,49 @@ static int wait_for_submit(struct intel_engine_cs *engine,
return -ETIME;
}
+static int wait_for_reset(struct intel_engine_cs *engine,
+ struct i915_request *rq,
+ unsigned long timeout)
+{
+ timeout += jiffies;
+
+ do {
+ cond_resched();
+ intel_engine_flush_submission(engine);
+
+ if (READ_ONCE(engine->execlists.pending[0]))
+ continue;
+
+ if (i915_request_completed(rq))
+ break;
+
+ if (READ_ONCE(rq->fence.error))
+ break;
+ } while (time_before(jiffies, timeout));
+
+ flush_scheduled_work();
+
+ if (rq->fence.error != -EIO) {
+ pr_err("%s: hanging request %llx:%lld not reset\n",
+ engine->name,
+ rq->fence.context,
+ rq->fence.seqno);
+ return -EINVAL;
+ }
+
+ /* Give the request a jiffie to complete after flushing the worker */
+ if (i915_request_wait(rq, 0,
+ max(0l, (long)(timeout - jiffies)) + 1) < 0) {
+ pr_err("%s: hanging request %llx:%lld did not complete\n",
+ engine->name,
+ rq->fence.context,
+ rq->fence.seqno);
+ return -ETIME;
+ }
+
+ return 0;
+}
+
static int live_sanitycheck(void *arg)
{
struct intel_gt *gt = arg;
@@ -1805,14 +1848,9 @@ static int __cancel_active0(struct live_preempt_cancel *arg)
if (err)
goto out;
- if (i915_request_wait(rq, 0, HZ / 5) < 0) {
- err = -EIO;
- goto out;
- }
-
- if (rq->fence.error != -EIO) {
- pr_err("Cancelled inflight0 request did not report -EIO\n");
- err = -EINVAL;
+ err = wait_for_reset(arg->engine, rq, HZ / 2);
+ if (err) {
+ pr_err("Cancelled inflight0 request did not reset\n");
goto out;
}
@@ -1870,10 +1908,9 @@ static int __cancel_active1(struct live_preempt_cancel *arg)
goto out;
igt_spinner_end(&arg->a.spin);
- if (i915_request_wait(rq[1], 0, HZ / 5) < 0) {
- err = -EIO;
+ err = wait_for_reset(arg->engine, rq[1], HZ / 2);
+ if (err)
goto out;
- }
if (rq[0]->fence.error != 0) {
pr_err("Normal inflight0 request did not complete\n");
@@ -1953,10 +1990,9 @@ static int __cancel_queued(struct live_preempt_cancel *arg)
if (err)
goto out;
- if (i915_request_wait(rq[2], 0, HZ / 5) < 0) {
- err = -EIO;
+ err = wait_for_reset(arg->engine, rq[2], HZ / 2);
+ if (err)
goto out;
- }
if (rq[0]->fence.error != -EIO) {
pr_err("Cancelled inflight0 request did not report -EIO\n");
@@ -2014,14 +2050,9 @@ static int __cancel_hostile(struct live_preempt_cancel *arg)
if (err)
goto out;
- if (i915_request_wait(rq, 0, HZ / 5) < 0) {
- err = -EIO;
- goto out;
- }
-
- if (rq->fence.error != -EIO) {
- pr_err("Cancelled inflight0 request did not report -EIO\n");
- err = -EINVAL;
+ err = wait_for_reset(arg->engine, rq, HZ / 2);
+ if (err) {
+ pr_err("Cancelled inflight0 request did not reset\n");
goto out;
}
@@ -4015,6 +4046,32 @@ static int emit_semaphore_signal(struct intel_context *ce, void *slot)
return 0;
}
+static int context_flush(struct intel_context *ce, long timeout)
+{
+ struct i915_request *rq;
+ struct dma_fence *fence;
+ int err = 0;
+
+ rq = intel_engine_create_kernel_request(ce->engine);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ fence = i915_active_fence_get(&ce->timeline->last_request);
+ if (fence) {
+ i915_request_await_dma_fence(rq, fence);
+ dma_fence_put(fence);
+ }
+
+ rq = i915_request_get(rq);
+ i915_request_add(rq);
+ if (i915_request_wait(rq, 0, timeout) < 0)
+ err = -ETIME;
+ i915_request_put(rq);
+
+ rmb(); /* We know the request is written, make sure all state is too! */
+ return err;
+}
+
static int live_lrc_layout(void *arg)
{
struct intel_gt *gt = arg;
@@ -4638,18 +4695,10 @@ static int __lrc_timestamp(const struct lrc_timestamp *arg, bool preempt)
wmb();
}
- if (i915_request_wait(rq, 0, HZ / 2) < 0) {
- err = -ETIME;
- goto err;
- }
-
- /* and wait for switch to kernel */
- if (igt_flush_test(arg->engine->i915)) {
- err = -EIO;
+ /* And wait for switch to kernel (to save our context to memory) */
+ err = context_flush(arg->ce[0], HZ / 2);
+ if (err)
goto err;
- }
-
- rmb();
if (!timestamp_advanced(arg->poison, slot[1])) {
pr_err("%s(%s): invalid timestamp on restore, context:%x, request:%x\n",
@@ -4674,9 +4723,9 @@ err:
static int live_lrc_timestamp(void *arg)
{
+ struct lrc_timestamp data = {};
struct intel_gt *gt = arg;
enum intel_engine_id id;
- struct lrc_timestamp data;
const u32 poison[] = {
0,
S32_MAX,
@@ -4748,6 +4797,677 @@ err:
return 0;
}
+static struct i915_vma *
+create_user_vma(struct i915_address_space *vm, unsigned long size)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ int err;
+
+ obj = i915_gem_object_create_internal(vm->i915, size);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma)) {
+ i915_gem_object_put(obj);
+ return vma;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err) {
+ i915_gem_object_put(obj);
+ return ERR_PTR(err);
+ }
+
+ return vma;
+}
+
+static struct i915_vma *
+store_context(struct intel_context *ce, struct i915_vma *scratch)
+{
+ struct i915_vma *batch;
+ u32 dw, x, *cs, *hw;
+
+ batch = create_user_vma(ce->vm, SZ_64K);
+ if (IS_ERR(batch))
+ return batch;
+
+ cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
+ if (IS_ERR(cs)) {
+ i915_vma_put(batch);
+ return ERR_CAST(cs);
+ }
+
+ x = 0;
+ dw = 0;
+ hw = ce->engine->pinned_default_state;
+ hw += LRC_STATE_PN * PAGE_SIZE / sizeof(*hw);
+ do {
+ u32 len = hw[dw] & 0x7f;
+
+ if (hw[dw] == 0) {
+ dw++;
+ continue;
+ }
+
+ if ((hw[dw] & GENMASK(31, 23)) != MI_INSTR(0x22, 0)) {
+ dw += len + 2;
+ continue;
+ }
+
+ dw++;
+ len = (len + 1) / 2;
+ while (len--) {
+ *cs++ = MI_STORE_REGISTER_MEM_GEN8;
+ *cs++ = hw[dw];
+ *cs++ = lower_32_bits(scratch->node.start + x);
+ *cs++ = upper_32_bits(scratch->node.start + x);
+
+ dw += 2;
+ x += 4;
+ }
+ } while (dw < PAGE_SIZE / sizeof(u32) &&
+ (hw[dw] & ~BIT(0)) != MI_BATCH_BUFFER_END);
+
+ *cs++ = MI_BATCH_BUFFER_END;
+
+ i915_gem_object_flush_map(batch->obj);
+ i915_gem_object_unpin_map(batch->obj);
+
+ return batch;
+}
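
store_context() walks the default context image looking for
MI_LOAD_REGISTER_IMM packets; a short worked decode of one packet,
assuming the canonical gen8+ LRI encoding (header MI_INSTR(0x22, 2n-1)
for n register/value pairs):

	u32 header = hw[dw];
	u32 len = header & 0x7f;	/* dword length field = 2n - 1 */
	u32 pairs = (len + 1) / 2;	/* n (register, value) pairs */

	for (dw++; pairs--; dw += 2) {
		u32 reg = hw[dw];	/* register offset */
		u32 def = hw[dw + 1];	/* default value (unused here) */
		/* emit MI_STORE_REGISTER_MEM_GEN8 for 'reg' */
	}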
+
+static int move_to_active(struct i915_request *rq,
+ struct i915_vma *vma,
+ unsigned int flags)
+{
+ int err;
+
+ i915_vma_lock(vma);
+ err = i915_request_await_object(rq, vma->obj, flags);
+ if (!err)
+ err = i915_vma_move_to_active(vma, rq, flags);
+ i915_vma_unlock(vma);
+
+ return err;
+}
+
+static struct i915_request *
+record_registers(struct intel_context *ce,
+ struct i915_vma *before,
+ struct i915_vma *after,
+ u32 *sema)
+{
+ struct i915_vma *b_before, *b_after;
+ struct i915_request *rq;
+ u32 *cs;
+ int err;
+
+ b_before = store_context(ce, before);
+ if (IS_ERR(b_before))
+ return ERR_CAST(b_before);
+
+ b_after = store_context(ce, after);
+ if (IS_ERR(b_after)) {
+ rq = ERR_CAST(b_after);
+ goto err_before;
+ }
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq))
+ goto err_after;
+
+ err = move_to_active(rq, before, EXEC_OBJECT_WRITE);
+ if (err)
+ goto err_rq;
+
+ err = move_to_active(rq, b_before, 0);
+ if (err)
+ goto err_rq;
+
+ err = move_to_active(rq, after, EXEC_OBJECT_WRITE);
+ if (err)
+ goto err_rq;
+
+ err = move_to_active(rq, b_after, 0);
+ if (err)
+ goto err_rq;
+
+ cs = intel_ring_begin(rq, 14);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto err_rq;
+ }
+
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
+ *cs++ = MI_BATCH_BUFFER_START_GEN8 | BIT(8);
+ *cs++ = lower_32_bits(b_before->node.start);
+ *cs++ = upper_32_bits(b_before->node.start);
+
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+ *cs++ = MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_NEQ_SDD;
+ *cs++ = 0;
+ *cs++ = i915_ggtt_offset(ce->engine->status_page.vma) +
+ offset_in_page(sema);
+ *cs++ = 0;
+ *cs++ = MI_NOOP;
+
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
+ *cs++ = MI_BATCH_BUFFER_START_GEN8 | BIT(8);
+ *cs++ = lower_32_bits(b_after->node.start);
+ *cs++ = upper_32_bits(b_after->node.start);
+
+ intel_ring_advance(rq, cs);
+
+ WRITE_ONCE(*sema, 0);
+ i915_request_get(rq);
+ i915_request_add(rq);
+err_after:
+ i915_vma_put(b_after);
+err_before:
+ i915_vma_put(b_before);
+ return rq;
+
+err_rq:
+ i915_request_add(rq);
+ rq = ERR_PTR(err);
+ goto err_after;
+}
+
+static struct i915_vma *load_context(struct intel_context *ce, u32 poison)
+{
+ struct i915_vma *batch;
+ u32 dw, *cs, *hw;
+
+ batch = create_user_vma(ce->vm, SZ_64K);
+ if (IS_ERR(batch))
+ return batch;
+
+ cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
+ if (IS_ERR(cs)) {
+ i915_vma_put(batch);
+ return ERR_CAST(cs);
+ }
+
+ dw = 0;
+ hw = ce->engine->pinned_default_state;
+ hw += LRC_STATE_PN * PAGE_SIZE / sizeof(*hw);
+ do {
+ u32 len = hw[dw] & 0x7f;
+
+ if (hw[dw] == 0) {
+ dw++;
+ continue;
+ }
+
+ if ((hw[dw] & GENMASK(31, 23)) != MI_INSTR(0x22, 0)) {
+ dw += len + 2;
+ continue;
+ }
+
+ dw++;
+ len = (len + 1) / 2;
+ *cs++ = MI_LOAD_REGISTER_IMM(len);
+ while (len--) {
+ *cs++ = hw[dw];
+ *cs++ = poison;
+ dw += 2;
+ }
+ } while (dw < PAGE_SIZE / sizeof(u32) &&
+ (hw[dw] & ~BIT(0)) != MI_BATCH_BUFFER_END);
+
+ *cs++ = MI_BATCH_BUFFER_END;
+
+ i915_gem_object_flush_map(batch->obj);
+ i915_gem_object_unpin_map(batch->obj);
+
+ return batch;
+}
+
+static int poison_registers(struct intel_context *ce, u32 poison, u32 *sema)
+{
+ struct i915_request *rq;
+ struct i915_vma *batch;
+ u32 *cs;
+ int err;
+
+ batch = load_context(ce, poison);
+ if (IS_ERR(batch))
+ return PTR_ERR(batch);
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_batch;
+ }
+
+ err = move_to_active(rq, batch, 0);
+ if (err)
+ goto err_rq;
+
+ cs = intel_ring_begin(rq, 8);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto err_rq;
+ }
+
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
+ *cs++ = MI_BATCH_BUFFER_START_GEN8 | BIT(8);
+ *cs++ = lower_32_bits(batch->node.start);
+ *cs++ = upper_32_bits(batch->node.start);
+
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = i915_ggtt_offset(ce->engine->status_page.vma) +
+ offset_in_page(sema);
+ *cs++ = 0;
+ *cs++ = 1;
+
+ intel_ring_advance(rq, cs);
+
+ rq->sched.attr.priority = I915_PRIORITY_BARRIER;
+err_rq:
+ i915_request_add(rq);
+err_batch:
+ i915_vma_put(batch);
+ return err;
+}
+
+static bool is_moving(u32 a, u32 b)
+{
+ return a != b;
+}
+
+static int compare_isolation(struct intel_engine_cs *engine,
+ struct i915_vma *ref[2],
+ struct i915_vma *result[2],
+ struct intel_context *ce,
+ u32 poison)
+{
+ u32 x, dw, *hw, *lrc;
+ u32 *A[2], *B[2];
+ int err = 0;
+
+ A[0] = i915_gem_object_pin_map(ref[0]->obj, I915_MAP_WC);
+ if (IS_ERR(A[0]))
+ return PTR_ERR(A[0]);
+
+ A[1] = i915_gem_object_pin_map(ref[1]->obj, I915_MAP_WC);
+ if (IS_ERR(A[1])) {
+ err = PTR_ERR(A[1]);
+ goto err_A0;
+ }
+
+ B[0] = i915_gem_object_pin_map(result[0]->obj, I915_MAP_WC);
+ if (IS_ERR(B[0])) {
+ err = PTR_ERR(B[0]);
+ goto err_A1;
+ }
+
+ B[1] = i915_gem_object_pin_map(result[1]->obj, I915_MAP_WC);
+ if (IS_ERR(B[1])) {
+ err = PTR_ERR(B[1]);
+ goto err_B0;
+ }
+
+ lrc = i915_gem_object_pin_map(ce->state->obj,
+ i915_coherent_map_type(engine->i915));
+ if (IS_ERR(lrc)) {
+ err = PTR_ERR(lrc);
+ goto err_B1;
+ }
+ lrc += LRC_STATE_PN * PAGE_SIZE / sizeof(*hw);
+
+ x = 0;
+ dw = 0;
+ hw = engine->pinned_default_state;
+ hw += LRC_STATE_PN * PAGE_SIZE / sizeof(*hw);
+ do {
+ u32 len = hw[dw] & 0x7f;
+
+ if (hw[dw] == 0) {
+ dw++;
+ continue;
+ }
+
+ if ((hw[dw] & GENMASK(31, 23)) != MI_INSTR(0x22, 0)) {
+ dw += len + 2;
+ continue;
+ }
+
+ dw++;
+ len = (len + 1) / 2;
+ while (len--) {
+ if (!is_moving(A[0][x], A[1][x]) &&
+ (A[0][x] != B[0][x] || A[1][x] != B[1][x])) {
+ switch (hw[dw] & 4095) {
+ case 0x30: /* RING_HEAD */
+ case 0x34: /* RING_TAIL */
+ break;
+
+ default:
+ pr_err("%s[%d]: Mismatch for register %4x, default %08x, reference %08x, result (%08x, %08x), poison %08x, context %08x\n",
+ engine->name, dw,
+ hw[dw], hw[dw + 1],
+ A[0][x], B[0][x], B[1][x],
+ poison, lrc[dw + 1]);
+ err = -EINVAL;
+ break;
+ }
+ }
+ dw += 2;
+ x++;
+ }
+ } while (dw < PAGE_SIZE / sizeof(u32) &&
+ (hw[dw] & ~BIT(0)) != MI_BATCH_BUFFER_END);
+
+ i915_gem_object_unpin_map(ce->state->obj);
+err_B1:
+ i915_gem_object_unpin_map(result[1]->obj);
+err_B0:
+ i915_gem_object_unpin_map(result[0]->obj);
+err_A1:
+ i915_gem_object_unpin_map(ref[1]->obj);
+err_A0:
+ i915_gem_object_unpin_map(ref[0]->obj);
+ return err;
+}
+
+static int __lrc_isolation(struct intel_engine_cs *engine, u32 poison)
+{
+ u32 *sema = memset32(engine->status_page.addr + 1000, 0, 1);
+ struct i915_vma *ref[2], *result[2];
+ struct intel_context *A, *B;
+ struct i915_request *rq;
+ int err;
+
+ A = intel_context_create(engine);
+ if (IS_ERR(A))
+ return PTR_ERR(A);
+
+ B = intel_context_create(engine);
+ if (IS_ERR(B)) {
+ err = PTR_ERR(B);
+ goto err_A;
+ }
+
+ ref[0] = create_user_vma(A->vm, SZ_64K);
+ if (IS_ERR(ref[0])) {
+ err = PTR_ERR(ref[0]);
+ goto err_B;
+ }
+
+ ref[1] = create_user_vma(A->vm, SZ_64K);
+ if (IS_ERR(ref[1])) {
+ err = PTR_ERR(ref[1]);
+ goto err_ref0;
+ }
+
+ rq = record_registers(A, ref[0], ref[1], sema);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_ref1;
+ }
+
+ WRITE_ONCE(*sema, 1);
+ wmb();
+
+ if (i915_request_wait(rq, 0, HZ / 2) < 0) {
+ i915_request_put(rq);
+ err = -ETIME;
+ goto err_ref1;
+ }
+ i915_request_put(rq);
+
+ result[0] = create_user_vma(A->vm, SZ_64K);
+ if (IS_ERR(result[0])) {
+ err = PTR_ERR(result[0]);
+ goto err_ref1;
+ }
+
+ result[1] = create_user_vma(A->vm, SZ_64K);
+ if (IS_ERR(result[1])) {
+ err = PTR_ERR(result[1]);
+ goto err_result0;
+ }
+
+ rq = record_registers(A, result[0], result[1], sema);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_result1;
+ }
+
+ err = poison_registers(B, poison, sema);
+ if (err) {
+ WRITE_ONCE(*sema, -1);
+ i915_request_put(rq);
+ goto err_result1;
+ }
+
+ if (i915_request_wait(rq, 0, HZ / 2) < 0) {
+ i915_request_put(rq);
+ err = -ETIME;
+ goto err_result1;
+ }
+ i915_request_put(rq);
+
+ err = compare_isolation(engine, ref, result, A, poison);
+
+err_result1:
+ i915_vma_put(result[1]);
+err_result0:
+ i915_vma_put(result[0]);
+err_ref1:
+ i915_vma_put(ref[1]);
+err_ref0:
+ i915_vma_put(ref[0]);
+err_B:
+ intel_context_put(B);
+err_A:
+ intel_context_put(A);
+ return err;
+}
+
+static bool skip_isolation(const struct intel_engine_cs *engine)
+{
+ if (engine->class == COPY_ENGINE_CLASS && INTEL_GEN(engine->i915) == 9)
+ return true;
+
+ if (engine->class == RENDER_CLASS && INTEL_GEN(engine->i915) == 11)
+ return true;
+
+ return false;
+}
+
+static int live_lrc_isolation(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ const u32 poison[] = {
+ STACK_MAGIC,
+ 0x3a3a3a3a,
+ 0x5c5c5c5c,
+ 0xffffffff,
+ 0xffff0000,
+ };
+
+ /*
+ * Our goal is to try and verify that per-context state cannot be
+ * tampered with by another non-privileged client.
+ *
+ * We take the list of context registers from the LRI in the default
+ * context image and attempt to modify that list from a remote context.
+ */
+
+ for_each_engine(engine, gt, id) {
+ int err = 0;
+ int i;
+
+ /* Just don't even ask */
+ if (!IS_ENABLED(CONFIG_DRM_I915_SELFTEST_BROKEN) &&
+ skip_isolation(engine))
+ continue;
+
+ intel_engine_pm_get(engine);
+ if (engine->pinned_default_state) {
+ for (i = 0; i < ARRAY_SIZE(poison); i++) {
+ err = __lrc_isolation(engine, poison[i]);
+ if (err)
+ break;
+
+ err = __lrc_isolation(engine, ~poison[i]);
+ if (err)
+ break;
+ }
+ }
+ intel_engine_pm_put(engine);
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static void garbage_reset(struct intel_engine_cs *engine,
+ struct i915_request *rq)
+{
+ const unsigned int bit = I915_RESET_ENGINE + engine->id;
+ unsigned long *lock = &engine->gt->reset.flags;
+
+ if (test_and_set_bit(bit, lock))
+ return;
+
+ tasklet_disable(&engine->execlists.tasklet);
+
+ if (!rq->fence.error)
+ intel_engine_reset(engine, NULL);
+
+ tasklet_enable(&engine->execlists.tasklet);
+ clear_and_wake_up_bit(bit, lock);
+}
+
+static struct i915_request *garbage(struct intel_context *ce,
+ struct rnd_state *prng)
+{
+ struct i915_request *rq;
+ int err;
+
+ err = intel_context_pin(ce);
+ if (err)
+ return ERR_PTR(err);
+
+ prandom_bytes_state(prng,
+ ce->lrc_reg_state,
+ ce->engine->context_size -
+ LRC_STATE_PN * PAGE_SIZE);
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_unpin;
+ }
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+ return rq;
+
+err_unpin:
+ intel_context_unpin(ce);
+ return ERR_PTR(err);
+}
+
+static int __lrc_garbage(struct intel_engine_cs *engine, struct rnd_state *prng)
+{
+ struct intel_context *ce;
+ struct i915_request *hang;
+ int err = 0;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ hang = garbage(ce, prng);
+ if (IS_ERR(hang)) {
+ err = PTR_ERR(hang);
+ goto err_ce;
+ }
+
+ if (wait_for_submit(engine, hang, HZ / 2)) {
+ i915_request_put(hang);
+ err = -ETIME;
+ goto err_ce;
+ }
+
+ intel_context_set_banned(ce);
+ garbage_reset(engine, hang);
+
+ intel_engine_flush_submission(engine);
+ if (!hang->fence.error) {
+ i915_request_put(hang);
+ pr_err("%s: corrupted context was not reset\n",
+ engine->name);
+ err = -EINVAL;
+ goto err_ce;
+ }
+
+ if (i915_request_wait(hang, 0, HZ / 2) < 0) {
+ pr_err("%s: corrupted context did not recover\n",
+ engine->name);
+ i915_request_put(hang);
+ err = -EIO;
+ goto err_ce;
+ }
+ i915_request_put(hang);
+
+err_ce:
+ intel_context_put(ce);
+ return err;
+}
+
+static int live_lrc_garbage(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ /*
+ * Verify that we can recover if one context state is completely
+ * corrupted.
+ */
+
+ if (!IS_ENABLED(CONFIG_DRM_I915_SELFTEST_BROKEN))
+ return 0;
+
+ for_each_engine(engine, gt, id) {
+ I915_RND_STATE(prng);
+ int err = 0, i;
+
+ if (!intel_has_reset_engine(engine->gt))
+ continue;
+
+ intel_engine_pm_get(engine);
+ for (i = 0; i < 3; i++) {
+ err = __lrc_garbage(engine, &prng);
+ if (err)
+ break;
+ }
+ intel_engine_pm_put(engine);
+
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
static int __live_pphwsp_runtime(struct intel_engine_cs *engine)
{
struct intel_context *ce;
@@ -4845,7 +5565,9 @@ int intel_lrc_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_lrc_fixed),
SUBTEST(live_lrc_state),
SUBTEST(live_lrc_gpr),
+ SUBTEST(live_lrc_isolation),
SUBTEST(live_lrc_timestamp),
+ SUBTEST(live_lrc_garbage),
SUBTEST(live_pphwsp_runtime),
};
diff --git a/drivers/gpu/drm/i915/gt/selftest_ring_submission.c b/drivers/gpu/drm/i915/gt/selftest_ring_submission.c
new file mode 100644
index 000000000000..9995faadd7e8
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_ring_submission.c
@@ -0,0 +1,296 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include "intel_engine_pm.h"
+#include "selftests/igt_flush_test.h"
+
+static struct i915_vma *create_wally(struct intel_engine_cs *engine)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ u32 *cs;
+ int err;
+
+ obj = i915_gem_object_create_internal(engine->i915, 4096);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ vma = i915_vma_instance(obj, engine->gt->vm, NULL);
+ if (IS_ERR(vma)) {
+ i915_gem_object_put(obj);
+ return vma;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_HIGH);
+ if (err) {
+ i915_gem_object_put(obj);
+ return ERR_PTR(err);
+ }
+
+ err = i915_vma_sync(vma);
+ if (err) {
+ i915_gem_object_put(obj);
+ return ERR_PTR(err);
+ }
+
+ cs = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ if (IS_ERR(cs)) {
+ i915_gem_object_put(obj);
+ return ERR_CAST(cs);
+ }
+
+ if (INTEL_GEN(engine->i915) >= 6) {
+ *cs++ = MI_STORE_DWORD_IMM_GEN4;
+ *cs++ = 0;
+ } else if (INTEL_GEN(engine->i915) >= 4) {
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = 0;
+ } else {
+ *cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
+ }
+ *cs++ = vma->node.start + 4000;
+ *cs++ = STACK_MAGIC;
+
+ *cs++ = MI_BATCH_BUFFER_END;
+ i915_gem_object_unpin_map(obj);
+
+ vma->private = intel_context_create(engine); /* dummy residuals */
+ if (IS_ERR(vma->private)) {
+ vma = ERR_CAST(vma->private);
+ i915_gem_object_put(obj);
+ }
+
+ return vma;
+}
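
Note the marker round-trip these selftests rely on: the batch stores
STACK_MAGIC at byte offset 4000 inside its own page, and
__live_ctx_switch_wa() later reads the same object back at dword index
1000:

	u32 *result;

	result = i915_gem_object_pin_map(bb->obj, I915_MAP_WC);
	result += 1000;		/* 1000 * sizeof(u32) == byte 4000 */

	WRITE_ONCE(*result, 0);			/* arm the marker */
	/* ... trigger a user -> user context switch ... */
	if (READ_ONCE(*result) == STACK_MAGIC)
		;			/* the wa batch executed */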
+
+static int context_sync(struct intel_context *ce)
+{
+ struct i915_request *rq;
+ int err = 0;
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ if (i915_request_wait(rq, 0, HZ / 5) < 0)
+ err = -ETIME;
+ i915_request_put(rq);
+
+ return err;
+}
+
+static int new_context_sync(struct intel_engine_cs *engine)
+{
+ struct intel_context *ce;
+ int err;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ err = context_sync(ce);
+ intel_context_put(ce);
+
+ return err;
+}
+
+static int mixed_contexts_sync(struct intel_engine_cs *engine, u32 *result)
+{
+ int pass;
+ int err;
+
+ for (pass = 0; pass < 2; pass++) {
+ WRITE_ONCE(*result, 0);
+ err = context_sync(engine->kernel_context);
+ if (err || READ_ONCE(*result)) {
+ if (!err) {
+ pr_err("pass[%d] wa_bb emitted for the kernel context\n",
+ pass);
+ err = -EINVAL;
+ }
+ return err;
+ }
+
+ WRITE_ONCE(*result, 0);
+ err = new_context_sync(engine);
+ if (READ_ONCE(*result) != STACK_MAGIC) {
+ if (!err) {
+ pr_err("pass[%d] wa_bb *NOT* emitted after the kernel context\n",
+ pass);
+ err = -EINVAL;
+ }
+ return err;
+ }
+
+ WRITE_ONCE(*result, 0);
+ err = new_context_sync(engine);
+ if (READ_ONCE(*result) != STACK_MAGIC) {
+ if (!err) {
+ pr_err("pass[%d] wa_bb *NOT* emitted for the user context switch\n",
+ pass);
+ err = -EINVAL;
+ }
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int double_context_sync_00(struct intel_engine_cs *engine, u32 *result)
+{
+ struct intel_context *ce;
+ int err, i;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ for (i = 0; i < 2; i++) {
+ WRITE_ONCE(*result, 0);
+ err = context_sync(ce);
+ if (err)
+ break;
+ }
+ intel_context_put(ce);
+ if (err)
+ return err;
+
+ if (READ_ONCE(*result)) {
+ pr_err("wa_bb emitted between the same user context\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int kernel_context_sync_00(struct intel_engine_cs *engine, u32 *result)
+{
+ struct intel_context *ce;
+ int err, i;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ for (i = 0; i < 2; i++) {
+ WRITE_ONCE(*result, 0);
+ err = context_sync(ce);
+ if (err)
+ break;
+
+ err = context_sync(engine->kernel_context);
+ if (err)
+ break;
+ }
+ intel_context_put(ce);
+ if (err)
+ return err;
+
+ if (READ_ONCE(*result)) {
+ pr_err("wa_bb emitted between the same user context [with intervening kernel]\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int __live_ctx_switch_wa(struct intel_engine_cs *engine)
+{
+ struct i915_vma *bb;
+ u32 *result;
+ int err;
+
+ bb = create_wally(engine);
+ if (IS_ERR(bb))
+ return PTR_ERR(bb);
+
+ result = i915_gem_object_pin_map(bb->obj, I915_MAP_WC);
+ if (IS_ERR(result)) {
+ intel_context_put(bb->private);
+ i915_vma_unpin_and_release(&bb, 0);
+ return PTR_ERR(result);
+ }
+ result += 1000;
+
+ engine->wa_ctx.vma = bb;
+
+ err = mixed_contexts_sync(engine, result);
+ if (err)
+ goto out;
+
+ err = double_context_sync_00(engine, result);
+ if (err)
+ goto out;
+
+ err = kernel_context_sync_00(engine, result);
+ if (err)
+ goto out;
+
+out:
+ intel_context_put(engine->wa_ctx.vma->private);
+ i915_vma_unpin_and_release(&engine->wa_ctx.vma, I915_VMA_RELEASE_MAP);
+ return err;
+}
+
+static int live_ctx_switch_wa(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ /*
+ * Exercise the inter-context wa batch.
+ *
+ * Between each user context we run a wa batch, and since it may
+ * have implications for user visible state, we have to check that
+ * we do actually execute it.
+ *
+ * The trick we use is to replace the normal wa batch with a custom
+ * one that writes to a marker within it, and we can then look for
+ * that marker to confirm that the batch was run when we expect it,
+ * and, equally important, that it wasn't run when we don't!
+ */
+
+ for_each_engine(engine, gt, id) {
+ struct i915_vma *saved_wa;
+ int err;
+
+ if (!intel_engine_can_store_dword(engine))
+ continue;
+
+ if (IS_GEN_RANGE(gt->i915, 4, 5))
+ continue; /* MI_STORE_DWORD is privileged! */
+
+ saved_wa = fetch_and_zero(&engine->wa_ctx.vma);
+
+ intel_engine_pm_get(engine);
+ err = __live_ctx_switch_wa(engine);
+ intel_engine_pm_put(engine);
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+
+ engine->wa_ctx.vma = saved_wa;
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+int intel_ring_submission_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(live_ctx_switch_wa),
+ };
+
+ if (HAS_EXECLISTS(i915))
+ return 0;
+
+ return intel_gt_live_subtests(tests, &i915->gt);
+}
diff --git a/drivers/gpu/drm/i915/gt/sysfs_engines.c b/drivers/gpu/drm/i915/gt/sysfs_engines.c
new file mode 100644
index 000000000000..8f9b2f33dbaf
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/sysfs_engines.c
@@ -0,0 +1,445 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/kobject.h>
+#include <linux/sysfs.h>
+
+#include "i915_drv.h"
+#include "intel_engine.h"
+#include "intel_engine_heartbeat.h"
+#include "sysfs_engines.h"
+
+struct kobj_engine {
+ struct kobject base;
+ struct intel_engine_cs *engine;
+};
+
+static struct intel_engine_cs *kobj_to_engine(struct kobject *kobj)
+{
+ return container_of(kobj, struct kobj_engine, base)->engine;
+}
+
+static ssize_t
+name_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%s\n", kobj_to_engine(kobj)->name);
+}
+
+static struct kobj_attribute name_attr =
+__ATTR(name, 0444, name_show, NULL);
+
+static ssize_t
+class_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", kobj_to_engine(kobj)->uabi_class);
+}
+
+static struct kobj_attribute class_attr =
+__ATTR(class, 0444, class_show, NULL);
+
+static ssize_t
+inst_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", kobj_to_engine(kobj)->uabi_instance);
+}
+
+static struct kobj_attribute inst_attr =
+__ATTR(instance, 0444, inst_show, NULL);
+
+static ssize_t
+mmio_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ return sprintf(buf, "0x%x\n", kobj_to_engine(kobj)->mmio_base);
+}
+
+static struct kobj_attribute mmio_attr =
+__ATTR(mmio_base, 0444, mmio_show, NULL);
+
+static const char * const vcs_caps[] = {
+ [ilog2(I915_VIDEO_CLASS_CAPABILITY_HEVC)] = "hevc",
+ [ilog2(I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC)] = "sfc",
+};
+
+static const char * const vecs_caps[] = {
+ [ilog2(I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC)] = "sfc",
+};
+
+static ssize_t repr_trim(char *buf, ssize_t len)
+{
+ /* Trim off the trailing space and replace with a newline */
+ if (len > PAGE_SIZE)
+ len = PAGE_SIZE;
+ if (len > 0)
+ buf[len - 1] = '\n';
+
+ return len;
+}
+
+static ssize_t
+__caps_show(struct intel_engine_cs *engine,
+ u32 caps, char *buf, bool show_unknown)
+{
+ const char * const *repr;
+ int count, n;
+ ssize_t len;
+
+ BUILD_BUG_ON(!typecheck(typeof(caps), engine->uabi_capabilities));
+
+ switch (engine->class) {
+ case VIDEO_DECODE_CLASS:
+ repr = vcs_caps;
+ count = ARRAY_SIZE(vcs_caps);
+ break;
+
+ case VIDEO_ENHANCEMENT_CLASS:
+ repr = vecs_caps;
+ count = ARRAY_SIZE(vecs_caps);
+ break;
+
+ default:
+ repr = NULL;
+ count = 0;
+ break;
+ }
+ GEM_BUG_ON(count > BITS_PER_TYPE(typeof(caps)));
+
+ len = 0;
+ for_each_set_bit(n,
+ (unsigned long *)&caps,
+ show_unknown ? BITS_PER_TYPE(typeof(caps)) : count) {
+ if (n >= count || !repr[n]) {
+ if (GEM_WARN_ON(show_unknown))
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "[%x] ", n);
+ } else {
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "%s ", repr[n]);
+ }
+ if (GEM_WARN_ON(len >= PAGE_SIZE))
+ break;
+ }
+ return repr_trim(buf, len);
+}
+
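+/*
+ * Example (illustrative): for a video engine with both capability bits
+ * set, the loop above produces "hevc sfc " and repr_trim() converts the
+ * trailing space, so reading the attribute (e.g.
+ * /sys/class/drm/card0/engine/vcs0/capabilities) returns "hevc sfc\n".
+ */
+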
+static ssize_t
+caps_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ struct intel_engine_cs *engine = kobj_to_engine(kobj);
+
+ return __caps_show(engine, engine->uabi_capabilities, buf, true);
+}
+
+static struct kobj_attribute caps_attr =
+__ATTR(capabilities, 0444, caps_show, NULL);
+
+static ssize_t
+all_caps_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ return __caps_show(kobj_to_engine(kobj), -1, buf, false);
+}
+
+static struct kobj_attribute all_caps_attr =
+__ATTR(known_capabilities, 0444, all_caps_show, NULL);
+
+static ssize_t
+max_spin_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct intel_engine_cs *engine = kobj_to_engine(kobj);
+ unsigned long long duration;
+ int err;
+
+ /*
+ * When waiting for a request, if it is currently being executed
+ * on the GPU, we busywait for a short while before sleeping. The
+ * premise is that most requests are short, and if it is already
+ * executing then there is a good chance that it will complete
+ * before we can set up the interrupt handler and go to sleep.
+ * We try to offset the cost of going to sleep by first spinning
+ * on the request -- if it completes in less time than it would take
+ * to go to sleep, process the interrupt and return to the client,
+ * then we have saved the client some latency, albeit at the cost
+ * of spinning on an expensive CPU core.
+ *
+ * While we try to avoid waiting at all for a request that is unlikely
+ * to complete, deciding how long it is worth spinning for is an
+ * arbitrary decision: trading off power vs latency.
+ */
+
+ err = kstrtoull(buf, 0, &duration);
+ if (err)
+ return err;
+
+ if (duration > jiffies_to_nsecs(2))
+ return -EINVAL;
+
+ WRITE_ONCE(engine->props.max_busywait_duration_ns, duration);
+
+ return count;
+}
+
+static ssize_t
+max_spin_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ struct intel_engine_cs *engine = kobj_to_engine(kobj);
+
+ return sprintf(buf, "%lu\n", engine->props.max_busywait_duration_ns);
+}
+
+static struct kobj_attribute max_spin_attr =
+__ATTR(max_busywait_duration_ns, 0644, max_spin_show, max_spin_store);
+
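+/*
+ * Illustrative sketch of the spin-then-sleep strategy described above
+ * (request_completed() and wait_for_interrupt() are hypothetical, not
+ * the driver's actual wait loop):
+ *
+ *   u64 deadline = local_clock() + READ_ONCE(props->max_busywait_duration_ns);
+ *   while (!request_completed(rq)) {
+ *           if (local_clock() > deadline) {
+ *                   wait_for_interrupt(rq);
+ *                   break;
+ *           }
+ *           cpu_relax();
+ *   }
+ */
+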
+static ssize_t
+timeslice_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct intel_engine_cs *engine = kobj_to_engine(kobj);
+ unsigned long long duration;
+ int err;
+
+ /*
+ * Execlists uses a scheduling quantum (a timeslice) to alternate
+ * execution between ready-to-run contexts of equal priority. This
+ * ensures that all users (though only if they are of equal importance)
+ * have the opportunity to run and prevents livelocks where contexts
+ * may have implicit ordering due to userspace semaphores.
+ */
+
+ err = kstrtoull(buf, 0, &duration);
+ if (err)
+ return err;
+
+ if (duration > jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT))
+ return -EINVAL;
+
+ WRITE_ONCE(engine->props.timeslice_duration_ms, duration);
+
+ if (execlists_active(&engine->execlists))
+ set_timer_ms(&engine->execlists.timer, duration);
+
+ return count;
+}
+
+static ssize_t
+timeslice_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ struct intel_engine_cs *engine = kobj_to_engine(kobj);
+
+ return sprintf(buf, "%lu\n", engine->props.timeslice_duration_ms);
+}
+
+static struct kobj_attribute timeslice_duration_attr =
+__ATTR(timeslice_duration_ms, 0644, timeslice_show, timeslice_store);
+
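+/*
+ * Note that timeslice_store() applies a new duration immediately: if
+ * the engine is mid-timeslice, the execlists timer is re-armed with
+ * the new value rather than left to expire at the old quantum.
+ */
+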
+static ssize_t
+stop_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct intel_engine_cs *engine = kobj_to_engine(kobj);
+ unsigned long long duration;
+ int err;
+
+ /*
+ * Allowing ourselves to sleep before a GPU reset after disabling
+ * submission, even for a few milliseconds, gives an innocent context
+ * the opportunity to clear the GPU before the reset occurs. However,
+ * how long to sleep depends on the typical non-preemptible duration
+ * (a similar problem to determining the ideal preempt-reset timeout
+ * or even the heartbeat interval).
+ */
+
+ err = kstrtoull(buf, 0, &duration);
+ if (err)
+ return err;
+
+ if (duration > jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT))
+ return -EINVAL;
+
+ WRITE_ONCE(engine->props.stop_timeout_ms, duration);
+ return count;
+}
+
+static ssize_t
+stop_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ struct intel_engine_cs *engine = kobj_to_engine(kobj);
+
+ return sprintf(buf, "%lu\n", engine->props.stop_timeout_ms);
+}
+
+static struct kobj_attribute stop_timeout_attr =
+__ATTR(stop_timeout_ms, 0644, stop_show, stop_store);
+
+static ssize_t
+preempt_timeout_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct intel_engine_cs *engine = kobj_to_engine(kobj);
+ unsigned long long timeout;
+ int err;
+
+ /*
+ * After initialising a preemption request, we give the current
+ * resident a small amount of time to vacate the GPU. The preemption
+ * request is for a higher priority context and should be immediate to
+ * maintain high quality of service (and avoid priority inversion).
+ * However, the preemption granularity of the GPU can be quite coarse
+ * and so we need a compromise.
+ */
+
+ err = kstrtoull(buf, 0, &timeout);
+ if (err)
+ return err;
+
+ if (timeout > jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT))
+ return -EINVAL;
+
+ WRITE_ONCE(engine->props.preempt_timeout_ms, timeout);
+
+ if (READ_ONCE(engine->execlists.pending[0]))
+ set_timer_ms(&engine->execlists.preempt, timeout);
+
+ return count;
+}
+
+static ssize_t
+preempt_timeout_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ struct intel_engine_cs *engine = kobj_to_engine(kobj);
+
+ return sprintf(buf, "%lu\n", engine->props.preempt_timeout_ms);
+}
+
+static struct kobj_attribute preempt_timeout_attr =
+__ATTR(preempt_timeout_ms, 0644, preempt_timeout_show, preempt_timeout_store);
+
+static ssize_t
+heartbeat_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct intel_engine_cs *engine = kobj_to_engine(kobj);
+ unsigned long long delay;
+ int err;
+
+ /*
+ * We monitor the health of the system via periodic heartbeat pulses.
+ * The pulses also provide the opportunity to perform garbage
+ * collection. However, we interpret an incomplete pulse (a missed
+ * heartbeat) as an indication that the system is no longer responsive,
+ * i.e. hung, and perform an engine or full GPU reset. Given that the
+ * preemption granularity can be very coarse on a system, the optimal
+ * value for any workload is unknowable!
+ */
+
+ err = kstrtoull(buf, 0, &delay);
+ if (err)
+ return err;
+
+ if (delay >= jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT))
+ return -EINVAL;
+
+ err = intel_engine_set_heartbeat(engine, delay);
+ if (err)
+ return err;
+
+ return count;
+}
+
+static ssize_t
+heartbeat_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ struct intel_engine_cs *engine = kobj_to_engine(kobj);
+
+ return sprintf(buf, "%lu\n", engine->props.heartbeat_interval_ms);
+}
+
+static struct kobj_attribute heartbeat_interval_attr =
+__ATTR(heartbeat_interval_ms, 0644, heartbeat_show, heartbeat_store);
+
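+/*
+ * Unlike the bare WRITE_ONCE() stores above, heartbeat_store() goes
+ * through intel_engine_set_heartbeat(): changing the interval also has
+ * to reschedule (or park) the heartbeat worker, which may fail, hence
+ * the error return.
+ */
+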
+static void kobj_engine_release(struct kobject *kobj)
+{
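+ /* struct kobj_engine embeds its kobject as the first member */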
+ kfree(kobj);
+}
+
+static struct kobj_type kobj_engine_type = {
+ .release = kobj_engine_release,
+ .sysfs_ops = &kobj_sysfs_ops
+};
+
+static struct kobject *
+kobj_engine(struct kobject *dir, struct intel_engine_cs *engine)
+{
+ struct kobj_engine *ke;
+
+ ke = kzalloc(sizeof(*ke), GFP_KERNEL);
+ if (!ke)
+ return NULL;
+
+ kobject_init(&ke->base, &kobj_engine_type);
+ ke->engine = engine;
+
+ if (kobject_add(&ke->base, dir, "%s", engine->name)) {
+ kobject_put(&ke->base);
+ return NULL;
+ }
+
+ /* xfer ownership to sysfs tree */
+ return &ke->base;
+}
+
+void intel_engines_add_sysfs(struct drm_i915_private *i915)
+{
+ static const struct attribute *files[] = {
+ &name_attr.attr,
+ &class_attr.attr,
+ &inst_attr.attr,
+ &mmio_attr.attr,
+ &caps_attr.attr,
+ &all_caps_attr.attr,
+ &max_spin_attr.attr,
+ &stop_timeout_attr.attr,
+#if CONFIG_DRM_I915_HEARTBEAT_INTERVAL
+ &heartbeat_interval_attr.attr,
+#endif
+ NULL
+ };
+
+ struct device *kdev = i915->drm.primary->kdev;
+ struct intel_engine_cs *engine;
+ struct kobject *dir;
+
+ dir = kobject_create_and_add("engine", &kdev->kobj);
+ if (!dir)
+ return;
+
+ for_each_uabi_engine(engine, i915) {
+ struct kobject *kobj;
+
+ kobj = kobj_engine(dir, engine);
+ if (!kobj)
+ goto err_engine;
+
+ if (sysfs_create_files(kobj, files))
+ goto err_object;
+
+ if (intel_engine_has_timeslices(engine) &&
+ sysfs_create_file(kobj, &timeslice_duration_attr.attr))
+ goto err_engine;
+
+ if (intel_engine_has_preempt_reset(engine) &&
+ sysfs_create_file(kobj, &preempt_timeout_attr.attr))
+ goto err_engine;
+
+ if (0) {
+err_object:
+ kobject_put(kobj);
+err_engine:
+ dev_err(kdev, "Failed to add sysfs engine '%s'\n",
+ engine->name);
+ break;
+ }
+ }
+}
diff --git a/drivers/gpu/drm/i915/gt/sysfs_engines.h b/drivers/gpu/drm/i915/gt/sysfs_engines.h
new file mode 100644
index 000000000000..9546fffe03a7
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/sysfs_engines.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_ENGINE_SYSFS_H
+#define INTEL_ENGINE_SYSFS_H
+
+struct drm_i915_private;
+
+void intel_engines_add_sysfs(struct drm_i915_private *i915);
+
+#endif /* INTEL_ENGINE_SYSFS_H */
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 1beaa77f9bb6..fe7778c28d2d 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -456,9 +456,7 @@ static void guc_reset_cancel(struct intel_engine_cs *engine)
/* Mark all executing requests as skipped. */
list_for_each_entry(rq, &engine->active.requests, sched.link) {
- if (!i915_request_signaled(rq))
- dma_fence_set_error(&rq->fence, -EIO);
-
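+ /* set_error_once() skips already-signalled requests, subsuming the check removed above */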
+ i915_request_set_error_once(rq, -EIO);
i915_request_mark_complete(rq);
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
index 5434c07aefa1..18c755203688 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
@@ -43,7 +43,7 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
* features.
*/
#define INTEL_UC_FIRMWARE_DEFS(fw_def, guc_def, huc_def) \
- fw_def(TIGERLAKE, 0, guc_def(tgl, 35, 2, 0), huc_def(tgl, 7, 0, 3)) \
+ fw_def(TIGERLAKE, 0, guc_def(tgl, 35, 2, 0), huc_def(tgl, 7, 0, 12)) \
fw_def(ELKHARTLAKE, 0, guc_def(ehl, 33, 0, 4), huc_def(ehl, 9, 0, 0)) \
fw_def(ICELAKE, 0, guc_def(icl, 33, 0, 0), huc_def(icl, 9, 0, 0)) \
fw_def(COFFEELAKE, 5, guc_def(cml, 33, 0, 0), huc_def(cml, 4, 0, 0)) \
diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c
index 771420453f82..8b13f091cee2 100644
--- a/drivers/gpu/drm/i915/gvt/aperture_gm.c
+++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c
@@ -41,7 +41,7 @@
static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
{
struct intel_gvt *gvt = vgpu->gvt;
- struct drm_i915_private *dev_priv = gvt->dev_priv;
+ struct intel_gt *gt = gvt->gt;
unsigned int flags;
u64 start, end, size;
struct drm_mm_node *node;
@@ -61,14 +61,14 @@ static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
flags = PIN_MAPPABLE;
}
- mutex_lock(&dev_priv->ggtt.vm.mutex);
- mmio_hw_access_pre(dev_priv);
- ret = i915_gem_gtt_insert(&dev_priv->ggtt.vm, node,
+ mutex_lock(&gt->ggtt->vm.mutex);
+ mmio_hw_access_pre(gt);
+ ret = i915_gem_gtt_insert(&gt->ggtt->vm, node,
size, I915_GTT_PAGE_SIZE,
I915_COLOR_UNEVICTABLE,
start, end, flags);
- mmio_hw_access_post(dev_priv);
- mutex_unlock(&dev_priv->ggtt.vm.mutex);
+ mmio_hw_access_post(gt);
+ mutex_unlock(&gt->ggtt->vm.mutex);
if (ret)
gvt_err("fail to alloc %s gm space from host\n",
high_gm ? "high" : "low");
@@ -79,7 +79,7 @@ static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
static int alloc_vgpu_gm(struct intel_vgpu *vgpu)
{
struct intel_gvt *gvt = vgpu->gvt;
- struct drm_i915_private *dev_priv = gvt->dev_priv;
+ struct intel_gt *gt = gvt->gt;
int ret;
ret = alloc_gm(vgpu, false);
@@ -98,20 +98,21 @@ static int alloc_vgpu_gm(struct intel_vgpu *vgpu)
return 0;
out_free_aperture:
- mutex_lock(&dev_priv->ggtt.vm.mutex);
+ mutex_lock(&gt->ggtt->vm.mutex);
drm_mm_remove_node(&vgpu->gm.low_gm_node);
- mutex_unlock(&dev_priv->ggtt.vm.mutex);
+ mutex_unlock(&gt->ggtt->vm.mutex);
return ret;
}
static void free_vgpu_gm(struct intel_vgpu *vgpu)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct intel_gt *gt = gvt->gt;
- mutex_lock(&dev_priv->ggtt.vm.mutex);
+ mutex_lock(&gt->ggtt->vm.mutex);
drm_mm_remove_node(&vgpu->gm.low_gm_node);
drm_mm_remove_node(&vgpu->gm.high_gm_node);
- mutex_unlock(&dev_priv->ggtt.vm.mutex);
+ mutex_unlock(&gt->ggtt->vm.mutex);
}
/**
@@ -128,28 +129,29 @@ void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
u32 fence, u64 value)
{
struct intel_gvt *gvt = vgpu->gvt;
- struct drm_i915_private *dev_priv = gvt->dev_priv;
+ struct drm_i915_private *i915 = gvt->gt->i915;
+ struct intel_uncore *uncore = gvt->gt->uncore;
struct i915_fence_reg *reg;
i915_reg_t fence_reg_lo, fence_reg_hi;
- assert_rpm_wakelock_held(&dev_priv->runtime_pm);
+ assert_rpm_wakelock_held(uncore->rpm);
- if (WARN_ON(fence >= vgpu_fence_sz(vgpu)))
+ if (drm_WARN_ON(&i915->drm, fence >= vgpu_fence_sz(vgpu)))
return;
reg = vgpu->fence.regs[fence];
- if (WARN_ON(!reg))
+ if (drm_WARN_ON(&i915->drm, !reg))
return;
fence_reg_lo = FENCE_REG_GEN6_LO(reg->id);
fence_reg_hi = FENCE_REG_GEN6_HI(reg->id);
- I915_WRITE(fence_reg_lo, 0);
- POSTING_READ(fence_reg_lo);
+ intel_uncore_write(uncore, fence_reg_lo, 0);
+ intel_uncore_posting_read(uncore, fence_reg_lo);
- I915_WRITE(fence_reg_hi, upper_32_bits(value));
- I915_WRITE(fence_reg_lo, lower_32_bits(value));
- POSTING_READ(fence_reg_lo);
+ intel_uncore_write(uncore, fence_reg_hi, upper_32_bits(value));
+ intel_uncore_write(uncore, fence_reg_lo, lower_32_bits(value));
+ intel_uncore_posting_read(uncore, fence_reg_lo);
}
static void _clear_vgpu_fence(struct intel_vgpu *vgpu)
@@ -163,42 +165,43 @@ static void _clear_vgpu_fence(struct intel_vgpu *vgpu)
static void free_vgpu_fence(struct intel_vgpu *vgpu)
{
struct intel_gvt *gvt = vgpu->gvt;
- struct drm_i915_private *dev_priv = gvt->dev_priv;
+ struct intel_uncore *uncore = gvt->gt->uncore;
struct i915_fence_reg *reg;
+ intel_wakeref_t wakeref;
u32 i;
- if (WARN_ON(!vgpu_fence_sz(vgpu)))
+ if (drm_WARN_ON(&gvt->gt->i915->drm, !vgpu_fence_sz(vgpu)))
return;
- intel_runtime_pm_get(&dev_priv->runtime_pm);
+ wakeref = intel_runtime_pm_get(uncore->rpm);
- mutex_lock(&dev_priv->ggtt.vm.mutex);
+ mutex_lock(&gvt->gt->ggtt->vm.mutex);
_clear_vgpu_fence(vgpu);
for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
reg = vgpu->fence.regs[i];
i915_unreserve_fence(reg);
vgpu->fence.regs[i] = NULL;
}
- mutex_unlock(&dev_priv->ggtt.vm.mutex);
+ mutex_unlock(&gvt->gt->ggtt->vm.mutex);
- intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
+ intel_runtime_pm_put(uncore->rpm, wakeref);
}
static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
{
struct intel_gvt *gvt = vgpu->gvt;
- struct drm_i915_private *dev_priv = gvt->dev_priv;
- struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
+ struct intel_uncore *uncore = gvt->gt->uncore;
struct i915_fence_reg *reg;
+ intel_wakeref_t wakeref;
int i;
- intel_runtime_pm_get(rpm);
+ wakeref = intel_runtime_pm_get(uncore->rpm);
/* Request fences from host */
- mutex_lock(&dev_priv->ggtt.vm.mutex);
+ mutex_lock(&gvt->gt->ggtt->vm.mutex);
for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
- reg = i915_reserve_fence(&dev_priv->ggtt);
+ reg = i915_reserve_fence(gvt->gt->ggtt);
if (IS_ERR(reg))
goto out_free_fence;
@@ -207,9 +210,10 @@ static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
_clear_vgpu_fence(vgpu);
- mutex_unlock(&dev_priv->ggtt.vm.mutex);
- intel_runtime_pm_put_unchecked(rpm);
+ mutex_unlock(&gvt->gt->ggtt->vm.mutex);
+ intel_runtime_pm_put(uncore->rpm, wakeref);
return 0;
+
out_free_fence:
gvt_vgpu_err("Failed to alloc fences\n");
/* Return fences to host, if fail */
@@ -220,8 +224,8 @@ out_free_fence:
i915_unreserve_fence(reg);
vgpu->fence.regs[i] = NULL;
}
- mutex_unlock(&dev_priv->ggtt.vm.mutex);
- intel_runtime_pm_put_unchecked(rpm);
+ mutex_unlock(&gvt->gt->ggtt->vm.mutex);
+ intel_runtime_pm_put(uncore->rpm, wakeref);
return -ENOSPC;
}
@@ -315,11 +319,11 @@ void intel_vgpu_free_resource(struct intel_vgpu *vgpu)
*/
void intel_vgpu_reset_resource(struct intel_vgpu *vgpu)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct intel_gvt *gvt = vgpu->gvt;
+ intel_wakeref_t wakeref;
- intel_runtime_pm_get(&dev_priv->runtime_pm);
- _clear_vgpu_fence(vgpu);
- intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
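+ /* with_intel_runtime_pm() scopes the wakeref around its body, pairing the get with a put automatically */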
+ with_intel_runtime_pm(gvt->gt->uncore->rpm, wakeref)
+ _clear_vgpu_fence(vgpu);
}
/**
diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c
index 19cf1bbe059d..072725a448db 100644
--- a/drivers/gpu/drm/i915/gvt/cfg_space.c
+++ b/drivers/gpu/drm/i915/gvt/cfg_space.c
@@ -106,10 +106,13 @@ static void vgpu_pci_cfg_mem_write(struct intel_vgpu *vgpu, unsigned int off,
int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
- if (WARN_ON(bytes > 4))
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
+
+ if (drm_WARN_ON(&i915->drm, bytes > 4))
return -EINVAL;
- if (WARN_ON(offset + bytes > vgpu->gvt->device_info.cfg_space_size))
+ if (drm_WARN_ON(&i915->drm,
+ offset + bytes > vgpu->gvt->device_info.cfg_space_size))
return -EINVAL;
memcpy(p_data, vgpu_cfg_space(vgpu) + offset, bytes);
@@ -297,34 +300,36 @@ static int emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset,
int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
int ret;
- if (WARN_ON(bytes > 4))
+ if (drm_WARN_ON(&i915->drm, bytes > 4))
return -EINVAL;
- if (WARN_ON(offset + bytes > vgpu->gvt->device_info.cfg_space_size))
+ if (drm_WARN_ON(&i915->drm,
+ offset + bytes > vgpu->gvt->device_info.cfg_space_size))
return -EINVAL;
/* First check if it's PCI_COMMAND */
if (IS_ALIGNED(offset, 2) && offset == PCI_COMMAND) {
- if (WARN_ON(bytes > 2))
+ if (drm_WARN_ON(&i915->drm, bytes > 2))
return -EINVAL;
return emulate_pci_command_write(vgpu, offset, p_data, bytes);
}
switch (rounddown(offset, 4)) {
case PCI_ROM_ADDRESS:
- if (WARN_ON(!IS_ALIGNED(offset, 4)))
+ if (drm_WARN_ON(&i915->drm, !IS_ALIGNED(offset, 4)))
return -EINVAL;
return emulate_pci_rom_bar_write(vgpu, offset, p_data, bytes);
case PCI_BASE_ADDRESS_0 ... PCI_BASE_ADDRESS_5:
- if (WARN_ON(!IS_ALIGNED(offset, 4)))
+ if (drm_WARN_ON(&i915->drm, !IS_ALIGNED(offset, 4)))
return -EINVAL;
return emulate_pci_bar_write(vgpu, offset, p_data, bytes);
case INTEL_GVT_PCI_SWSCI:
- if (WARN_ON(!IS_ALIGNED(offset, 4)))
+ if (drm_WARN_ON(&i915->drm, !IS_ALIGNED(offset, 4)))
return -EINVAL;
ret = intel_vgpu_emulate_opregion_request(vgpu, *(u32 *)p_data);
if (ret)
@@ -332,7 +337,7 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
break;
case INTEL_GVT_PCI_OPREGION:
- if (WARN_ON(!IS_ALIGNED(offset, 4)))
+ if (drm_WARN_ON(&i915->drm, !IS_ALIGNED(offset, 4)))
return -EINVAL;
ret = intel_vgpu_opregion_base_write_handler(vgpu,
*(u32 *)p_data);
@@ -391,9 +396,9 @@ void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4);
vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].size =
- pci_resource_len(gvt->dev_priv->drm.pdev, 0);
+ pci_resource_len(gvt->gt->i915->drm.pdev, 0);
vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].size =
- pci_resource_len(gvt->dev_priv->drm.pdev, 2);
+ pci_resource_len(gvt->gt->i915->drm.pdev, 2);
memset(vgpu_cfg_space(vgpu) + PCI_ROM_ADDRESS, 0, 4);
}
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 21a176cd8acc..9e065ad0658f 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -462,7 +462,7 @@ enum {
struct parser_exec_state {
struct intel_vgpu *vgpu;
- int ring_id;
+ const struct intel_engine_cs *engine;
int buf_type;
@@ -635,39 +635,42 @@ static const struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = {
},
};
-static inline u32 get_opcode(u32 cmd, int ring_id)
+static inline u32 get_opcode(u32 cmd, const struct intel_engine_cs *engine)
{
const struct decode_info *d_info;
- d_info = ring_decode_info[ring_id][CMD_TYPE(cmd)];
+ d_info = ring_decode_info[engine->id][CMD_TYPE(cmd)];
if (d_info == NULL)
return INVALID_OP;
return cmd >> (32 - d_info->op_len);
}
-static inline const struct cmd_info *find_cmd_entry(struct intel_gvt *gvt,
- unsigned int opcode, int ring_id)
+static inline const struct cmd_info *
+find_cmd_entry(struct intel_gvt *gvt, unsigned int opcode,
+ const struct intel_engine_cs *engine)
{
struct cmd_entry *e;
hash_for_each_possible(gvt->cmd_table, e, hlist, opcode) {
- if (opcode == e->info->opcode && e->info->rings & BIT(ring_id))
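+ /* engine->mask == BIT(engine->id), matching the old BIT(ring_id) test */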
+ if (opcode == e->info->opcode &&
+ e->info->rings & engine->mask)
return e->info;
}
return NULL;
}
-static inline const struct cmd_info *get_cmd_info(struct intel_gvt *gvt,
- u32 cmd, int ring_id)
+static inline const struct cmd_info *
+get_cmd_info(struct intel_gvt *gvt, u32 cmd,
+ const struct intel_engine_cs *engine)
{
u32 opcode;
- opcode = get_opcode(cmd, ring_id);
+ opcode = get_opcode(cmd, engine);
if (opcode == INVALID_OP)
return NULL;
- return find_cmd_entry(gvt, opcode, ring_id);
+ return find_cmd_entry(gvt, opcode, engine);
}
static inline u32 sub_op_val(u32 cmd, u32 hi, u32 low)
@@ -675,12 +678,12 @@ static inline u32 sub_op_val(u32 cmd, u32 hi, u32 low)
return (cmd >> low) & ((1U << (hi - low + 1)) - 1);
}
-static inline void print_opcode(u32 cmd, int ring_id)
+static inline void print_opcode(u32 cmd, const struct intel_engine_cs *engine)
{
const struct decode_info *d_info;
int i;
- d_info = ring_decode_info[ring_id][CMD_TYPE(cmd)];
+ d_info = ring_decode_info[engine->id][CMD_TYPE(cmd)];
if (d_info == NULL)
return;
@@ -709,10 +712,11 @@ static void parser_exec_state_dump(struct parser_exec_state *s)
int cnt = 0;
int i;
- gvt_dbg_cmd(" vgpu%d RING%d: ring_start(%08lx) ring_end(%08lx)"
- " ring_head(%08lx) ring_tail(%08lx)\n", s->vgpu->id,
- s->ring_id, s->ring_start, s->ring_start + s->ring_size,
- s->ring_head, s->ring_tail);
+ gvt_dbg_cmd(" vgpu%d RING%s: ring_start(%08lx) ring_end(%08lx)"
+ " ring_head(%08lx) ring_tail(%08lx)\n",
+ s->vgpu->id, s->engine->name,
+ s->ring_start, s->ring_start + s->ring_size,
+ s->ring_head, s->ring_tail);
gvt_dbg_cmd(" %s %s ip_gma(%08lx) ",
s->buf_type == RING_BUFFER_INSTRUCTION ?
@@ -729,7 +733,7 @@ static void parser_exec_state_dump(struct parser_exec_state *s)
s->ip_va, cmd_val(s, 0), cmd_val(s, 1),
cmd_val(s, 2), cmd_val(s, 3));
- print_opcode(cmd_val(s, 0), s->ring_id);
+ print_opcode(cmd_val(s, 0), s->engine);
s->ip_va = (u32 *)((((u64)s->ip_va) >> 12) << 12);
@@ -840,7 +844,6 @@ static int force_nonpriv_reg_handler(struct parser_exec_state *s,
unsigned int data;
u32 ring_base;
u32 nopid;
- struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
if (!strcmp(cmd, "lri"))
data = cmd_val(s, index + 1);
@@ -850,7 +853,7 @@ static int force_nonpriv_reg_handler(struct parser_exec_state *s,
return -EINVAL;
}
- ring_base = dev_priv->engine[s->ring_id]->mmio_base;
+ ring_base = s->engine->mmio_base;
nopid = i915_mmio_reg_offset(RING_NOPID(ring_base));
if (!intel_gvt_in_force_nonpriv_whitelist(gvt, data) &&
@@ -926,9 +929,9 @@ static int cmd_reg_handler(struct parser_exec_state *s,
* update reg values in it into vregs, so LRIs in workload with
* inhibit context will restore with correct values
*/
- if (IS_GEN(gvt->dev_priv, 9) &&
- intel_gvt_mmio_is_in_ctx(gvt, offset) &&
- !strncmp(cmd, "lri", 3)) {
+ if (IS_GEN(s->engine->i915, 9) &&
+ intel_gvt_mmio_is_in_ctx(gvt, offset) &&
+ !strncmp(cmd, "lri", 3)) {
intel_gvt_hypervisor_read_gpa(s->vgpu,
s->workload->ring_context_gpa + 12, &ctx_sr_ctl, 4);
/* check inhibit context */
@@ -964,7 +967,6 @@ static int cmd_handler_lri(struct parser_exec_state *s)
{
int i, ret = 0;
int cmd_len = cmd_length(s);
- struct intel_gvt *gvt = s->vgpu->gvt;
u32 valid_len = CMD_LEN(1);
/*
@@ -979,8 +981,8 @@ static int cmd_handler_lri(struct parser_exec_state *s)
}
for (i = 1; i < cmd_len; i += 2) {
- if (IS_BROADWELL(gvt->dev_priv) && s->ring_id != RCS0) {
- if (s->ring_id == BCS0 &&
+ if (IS_BROADWELL(s->engine->i915) && s->engine->id != RCS0) {
+ if (s->engine->id == BCS0 &&
cmd_reg(s, i) == i915_mmio_reg_offset(DERRMR))
ret |= 0;
else
@@ -1001,9 +1003,9 @@ static int cmd_handler_lrr(struct parser_exec_state *s)
int cmd_len = cmd_length(s);
for (i = 1; i < cmd_len; i += 2) {
- if (IS_BROADWELL(s->vgpu->gvt->dev_priv))
+ if (IS_BROADWELL(s->engine->i915))
ret |= ((cmd_reg_inhibit(s, i) ||
- (cmd_reg_inhibit(s, i + 1)))) ?
+ (cmd_reg_inhibit(s, i + 1)))) ?
-EBADRQC : 0;
if (ret)
break;
@@ -1029,7 +1031,7 @@ static int cmd_handler_lrm(struct parser_exec_state *s)
int cmd_len = cmd_length(s);
for (i = 1; i < cmd_len;) {
- if (IS_BROADWELL(gvt->dev_priv))
+ if (IS_BROADWELL(s->engine->i915))
ret |= (cmd_reg_inhibit(s, i)) ? -EBADRQC : 0;
if (ret)
break;
@@ -1141,7 +1143,7 @@ static int cmd_handler_pipe_control(struct parser_exec_state *s)
if (ret)
return ret;
if (index_mode) {
- hws_pga = s->vgpu->hws_pga[s->ring_id];
+ hws_pga = s->vgpu->hws_pga[s->engine->id];
gma = hws_pga + gma;
patch_value(s, cmd_ptr(s, 2), gma);
val = cmd_val(s, 1) & (~(1 << 21));
@@ -1155,15 +1157,15 @@ static int cmd_handler_pipe_control(struct parser_exec_state *s)
return ret;
if (cmd_val(s, 1) & PIPE_CONTROL_NOTIFY)
- set_bit(cmd_interrupt_events[s->ring_id].pipe_control_notify,
- s->workload->pending_events);
+ set_bit(cmd_interrupt_events[s->engine->id].pipe_control_notify,
+ s->workload->pending_events);
return 0;
}
static int cmd_handler_mi_user_interrupt(struct parser_exec_state *s)
{
- set_bit(cmd_interrupt_events[s->ring_id].mi_user_interrupt,
- s->workload->pending_events);
+ set_bit(cmd_interrupt_events[s->engine->id].mi_user_interrupt,
+ s->workload->pending_events);
patch_value(s, cmd_ptr(s, 0), MI_NOOP);
return 0;
}
@@ -1213,7 +1215,7 @@ struct plane_code_mapping {
static int gen8_decode_mi_display_flip(struct parser_exec_state *s,
struct mi_display_flip_command_info *info)
{
- struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
+ struct drm_i915_private *dev_priv = s->engine->i915;
struct plane_code_mapping gen8_plane_code[] = {
[0] = {PIPE_A, PLANE_A, PRIMARY_A_FLIP_DONE},
[1] = {PIPE_B, PLANE_A, PRIMARY_B_FLIP_DONE},
@@ -1230,7 +1232,7 @@ static int gen8_decode_mi_display_flip(struct parser_exec_state *s,
dword2 = cmd_val(s, 2);
v = (dword0 & GENMASK(21, 19)) >> 19;
- if (WARN_ON(v >= ARRAY_SIZE(gen8_plane_code)))
+ if (drm_WARN_ON(&dev_priv->drm, v >= ARRAY_SIZE(gen8_plane_code)))
return -EBADRQC;
info->pipe = gen8_plane_code[v].pipe;
@@ -1250,7 +1252,7 @@ static int gen8_decode_mi_display_flip(struct parser_exec_state *s,
info->stride_reg = SPRSTRIDE(info->pipe);
info->surf_reg = SPRSURF(info->pipe);
} else {
- WARN_ON(1);
+ drm_WARN_ON(&dev_priv->drm, 1);
return -EBADRQC;
}
return 0;
@@ -1259,7 +1261,7 @@ static int gen8_decode_mi_display_flip(struct parser_exec_state *s,
static int skl_decode_mi_display_flip(struct parser_exec_state *s,
struct mi_display_flip_command_info *info)
{
- struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
+ struct drm_i915_private *dev_priv = s->engine->i915;
struct intel_vgpu *vgpu = s->vgpu;
u32 dword0 = cmd_val(s, 0);
u32 dword1 = cmd_val(s, 1);
@@ -1318,13 +1320,12 @@ static int skl_decode_mi_display_flip(struct parser_exec_state *s,
static int gen8_check_mi_display_flip(struct parser_exec_state *s,
struct mi_display_flip_command_info *info)
{
- struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
u32 stride, tile;
if (!info->async_flip)
return 0;
- if (INTEL_GEN(dev_priv) >= 9) {
+ if (INTEL_GEN(s->engine->i915) >= 9) {
stride = vgpu_vreg_t(s->vgpu, info->stride_reg) & GENMASK(9, 0);
tile = (vgpu_vreg_t(s->vgpu, info->ctrl_reg) &
GENMASK(12, 10)) >> 10;
@@ -1347,7 +1348,7 @@ static int gen8_update_plane_mmio_from_mi_display_flip(
struct parser_exec_state *s,
struct mi_display_flip_command_info *info)
{
- struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
+ struct drm_i915_private *dev_priv = s->engine->i915;
struct intel_vgpu *vgpu = s->vgpu;
set_mask_bits(&vgpu_vreg_t(vgpu, info->surf_reg), GENMASK(31, 12),
@@ -1378,11 +1379,9 @@ static int gen8_update_plane_mmio_from_mi_display_flip(
static int decode_mi_display_flip(struct parser_exec_state *s,
struct mi_display_flip_command_info *info)
{
- struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
-
- if (IS_BROADWELL(dev_priv))
+ if (IS_BROADWELL(s->engine->i915))
return gen8_decode_mi_display_flip(s, info);
- if (INTEL_GEN(dev_priv) >= 9)
+ if (INTEL_GEN(s->engine->i915) >= 9)
return skl_decode_mi_display_flip(s, info);
return -ENODEV;
@@ -1667,7 +1666,7 @@ static int cmd_handler_mi_flush_dw(struct parser_exec_state *s)
if (ret)
return ret;
if (index_mode) {
- hws_pga = s->vgpu->hws_pga[s->ring_id];
+ hws_pga = s->vgpu->hws_pga[s->engine->id];
gma = hws_pga + gma;
patch_value(s, cmd_ptr(s, 1), gma);
val = cmd_val(s, 0) & (~(1 << 21));
@@ -1676,8 +1675,8 @@ static int cmd_handler_mi_flush_dw(struct parser_exec_state *s)
}
/* Check notify bit */
if ((cmd_val(s, 0) & (1 << 8)))
- set_bit(cmd_interrupt_events[s->ring_id].mi_flush_dw,
- s->workload->pending_events);
+ set_bit(cmd_interrupt_events[s->engine->id].mi_flush_dw,
+ s->workload->pending_events);
return ret;
}
@@ -1725,12 +1724,18 @@ static int copy_gma_to_hva(struct intel_vgpu *vgpu, struct intel_vgpu_mm *mm,
static int batch_buffer_needs_scan(struct parser_exec_state *s)
{
/* Decide privilege based on address space */
- if (cmd_val(s, 0) & (1 << 8) &&
- !(s->vgpu->scan_nonprivbb & (1 << s->ring_id)))
+ if (cmd_val(s, 0) & BIT(8) &&
+ !(s->vgpu->scan_nonprivbb & s->engine->mask))
return 0;
+
return 1;
}
+static const char *repr_addr_type(unsigned int type)
+{
+ return type == PPGTT_BUFFER ? "ppgtt" : "ggtt";
+}
+
static int find_bb_size(struct parser_exec_state *s,
unsigned long *bb_size,
unsigned long *bb_end_cmd_offset)
@@ -1753,24 +1758,24 @@ static int find_bb_size(struct parser_exec_state *s,
return -EFAULT;
cmd = cmd_val(s, 0);
- info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
+ info = get_cmd_info(s->vgpu->gvt, cmd, s->engine);
if (info == NULL) {
- gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %d, workload=%p\n",
- cmd, get_opcode(cmd, s->ring_id),
- (s->buf_addr_type == PPGTT_BUFFER) ?
- "ppgtt" : "ggtt", s->ring_id, s->workload);
+ gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %s, workload=%p\n",
+ cmd, get_opcode(cmd, s->engine),
+ repr_addr_type(s->buf_addr_type),
+ s->engine->name, s->workload);
return -EBADRQC;
}
do {
if (copy_gma_to_hva(s->vgpu, mm,
- gma, gma + 4, &cmd) < 0)
+ gma, gma + 4, &cmd) < 0)
return -EFAULT;
- info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
+ info = get_cmd_info(s->vgpu->gvt, cmd, s->engine);
if (info == NULL) {
- gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %d, workload=%p\n",
- cmd, get_opcode(cmd, s->ring_id),
- (s->buf_addr_type == PPGTT_BUFFER) ?
- "ppgtt" : "ggtt", s->ring_id, s->workload);
+ gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %s, workload=%p\n",
+ cmd, get_opcode(cmd, s->engine),
+ repr_addr_type(s->buf_addr_type),
+ s->engine->name, s->workload);
return -EBADRQC;
}
@@ -1799,12 +1804,12 @@ static int audit_bb_end(struct parser_exec_state *s, void *va)
u32 cmd = *(u32 *)va;
const struct cmd_info *info;
- info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
+ info = get_cmd_info(s->vgpu->gvt, cmd, s->engine);
if (info == NULL) {
- gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %d, workload=%p\n",
- cmd, get_opcode(cmd, s->ring_id),
- (s->buf_addr_type == PPGTT_BUFFER) ?
- "ppgtt" : "ggtt", s->ring_id, s->workload);
+ gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %s, workload=%p\n",
+ cmd, get_opcode(cmd, s->engine),
+ repr_addr_type(s->buf_addr_type),
+ s->engine->name, s->workload);
return -EBADRQC;
}
@@ -1857,7 +1862,7 @@ static int perform_bb_shadow(struct parser_exec_state *s)
if (bb->ppgtt)
start_offset = gma & ~I915_GTT_PAGE_MASK;
- bb->obj = i915_gem_object_create_shmem(s->vgpu->gvt->dev_priv,
+ bb->obj = i915_gem_object_create_shmem(s->engine->i915,
round_up(bb_size + start_offset,
PAGE_SIZE));
if (IS_ERR(bb->obj)) {
@@ -2666,25 +2671,25 @@ static int cmd_parser_exec(struct parser_exec_state *s)
if (cmd == MI_NOOP)
info = &cmd_info[mi_noop_index];
else
- info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
+ info = get_cmd_info(s->vgpu->gvt, cmd, s->engine);
if (info == NULL) {
- gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %d, workload=%p\n",
- cmd, get_opcode(cmd, s->ring_id),
- (s->buf_addr_type == PPGTT_BUFFER) ?
- "ppgtt" : "ggtt", s->ring_id, s->workload);
+ gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %s, workload=%p\n",
+ cmd, get_opcode(cmd, s->engine),
+ repr_addr_type(s->buf_addr_type),
+ s->engine->name, s->workload);
return -EBADRQC;
}
s->info = info;
- trace_gvt_command(vgpu->id, s->ring_id, s->ip_gma, s->ip_va,
+ trace_gvt_command(vgpu->id, s->engine->id, s->ip_gma, s->ip_va,
cmd_length(s), s->buf_type, s->buf_addr_type,
s->workload, info->name);
if ((info->flag & F_LEN_MASK) == F_LEN_VAR_FIXED) {
ret = gvt_check_valid_cmd_length(cmd_length(s),
- info->valid_len);
+ info->valid_len);
if (ret)
return ret;
}
@@ -2781,7 +2786,7 @@ static int scan_workload(struct intel_vgpu_workload *workload)
s.buf_type = RING_BUFFER_INSTRUCTION;
s.buf_addr_type = GTT_BUFFER;
s.vgpu = workload->vgpu;
- s.ring_id = workload->ring_id;
+ s.engine = workload->engine;
s.ring_start = workload->rb_start;
s.ring_size = _RING_CTL_BUF_SIZE(workload->rb_ctl);
s.ring_head = gma_head;
@@ -2790,8 +2795,7 @@ static int scan_workload(struct intel_vgpu_workload *workload)
s.workload = workload;
s.is_ctx_wa = false;
- if ((bypass_scan_mask & (1 << workload->ring_id)) ||
- gma_head == gma_tail)
+ if (bypass_scan_mask & workload->engine->mask || gma_head == gma_tail)
return 0;
ret = ip_gma_set(&s, gma_head);
@@ -2830,7 +2834,7 @@ static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
s.buf_type = RING_BUFFER_INSTRUCTION;
s.buf_addr_type = GTT_BUFFER;
s.vgpu = workload->vgpu;
- s.ring_id = workload->ring_id;
+ s.engine = workload->engine;
s.ring_start = wa_ctx->indirect_ctx.guest_gma;
s.ring_size = ring_size;
s.ring_head = gma_head;
@@ -2855,7 +2859,6 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
struct intel_vgpu_submission *s = &vgpu->submission;
unsigned long gma_head, gma_tail, gma_top, guest_rb_size;
void *shadow_ring_buffer_va;
- int ring_id = workload->ring_id;
int ret;
guest_rb_size = _RING_CTL_BUF_SIZE(workload->rb_ctl);
@@ -2868,21 +2871,21 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
gma_tail = workload->rb_start + workload->rb_tail;
gma_top = workload->rb_start + guest_rb_size;
- if (workload->rb_len > s->ring_scan_buffer_size[ring_id]) {
+ if (workload->rb_len > s->ring_scan_buffer_size[workload->engine->id]) {
void *p;
/* realloc the new ring buffer if needed */
- p = krealloc(s->ring_scan_buffer[ring_id], workload->rb_len,
- GFP_KERNEL);
+ p = krealloc(s->ring_scan_buffer[workload->engine->id],
+ workload->rb_len, GFP_KERNEL);
if (!p) {
gvt_vgpu_err("fail to re-alloc ring scan buffer\n");
return -ENOMEM;
}
- s->ring_scan_buffer[ring_id] = p;
- s->ring_scan_buffer_size[ring_id] = workload->rb_len;
+ s->ring_scan_buffer[workload->engine->id] = p;
+ s->ring_scan_buffer_size[workload->engine->id] = workload->rb_len;
}
- shadow_ring_buffer_va = s->ring_scan_buffer[ring_id];
+ shadow_ring_buffer_va = s->ring_scan_buffer[workload->engine->id];
/* get shadow ring buffer va */
workload->shadow_ring_buffer_va = shadow_ring_buffer_va;
@@ -2940,7 +2943,7 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
int ret = 0;
void *map;
- obj = i915_gem_object_create_shmem(workload->vgpu->gvt->dev_priv,
+ obj = i915_gem_object_create_shmem(workload->engine->i915,
roundup(ctx_size + CACHELINE_BYTES,
PAGE_SIZE));
if (IS_ERR(obj))
@@ -3029,30 +3032,14 @@ int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
return 0;
}
-static const struct cmd_info *find_cmd_entry_any_ring(struct intel_gvt *gvt,
- unsigned int opcode, unsigned long rings)
-{
- const struct cmd_info *info = NULL;
- unsigned int ring;
-
- for_each_set_bit(ring, &rings, I915_NUM_ENGINES) {
- info = find_cmd_entry(gvt, opcode, ring);
- if (info)
- break;
- }
- return info;
-}
-
static int init_cmd_table(struct intel_gvt *gvt)
{
+ unsigned int gen_type = intel_gvt_get_device_type(gvt);
int i;
- struct cmd_entry *e;
- const struct cmd_info *info;
- unsigned int gen_type;
-
- gen_type = intel_gvt_get_device_type(gvt);
for (i = 0; i < ARRAY_SIZE(cmd_info); i++) {
+ struct cmd_entry *e;
+
if (!(cmd_info[i].devices & gen_type))
continue;
@@ -3061,23 +3048,16 @@ static int init_cmd_table(struct intel_gvt *gvt)
return -ENOMEM;
e->info = &cmd_info[i];
- info = find_cmd_entry_any_ring(gvt,
- e->info->opcode, e->info->rings);
- if (info) {
- gvt_err("%s %s duplicated\n", e->info->name,
- info->name);
- kfree(e);
- return -EEXIST;
- }
if (cmd_info[i].opcode == OP_MI_NOOP)
mi_noop_index = i;
INIT_HLIST_NODE(&e->hlist);
add_cmd_entry(gvt, e);
gvt_dbg_cmd("add %-30s op %04x flag %x devs %02x rings %02x\n",
- e->info->name, e->info->opcode, e->info->flag,
- e->info->devices, e->info->rings);
+ e->info->name, e->info->opcode, e->info->flag,
+ e->info->devices, e->info->rings);
}
+
return 0;
}
diff --git a/drivers/gpu/drm/i915/gvt/debugfs.c b/drivers/gpu/drm/i915/gvt/debugfs.c
index 285f6011a537..ec47d4114554 100644
--- a/drivers/gpu/drm/i915/gvt/debugfs.c
+++ b/drivers/gpu/drm/i915/gvt/debugfs.c
@@ -58,12 +58,11 @@ static int mmio_offset_compare(void *priv,
static inline int mmio_diff_handler(struct intel_gvt *gvt,
u32 offset, void *data)
{
- struct drm_i915_private *i915 = gvt->dev_priv;
struct mmio_diff_param *param = data;
struct diff_mmio *node;
u32 preg, vreg;
- preg = intel_uncore_read_notrace(&i915->uncore, _MMIO(offset));
+ preg = intel_uncore_read_notrace(gvt->gt->uncore, _MMIO(offset));
vreg = vgpu_vreg(param->vgpu, offset);
if (preg != vreg) {
@@ -98,10 +97,10 @@ static int vgpu_mmio_diff_show(struct seq_file *s, void *unused)
mutex_lock(&gvt->lock);
spin_lock_bh(&gvt->scheduler.mmio_context_lock);
- mmio_hw_access_pre(gvt->dev_priv);
+ mmio_hw_access_pre(gvt->gt);
/* Recognize all the diff mmios to list. */
intel_gvt_for_each_tracked_mmio(gvt, mmio_diff_handler, &param);
- mmio_hw_access_post(gvt->dev_priv);
+ mmio_hw_access_post(gvt->gt);
spin_unlock_bh(&gvt->scheduler.mmio_context_lock);
mutex_unlock(&gvt->lock);
@@ -128,6 +127,7 @@ static int
vgpu_scan_nonprivbb_get(void *data, u64 *val)
{
struct intel_vgpu *vgpu = (struct intel_vgpu *)data;
+
*val = vgpu->scan_nonprivbb;
return 0;
}
@@ -142,42 +142,7 @@ static int
vgpu_scan_nonprivbb_set(void *data, u64 val)
{
struct intel_vgpu *vgpu = (struct intel_vgpu *)data;
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- enum intel_engine_id id;
- char buf[128], *s;
- int len;
-
- val &= (1 << I915_NUM_ENGINES) - 1;
-
- if (vgpu->scan_nonprivbb == val)
- return 0;
-
- if (!val)
- goto done;
-
- len = sprintf(buf,
- "gvt: vgpu %d turns on non-privileged batch buffers scanning on Engines:",
- vgpu->id);
-
- s = buf + len;
-
- for (id = 0; id < I915_NUM_ENGINES; id++) {
- struct intel_engine_cs *engine;
-
- engine = dev_priv->engine[id];
- if (engine && (val & (1 << id))) {
- len = snprintf(s, 4, "%d, ", engine->id);
- s += len;
- } else
- val &= ~(1 << id);
- }
-
- if (val)
- sprintf(s, "low performance expected.");
-
- pr_warn("%s\n", buf);
-done:
vgpu->scan_nonprivbb = val;
return 0;
}
@@ -220,7 +185,7 @@ void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu)
*/
void intel_gvt_debugfs_init(struct intel_gvt *gvt)
{
- struct drm_minor *minor = gvt->dev_priv->drm.primary;
+ struct drm_minor *minor = gvt->gt->i915->drm.primary;
gvt->debugfs_root = debugfs_create_dir("gvt", minor->debugfs_root);
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index e1c313da6c00..6e5c9885d9fe 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -57,7 +57,7 @@ static int get_edp_pipe(struct intel_vgpu *vgpu)
static int edp_pipe_is_enabled(struct intel_vgpu *vgpu)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
if (!(vgpu_vreg_t(vgpu, PIPECONF(_PIPE_EDP)) & PIPECONF_ENABLE))
return 0;
@@ -69,9 +69,10 @@ static int edp_pipe_is_enabled(struct intel_vgpu *vgpu)
int pipe_is_enabled(struct intel_vgpu *vgpu, int pipe)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
- if (WARN_ON(pipe < PIPE_A || pipe >= I915_MAX_PIPES))
+ if (drm_WARN_ON(&dev_priv->drm,
+ pipe < PIPE_A || pipe >= I915_MAX_PIPES))
return -EINVAL;
if (vgpu_vreg_t(vgpu, PIPECONF(pipe)) & PIPECONF_ENABLE)
@@ -168,7 +169,7 @@ static u8 dpcd_fix_data[DPCD_HEADER_SIZE] = {
static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
int pipe;
if (IS_BROXTON(dev_priv)) {
@@ -319,9 +320,10 @@ static void clean_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num)
static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num,
int type, unsigned int resolution)
{
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num);
- if (WARN_ON(resolution >= GVT_EDID_NUM))
+ if (drm_WARN_ON(&i915->drm, resolution >= GVT_EDID_NUM))
return -EINVAL;
port->edid = kzalloc(sizeof(*(port->edid)), GFP_KERNEL);
@@ -389,7 +391,7 @@ void intel_gvt_check_vblank_emulation(struct intel_gvt *gvt)
static void emulate_vblank_on_pipe(struct intel_vgpu *vgpu, int pipe)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
struct intel_vgpu_irq *irq = &vgpu->irq;
int vblank_event[] = {
[PIPE_A] = PIPE_A_VBLANK,
@@ -421,7 +423,7 @@ static void emulate_vblank(struct intel_vgpu *vgpu)
int pipe;
mutex_lock(&vgpu->vgpu_lock);
- for_each_pipe(vgpu->gvt->dev_priv, pipe)
+ for_each_pipe(vgpu->gvt->gt->i915, pipe)
emulate_vblank_on_pipe(vgpu, pipe);
mutex_unlock(&vgpu->vgpu_lock);
}
@@ -454,10 +456,11 @@ void intel_gvt_emulate_vblank(struct intel_gvt *gvt)
*/
void intel_vgpu_emulate_hotplug(struct intel_vgpu *vgpu, bool connected)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
/* TODO: add more platforms support */
- if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+ if (IS_SKYLAKE(i915) || IS_KABYLAKE(i915) ||
+ IS_COFFEELAKE(i915)) {
if (connected) {
vgpu_vreg_t(vgpu, SFUSE_STRAP) |=
SFUSE_STRAP_DDID_DETECTED;
@@ -483,7 +486,7 @@ void intel_vgpu_emulate_hotplug(struct intel_vgpu *vgpu, bool connected)
*/
void intel_vgpu_clean_display(struct intel_vgpu *vgpu)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ||
IS_COFFEELAKE(dev_priv))
@@ -505,7 +508,7 @@ void intel_vgpu_clean_display(struct intel_vgpu *vgpu)
*/
int intel_vgpu_init_display(struct intel_vgpu *vgpu, u64 resolution)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
intel_vgpu_init_i2c_edid(vgpu);
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
index ae139f0877ae..37fc460414a8 100644
--- a/drivers/gpu/drm/i915/gvt/dmabuf.c
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -67,11 +67,11 @@ static int vgpu_gem_get_pages(
u32 page_num;
fb_info = (struct intel_vgpu_fb_info *)obj->gvt_info;
- if (WARN_ON(!fb_info))
+ if (drm_WARN_ON(&dev_priv->drm, !fb_info))
return -ENODEV;
vgpu = fb_info->obj->vgpu;
- if (WARN_ON(!vgpu))
+ if (drm_WARN_ON(&dev_priv->drm, !vgpu))
return -ENODEV;
st = kmalloc(sizeof(*st), GFP_KERNEL);
@@ -417,7 +417,7 @@ static void update_fb_info(struct vfio_device_gfx_plane_info *gvt_dmabuf,
int intel_vgpu_query_plane(struct intel_vgpu *vgpu, void *args)
{
- struct drm_device *dev = &vgpu->gvt->dev_priv->drm;
+ struct drm_device *dev = &vgpu->gvt->gt->i915->drm;
struct vfio_device_gfx_plane_info *gfx_plane_info = args;
struct intel_vgpu_dmabuf_obj *dmabuf_obj;
struct intel_vgpu_fb_info fb_info;
@@ -523,7 +523,7 @@ out:
/* To associate an exposed dmabuf with the dmabuf_obj */
int intel_vgpu_get_dmabuf(struct intel_vgpu *vgpu, unsigned int dmabuf_id)
{
- struct drm_device *dev = &vgpu->gvt->dev_priv->drm;
+ struct drm_device *dev = &vgpu->gvt->gt->i915->drm;
struct intel_vgpu_dmabuf_obj *dmabuf_obj;
struct drm_i915_gem_object *obj;
struct dma_buf *dmabuf;
diff --git a/drivers/gpu/drm/i915/gvt/edid.c b/drivers/gpu/drm/i915/gvt/edid.c
index 1fe6124918f1..190651df5db1 100644
--- a/drivers/gpu/drm/i915/gvt/edid.c
+++ b/drivers/gpu/drm/i915/gvt/edid.c
@@ -135,7 +135,7 @@ static void reset_gmbus_controller(struct intel_vgpu *vgpu)
static int gmbus0_mmio_write(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
int port, pin_select;
memcpy(&vgpu_vreg(vgpu, offset), p_data, bytes);
@@ -147,13 +147,13 @@ static int gmbus0_mmio_write(struct intel_vgpu *vgpu,
if (pin_select == 0)
return 0;
- if (IS_BROXTON(dev_priv))
+ if (IS_BROXTON(i915))
port = bxt_get_port_from_gmbus0(pin_select);
- else if (IS_COFFEELAKE(dev_priv))
+ else if (IS_COFFEELAKE(i915))
port = cnp_get_port_from_gmbus0(pin_select);
else
port = get_port_from_gmbus0(pin_select);
- if (WARN_ON(port < 0))
+ if (drm_WARN_ON(&i915->drm, port < 0))
return 0;
vgpu->display.i2c_edid.state = I2C_GMBUS;
@@ -276,7 +276,9 @@ static int gmbus1_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
static int gmbus3_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
- WARN_ON(1);
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
+
+ drm_WARN_ON(&i915->drm, 1);
return 0;
}
@@ -371,7 +373,9 @@ static int gmbus2_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
int intel_gvt_i2c_handle_gmbus_read(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
- if (WARN_ON(bytes > 8 && (offset & (bytes - 1))))
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
+
+ if (drm_WARN_ON(&i915->drm, bytes > 8 && (offset & (bytes - 1))))
return -EINVAL;
if (offset == i915_mmio_reg_offset(PCH_GMBUS2))
@@ -399,7 +403,9 @@ int intel_gvt_i2c_handle_gmbus_read(struct intel_vgpu *vgpu,
int intel_gvt_i2c_handle_gmbus_write(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
- if (WARN_ON(bytes > 8 && (offset & (bytes - 1))))
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
+
+ if (drm_WARN_ON(&i915->drm, bytes > 8 && (offset & (bytes - 1))))
return -EINVAL;
if (offset == i915_mmio_reg_offset(PCH_GMBUS0))
@@ -473,6 +479,7 @@ void intel_gvt_i2c_handle_aux_ch_write(struct intel_vgpu *vgpu,
unsigned int offset,
void *p_data)
{
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
struct intel_vgpu_i2c_edid *i2c_edid = &vgpu->display.i2c_edid;
int msg_length, ret_msg_size;
int msg, addr, ctrl, op;
@@ -532,9 +539,9 @@ void intel_gvt_i2c_handle_aux_ch_write(struct intel_vgpu *vgpu,
* support the gfx driver to do EDID access.
*/
} else {
- if (WARN_ON((op & 0x1) != GVT_AUX_I2C_READ))
+ if (drm_WARN_ON(&i915->drm, (op & 0x1) != GVT_AUX_I2C_READ))
return;
- if (WARN_ON(msg_length != 4))
+ if (drm_WARN_ON(&i915->drm, msg_length != 4))
return;
if (i2c_edid->edid_available && i2c_edid->slave_selected) {
unsigned char val = edid_get_byte(vgpu);
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index d6e7a1189bad..dd25c3024370 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -39,8 +39,7 @@
#define _EL_OFFSET_STATUS_BUF 0x370
#define _EL_OFFSET_STATUS_PTR 0x3A0
-#define execlist_ring_mmio(gvt, ring_id, offset) \
- (gvt->dev_priv->engine[ring_id]->mmio_base + (offset))
+#define execlist_ring_mmio(e, offset) ((e)->mmio_base + (offset))
#define valid_context(ctx) ((ctx)->valid)
#define same_context(a, b) (((a)->context_id == (b)->context_id) && \
@@ -54,12 +53,12 @@ static int context_switch_events[] = {
[VECS0] = VECS_AS_CONTEXT_SWITCH,
};
-static int ring_id_to_context_switch_event(unsigned int ring_id)
+static int to_context_switch_event(const struct intel_engine_cs *engine)
{
- if (WARN_ON(ring_id >= ARRAY_SIZE(context_switch_events)))
+ if (WARN_ON(engine->id >= ARRAY_SIZE(context_switch_events)))
return -EINVAL;
- return context_switch_events[ring_id];
+ return context_switch_events[engine->id];
}
static void switch_virtual_execlist_slot(struct intel_vgpu_execlist *execlist)
@@ -93,9 +92,8 @@ static void emulate_execlist_status(struct intel_vgpu_execlist *execlist)
struct execlist_ctx_descriptor_format *desc = execlist->running_context;
struct intel_vgpu *vgpu = execlist->vgpu;
struct execlist_status_format status;
- int ring_id = execlist->ring_id;
- u32 status_reg = execlist_ring_mmio(vgpu->gvt,
- ring_id, _EL_OFFSET_STATUS);
+ u32 status_reg =
+ execlist_ring_mmio(execlist->engine, _EL_OFFSET_STATUS);
status.ldw = vgpu_vreg(vgpu, status_reg);
status.udw = vgpu_vreg(vgpu, status_reg + 4);
@@ -124,21 +122,19 @@ static void emulate_execlist_status(struct intel_vgpu_execlist *execlist)
}
static void emulate_csb_update(struct intel_vgpu_execlist *execlist,
- struct execlist_context_status_format *status,
- bool trigger_interrupt_later)
+ struct execlist_context_status_format *status,
+ bool trigger_interrupt_later)
{
struct intel_vgpu *vgpu = execlist->vgpu;
- int ring_id = execlist->ring_id;
struct execlist_context_status_pointer_format ctx_status_ptr;
u32 write_pointer;
u32 ctx_status_ptr_reg, ctx_status_buf_reg, offset;
unsigned long hwsp_gpa;
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- ctx_status_ptr_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
- _EL_OFFSET_STATUS_PTR);
- ctx_status_buf_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
- _EL_OFFSET_STATUS_BUF);
+ ctx_status_ptr_reg =
+ execlist_ring_mmio(execlist->engine, _EL_OFFSET_STATUS_PTR);
+ ctx_status_buf_reg =
+ execlist_ring_mmio(execlist->engine, _EL_OFFSET_STATUS_BUF);
ctx_status_ptr.dw = vgpu_vreg(vgpu, ctx_status_ptr_reg);
@@ -161,26 +157,24 @@ static void emulate_csb_update(struct intel_vgpu_execlist *execlist,
/* Update the CSB and CSB write pointer in HWSP */
hwsp_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
- vgpu->hws_pga[ring_id]);
+ vgpu->hws_pga[execlist->engine->id]);
if (hwsp_gpa != INTEL_GVT_INVALID_ADDR) {
intel_gvt_hypervisor_write_gpa(vgpu,
- hwsp_gpa + I915_HWS_CSB_BUF0_INDEX * 4 +
- write_pointer * 8,
- status, 8);
+ hwsp_gpa + I915_HWS_CSB_BUF0_INDEX * 4 + write_pointer * 8,
+ status, 8);
intel_gvt_hypervisor_write_gpa(vgpu,
- hwsp_gpa +
- intel_hws_csb_write_index(dev_priv) * 4,
- &write_pointer, 4);
+ hwsp_gpa + intel_hws_csb_write_index(execlist->engine->i915) * 4,
+ &write_pointer, 4);
}
gvt_dbg_el("vgpu%d: w pointer %u reg %x csb l %x csb h %x\n",
- vgpu->id, write_pointer, offset, status->ldw, status->udw);
+ vgpu->id, write_pointer, offset, status->ldw, status->udw);
if (trigger_interrupt_later)
return;
intel_vgpu_trigger_virtual_event(vgpu,
- ring_id_to_context_switch_event(execlist->ring_id));
+ to_context_switch_event(execlist->engine));
}
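/*
 * [Editor's note -- illustrative sketch, not part of the patch.]
 * The two hypervisor writes above target the guest's hardware status
 * page: each CSB entry is 8 bytes, starting at dword
 * I915_HWS_CSB_BUF0_INDEX. A hypothetical helper for the entry address:
 */
static unsigned long csb_entry_gpa(unsigned long hwsp_gpa, u32 write_pointer)
{
	return hwsp_gpa + I915_HWS_CSB_BUF0_INDEX * 4 + write_pointer * 8;
}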
static int emulate_execlist_ctx_schedule_out(
@@ -261,9 +255,8 @@ static struct intel_vgpu_execlist_slot *get_next_execlist_slot(
struct intel_vgpu_execlist *execlist)
{
struct intel_vgpu *vgpu = execlist->vgpu;
- int ring_id = execlist->ring_id;
- u32 status_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
- _EL_OFFSET_STATUS);
+ u32 status_reg =
+ execlist_ring_mmio(execlist->engine, _EL_OFFSET_STATUS);
struct execlist_status_format status;
status.ldw = vgpu_vreg(vgpu, status_reg);
@@ -379,7 +372,6 @@ static int prepare_execlist_workload(struct intel_vgpu_workload *workload)
struct intel_vgpu *vgpu = workload->vgpu;
struct intel_vgpu_submission *s = &vgpu->submission;
struct execlist_ctx_descriptor_format ctx[2];
- int ring_id = workload->ring_id;
int ret;
if (!workload->emulate_schedule_in)
@@ -388,7 +380,8 @@ static int prepare_execlist_workload(struct intel_vgpu_workload *workload)
ctx[0] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 0);
ctx[1] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 1);
- ret = emulate_execlist_schedule_in(&s->execlist[ring_id], ctx);
+ ret = emulate_execlist_schedule_in(&s->execlist[workload->engine->id],
+ ctx);
if (ret) {
gvt_vgpu_err("fail to emulate execlist schedule in\n");
return ret;
@@ -399,21 +392,21 @@ static int prepare_execlist_workload(struct intel_vgpu_workload *workload)
static int complete_execlist_workload(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
- int ring_id = workload->ring_id;
struct intel_vgpu_submission *s = &vgpu->submission;
- struct intel_vgpu_execlist *execlist = &s->execlist[ring_id];
+ struct intel_vgpu_execlist *execlist =
+ &s->execlist[workload->engine->id];
struct intel_vgpu_workload *next_workload;
- struct list_head *next = workload_q_head(vgpu, ring_id)->next;
+ struct list_head *next = workload_q_head(vgpu, workload->engine)->next;
bool lite_restore = false;
int ret = 0;
- gvt_dbg_el("complete workload %p status %d\n", workload,
- workload->status);
+ gvt_dbg_el("complete workload %p status %d\n",
+ workload, workload->status);
- if (workload->status || (vgpu->resetting_eng & BIT(ring_id)))
+ if (workload->status || vgpu->resetting_eng & workload->engine->mask)
goto out;
- if (!list_empty(workload_q_head(vgpu, ring_id))) {
+ if (!list_empty(workload_q_head(vgpu, workload->engine))) {
struct execlist_ctx_descriptor_format *this_desc, *next_desc;
next_workload = container_of(next,
@@ -436,14 +429,15 @@ out:
return ret;
}
-static int submit_context(struct intel_vgpu *vgpu, int ring_id,
- struct execlist_ctx_descriptor_format *desc,
- bool emulate_schedule_in)
+static int submit_context(struct intel_vgpu *vgpu,
+ const struct intel_engine_cs *engine,
+ struct execlist_ctx_descriptor_format *desc,
+ bool emulate_schedule_in)
{
struct intel_vgpu_submission *s = &vgpu->submission;
struct intel_vgpu_workload *workload = NULL;
- workload = intel_vgpu_create_workload(vgpu, ring_id, desc);
+ workload = intel_vgpu_create_workload(vgpu, engine, desc);
if (IS_ERR(workload))
return PTR_ERR(workload);
@@ -452,19 +446,20 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id,
workload->emulate_schedule_in = emulate_schedule_in;
if (emulate_schedule_in)
- workload->elsp_dwords = s->execlist[ring_id].elsp_dwords;
+ workload->elsp_dwords = s->execlist[engine->id].elsp_dwords;
gvt_dbg_el("workload %p emulate schedule_in %d\n", workload,
- emulate_schedule_in);
+ emulate_schedule_in);
intel_vgpu_queue_workload(workload);
return 0;
}
-int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
+int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu,
+ const struct intel_engine_cs *engine)
{
struct intel_vgpu_submission *s = &vgpu->submission;
- struct intel_vgpu_execlist *execlist = &s->execlist[ring_id];
+ struct intel_vgpu_execlist *execlist = &s->execlist[engine->id];
struct execlist_ctx_descriptor_format *desc[2];
int i, ret;
@@ -489,7 +484,7 @@ int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
for (i = 0; i < ARRAY_SIZE(desc); i++) {
if (!desc[i]->valid)
continue;
- ret = submit_context(vgpu, ring_id, desc[i], i == 0);
+ ret = submit_context(vgpu, engine, desc[i], i == 0);
if (ret) {
gvt_vgpu_err("failed to submit desc %d\n", i);
return ret;
@@ -504,22 +499,22 @@ inv_desc:
return -EINVAL;
}
-static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
+static void init_vgpu_execlist(struct intel_vgpu *vgpu,
+ const struct intel_engine_cs *engine)
{
struct intel_vgpu_submission *s = &vgpu->submission;
- struct intel_vgpu_execlist *execlist = &s->execlist[ring_id];
+ struct intel_vgpu_execlist *execlist = &s->execlist[engine->id];
struct execlist_context_status_pointer_format ctx_status_ptr;
u32 ctx_status_ptr_reg;
memset(execlist, 0, sizeof(*execlist));
execlist->vgpu = vgpu;
- execlist->ring_id = ring_id;
+ execlist->engine = engine;
execlist->slot[0].index = 0;
execlist->slot[1].index = 1;
- ctx_status_ptr_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
- _EL_OFFSET_STATUS_PTR);
+ ctx_status_ptr_reg = execlist_ring_mmio(engine, _EL_OFFSET_STATUS_PTR);
ctx_status_ptr.dw = vgpu_vreg(vgpu, ctx_status_ptr_reg);
ctx_status_ptr.read_ptr = 0;
ctx_status_ptr.write_ptr = 0x7;
@@ -529,7 +524,7 @@ static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
static void clean_execlist(struct intel_vgpu *vgpu,
intel_engine_mask_t engine_mask)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
struct intel_engine_cs *engine;
struct intel_vgpu_submission *s = &vgpu->submission;
intel_engine_mask_t tmp;
@@ -544,12 +539,12 @@ static void clean_execlist(struct intel_vgpu *vgpu,
static void reset_execlist(struct intel_vgpu *vgpu,
intel_engine_mask_t engine_mask)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
struct intel_engine_cs *engine;
intel_engine_mask_t tmp;
for_each_engine_masked(engine, &dev_priv->gt, engine_mask, tmp)
- init_vgpu_execlist(vgpu, engine->id);
+ init_vgpu_execlist(vgpu, engine);
}
static int init_execlist(struct intel_vgpu *vgpu,
diff --git a/drivers/gpu/drm/i915/gvt/execlist.h b/drivers/gpu/drm/i915/gvt/execlist.h
index 5c0c1fd30c83..d62cd14605a3 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.h
+++ b/drivers/gpu/drm/i915/gvt/execlist.h
@@ -170,16 +170,17 @@ struct intel_vgpu_execlist {
struct intel_vgpu_execlist_slot *running_slot;
struct intel_vgpu_execlist_slot *pending_slot;
struct execlist_ctx_descriptor_format *running_context;
- int ring_id;
struct intel_vgpu *vgpu;
struct intel_vgpu_elsp_dwords elsp_dwords;
+ const struct intel_engine_cs *engine;
};
void intel_vgpu_clean_execlist(struct intel_vgpu *vgpu);
int intel_vgpu_init_execlist(struct intel_vgpu *vgpu);
-int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id);
+int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu,
+ const struct intel_engine_cs *engine);
void intel_vgpu_reset_execlist(struct intel_vgpu *vgpu,
intel_engine_mask_t engine_mask);
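/*
 * [Editor's note -- illustrative sketch, not part of the patch.]
 * With the ring_id parameters gone, callers pass the engine itself:
 * engine->id still indexes per-engine arrays such as s->execlist[], and
 * engine->mask replaces BIT(ring_id). example_submit_all() is hypothetical.
 */
static int example_submit_all(struct intel_vgpu *vgpu)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int ret;

	/* for_each_engine() now walks a struct intel_gt, not dev_priv */
	for_each_engine(engine, vgpu->gvt->gt, id) {
		ret = intel_vgpu_submit_execlist(vgpu, engine);
		if (ret)
			return ret;
	}
	return 0;
}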
diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c
index 8bb292b01271..0889ad8291b0 100644
--- a/drivers/gpu/drm/i915/gvt/fb_decoder.c
+++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c
@@ -146,7 +146,7 @@ static int skl_format_to_drm(int format, bool rgb_order, bool alpha,
static u32 intel_vgpu_get_stride(struct intel_vgpu *vgpu, int pipe,
u32 tiled, int stride_mask, int bpp)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
u32 stride_reg = vgpu_vreg_t(vgpu, DSPSTRIDE(pipe)) & stride_mask;
u32 stride = stride_reg;
@@ -202,8 +202,8 @@ static int get_active_pipe(struct intel_vgpu *vgpu)
int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
struct intel_vgpu_primary_plane_format *plane)
{
+ struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
u32 val, fmt;
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
int pipe;
pipe = get_active_pipe(vgpu);
@@ -332,9 +332,9 @@ static int cursor_mode_to_drm(int mode)
int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu,
struct intel_vgpu_cursor_plane_format *plane)
{
+ struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
u32 val, mode, index;
u32 alpha_plane, alpha_force;
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
int pipe;
pipe = get_active_pipe(vgpu);
diff --git a/drivers/gpu/drm/i915/gvt/firmware.c b/drivers/gpu/drm/i915/gvt/firmware.c
index b0c1fda32977..990a181094e3 100644
--- a/drivers/gpu/drm/i915/gvt/firmware.c
+++ b/drivers/gpu/drm/i915/gvt/firmware.c
@@ -68,9 +68,7 @@ static struct bin_attribute firmware_attr = {
static int mmio_snapshot_handler(struct intel_gvt *gvt, u32 offset, void *data)
{
- struct drm_i915_private *i915 = gvt->dev_priv;
-
- *(u32 *)(data + offset) = intel_uncore_read_notrace(&i915->uncore,
+ *(u32 *)(data + offset) = intel_uncore_read_notrace(gvt->gt->uncore,
_MMIO(offset));
return 0;
}
@@ -78,7 +76,7 @@ static int mmio_snapshot_handler(struct intel_gvt *gvt, u32 offset, void *data)
static int expose_firmware_sysfs(struct intel_gvt *gvt)
{
struct intel_gvt_device_info *info = &gvt->device_info;
- struct pci_dev *pdev = gvt->dev_priv->drm.pdev;
+ struct pci_dev *pdev = gvt->gt->i915->drm.pdev;
struct gvt_firmware_header *h;
void *firmware;
void *p;
@@ -129,7 +127,7 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
static void clean_firmware_sysfs(struct intel_gvt *gvt)
{
- struct pci_dev *pdev = gvt->dev_priv->drm.pdev;
+ struct pci_dev *pdev = gvt->gt->i915->drm.pdev;
device_remove_bin_file(&pdev->dev, &firmware_attr);
vfree(firmware_attr.private);
@@ -153,8 +151,7 @@ static int verify_firmware(struct intel_gvt *gvt,
const struct firmware *fw)
{
struct intel_gvt_device_info *info = &gvt->device_info;
- struct drm_i915_private *dev_priv = gvt->dev_priv;
- struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct pci_dev *pdev = gvt->gt->i915->drm.pdev;
struct gvt_firmware_header *h;
unsigned long id, crc32_start;
const void *mem;
@@ -208,8 +205,7 @@ invalid_firmware:
int intel_gvt_load_firmware(struct intel_gvt *gvt)
{
struct intel_gvt_device_info *info = &gvt->device_info;
- struct drm_i915_private *dev_priv = gvt->dev_priv;
- struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct pci_dev *pdev = gvt->gt->i915->drm.pdev;
struct intel_gvt_firmware *firmware = &gvt->firmware;
struct gvt_firmware_header *h;
const struct firmware *fw;
@@ -244,7 +240,7 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt)
gvt_dbg_core("request hw state firmware %s...\n", path);
- ret = request_firmware(&fw, path, &dev_priv->drm.pdev->dev);
+ ret = request_firmware(&fw, path, &gvt->gt->i915->drm.pdev->dev);
kfree(path);
if (ret)
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 4a4828074cb7..2a4b23f8aa74 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -71,8 +71,10 @@ bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size)
/* translate a guest gmadr to host gmadr */
int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr)
{
- if (WARN(!vgpu_gmadr_is_valid(vgpu, g_addr),
- "invalid guest gmadr %llx\n", g_addr))
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
+
+ if (drm_WARN(&i915->drm, !vgpu_gmadr_is_valid(vgpu, g_addr),
+ "invalid guest gmadr %llx\n", g_addr))
return -EACCES;
if (vgpu_gmadr_is_aperture(vgpu, g_addr))
@@ -87,8 +89,10 @@ int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr)
/* translate a host gmadr to guest gmadr */
int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr)
{
- if (WARN(!gvt_gmadr_is_valid(vgpu->gvt, h_addr),
- "invalid host gmadr %llx\n", h_addr))
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
+
+ if (drm_WARN(&i915->drm, !gvt_gmadr_is_valid(vgpu->gvt, h_addr),
+ "invalid host gmadr %llx\n", h_addr))
return -EACCES;
if (gvt_gmadr_is_aperture(vgpu->gvt, h_addr))
@@ -275,24 +279,23 @@ static inline int get_pse_type(int type)
return gtt_type_table[type].pse_entry_type;
}
-static u64 read_pte64(struct drm_i915_private *dev_priv, unsigned long index)
+static u64 read_pte64(struct i915_ggtt *ggtt, unsigned long index)
{
- void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;
+ void __iomem *addr = (gen8_pte_t __iomem *)ggtt->gsm + index;
return readq(addr);
}
-static void ggtt_invalidate(struct drm_i915_private *dev_priv)
+static void ggtt_invalidate(struct intel_gt *gt)
{
- mmio_hw_access_pre(dev_priv);
- I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
- mmio_hw_access_post(dev_priv);
+ mmio_hw_access_pre(gt);
+ intel_uncore_write(gt->uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
+ mmio_hw_access_post(gt);
}
-static void write_pte64(struct drm_i915_private *dev_priv,
- unsigned long index, u64 pte)
+static void write_pte64(struct i915_ggtt *ggtt, unsigned long index, u64 pte)
{
- void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;
+ void __iomem *addr = (gen8_pte_t __iomem *)ggtt->gsm + index;
writeq(pte, addr);
}
@@ -315,7 +318,7 @@ static inline int gtt_get_entry64(void *pt,
if (WARN_ON(ret))
return ret;
} else if (!pt) {
- e->val64 = read_pte64(vgpu->gvt->dev_priv, index);
+ e->val64 = read_pte64(vgpu->gvt->gt->ggtt, index);
} else {
e->val64 = *((u64 *)pt + index);
}
@@ -340,7 +343,7 @@ static inline int gtt_set_entry64(void *pt,
if (WARN_ON(ret))
return ret;
} else if (!pt) {
- write_pte64(vgpu->gvt->dev_priv, index, e->val64);
+ write_pte64(vgpu->gvt->gt->ggtt, index, e->val64);
} else {
*((u64 *)pt + index) = e->val64;
}
@@ -734,7 +737,7 @@ static int detach_oos_page(struct intel_vgpu *vgpu,
static void ppgtt_free_spt(struct intel_vgpu_ppgtt_spt *spt)
{
- struct device *kdev = &spt->vgpu->gvt->dev_priv->drm.pdev->dev;
+ struct device *kdev = &spt->vgpu->gvt->gt->i915->drm.pdev->dev;
trace_spt_free(spt->vgpu->id, spt, spt->guest_page.type);
@@ -819,7 +822,7 @@ static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt);
static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt(
struct intel_vgpu *vgpu, enum intel_gvt_gtt_type type)
{
- struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev;
+ struct device *kdev = &vgpu->gvt->gt->i915->drm.pdev->dev;
struct intel_vgpu_ppgtt_spt *spt = NULL;
dma_addr_t daddr;
int ret;
@@ -940,6 +943,7 @@ static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt);
static int ppgtt_invalidate_spt_by_shadow_entry(struct intel_vgpu *vgpu,
struct intel_gvt_gtt_entry *e)
{
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
struct intel_vgpu_ppgtt_spt *s;
enum intel_gvt_gtt_type cur_pt_type;
@@ -952,7 +956,9 @@ static int ppgtt_invalidate_spt_by_shadow_entry(struct intel_vgpu *vgpu,
if (!gtt_type_is_pt(cur_pt_type) ||
!gtt_type_is_pt(cur_pt_type + 1)) {
- WARN(1, "Invalid page table type, cur_pt_type is: %d\n", cur_pt_type);
+ drm_WARN(&i915->drm, 1,
+ "Invalid page table type, cur_pt_type is: %d\n",
+ cur_pt_type);
return -EINVAL;
}
@@ -1044,7 +1050,7 @@ fail:
static bool vgpu_ips_enabled(struct intel_vgpu *vgpu)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
if (INTEL_GEN(dev_priv) == 9 || INTEL_GEN(dev_priv) == 10) {
u32 ips = vgpu_vreg_t(vgpu, GEN8_GAMW_ECO_DEV_RW_IA) &
@@ -1153,7 +1159,7 @@ static int is_2MB_gtt_possible(struct intel_vgpu *vgpu,
struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
unsigned long pfn;
- if (!HAS_PAGE_SIZES(vgpu->gvt->dev_priv, I915_GTT_PAGE_SIZE_2M))
+ if (!HAS_PAGE_SIZES(vgpu->gvt->gt->i915, I915_GTT_PAGE_SIZE_2M))
return 0;
pfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, ops->get_pfn(entry));
@@ -2314,7 +2320,7 @@ out:
ggtt_invalidate_pte(vgpu, &e);
ggtt_set_host_entry(ggtt_mm, &m, g_gtt_index);
- ggtt_invalidate(gvt->dev_priv);
+ ggtt_invalidate(gvt->gt);
return 0;
}
@@ -2347,16 +2353,18 @@ int intel_vgpu_emulate_ggtt_mmio_write(struct intel_vgpu *vgpu,
static int alloc_scratch_pages(struct intel_vgpu *vgpu,
enum intel_gvt_gtt_type type)
{
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
struct intel_vgpu_gtt *gtt = &vgpu->gtt;
struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
int page_entry_num = I915_GTT_PAGE_SIZE >>
vgpu->gvt->device_info.gtt_entry_size_shift;
void *scratch_pt;
int i;
- struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
+ struct device *dev = &vgpu->gvt->gt->i915->drm.pdev->dev;
dma_addr_t daddr;
- if (WARN_ON(type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX))
+ if (drm_WARN_ON(&i915->drm,
+ type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX))
return -EINVAL;
scratch_pt = (void *)get_zeroed_page(GFP_KERNEL);
@@ -2410,7 +2418,7 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
static int release_scratch_page_tree(struct intel_vgpu *vgpu)
{
int i;
- struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
+ struct device *dev = &vgpu->gvt->gt->i915->drm.pdev->dev;
dma_addr_t daddr;
for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
@@ -2682,7 +2690,7 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
{
int ret;
void *page;
- struct device *dev = &gvt->dev_priv->drm.pdev->dev;
+ struct device *dev = &gvt->gt->i915->drm.pdev->dev;
dma_addr_t daddr;
gvt_dbg_core("init gtt\n");
@@ -2731,7 +2739,7 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
*/
void intel_gvt_clean_gtt(struct intel_gvt *gvt)
{
- struct device *dev = &gvt->dev_priv->drm.pdev->dev;
+ struct device *dev = &gvt->gt->i915->drm.pdev->dev;
dma_addr_t daddr = (dma_addr_t)(gvt->gtt.scratch_mfn <<
I915_GTT_PAGE_SHIFT);
@@ -2779,7 +2787,6 @@ void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu)
void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old)
{
struct intel_gvt *gvt = vgpu->gvt;
- struct drm_i915_private *dev_priv = gvt->dev_priv;
struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
struct intel_gvt_gtt_entry entry = {.type = GTT_TYPE_GGTT_PTE};
struct intel_gvt_gtt_entry old_entry;
@@ -2809,7 +2816,7 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old)
ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++);
}
- ggtt_invalidate(dev_priv);
+ ggtt_invalidate(gvt->gt);
}
/**
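/*
 * [Editor's note -- illustrative sketch, not part of the patch.]
 * The GGTT helpers above now operate on i915_ggtt/intel_gt directly; a
 * shadow PTE update follows a write-then-flush pattern. update_one_pte()
 * is hypothetical; write_pte64() and ggtt_invalidate() are the static
 * helpers from this patch, shown here as if they were visible.
 */
static void update_one_pte(struct intel_gvt *gvt, unsigned long index, u64 pte)
{
	struct i915_ggtt *ggtt = gvt_to_ggtt(gvt);

	write_pte64(ggtt, index, pte);	/* writeq() into ggtt->gsm */
	ggtt_invalidate(gvt->gt);	/* flush via GFX_FLSH_CNTL_GEN6 */
}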
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index 8f37eefa0a02..9e1787867894 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -35,6 +35,7 @@
#include <linux/kthread.h>
#include "i915_drv.h"
+#include "intel_gvt.h"
#include "gvt.h"
#include <linux/vfio.h>
#include <linux/mdev.h>
@@ -49,15 +50,15 @@ static const char * const supported_hypervisors[] = {
static struct intel_vgpu_type *intel_gvt_find_vgpu_type(struct intel_gvt *gvt,
const char *name)
{
+ const char *driver_name =
+ dev_driver_string(&gvt->gt->i915->drm.pdev->dev);
int i;
- struct intel_vgpu_type *t;
- const char *driver_name = dev_driver_string(
- &gvt->dev_priv->drm.pdev->dev);
+ name += strlen(driver_name) + 1;
for (i = 0; i < gvt->num_types; i++) {
- t = &gvt->types[i];
- if (!strncmp(t->name, name + strlen(driver_name) + 1,
- sizeof(t->name)))
+ struct intel_vgpu_type *t = &gvt->types[i];
+
+ if (!strncmp(t->name, name, sizeof(t->name)))
return t;
}
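/*
 * [Editor's note -- illustrative sketch, not part of the patch.]
 * mdev type names arrive as "<driver>-<type>"; the rewrite above skips
 * the driver prefix once, before the loop, instead of recomputing it per
 * iteration. example_name_matches() and the sample names are illustrative.
 */
static bool example_name_matches(const struct intel_vgpu_type *t,
				 const char *mdev_name,   /* e.g. "i915-GVTg_V5_4" */
				 const char *driver_name) /* e.g. "i915" */
{
	return !strncmp(t->name, mdev_name + strlen(driver_name) + 1,
			sizeof(t->name));
}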
@@ -120,10 +121,8 @@ static struct attribute_group *gvt_vgpu_type_groups[] = {
[0 ... NR_MAX_INTEL_VGPU_TYPES - 1] = NULL,
};
-static bool intel_get_gvt_attrs(struct attribute ***type_attrs,
- struct attribute_group ***intel_vgpu_type_groups)
+static bool intel_get_gvt_attrs(struct attribute_group ***intel_vgpu_type_groups)
{
- *type_attrs = gvt_type_attrs;
*intel_vgpu_type_groups = gvt_vgpu_type_groups;
return true;
}
@@ -191,7 +190,7 @@ static const struct intel_gvt_ops intel_gvt_ops = {
static void init_device_info(struct intel_gvt *gvt)
{
struct intel_gvt_device_info *info = &gvt->device_info;
- struct pci_dev *pdev = gvt->dev_priv->drm.pdev;
+ struct pci_dev *pdev = gvt->gt->i915->drm.pdev;
info->max_support_vgpus = 8;
info->cfg_space_size = PCI_CFG_SPACE_EXP_SIZE;
@@ -257,17 +256,17 @@ static int init_service_thread(struct intel_gvt *gvt)
/**
* intel_gvt_clean_device - clean a GVT device
- * @dev_priv: i915 private
+ * @i915: i915 private
*
* This function is called at the driver unloading stage, to free the
* resources owned by a GVT device.
*
*/
-void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
+void intel_gvt_clean_device(struct drm_i915_private *i915)
{
- struct intel_gvt *gvt = to_gvt(dev_priv);
+ struct intel_gvt *gvt = fetch_and_zero(&i915->gvt);
- if (WARN_ON(!gvt))
+ if (drm_WARN_ON(&i915->drm, !gvt))
return;
intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu);
@@ -285,13 +284,12 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
intel_gvt_clean_mmio_info(gvt);
idr_destroy(&gvt->vgpu_idr);
- kfree(dev_priv->gvt);
- dev_priv->gvt = NULL;
+	kfree(gvt);
}
/**
* intel_gvt_init_device - initialize a GVT device
- * @dev_priv: drm i915 private data
+ * @i915: drm i915 private data
*
* This function is called at the initialization stage, to initialize
* necessary GVT components.
@@ -300,13 +298,13 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
* Zero on success, negative error code if failed.
*
*/
-int intel_gvt_init_device(struct drm_i915_private *dev_priv)
+int intel_gvt_init_device(struct drm_i915_private *i915)
{
struct intel_gvt *gvt;
struct intel_vgpu *vgpu;
int ret;
- if (WARN_ON(dev_priv->gvt))
+ if (drm_WARN_ON(&i915->drm, i915->gvt))
return -EEXIST;
gvt = kzalloc(sizeof(struct intel_gvt), GFP_KERNEL);
@@ -319,7 +317,8 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
spin_lock_init(&gvt->scheduler.mmio_context_lock);
mutex_init(&gvt->lock);
mutex_init(&gvt->sched_lock);
- gvt->dev_priv = dev_priv;
+ gvt->gt = &i915->gt;
+ i915->gvt = gvt;
init_device_info(gvt);
@@ -378,8 +377,7 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
intel_gvt_debugfs_init(gvt);
gvt_dbg_core("gvt device initialization is done\n");
- dev_priv->gvt = gvt;
- intel_gvt_host.dev = &dev_priv->drm.pdev->dev;
+ intel_gvt_host.dev = &i915->drm.pdev->dev;
intel_gvt_host.initialized = true;
return 0;
@@ -404,6 +402,7 @@ out_clean_mmio_info:
out_clean_idr:
idr_destroy(&gvt->vgpu_idr);
kfree(gvt);
+ i915->gvt = NULL;
return ret;
}
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index b47c6acaf9c0..58c2c7932e3f 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -196,41 +196,21 @@ struct intel_vgpu {
struct dentry *debugfs;
-#if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
- struct {
- struct mdev_device *mdev;
- struct vfio_region *region;
- int num_regions;
- struct eventfd_ctx *intx_trigger;
- struct eventfd_ctx *msi_trigger;
-
- /*
- * Two caches are used to avoid mapping duplicated pages (eg.
- * scratch pages). This help to reduce dma setup overhead.
- */
- struct rb_root gfn_cache;
- struct rb_root dma_addr_cache;
- unsigned long nr_cache_entries;
- struct mutex cache_lock;
-
- struct notifier_block iommu_notifier;
- struct notifier_block group_notifier;
- struct kvm *kvm;
- struct work_struct release_work;
- atomic_t released;
- struct vfio_device *vfio_device;
- } vdev;
-#endif
+ /* Hypervisor-specific device state. */
+ void *vdev;
struct list_head dmabuf_obj_list_head;
struct mutex dmabuf_lock;
struct idr object_idr;
- struct completion vblank_done;
-
u32 scan_nonprivbb;
};
+static inline void *intel_vgpu_vdev(struct intel_vgpu *vgpu)
+{
+ return vgpu->vdev;
+}
+
/* validating GM healthy status*/
#define vgpu_is_vm_unhealthy(ret_val) \
(((ret_val) == -EBADRQC) || ((ret_val) == -EFAULT))
@@ -306,7 +286,7 @@ struct intel_gvt {
/* scheduler scope lock, protect gvt and vgpu schedule related data */
struct mutex sched_lock;
- struct drm_i915_private *dev_priv;
+ struct intel_gt *gt;
struct idr vgpu_idr; /* vGPU IDR pool */
struct intel_gvt_device_info device_info;
@@ -376,14 +356,15 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt);
#define HOST_HIGH_GM_SIZE MB_TO_BYTES(384)
#define HOST_FENCE 4
+#define gvt_to_ggtt(gvt) ((gvt)->gt->ggtt)
+
/* Aperture/GM space definitions for GVT device */
-#define gvt_aperture_sz(gvt) (gvt->dev_priv->ggtt.mappable_end)
-#define gvt_aperture_pa_base(gvt) (gvt->dev_priv->ggtt.gmadr.start)
+#define gvt_aperture_sz(gvt) gvt_to_ggtt(gvt)->mappable_end
+#define gvt_aperture_pa_base(gvt) gvt_to_ggtt(gvt)->gmadr.start
-#define gvt_ggtt_gm_sz(gvt) (gvt->dev_priv->ggtt.vm.total)
-#define gvt_ggtt_sz(gvt) \
- ((gvt->dev_priv->ggtt.vm.total >> PAGE_SHIFT) << 3)
-#define gvt_hidden_sz(gvt) (gvt_ggtt_gm_sz(gvt) - gvt_aperture_sz(gvt))
+#define gvt_ggtt_gm_sz(gvt) gvt_to_ggtt(gvt)->vm.total
+#define gvt_ggtt_sz(gvt) (gvt_to_ggtt(gvt)->vm.total >> PAGE_SHIFT << 3)
+#define gvt_hidden_sz(gvt) (gvt_ggtt_gm_sz(gvt) - gvt_aperture_sz(gvt))
#define gvt_aperture_gmadr_base(gvt) (0)
#define gvt_aperture_gmadr_end(gvt) (gvt_aperture_gmadr_base(gvt) \
@@ -394,7 +375,7 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt);
#define gvt_hidden_gmadr_end(gvt) (gvt_hidden_gmadr_base(gvt) \
+ gvt_hidden_sz(gvt) - 1)
-#define gvt_fence_sz(gvt) ((gvt)->dev_priv->ggtt.num_fences)
+#define gvt_fence_sz(gvt) (gvt_to_ggtt(gvt)->num_fences)
/* Aperture/GM space definitions for vGPU */
#define vgpu_aperture_offset(vgpu) ((vgpu)->gm.low_gm_node.start)
@@ -570,8 +551,7 @@ struct intel_gvt_ops {
void (*vgpu_deactivate)(struct intel_vgpu *);
struct intel_vgpu_type *(*gvt_find_vgpu_type)(struct intel_gvt *gvt,
const char *name);
- bool (*get_gvt_attrs)(struct attribute ***type_attrs,
- struct attribute_group ***intel_vgpu_type_groups);
+ bool (*get_gvt_attrs)(struct attribute_group ***intel_vgpu_type_groups);
int (*vgpu_query_plane)(struct intel_vgpu *vgpu, void *);
int (*vgpu_get_dmabuf)(struct intel_vgpu *vgpu, unsigned int);
int (*write_protect_handler)(struct intel_vgpu *, u64, void *,
@@ -586,14 +566,14 @@ enum {
GVT_FAILSAFE_GUEST_ERR,
};
-static inline void mmio_hw_access_pre(struct drm_i915_private *dev_priv)
+static inline void mmio_hw_access_pre(struct intel_gt *gt)
{
- intel_runtime_pm_get(&dev_priv->runtime_pm);
+ intel_runtime_pm_get(gt->uncore->rpm);
}
-static inline void mmio_hw_access_post(struct drm_i915_private *dev_priv)
+static inline void mmio_hw_access_post(struct intel_gt *gt)
{
- intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
+ intel_runtime_pm_put_unchecked(gt->uncore->rpm);
}
/**
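/*
 * [Editor's note -- illustrative sketch, not part of the patch.]
 * Contract for the gt-based helpers above: every raw register access from
 * GVT must be bracketed by a runtime-pm wakeref on the GT.
 * example_read_hw() is hypothetical.
 */
static u32 example_read_hw(struct intel_gt *gt, i915_reg_t reg)
{
	u32 val;

	mmio_hw_access_pre(gt);		/* intel_runtime_pm_get() */
	val = intel_uncore_read(gt->uncore, reg);
	mmio_hw_access_post(gt);	/* intel_runtime_pm_put_unchecked() */

	return val;
}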
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index ae6700dc9d73..0182e2a5acff 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -49,15 +49,17 @@
unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt)
{
- if (IS_BROADWELL(gvt->dev_priv))
+ struct drm_i915_private *i915 = gvt->gt->i915;
+
+ if (IS_BROADWELL(i915))
return D_BDW;
- else if (IS_SKYLAKE(gvt->dev_priv))
+ else if (IS_SKYLAKE(i915))
return D_SKL;
- else if (IS_KABYLAKE(gvt->dev_priv))
+ else if (IS_KABYLAKE(i915))
return D_KBL;
- else if (IS_BROXTON(gvt->dev_priv))
+ else if (IS_BROXTON(i915))
return D_BXT;
- else if (IS_COFFEELAKE(gvt->dev_priv))
+ else if (IS_COFFEELAKE(i915))
return D_CFL;
return 0;
@@ -142,25 +144,25 @@ static int new_mmio_info(struct intel_gvt *gvt,
}
/**
- * intel_gvt_render_mmio_to_ring_id - convert a mmio offset into ring id
+ * intel_gvt_render_mmio_to_engine - convert a mmio offset into the engine
* @gvt: a GVT device
* @offset: register offset
*
* Returns:
- * Ring ID on success, negative error code if failed.
+ * The engine owning the mmio page that contains the offset, or NULL
+ * if the offset does not fall within any engine's mmio page.
*/
-int intel_gvt_render_mmio_to_ring_id(struct intel_gvt *gvt,
- unsigned int offset)
+const struct intel_engine_cs *
+intel_gvt_render_mmio_to_engine(struct intel_gvt *gvt, unsigned int offset)
{
- enum intel_engine_id id;
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
offset &= ~GENMASK(11, 0);
- for_each_engine(engine, gvt->dev_priv, id) {
+ for_each_engine(engine, gvt->gt, id)
if (engine->mmio_base == offset)
- return id;
- }
- return -ENODEV;
+ return engine;
+
+ return NULL;
}
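/*
 * [Editor's note -- illustrative sketch, not part of the patch.]
 * The GENMASK(11, 0) clear above works because each engine owns a 4KiB
 * page of registers at engine->mmio_base; masking the low 12 bits of any
 * register offset yields the owning engine's base. Hypothetical self-check:
 */
static bool example_offset_in_engine_page(const struct intel_engine_cs *engine,
					  u32 offset)
{
	return (offset & ~GENMASK(11, 0)) == engine->mmio_base;
}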
#define offset_to_fence_num(offset) \
@@ -217,7 +219,7 @@ static int gamw_echo_dev_rw_ia_write(struct intel_vgpu *vgpu,
{
u32 ips = (*(u32 *)p_data) & GAMW_ECO_ENABLE_64K_IPS_FIELD;
- if (INTEL_GEN(vgpu->gvt->dev_priv) <= 10) {
+ if (INTEL_GEN(vgpu->gvt->gt->i915) <= 10) {
if (ips == GAMW_ECO_ENABLE_64K_IPS_FIELD)
gvt_dbg_core("vgpu%d: ips enabled\n", vgpu->id);
else if (!ips)
@@ -253,7 +255,7 @@ static int fence_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
static int fence_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
void *p_data, unsigned int bytes)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct intel_gvt *gvt = vgpu->gvt;
unsigned int fence_num = offset_to_fence_num(off);
int ret;
@@ -262,10 +264,10 @@ static int fence_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
return ret;
write_vreg(vgpu, off, p_data, bytes);
- mmio_hw_access_pre(dev_priv);
+ mmio_hw_access_pre(gvt->gt);
intel_vgpu_write_fence(vgpu, fence_num,
vgpu_vreg64(vgpu, fence_num_to_offset(fence_num)));
- mmio_hw_access_post(dev_priv);
+ mmio_hw_access_post(gvt->gt);
return 0;
}
@@ -283,7 +285,7 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu,
old = vgpu_vreg(vgpu, offset);
new = CALC_MODE_MASK_REG(old, *(u32 *)p_data);
- if (INTEL_GEN(vgpu->gvt->dev_priv) >= 9) {
+ if (INTEL_GEN(vgpu->gvt->gt->i915) >= 9) {
switch (offset) {
case FORCEWAKE_RENDER_GEN9_REG:
ack_reg_offset = FORCEWAKE_ACK_RENDER_GEN9_REG;
@@ -345,7 +347,7 @@ static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
gvt_dbg_mmio("vgpu%d: request GUC Reset\n", vgpu->id);
vgpu_vreg_t(vgpu, GUC_STATUS) |= GS_MIA_IN_RESET;
}
- engine_mask &= INTEL_INFO(vgpu->gvt->dev_priv)->engine_mask;
+ engine_mask &= INTEL_INFO(vgpu->gvt->gt->i915)->engine_mask;
}
/* vgpu_lock already hold by emulate mmio r/w */
@@ -492,7 +494,7 @@ static i915_reg_t force_nonpriv_white_list[] = {
};
/* a simple bsearch */
-static inline bool in_whitelist(unsigned int reg)
+static inline bool in_whitelist(u32 reg)
{
int left = 0, right = ARRAY_SIZE(force_nonpriv_white_list);
i915_reg_t *array = force_nonpriv_white_list;
@@ -514,26 +516,21 @@ static int force_nonpriv_write(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
u32 reg_nonpriv = (*(u32 *)p_data) & REG_GENMASK(25, 2);
- int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);
- u32 ring_base;
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- int ret = -EINVAL;
-
- if ((bytes != 4) || ((offset & (bytes - 1)) != 0) || ring_id < 0) {
- gvt_err("vgpu(%d) ring %d Invalid FORCE_NONPRIV offset %x(%dB)\n",
- vgpu->id, ring_id, offset, bytes);
- return ret;
- }
+ const struct intel_engine_cs *engine =
+ intel_gvt_render_mmio_to_engine(vgpu->gvt, offset);
- ring_base = dev_priv->engine[ring_id]->mmio_base;
+ if (bytes != 4 || !IS_ALIGNED(offset, bytes) || !engine) {
+ gvt_err("vgpu(%d) Invalid FORCE_NONPRIV offset %x(%dB)\n",
+ vgpu->id, offset, bytes);
+ return -EINVAL;
+ }
- if (in_whitelist(reg_nonpriv) ||
- reg_nonpriv == i915_mmio_reg_offset(RING_NOPID(ring_base))) {
- ret = intel_vgpu_default_mmio_write(vgpu, offset, p_data,
- bytes);
- } else
+ if (!in_whitelist(reg_nonpriv) &&
+ reg_nonpriv != i915_mmio_reg_offset(RING_NOPID(engine->mmio_base))) {
gvt_err("vgpu(%d) Invalid FORCE_NONPRIV write %x at offset %x\n",
- vgpu->id, *(u32 *)p_data, offset);
+ vgpu->id, reg_nonpriv, offset);
+ } else
+ intel_vgpu_default_mmio_write(vgpu, offset, p_data, bytes);
return 0;
}
@@ -756,7 +753,7 @@ static int south_chicken2_mmio_write(struct intel_vgpu *vgpu,
static int pri_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
u32 pipe = DSPSURF_TO_PIPE(offset);
int event = SKL_FLIP_EVENT(pipe, PLANE_PRIMARY);
@@ -797,7 +794,7 @@ static int reg50080_mmio_write(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data,
unsigned int bytes)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
enum pipe pipe = REG_50080_TO_PIPE(offset);
enum plane_id plane = REG_50080_TO_PLANE(offset);
int event = SKL_FLIP_EVENT(pipe, plane);
@@ -821,7 +818,7 @@ static int reg50080_mmio_write(struct intel_vgpu *vgpu,
static int trigger_aux_channel_interrupt(struct intel_vgpu *vgpu,
unsigned int reg)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
enum intel_gvt_event_type event;
if (reg == i915_mmio_reg_offset(DP_AUX_CH_CTL(AUX_CH_A)))
@@ -836,7 +833,7 @@ static int trigger_aux_channel_interrupt(struct intel_vgpu *vgpu,
reg == i915_mmio_reg_offset(DP_AUX_CH_CTL(AUX_CH_D)))
event = AUX_CHANNEL_D;
else {
- WARN_ON(true);
+ drm_WARN_ON(&dev_priv->drm, true);
return -EINVAL;
}
@@ -924,11 +921,11 @@ static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu,
write_vreg(vgpu, offset, p_data, bytes);
data = vgpu_vreg(vgpu, offset);
- if ((INTEL_GEN(vgpu->gvt->dev_priv) >= 9)
+ if ((INTEL_GEN(vgpu->gvt->gt->i915) >= 9)
&& offset != _REG_SKL_DP_AUX_CH_CTL(port_index)) {
/* SKL DPB/C/D aux ctl register changed */
return 0;
- } else if (IS_BROADWELL(vgpu->gvt->dev_priv) &&
+ } else if (IS_BROADWELL(vgpu->gvt->gt->i915) &&
offset != _REG_HSW_DP_AUX_CH_CTL(port_index)) {
/* write to the data registers */
return 0;
@@ -1244,8 +1241,7 @@ static int handle_g2v_notification(struct intel_vgpu *vgpu, int notification)
static int send_display_ready_uevent(struct intel_vgpu *vgpu, int ready)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj;
+ struct kobject *kobj = &vgpu->gvt->gt->i915->drm.primary->kdev->kobj;
char *env[3] = {NULL, NULL, NULL};
char vmid_str[20];
char display_ready_str[20];
@@ -1306,13 +1302,15 @@ static int pvinfo_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
static int pf_write(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
u32 val = *(u32 *)p_data;
if ((offset == _PS_1A_CTRL || offset == _PS_2A_CTRL ||
offset == _PS_1B_CTRL || offset == _PS_2B_CTRL ||
offset == _PS_1C_CTRL) && (val & PS_PLANE_SEL_MASK) != 0) {
- WARN_ONCE(true, "VM(%d): guest is trying to scaling a plane\n",
- vgpu->id);
+ drm_WARN_ONCE(&i915->drm, true,
+			      "VM(%d): guest is trying to scale a plane\n",
+ vgpu->id);
return 0;
}
@@ -1360,13 +1358,15 @@ static int fpga_dbg_mmio_write(struct intel_vgpu *vgpu,
static int dma_ctrl_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
u32 mode;
write_vreg(vgpu, offset, p_data, bytes);
mode = vgpu_vreg(vgpu, offset);
if (GFX_MODE_BIT_SET_IN_MASK(mode, START_DMA)) {
- WARN_ONCE(1, "VM(%d): iGVT-g doesn't support GuC\n",
+ drm_WARN_ONCE(&i915->drm, 1,
+ "VM(%d): iGVT-g doesn't support GuC\n",
vgpu->id);
return 0;
}
@@ -1377,10 +1377,12 @@ static int dma_ctrl_write(struct intel_vgpu *vgpu, unsigned int offset,
static int gen9_trtte_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
u32 trtte = *(u32 *)p_data;
if ((trtte & 1) && (trtte & (1 << 1)) == 0) {
- WARN(1, "VM(%d): Use physical address for TRTT!\n",
+ drm_WARN(&i915->drm, 1,
+ "VM(%d): Use physical address for TRTT!\n",
vgpu->id);
return -EINVAL;
}
@@ -1427,9 +1429,9 @@ static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset,
switch (cmd) {
case GEN9_PCODE_READ_MEM_LATENCY:
- if (IS_SKYLAKE(vgpu->gvt->dev_priv)
- || IS_KABYLAKE(vgpu->gvt->dev_priv)
- || IS_COFFEELAKE(vgpu->gvt->dev_priv)) {
+ if (IS_SKYLAKE(vgpu->gvt->gt->i915) ||
+ IS_KABYLAKE(vgpu->gvt->gt->i915) ||
+ IS_COFFEELAKE(vgpu->gvt->gt->i915)) {
/**
* "Read memory latency" command on gen9.
* Below memory latency values are read
@@ -1439,7 +1441,7 @@ static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset,
*data0 = 0x1e1a1100;
else
*data0 = 0x61514b3d;
- } else if (IS_BROXTON(vgpu->gvt->dev_priv)) {
+ } else if (IS_BROXTON(vgpu->gvt->gt->i915)) {
/**
* "Read memory latency" command on gen9.
* Below memory latency values are read
@@ -1452,9 +1454,9 @@ static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset,
}
break;
case SKL_PCODE_CDCLK_CONTROL:
- if (IS_SKYLAKE(vgpu->gvt->dev_priv)
- || IS_KABYLAKE(vgpu->gvt->dev_priv)
- || IS_COFFEELAKE(vgpu->gvt->dev_priv))
+ if (IS_SKYLAKE(vgpu->gvt->gt->i915) ||
+ IS_KABYLAKE(vgpu->gvt->gt->i915) ||
+ IS_COFFEELAKE(vgpu->gvt->gt->i915))
*data0 = SKL_CDCLK_READY_FOR_CHANGE;
break;
case GEN6_PCODE_READ_RC6VIDS:
@@ -1478,24 +1480,26 @@ static int hws_pga_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
u32 value = *(u32 *)p_data;
- int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);
+ const struct intel_engine_cs *engine =
+ intel_gvt_render_mmio_to_engine(vgpu->gvt, offset);
if (!intel_gvt_ggtt_validate_range(vgpu, value, I915_GTT_PAGE_SIZE)) {
gvt_vgpu_err("write invalid HWSP address, reg:0x%x, value:0x%x\n",
offset, value);
return -EINVAL;
}
+
/*
* Need to emulate all the HWSP register write to ensure host can
* update the VM CSB status correctly. Here listed registers can
* support BDW, SKL or other platforms with same HWSP registers.
*/
- if (unlikely(ring_id < 0 || ring_id >= I915_NUM_ENGINES)) {
+ if (unlikely(!engine)) {
gvt_vgpu_err("access unknown hardware status page register:0x%x\n",
offset);
return -EINVAL;
}
- vgpu->hws_pga[ring_id] = value;
+ vgpu->hws_pga[engine->id] = value;
gvt_dbg_mmio("VM(%d) write: 0x%x to HWSP: 0x%x\n",
vgpu->id, value, offset);
@@ -1507,7 +1511,7 @@ static int skl_power_well_ctl_write(struct intel_vgpu *vgpu,
{
u32 v = *(u32 *)p_data;
- if (IS_BROXTON(vgpu->gvt->dev_priv))
+ if (IS_BROXTON(vgpu->gvt->gt->i915))
v &= (1 << 31) | (1 << 29);
else
v &= (1 << 31) | (1 << 29) | (1 << 9) |
@@ -1654,26 +1658,24 @@ static int mmio_read_from_hw(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
struct intel_gvt *gvt = vgpu->gvt;
- struct drm_i915_private *dev_priv = gvt->dev_priv;
- int ring_id;
- u32 ring_base;
+ const struct intel_engine_cs *engine =
+ intel_gvt_render_mmio_to_engine(gvt, offset);
- ring_id = intel_gvt_render_mmio_to_ring_id(gvt, offset);
/**
* Read HW reg in following case
* a. the offset isn't a ring mmio
* b. the offset's ring is running on hw.
* c. the offset is ring time stamp mmio
*/
- if (ring_id >= 0)
- ring_base = dev_priv->engine[ring_id]->mmio_base;
-
- if (ring_id < 0 || vgpu == gvt->scheduler.engine_owner[ring_id] ||
- offset == i915_mmio_reg_offset(RING_TIMESTAMP(ring_base)) ||
- offset == i915_mmio_reg_offset(RING_TIMESTAMP_UDW(ring_base))) {
- mmio_hw_access_pre(dev_priv);
- vgpu_vreg(vgpu, offset) = I915_READ(_MMIO(offset));
- mmio_hw_access_post(dev_priv);
+
+ if (!engine ||
+ vgpu == gvt->scheduler.engine_owner[engine->id] ||
+ offset == i915_mmio_reg_offset(RING_TIMESTAMP(engine->mmio_base)) ||
+ offset == i915_mmio_reg_offset(RING_TIMESTAMP_UDW(engine->mmio_base))) {
+ mmio_hw_access_pre(gvt->gt);
+ vgpu_vreg(vgpu, offset) =
+ intel_uncore_read(gvt->gt->uncore, _MMIO(offset));
+ mmio_hw_access_post(gvt->gt);
}
return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
@@ -1682,22 +1684,23 @@ static int mmio_read_from_hw(struct intel_vgpu *vgpu,
static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
- int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
+	const struct intel_engine_cs *engine =
+		intel_gvt_render_mmio_to_engine(vgpu->gvt, offset);
struct intel_vgpu_execlist *execlist;
u32 data = *(u32 *)p_data;
int ret = 0;
- if (WARN_ON(ring_id < 0 || ring_id >= I915_NUM_ENGINES))
+ if (drm_WARN_ON(&i915->drm, !engine))
return -EINVAL;
- execlist = &vgpu->submission.execlist[ring_id];
+ execlist = &vgpu->submission.execlist[engine->id];
execlist->elsp_dwords.data[3 - execlist->elsp_dwords.index] = data;
if (execlist->elsp_dwords.index == 3) {
- ret = intel_vgpu_submit_execlist(vgpu, ring_id);
+ ret = intel_vgpu_submit_execlist(vgpu, engine);
if(ret)
- gvt_vgpu_err("fail submit workload on ring %d\n",
- ring_id);
+			gvt_vgpu_err("failed to submit workload on ring %s\n",
+ engine->name);
}
++execlist->elsp_dwords.index;
@@ -1709,12 +1712,13 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
u32 data = *(u32 *)p_data;
- int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);
+ const struct intel_engine_cs *engine =
+ intel_gvt_render_mmio_to_engine(vgpu->gvt, offset);
bool enable_execlist;
int ret;
(*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(1);
- if (IS_COFFEELAKE(vgpu->gvt->dev_priv))
+ if (IS_COFFEELAKE(vgpu->gvt->gt->i915))
(*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(2);
write_vreg(vgpu, offset, p_data, bytes);
@@ -1723,7 +1727,7 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
return 0;
}
- if (IS_COFFEELAKE(vgpu->gvt->dev_priv) &&
+ if (IS_COFFEELAKE(vgpu->gvt->gt->i915) &&
data & _MASKED_BIT_ENABLE(2)) {
enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
return 0;
@@ -1743,16 +1747,16 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
|| (data & _MASKED_BIT_DISABLE(GFX_RUN_LIST_ENABLE))) {
enable_execlist = !!(data & GFX_RUN_LIST_ENABLE);
- gvt_dbg_core("EXECLIST %s on ring %d\n",
- (enable_execlist ? "enabling" : "disabling"),
- ring_id);
+ gvt_dbg_core("EXECLIST %s on ring %s\n",
+ (enable_execlist ? "enabling" : "disabling"),
+ engine->name);
if (!enable_execlist)
return 0;
ret = intel_vgpu_select_submission_ops(vgpu,
- BIT(ring_id),
- INTEL_VGPU_EXECLIST_SUBMISSION);
+ engine->mask,
+ INTEL_VGPU_EXECLIST_SUBMISSION);
if (ret)
return ret;
@@ -1876,7 +1880,7 @@ static int csfe_chicken1_mmio_write(struct intel_vgpu *vgpu,
static int init_generic_mmio_info(struct intel_gvt *gvt)
{
- struct drm_i915_private *dev_priv = gvt->dev_priv;
+ struct drm_i915_private *dev_priv = gvt->gt->i915;
int ret;
MMIO_RING_DFH(RING_IMR, D_ALL, F_CMD_ACCESS, NULL,
@@ -2693,7 +2697,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
static int init_bdw_mmio_info(struct intel_gvt *gvt)
{
- struct drm_i915_private *dev_priv = gvt->dev_priv;
+ struct drm_i915_private *dev_priv = gvt->gt->i915;
int ret;
MMIO_DH(GEN8_GT_IMR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
@@ -2882,7 +2886,7 @@ static int init_bdw_mmio_info(struct intel_gvt *gvt)
static int init_skl_mmio_info(struct intel_gvt *gvt)
{
- struct drm_i915_private *dev_priv = gvt->dev_priv;
+ struct drm_i915_private *dev_priv = gvt->gt->i915;
int ret;
MMIO_DH(FORCEWAKE_RENDER_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write);
@@ -3131,7 +3135,7 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
static int init_bxt_mmio_info(struct intel_gvt *gvt)
{
- struct drm_i915_private *dev_priv = gvt->dev_priv;
+ struct drm_i915_private *dev_priv = gvt->gt->i915;
int ret;
MMIO_F(_MMIO(0x80000), 0x3000, 0, 0, 0, D_BXT, NULL, NULL);
@@ -3367,7 +3371,7 @@ static struct gvt_mmio_block mmio_blocks[] = {
int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
{
struct intel_gvt_device_info *info = &gvt->device_info;
- struct drm_i915_private *dev_priv = gvt->dev_priv;
+ struct drm_i915_private *i915 = gvt->gt->i915;
int size = info->mmio_size / 4 * sizeof(*gvt->mmio.mmio_attribute);
int ret;
@@ -3379,20 +3383,20 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
if (ret)
goto err;
- if (IS_BROADWELL(dev_priv)) {
+ if (IS_BROADWELL(i915)) {
ret = init_bdw_mmio_info(gvt);
if (ret)
goto err;
- } else if (IS_SKYLAKE(dev_priv)
- || IS_KABYLAKE(dev_priv)
- || IS_COFFEELAKE(dev_priv)) {
+ } else if (IS_SKYLAKE(i915) ||
+ IS_KABYLAKE(i915) ||
+ IS_COFFEELAKE(i915)) {
ret = init_bdw_mmio_info(gvt);
if (ret)
goto err;
ret = init_skl_mmio_info(gvt);
if (ret)
goto err;
- } else if (IS_BROXTON(dev_priv)) {
+ } else if (IS_BROXTON(i915)) {
ret = init_bdw_mmio_info(gvt);
if (ret)
goto err;
@@ -3541,13 +3545,14 @@ bool intel_gvt_in_force_nonpriv_whitelist(struct intel_gvt *gvt,
int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
void *pdata, unsigned int bytes, bool is_read)
{
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
struct intel_gvt *gvt = vgpu->gvt;
struct intel_gvt_mmio_info *mmio_info;
struct gvt_mmio_block *mmio_block;
gvt_mmio_func func;
int ret;
- if (WARN_ON(bytes > 8))
+ if (drm_WARN_ON(&i915->drm, bytes > 8))
return -EINVAL;
/*
diff --git a/drivers/gpu/drm/i915/gvt/interrupt.c b/drivers/gpu/drm/i915/gvt/interrupt.c
index 11accd3e1023..540017fed908 100644
--- a/drivers/gpu/drm/i915/gvt/interrupt.c
+++ b/drivers/gpu/drm/i915/gvt/interrupt.c
@@ -245,6 +245,7 @@ int intel_vgpu_reg_ier_handler(struct intel_vgpu *vgpu,
unsigned int reg, void *p_data, unsigned int bytes)
{
struct intel_gvt *gvt = vgpu->gvt;
+ struct drm_i915_private *i915 = gvt->gt->i915;
struct intel_gvt_irq_ops *ops = gvt->irq.ops;
struct intel_gvt_irq_info *info;
u32 ier = *(u32 *)p_data;
@@ -255,7 +256,7 @@ int intel_vgpu_reg_ier_handler(struct intel_vgpu *vgpu,
vgpu_vreg(vgpu, reg) = ier;
info = regbase_to_irq_info(gvt, ier_to_regbase(reg));
- if (WARN_ON(!info))
+ if (drm_WARN_ON(&i915->drm, !info))
return -EINVAL;
if (info->has_upstream_irq)
@@ -282,6 +283,7 @@ int intel_vgpu_reg_ier_handler(struct intel_vgpu *vgpu,
int intel_vgpu_reg_iir_handler(struct intel_vgpu *vgpu, unsigned int reg,
void *p_data, unsigned int bytes)
{
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
struct intel_gvt_irq_info *info = regbase_to_irq_info(vgpu->gvt,
iir_to_regbase(reg));
u32 iir = *(u32 *)p_data;
@@ -289,7 +291,7 @@ int intel_vgpu_reg_iir_handler(struct intel_vgpu *vgpu, unsigned int reg,
trace_write_ir(vgpu->id, "IIR", reg, iir, vgpu_vreg(vgpu, reg),
(vgpu_vreg(vgpu, reg) ^ iir));
- if (WARN_ON(!info))
+ if (drm_WARN_ON(&i915->drm, !info))
return -EINVAL;
vgpu_vreg(vgpu, reg) &= ~iir;
@@ -319,6 +321,7 @@ static struct intel_gvt_irq_map gen8_irq_map[] = {
static void update_upstream_irq(struct intel_vgpu *vgpu,
struct intel_gvt_irq_info *info)
{
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
struct intel_gvt_irq *irq = &vgpu->gvt->irq;
struct intel_gvt_irq_map *map = irq->irq_map;
struct intel_gvt_irq_info *up_irq_info = NULL;
@@ -340,7 +343,8 @@ static void update_upstream_irq(struct intel_vgpu *vgpu,
if (!up_irq_info)
up_irq_info = irq->info[map->up_irq_group];
else
- WARN_ON(up_irq_info != irq->info[map->up_irq_group]);
+ drm_WARN_ON(&i915->drm, up_irq_info !=
+ irq->info[map->up_irq_group]);
bit = map->up_irq_bit;
@@ -350,7 +354,7 @@ static void update_upstream_irq(struct intel_vgpu *vgpu,
clear_bits |= (1 << bit);
}
- if (WARN_ON(!up_irq_info))
+ if (drm_WARN_ON(&i915->drm, !up_irq_info))
return;
if (up_irq_info->group == INTEL_GVT_IRQ_INFO_MASTER) {
@@ -536,7 +540,7 @@ static void gen8_init_irq(
SET_BIT_INFO(irq, 4, VCS_MI_FLUSH_DW, INTEL_GVT_IRQ_INFO_GT1);
SET_BIT_INFO(irq, 8, VCS_AS_CONTEXT_SWITCH, INTEL_GVT_IRQ_INFO_GT1);
- if (HAS_ENGINE(gvt->dev_priv, VCS1)) {
+ if (HAS_ENGINE(gvt->gt->i915, VCS1)) {
SET_BIT_INFO(irq, 16, VCS2_MI_USER_INTERRUPT,
INTEL_GVT_IRQ_INFO_GT1);
SET_BIT_INFO(irq, 20, VCS2_MI_FLUSH_DW,
@@ -568,7 +572,7 @@ static void gen8_init_irq(
SET_BIT_INFO(irq, 22, DP_C_HOTPLUG, INTEL_GVT_IRQ_INFO_PCH);
SET_BIT_INFO(irq, 23, DP_D_HOTPLUG, INTEL_GVT_IRQ_INFO_PCH);
- if (IS_BROADWELL(gvt->dev_priv)) {
+ if (IS_BROADWELL(gvt->gt->i915)) {
SET_BIT_INFO(irq, 25, AUX_CHANNEL_B, INTEL_GVT_IRQ_INFO_PCH);
SET_BIT_INFO(irq, 26, AUX_CHANNEL_C, INTEL_GVT_IRQ_INFO_PCH);
SET_BIT_INFO(irq, 27, AUX_CHANNEL_D, INTEL_GVT_IRQ_INFO_PCH);
@@ -581,7 +585,7 @@ static void gen8_init_irq(
SET_BIT_INFO(irq, 4, PRIMARY_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
SET_BIT_INFO(irq, 5, SPRITE_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
- } else if (INTEL_GEN(gvt->dev_priv) >= 9) {
+ } else if (INTEL_GEN(gvt->gt->i915) >= 9) {
SET_BIT_INFO(irq, 25, AUX_CHANNEL_B, INTEL_GVT_IRQ_INFO_DE_PORT);
SET_BIT_INFO(irq, 26, AUX_CHANNEL_C, INTEL_GVT_IRQ_INFO_DE_PORT);
SET_BIT_INFO(irq, 27, AUX_CHANNEL_D, INTEL_GVT_IRQ_INFO_DE_PORT);
@@ -618,13 +622,14 @@ static struct intel_gvt_irq_ops gen8_irq_ops = {
void intel_vgpu_trigger_virtual_event(struct intel_vgpu *vgpu,
enum intel_gvt_event_type event)
{
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
struct intel_gvt *gvt = vgpu->gvt;
struct intel_gvt_irq *irq = &gvt->irq;
gvt_event_virt_handler_t handler;
struct intel_gvt_irq_ops *ops = gvt->irq.ops;
handler = get_event_virt_handler(irq, event);
- WARN_ON(!handler);
+ drm_WARN_ON(&i915->drm, !handler);
handler(irq, event, vgpu);
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 3259a1fa69e1..074c4efb58eb 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -108,6 +108,36 @@ struct gvt_dma {
struct kref ref;
};
+struct kvmgt_vdev {
+ struct intel_vgpu *vgpu;
+ struct mdev_device *mdev;
+ struct vfio_region *region;
+ int num_regions;
+ struct eventfd_ctx *intx_trigger;
+ struct eventfd_ctx *msi_trigger;
+
+ /*
+	 * Two caches are used to avoid mapping duplicated pages (e.g.
+	 * scratch pages). This helps to reduce DMA setup overhead.
+ */
+ struct rb_root gfn_cache;
+ struct rb_root dma_addr_cache;
+ unsigned long nr_cache_entries;
+ struct mutex cache_lock;
+
+ struct notifier_block iommu_notifier;
+ struct notifier_block group_notifier;
+ struct kvm *kvm;
+ struct work_struct release_work;
+ atomic_t released;
+ struct vfio_device *vfio_device;
+};
+
+static inline struct kvmgt_vdev *kvmgt_vdev(struct intel_vgpu *vgpu)
+{
+ return intel_vgpu_vdev(vgpu);
+}
+
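/*
 * [Editor's note -- illustrative sketch, not part of the patch.]
 * With intel_vgpu carrying only an opaque void *vdev, each hypervisor
 * backend allocates and interprets its own state; kvmgt_vdev() casts it
 * back. example_attach_vdev() is hypothetical -- the patch's actual
 * allocation point is outside the hunks shown here.
 */
static int example_attach_vdev(struct intel_vgpu *vgpu)
{
	struct kvmgt_vdev *vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);

	if (!vdev)
		return -ENOMEM;

	vdev->vgpu = vgpu;
	vgpu->vdev = vdev;	/* read back later via kvmgt_vdev(vgpu) */
	return 0;
}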
static inline bool handle_valid(unsigned long handle)
{
return !!(handle & ~0xff);
@@ -120,6 +150,7 @@ static bool kvmgt_guest_exit(struct kvmgt_guest_info *info);
static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
unsigned long size)
{
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
int total_pages;
int npage;
int ret;
@@ -129,8 +160,8 @@ static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
for (npage = 0; npage < total_pages; npage++) {
unsigned long cur_gfn = gfn + npage;
- ret = vfio_unpin_pages(mdev_dev(vgpu->vdev.mdev), &cur_gfn, 1);
- WARN_ON(ret != 1);
+ ret = vfio_unpin_pages(mdev_dev(kvmgt_vdev(vgpu)->mdev), &cur_gfn, 1);
+ drm_WARN_ON(&i915->drm, ret != 1);
}
}
@@ -152,7 +183,7 @@ static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
unsigned long cur_gfn = gfn + npage;
unsigned long pfn;
- ret = vfio_pin_pages(mdev_dev(vgpu->vdev.mdev), &cur_gfn, 1,
+ ret = vfio_pin_pages(mdev_dev(kvmgt_vdev(vgpu)->mdev), &cur_gfn, 1,
IOMMU_READ | IOMMU_WRITE, &pfn);
if (ret != 1) {
gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx, ret %d\n",
@@ -187,7 +218,7 @@ err:
static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn,
dma_addr_t *dma_addr, unsigned long size)
{
- struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
+ struct device *dev = &vgpu->gvt->gt->i915->drm.pdev->dev;
struct page *page = NULL;
int ret;
@@ -210,7 +241,7 @@ static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn,
static void gvt_dma_unmap_page(struct intel_vgpu *vgpu, unsigned long gfn,
dma_addr_t dma_addr, unsigned long size)
{
- struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
+ struct device *dev = &vgpu->gvt->gt->i915->drm.pdev->dev;
dma_unmap_page(dev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
gvt_unpin_guest_page(vgpu, gfn, size);
@@ -219,7 +250,7 @@ static void gvt_dma_unmap_page(struct intel_vgpu *vgpu, unsigned long gfn,
static struct gvt_dma *__gvt_cache_find_dma_addr(struct intel_vgpu *vgpu,
dma_addr_t dma_addr)
{
- struct rb_node *node = vgpu->vdev.dma_addr_cache.rb_node;
+ struct rb_node *node = kvmgt_vdev(vgpu)->dma_addr_cache.rb_node;
struct gvt_dma *itr;
while (node) {
@@ -237,7 +268,7 @@ static struct gvt_dma *__gvt_cache_find_dma_addr(struct intel_vgpu *vgpu,
static struct gvt_dma *__gvt_cache_find_gfn(struct intel_vgpu *vgpu, gfn_t gfn)
{
- struct rb_node *node = vgpu->vdev.gfn_cache.rb_node;
+ struct rb_node *node = kvmgt_vdev(vgpu)->gfn_cache.rb_node;
struct gvt_dma *itr;
while (node) {
@@ -258,6 +289,7 @@ static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
{
struct gvt_dma *new, *itr;
struct rb_node **link, *parent = NULL;
+ struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
new = kzalloc(sizeof(struct gvt_dma), GFP_KERNEL);
if (!new)
@@ -270,7 +302,7 @@ static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
kref_init(&new->ref);
/* gfn_cache maps gfn to struct gvt_dma. */
- link = &vgpu->vdev.gfn_cache.rb_node;
+ link = &vdev->gfn_cache.rb_node;
while (*link) {
parent = *link;
itr = rb_entry(parent, struct gvt_dma, gfn_node);
@@ -281,11 +313,11 @@ static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
link = &parent->rb_right;
}
rb_link_node(&new->gfn_node, parent, link);
- rb_insert_color(&new->gfn_node, &vgpu->vdev.gfn_cache);
+ rb_insert_color(&new->gfn_node, &vdev->gfn_cache);
/* dma_addr_cache maps dma addr to struct gvt_dma. */
parent = NULL;
- link = &vgpu->vdev.dma_addr_cache.rb_node;
+ link = &vdev->dma_addr_cache.rb_node;
while (*link) {
parent = *link;
itr = rb_entry(parent, struct gvt_dma, dma_addr_node);
@@ -296,46 +328,51 @@ static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
link = &parent->rb_right;
}
rb_link_node(&new->dma_addr_node, parent, link);
- rb_insert_color(&new->dma_addr_node, &vgpu->vdev.dma_addr_cache);
+ rb_insert_color(&new->dma_addr_node, &vdev->dma_addr_cache);
- vgpu->vdev.nr_cache_entries++;
+ vdev->nr_cache_entries++;
return 0;
}
static void __gvt_cache_remove_entry(struct intel_vgpu *vgpu,
struct gvt_dma *entry)
{
- rb_erase(&entry->gfn_node, &vgpu->vdev.gfn_cache);
- rb_erase(&entry->dma_addr_node, &vgpu->vdev.dma_addr_cache);
+ struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
+
+ rb_erase(&entry->gfn_node, &vdev->gfn_cache);
+ rb_erase(&entry->dma_addr_node, &vdev->dma_addr_cache);
kfree(entry);
- vgpu->vdev.nr_cache_entries--;
+ vdev->nr_cache_entries--;
}
static void gvt_cache_destroy(struct intel_vgpu *vgpu)
{
struct gvt_dma *dma;
struct rb_node *node = NULL;
+ struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
for (;;) {
- mutex_lock(&vgpu->vdev.cache_lock);
- node = rb_first(&vgpu->vdev.gfn_cache);
+ mutex_lock(&vdev->cache_lock);
+ node = rb_first(&vdev->gfn_cache);
if (!node) {
- mutex_unlock(&vgpu->vdev.cache_lock);
+ mutex_unlock(&vdev->cache_lock);
break;
}
dma = rb_entry(node, struct gvt_dma, gfn_node);
gvt_dma_unmap_page(vgpu, dma->gfn, dma->dma_addr, dma->size);
__gvt_cache_remove_entry(vgpu, dma);
- mutex_unlock(&vgpu->vdev.cache_lock);
+ mutex_unlock(&vdev->cache_lock);
}
}
static void gvt_cache_init(struct intel_vgpu *vgpu)
{
- vgpu->vdev.gfn_cache = RB_ROOT;
- vgpu->vdev.dma_addr_cache = RB_ROOT;
- vgpu->vdev.nr_cache_entries = 0;
- mutex_init(&vgpu->vdev.cache_lock);
+ struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
+
+ vdev->gfn_cache = RB_ROOT;
+ vdev->dma_addr_cache = RB_ROOT;
+ vdev->nr_cache_entries = 0;
+ mutex_init(&vdev->cache_lock);
}
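/*
 * [Editor's note -- illustrative sketch, not part of the patch.]
 * The cache above indexes each gvt_dma node twice -- by gfn and by dma
 * address -- so either key resolves to the same entry under cache_lock.
 * example_cached_dma_addr() is hypothetical; it pairs with
 * __gvt_cache_find_gfn() shown earlier.
 */
static dma_addr_t example_cached_dma_addr(struct intel_vgpu *vgpu, gfn_t gfn)
{
	struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
	struct gvt_dma *entry;
	dma_addr_t addr = 0;

	mutex_lock(&vdev->cache_lock);
	entry = __gvt_cache_find_gfn(vgpu, gfn);
	if (entry)
		addr = entry->dma_addr;
	mutex_unlock(&vdev->cache_lock);

	return addr;
}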
static void kvmgt_protect_table_init(struct kvmgt_guest_info *info)
@@ -409,16 +446,18 @@ static void kvmgt_protect_table_del(struct kvmgt_guest_info *info,
static size_t intel_vgpu_reg_rw_opregion(struct intel_vgpu *vgpu, char *buf,
size_t count, loff_t *ppos, bool iswrite)
{
+ struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) -
VFIO_PCI_NUM_REGIONS;
- void *base = vgpu->vdev.region[i].data;
+ void *base = vdev->region[i].data;
loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
- if (pos >= vgpu->vdev.region[i].size || iswrite) {
+
+ if (pos >= vdev->region[i].size || iswrite) {
gvt_vgpu_err("invalid op or offset for Intel vgpu OpRegion\n");
return -EINVAL;
}
- count = min(count, (size_t)(vgpu->vdev.region[i].size - pos));
+ count = min(count, (size_t)(vdev->region[i].size - pos));
memcpy(buf, base + pos, count);
return count;
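
The vfio file offset in these handlers packs the region index into its high bits. For reference, the vfio-pci-style packing that kvmgt mirrors is usually defined like this (shown for context, not part of this patch):

	#define VFIO_PCI_OFFSET_SHIFT		40
	#define VFIO_PCI_OFFSET_TO_INDEX(off)	((off) >> VFIO_PCI_OFFSET_SHIFT)
	#define VFIO_PCI_INDEX_TO_OFFSET(index)	((u64)(index) << VFIO_PCI_OFFSET_SHIFT)
	#define VFIO_PCI_OFFSET_MASK		(((u64)(1) << VFIO_PCI_OFFSET_SHIFT) - 1)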
@@ -512,7 +551,7 @@ static size_t intel_vgpu_reg_rw_edid(struct intel_vgpu *vgpu, char *buf,
unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) -
VFIO_PCI_NUM_REGIONS;
struct vfio_edid_region *region =
- (struct vfio_edid_region *)vgpu->vdev.region[i].data;
+ (struct vfio_edid_region *)kvmgt_vdev(vgpu)->region[i].data;
loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
if (pos < region->vfio_edid_regs.edid_offset) {
@@ -544,32 +583,34 @@ static int intel_vgpu_register_reg(struct intel_vgpu *vgpu,
const struct intel_vgpu_regops *ops,
size_t size, u32 flags, void *data)
{
+ struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
struct vfio_region *region;
- region = krealloc(vgpu->vdev.region,
- (vgpu->vdev.num_regions + 1) * sizeof(*region),
+ region = krealloc(vdev->region,
+ (vdev->num_regions + 1) * sizeof(*region),
GFP_KERNEL);
if (!region)
return -ENOMEM;
- vgpu->vdev.region = region;
- vgpu->vdev.region[vgpu->vdev.num_regions].type = type;
- vgpu->vdev.region[vgpu->vdev.num_regions].subtype = subtype;
- vgpu->vdev.region[vgpu->vdev.num_regions].ops = ops;
- vgpu->vdev.region[vgpu->vdev.num_regions].size = size;
- vgpu->vdev.region[vgpu->vdev.num_regions].flags = flags;
- vgpu->vdev.region[vgpu->vdev.num_regions].data = data;
- vgpu->vdev.num_regions++;
+ vdev->region = region;
+ vdev->region[vdev->num_regions].type = type;
+ vdev->region[vdev->num_regions].subtype = subtype;
+ vdev->region[vdev->num_regions].ops = ops;
+ vdev->region[vdev->num_regions].size = size;
+ vdev->region[vdev->num_regions].flags = flags;
+ vdev->region[vdev->num_regions].data = data;
+ vdev->num_regions++;
return 0;
}
static int kvmgt_get_vfio_device(void *p_vgpu)
{
struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
+ struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
- vgpu->vdev.vfio_device = vfio_device_get_from_dev(
- mdev_dev(vgpu->vdev.mdev));
- if (!vgpu->vdev.vfio_device) {
+ vdev->vfio_device = vfio_device_get_from_dev(
+ mdev_dev(vdev->mdev));
+ if (!vdev->vfio_device) {
gvt_vgpu_err("failed to get vfio device\n");
return -ENODEV;
}
@@ -637,10 +678,12 @@ static int kvmgt_set_edid(void *p_vgpu, int port_num)
static void kvmgt_put_vfio_device(void *vgpu)
{
- if (WARN_ON(!((struct intel_vgpu *)vgpu)->vdev.vfio_device))
+ struct kvmgt_vdev *vdev = kvmgt_vdev((struct intel_vgpu *)vgpu);
+
+ if (WARN_ON(!vdev->vfio_device))
return;
- vfio_device_put(((struct intel_vgpu *)vgpu)->vdev.vfio_device);
+ vfio_device_put(vdev->vfio_device);
}
static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
@@ -669,9 +712,9 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
goto out;
}
- INIT_WORK(&vgpu->vdev.release_work, intel_vgpu_release_work);
+ INIT_WORK(&kvmgt_vdev(vgpu)->release_work, intel_vgpu_release_work);
- vgpu->vdev.mdev = mdev;
+ kvmgt_vdev(vgpu)->mdev = mdev;
mdev_set_drvdata(mdev, vgpu);
gvt_dbg_core("intel_vgpu_create succeeded for mdev: %s\n",
@@ -696,9 +739,10 @@ static int intel_vgpu_remove(struct mdev_device *mdev)
static int intel_vgpu_iommu_notifier(struct notifier_block *nb,
unsigned long action, void *data)
{
- struct intel_vgpu *vgpu = container_of(nb,
- struct intel_vgpu,
- vdev.iommu_notifier);
+ struct kvmgt_vdev *vdev = container_of(nb,
+ struct kvmgt_vdev,
+ iommu_notifier);
+ struct intel_vgpu *vgpu = vdev->vgpu;
if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
struct vfio_iommu_type1_dma_unmap *unmap = data;
@@ -708,7 +752,7 @@ static int intel_vgpu_iommu_notifier(struct notifier_block *nb,
iov_pfn = unmap->iova >> PAGE_SHIFT;
end_iov_pfn = iov_pfn + unmap->size / PAGE_SIZE;
- mutex_lock(&vgpu->vdev.cache_lock);
+ mutex_lock(&vdev->cache_lock);
for (; iov_pfn < end_iov_pfn; iov_pfn++) {
entry = __gvt_cache_find_gfn(vgpu, iov_pfn);
if (!entry)
@@ -718,7 +762,7 @@ static int intel_vgpu_iommu_notifier(struct notifier_block *nb,
entry->size);
__gvt_cache_remove_entry(vgpu, entry);
}
- mutex_unlock(&vgpu->vdev.cache_lock);
+ mutex_unlock(&vdev->cache_lock);
}
return NOTIFY_OK;
@@ -727,16 +771,16 @@ static int intel_vgpu_iommu_notifier(struct notifier_block *nb,
static int intel_vgpu_group_notifier(struct notifier_block *nb,
unsigned long action, void *data)
{
- struct intel_vgpu *vgpu = container_of(nb,
- struct intel_vgpu,
- vdev.group_notifier);
+ struct kvmgt_vdev *vdev = container_of(nb,
+ struct kvmgt_vdev,
+ group_notifier);
/* the only action we care about */
if (action == VFIO_GROUP_NOTIFY_SET_KVM) {
- vgpu->vdev.kvm = data;
+ vdev->kvm = data;
if (!data)
- schedule_work(&vgpu->vdev.release_work);
+ schedule_work(&vdev->release_work);
}
return NOTIFY_OK;
@@ -745,15 +789,16 @@ static int intel_vgpu_group_notifier(struct notifier_block *nb,
static int intel_vgpu_open(struct mdev_device *mdev)
{
struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
+ struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
unsigned long events;
int ret;
- vgpu->vdev.iommu_notifier.notifier_call = intel_vgpu_iommu_notifier;
- vgpu->vdev.group_notifier.notifier_call = intel_vgpu_group_notifier;
+ vdev->iommu_notifier.notifier_call = intel_vgpu_iommu_notifier;
+ vdev->group_notifier.notifier_call = intel_vgpu_group_notifier;
events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, &events,
- &vgpu->vdev.iommu_notifier);
+ &vdev->iommu_notifier);
if (ret != 0) {
gvt_vgpu_err("vfio_register_notifier for iommu failed: %d\n",
ret);
@@ -762,7 +807,7 @@ static int intel_vgpu_open(struct mdev_device *mdev)
events = VFIO_GROUP_NOTIFY_SET_KVM;
ret = vfio_register_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY, &events,
- &vgpu->vdev.group_notifier);
+ &vdev->group_notifier);
if (ret != 0) {
gvt_vgpu_err("vfio_register_notifier for group failed: %d\n",
ret);
@@ -781,51 +826,56 @@ static int intel_vgpu_open(struct mdev_device *mdev)
intel_gvt_ops->vgpu_activate(vgpu);
- atomic_set(&vgpu->vdev.released, 0);
+ atomic_set(&vdev->released, 0);
return ret;
undo_group:
vfio_unregister_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY,
- &vgpu->vdev.group_notifier);
+ &vdev->group_notifier);
undo_iommu:
vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
- &vgpu->vdev.iommu_notifier);
+ &vdev->iommu_notifier);
out:
return ret;
}
static void intel_vgpu_release_msi_eventfd_ctx(struct intel_vgpu *vgpu)
{
+ struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
struct eventfd_ctx *trigger;
- trigger = vgpu->vdev.msi_trigger;
+ trigger = vdev->msi_trigger;
if (trigger) {
eventfd_ctx_put(trigger);
- vgpu->vdev.msi_trigger = NULL;
+ vdev->msi_trigger = NULL;
}
}
static void __intel_vgpu_release(struct intel_vgpu *vgpu)
{
+ struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
struct kvmgt_guest_info *info;
int ret;
if (!handle_valid(vgpu->handle))
return;
- if (atomic_cmpxchg(&vgpu->vdev.released, 0, 1))
+ if (atomic_cmpxchg(&vdev->released, 0, 1))
return;
intel_gvt_ops->vgpu_release(vgpu);
- ret = vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_IOMMU_NOTIFY,
- &vgpu->vdev.iommu_notifier);
- WARN(ret, "vfio_unregister_notifier for iommu failed: %d\n", ret);
+ ret = vfio_unregister_notifier(mdev_dev(vdev->mdev), VFIO_IOMMU_NOTIFY,
+ &vdev->iommu_notifier);
+ drm_WARN(&i915->drm, ret,
+ "vfio_unregister_notifier for iommu failed: %d\n", ret);
- ret = vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_GROUP_NOTIFY,
- &vgpu->vdev.group_notifier);
- WARN(ret, "vfio_unregister_notifier for group failed: %d\n", ret);
+ ret = vfio_unregister_notifier(mdev_dev(vdev->mdev), VFIO_GROUP_NOTIFY,
+ &vdev->group_notifier);
+ drm_WARN(&i915->drm, ret,
+ "vfio_unregister_notifier for group failed: %d\n", ret);
/* dereference module reference taken at open */
module_put(THIS_MODULE);
@@ -835,7 +885,7 @@ static void __intel_vgpu_release(struct intel_vgpu *vgpu)
intel_vgpu_release_msi_eventfd_ctx(vgpu);
- vgpu->vdev.kvm = NULL;
+ vdev->kvm = NULL;
vgpu->handle = 0;
}
@@ -848,10 +898,10 @@ static void intel_vgpu_release(struct mdev_device *mdev)
static void intel_vgpu_release_work(struct work_struct *work)
{
- struct intel_vgpu *vgpu = container_of(work, struct intel_vgpu,
- vdev.release_work);
+ struct kvmgt_vdev *vdev = container_of(work, struct kvmgt_vdev,
+ release_work);
- __intel_vgpu_release(vgpu);
+ __intel_vgpu_release(vdev->vgpu);
}
static u64 intel_vgpu_get_bar_addr(struct intel_vgpu *vgpu, int bar)
@@ -913,7 +963,7 @@ static int intel_vgpu_aperture_rw(struct intel_vgpu *vgpu, u64 off,
return -EINVAL;
}
- aperture_va = io_mapping_map_wc(&vgpu->gvt->dev_priv->ggtt.iomap,
+ aperture_va = io_mapping_map_wc(&vgpu->gvt->gt->ggtt->iomap,
ALIGN_DOWN(off, PAGE_SIZE),
count + offset_in_page(off));
if (!aperture_va)
@@ -933,12 +983,13 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
size_t count, loff_t *ppos, bool is_write)
{
struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
+ struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
u64 pos = *ppos & VFIO_PCI_OFFSET_MASK;
int ret = -EINVAL;
- if (index >= VFIO_PCI_NUM_REGIONS + vgpu->vdev.num_regions) {
+ if (index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions) {
gvt_vgpu_err("invalid index: %u\n", index);
return -EINVAL;
}
@@ -967,11 +1018,11 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
case VFIO_PCI_ROM_REGION_INDEX:
break;
default:
- if (index >= VFIO_PCI_NUM_REGIONS + vgpu->vdev.num_regions)
+ if (index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions)
return -EINVAL;
index -= VFIO_PCI_NUM_REGIONS;
- return vgpu->vdev.region[index].ops->rw(vgpu, buf, count,
+ return vdev->region[index].ops->rw(vgpu, buf, count,
ppos, is_write);
}
@@ -1224,7 +1275,7 @@ static int intel_vgpu_set_msi_trigger(struct intel_vgpu *vgpu,
gvt_vgpu_err("eventfd_ctx_fdget failed\n");
return PTR_ERR(trigger);
}
- vgpu->vdev.msi_trigger = trigger;
+ kvmgt_vdev(vgpu)->msi_trigger = trigger;
} else if ((flags & VFIO_IRQ_SET_DATA_NONE) && !count)
intel_vgpu_release_msi_eventfd_ctx(vgpu);
@@ -1276,6 +1327,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
unsigned long arg)
{
struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
+ struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
unsigned long minsz;
gvt_dbg_core("vgpu%d ioctl, cmd: %d\n", vgpu->id, cmd);
@@ -1294,7 +1346,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
info.flags = VFIO_DEVICE_FLAGS_PCI;
info.flags |= VFIO_DEVICE_FLAGS_RESET;
info.num_regions = VFIO_PCI_NUM_REGIONS +
- vgpu->vdev.num_regions;
+ vdev->num_regions;
info.num_irqs = VFIO_PCI_NUM_IRQS;
return copy_to_user((void __user *)arg, &info, minsz) ?
@@ -1385,22 +1437,22 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
.header.version = 1 };
if (info.index >= VFIO_PCI_NUM_REGIONS +
- vgpu->vdev.num_regions)
+ vdev->num_regions)
return -EINVAL;
info.index =
array_index_nospec(info.index,
VFIO_PCI_NUM_REGIONS +
- vgpu->vdev.num_regions);
+ vdev->num_regions);
i = info.index - VFIO_PCI_NUM_REGIONS;
info.offset =
VFIO_PCI_INDEX_TO_OFFSET(info.index);
- info.size = vgpu->vdev.region[i].size;
- info.flags = vgpu->vdev.region[i].flags;
+ info.size = vdev->region[i].size;
+ info.flags = vdev->region[i].flags;
- cap_type.type = vgpu->vdev.region[i].type;
- cap_type.subtype = vgpu->vdev.region[i].subtype;
+ cap_type.type = vdev->region[i].type;
+ cap_type.subtype = vdev->region[i].subtype;
ret = vfio_info_add_capability(&caps,
&cap_type.header,
@@ -1597,12 +1649,10 @@ static struct mdev_parent_ops intel_vgpu_ops = {
static int kvmgt_host_init(struct device *dev, void *gvt, const void *ops)
{
- struct attribute **kvm_type_attrs;
struct attribute_group **kvm_vgpu_type_groups;
intel_gvt_ops = ops;
- if (!intel_gvt_ops->get_gvt_attrs(&kvm_type_attrs,
- &kvm_vgpu_type_groups))
+ if (!intel_gvt_ops->get_gvt_attrs(&kvm_vgpu_type_groups))
return -EFAULT;
intel_vgpu_ops.supported_type_groups = kvm_vgpu_type_groups;
@@ -1742,13 +1792,15 @@ static int kvmgt_guest_init(struct mdev_device *mdev)
{
struct kvmgt_guest_info *info;
struct intel_vgpu *vgpu;
+ struct kvmgt_vdev *vdev;
struct kvm *kvm;
vgpu = mdev_get_drvdata(mdev);
if (handle_valid(vgpu->handle))
return -EEXIST;
- kvm = vgpu->vdev.kvm;
+ vdev = kvmgt_vdev(vgpu);
+ kvm = vdev->kvm;
if (!kvm || kvm->mm != current->mm) {
gvt_vgpu_err("KVM is required to use Intel vGPU\n");
return -ESRCH;
@@ -1769,8 +1821,6 @@ static int kvmgt_guest_init(struct mdev_device *mdev)
kvmgt_protect_table_init(info);
gvt_cache_init(vgpu);
- init_completion(&vgpu->vblank_done);
-
info->track_node.track_write = kvmgt_page_track_write;
info->track_node.track_flush_slot = kvmgt_page_track_flush_slot;
kvm_page_track_register_notifier(kvm, &info->track_node);
@@ -1778,7 +1828,7 @@ static int kvmgt_guest_init(struct mdev_device *mdev)
info->debugfs_cache_entries = debugfs_create_ulong(
"kvmgt_nr_cache_entries",
0444, vgpu->debugfs,
- &vgpu->vdev.nr_cache_entries);
+ &vdev->nr_cache_entries);
return 0;
}
@@ -1795,9 +1845,17 @@ static bool kvmgt_guest_exit(struct kvmgt_guest_info *info)
return true;
}
-static int kvmgt_attach_vgpu(void *vgpu, unsigned long *handle)
+static int kvmgt_attach_vgpu(void *p_vgpu, unsigned long *handle)
{
- /* nothing to do here */
+ struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
+
+ vgpu->vdev = kzalloc(sizeof(struct kvmgt_vdev), GFP_KERNEL);
+
+ if (!vgpu->vdev)
+ return -ENOMEM;
+
+ kvmgt_vdev(vgpu)->vgpu = vgpu;
+
return 0;
}
@@ -1805,29 +1863,34 @@ static void kvmgt_detach_vgpu(void *p_vgpu)
{
int i;
struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
+ struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
- if (!vgpu->vdev.region)
+ if (!vdev->region)
return;
- for (i = 0; i < vgpu->vdev.num_regions; i++)
- if (vgpu->vdev.region[i].ops->release)
- vgpu->vdev.region[i].ops->release(vgpu,
- &vgpu->vdev.region[i]);
- vgpu->vdev.num_regions = 0;
- kfree(vgpu->vdev.region);
- vgpu->vdev.region = NULL;
+ for (i = 0; i < vdev->num_regions; i++)
+ if (vdev->region[i].ops->release)
+ vdev->region[i].ops->release(vgpu,
+ &vdev->region[i]);
+ vdev->num_regions = 0;
+ kfree(vdev->region);
+ vdev->region = NULL;
+
+ kfree(vdev);
}
static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data)
{
struct kvmgt_guest_info *info;
struct intel_vgpu *vgpu;
+ struct kvmgt_vdev *vdev;
if (!handle_valid(handle))
return -ESRCH;
info = (struct kvmgt_guest_info *)handle;
vgpu = info->vgpu;
+ vdev = kvmgt_vdev(vgpu);
/*
* When the guest is powered off, msi_trigger is set to NULL, but vgpu's
@@ -1838,10 +1901,10 @@ static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data)
* enabled by the guest, so if msi_trigger is NULL, success is still
* returned and no interrupt is injected into the guest.
*/
- if (vgpu->vdev.msi_trigger == NULL)
+ if (vdev->msi_trigger == NULL)
return 0;
- if (eventfd_signal(vgpu->vdev.msi_trigger, 1) == 1)
+ if (eventfd_signal(vdev->msi_trigger, 1) == 1)
return 0;
return -EFAULT;
@@ -1867,26 +1930,26 @@ static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
static int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn,
unsigned long size, dma_addr_t *dma_addr)
{
- struct kvmgt_guest_info *info;
struct intel_vgpu *vgpu;
+ struct kvmgt_vdev *vdev;
struct gvt_dma *entry;
int ret;
if (!handle_valid(handle))
return -EINVAL;
- info = (struct kvmgt_guest_info *)handle;
- vgpu = info->vgpu;
+ vgpu = ((struct kvmgt_guest_info *)handle)->vgpu;
+ vdev = kvmgt_vdev(vgpu);
- mutex_lock(&info->vgpu->vdev.cache_lock);
+ mutex_lock(&vdev->cache_lock);
- entry = __gvt_cache_find_gfn(info->vgpu, gfn);
+ entry = __gvt_cache_find_gfn(vgpu, gfn);
if (!entry) {
ret = gvt_dma_map_page(vgpu, gfn, dma_addr, size);
if (ret)
goto err_unlock;
- ret = __gvt_cache_add(info->vgpu, gfn, *dma_addr, size);
+ ret = __gvt_cache_add(vgpu, gfn, *dma_addr, size);
if (ret)
goto err_unmap;
} else if (entry->size != size) {
@@ -1898,7 +1961,7 @@ static int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn,
if (ret)
goto err_unlock;
- ret = __gvt_cache_add(info->vgpu, gfn, *dma_addr, size);
+ ret = __gvt_cache_add(vgpu, gfn, *dma_addr, size);
if (ret)
goto err_unmap;
} else {
@@ -1906,19 +1969,20 @@ static int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn,
*dma_addr = entry->dma_addr;
}
- mutex_unlock(&info->vgpu->vdev.cache_lock);
+ mutex_unlock(&vdev->cache_lock);
return 0;
err_unmap:
gvt_dma_unmap_page(vgpu, gfn, *dma_addr, size);
err_unlock:
- mutex_unlock(&info->vgpu->vdev.cache_lock);
+ mutex_unlock(&vdev->cache_lock);
return ret;
}
static int kvmgt_dma_pin_guest_page(unsigned long handle, dma_addr_t dma_addr)
{
struct kvmgt_guest_info *info;
+ struct kvmgt_vdev *vdev;
struct gvt_dma *entry;
int ret = 0;
@@ -1926,14 +1990,15 @@ static int kvmgt_dma_pin_guest_page(unsigned long handle, dma_addr_t dma_addr)
return -ENODEV;
info = (struct kvmgt_guest_info *)handle;
+ vdev = kvmgt_vdev(info->vgpu);
- mutex_lock(&info->vgpu->vdev.cache_lock);
+ mutex_lock(&vdev->cache_lock);
entry = __gvt_cache_find_dma_addr(info->vgpu, dma_addr);
if (entry)
kref_get(&entry->ref);
else
ret = -ENOMEM;
- mutex_unlock(&info->vgpu->vdev.cache_lock);
+ mutex_unlock(&vdev->cache_lock);
return ret;
}
@@ -1949,19 +2014,21 @@ static void __gvt_dma_release(struct kref *ref)
static void kvmgt_dma_unmap_guest_page(unsigned long handle, dma_addr_t dma_addr)
{
- struct kvmgt_guest_info *info;
+ struct intel_vgpu *vgpu;
+ struct kvmgt_vdev *vdev;
struct gvt_dma *entry;
if (!handle_valid(handle))
return;
- info = (struct kvmgt_guest_info *)handle;
+ vgpu = ((struct kvmgt_guest_info *)handle)->vgpu;
+ vdev = kvmgt_vdev(vgpu);
- mutex_lock(&info->vgpu->vdev.cache_lock);
- entry = __gvt_cache_find_dma_addr(info->vgpu, dma_addr);
+ mutex_lock(&vdev->cache_lock);
+ entry = __gvt_cache_find_dma_addr(vgpu, dma_addr);
if (entry)
kref_put(&entry->ref, __gvt_dma_release);
- mutex_unlock(&info->vgpu->vdev.cache_lock);
+ mutex_unlock(&vdev->cache_lock);
}
static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
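
The mechanical vgpu->vdev.x -> kvmgt_vdev(vgpu)->x conversion above falls out of moving the kvmgt-private state from struct intel_vgpu into a separately allocated struct kvmgt_vdev (allocated in kvmgt_attach_vgpu(), freed in kvmgt_detach_vgpu()). A sketch of the shape this diff implies; the field list is reconstructed from the accesses above, so it is illustrative rather than authoritative:

	struct kvmgt_vdev {
		struct intel_vgpu *vgpu;
		struct mdev_device *mdev;
		struct vfio_region *region;
		int num_regions;
		struct eventfd_ctx *msi_trigger;

		/* gfn_cache and dma_addr_cache index the same gvt_dma nodes */
		struct rb_root gfn_cache;
		struct rb_root dma_addr_cache;
		unsigned long nr_cache_entries;
		struct mutex cache_lock;

		struct notifier_block iommu_notifier;
		struct notifier_block group_notifier;
		struct kvm *kvm;
		struct work_struct release_work;
		atomic_t released;
		struct vfio_device *vfio_device;
	};

	static inline struct kvmgt_vdev *kvmgt_vdev(struct intel_vgpu *vgpu)
	{
		return vgpu->vdev;
	}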
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c
index a55178884d67..291993615af9 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.c
+++ b/drivers/gpu/drm/i915/gvt/mmio.c
@@ -103,6 +103,7 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, u64 pa,
void *p_data, unsigned int bytes)
{
struct intel_gvt *gvt = vgpu->gvt;
+ struct drm_i915_private *i915 = gvt->gt->i915;
unsigned int offset = 0;
int ret = -EINVAL;
@@ -114,15 +115,17 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, u64 pa,
offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
- if (WARN_ON(bytes > 8))
+ if (drm_WARN_ON(&i915->drm, bytes > 8))
goto err;
if (reg_is_gtt(gvt, offset)) {
- if (WARN_ON(!IS_ALIGNED(offset, 4) && !IS_ALIGNED(offset, 8)))
+ if (drm_WARN_ON(&i915->drm, !IS_ALIGNED(offset, 4) &&
+ !IS_ALIGNED(offset, 8)))
goto err;
- if (WARN_ON(bytes != 4 && bytes != 8))
+ if (drm_WARN_ON(&i915->drm, bytes != 4 && bytes != 8))
goto err;
- if (WARN_ON(!reg_is_gtt(gvt, offset + bytes - 1)))
+ if (drm_WARN_ON(&i915->drm,
+ !reg_is_gtt(gvt, offset + bytes - 1)))
goto err;
ret = intel_vgpu_emulate_ggtt_mmio_read(vgpu, offset,
@@ -132,16 +135,16 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, u64 pa,
goto out;
}
- if (WARN_ON_ONCE(!reg_is_mmio(gvt, offset))) {
+ if (drm_WARN_ON_ONCE(&i915->drm, !reg_is_mmio(gvt, offset))) {
ret = intel_gvt_hypervisor_read_gpa(vgpu, pa, p_data, bytes);
goto out;
}
- if (WARN_ON(!reg_is_mmio(gvt, offset + bytes - 1)))
+ if (drm_WARN_ON(&i915->drm, !reg_is_mmio(gvt, offset + bytes - 1)))
goto err;
if (!intel_gvt_mmio_is_unalign(gvt, offset)) {
- if (WARN_ON(!IS_ALIGNED(offset, bytes)))
+ if (drm_WARN_ON(&i915->drm, !IS_ALIGNED(offset, bytes)))
goto err;
}
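
The WARN_ON() -> drm_WARN_ON() conversions tie each backtrace to the device that tripped it, which matters on multi-GPU systems. A simplified sketch of the drm_print.h helpers being used (shown for context):

	#define drm_WARN(drm, condition, format, args...)		\
		WARN(condition, "%s %s: " format,			\
		     dev_driver_string((drm)->dev),			\
		     dev_name((drm)->dev), ## args)

	#define drm_WARN_ON(drm, x)					\
		drm_WARN((drm), (x), "%s",				\
			 "drm_WARN_ON(" __stringify(x) ")")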
@@ -175,6 +178,7 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, u64 pa,
void *p_data, unsigned int bytes)
{
struct intel_gvt *gvt = vgpu->gvt;
+ struct drm_i915_private *i915 = gvt->gt->i915;
unsigned int offset = 0;
int ret = -EINVAL;
@@ -187,15 +191,17 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, u64 pa,
offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
- if (WARN_ON(bytes > 8))
+ if (drm_WARN_ON(&i915->drm, bytes > 8))
goto err;
if (reg_is_gtt(gvt, offset)) {
- if (WARN_ON(!IS_ALIGNED(offset, 4) && !IS_ALIGNED(offset, 8)))
+ if (drm_WARN_ON(&i915->drm, !IS_ALIGNED(offset, 4) &&
+ !IS_ALIGNED(offset, 8)))
goto err;
- if (WARN_ON(bytes != 4 && bytes != 8))
+ if (drm_WARN_ON(&i915->drm, bytes != 4 && bytes != 8))
goto err;
- if (WARN_ON(!reg_is_gtt(gvt, offset + bytes - 1)))
+ if (drm_WARN_ON(&i915->drm,
+ !reg_is_gtt(gvt, offset + bytes - 1)))
goto err;
ret = intel_vgpu_emulate_ggtt_mmio_write(vgpu, offset,
@@ -205,7 +211,7 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, u64 pa,
goto out;
}
- if (WARN_ON_ONCE(!reg_is_mmio(gvt, offset))) {
+ if (drm_WARN_ON_ONCE(&i915->drm, !reg_is_mmio(gvt, offset))) {
ret = intel_gvt_hypervisor_write_gpa(vgpu, pa, p_data, bytes);
goto out;
}
@@ -245,7 +251,7 @@ void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr)
/* set the bit 0:2(Core C-State ) to C0 */
vgpu_vreg_t(vgpu, GEN6_GT_CORE_STATUS) = 0;
- if (IS_BROXTON(vgpu->gvt->dev_priv)) {
+ if (IS_BROXTON(vgpu->gvt->gt->i915)) {
vgpu_vreg_t(vgpu, BXT_P_CR_GT_DISP_PWRON) &=
~(BIT(0) | BIT(1));
vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) &=
diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h
index 2e68f4b02c94..cc4812648bf4 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.h
+++ b/drivers/gpu/drm/i915/gvt/mmio.h
@@ -69,8 +69,8 @@ struct intel_gvt_mmio_info {
struct hlist_node node;
};
-int intel_gvt_render_mmio_to_ring_id(struct intel_gvt *gvt,
- unsigned int reg);
+const struct intel_engine_cs *
+intel_gvt_render_mmio_to_engine(struct intel_gvt *gvt, unsigned int reg);
unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt);
bool intel_gvt_match_device(struct intel_gvt *gvt, unsigned long device);
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index aaf15916d29a..2ccaf78f96e8 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -157,12 +157,13 @@ static u32 gen9_mocs_mmio_offset_list[] = {
[VECS0] = 0xcb00,
};
-static void load_render_mocs(struct drm_i915_private *dev_priv)
+static void load_render_mocs(const struct intel_engine_cs *engine)
{
- struct intel_gvt *gvt = dev_priv->gvt;
- i915_reg_t offset;
+ struct intel_gvt *gvt = engine->i915->gvt;
+ struct intel_uncore *uncore = engine->uncore;
u32 cnt = gvt->engine_mmio_list.mocs_mmio_offset_list_cnt;
u32 *regs = gvt->engine_mmio_list.mocs_mmio_offset_list;
+ i915_reg_t offset;
int ring_id, i;
/* Platform doesn't have mocs mmios. */
@@ -170,12 +171,13 @@ static void load_render_mocs(struct drm_i915_private *dev_priv)
return;
for (ring_id = 0; ring_id < cnt; ring_id++) {
- if (!HAS_ENGINE(dev_priv, ring_id))
+ if (!HAS_ENGINE(engine->i915, ring_id))
continue;
+
offset.reg = regs[ring_id];
for (i = 0; i < GEN9_MOCS_SIZE; i++) {
gen9_render_mocs.control_table[ring_id][i] =
- I915_READ_FW(offset);
+ intel_uncore_read_fw(uncore, offset);
offset.reg += 4;
}
}
@@ -183,7 +185,7 @@ static void load_render_mocs(struct drm_i915_private *dev_priv)
offset.reg = 0xb020;
for (i = 0; i < GEN9_MOCS_SIZE / 2; i++) {
gen9_render_mocs.l3cc_table[i] =
- I915_READ_FW(offset);
+ intel_uncore_read_fw(uncore, offset);
offset.reg += 4;
}
gen9_render_mocs.initialized = true;
@@ -214,13 +216,11 @@ restore_context_mmio_for_inhibit(struct intel_vgpu *vgpu,
*cs++ = MI_LOAD_REGISTER_IMM(count);
for (mmio = gvt->engine_mmio_list.mmio;
i915_mmio_reg_valid(mmio->reg); mmio++) {
- if (mmio->ring_id != ring_id ||
- !mmio->in_context)
+ if (mmio->id != ring_id || !mmio->in_context)
continue;
*cs++ = i915_mmio_reg_offset(mmio->reg);
- *cs++ = vgpu_vreg_t(vgpu, mmio->reg) |
- (mmio->mask << 16);
+ *cs++ = vgpu_vreg_t(vgpu, mmio->reg) | (mmio->mask << 16);
gvt_dbg_core("add lri reg pair 0x%x:0x%x in inhibit ctx, vgpu:%d, rind_id:%d\n",
*(cs-2), *(cs-1), vgpu->id, ring_id);
}
@@ -344,10 +344,10 @@ static u32 gen8_tlb_mmio_offset_list[] = {
[VECS0] = 0x4270,
};
-static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
+static void handle_tlb_pending_event(struct intel_vgpu *vgpu,
+ const struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- struct intel_uncore *uncore = &dev_priv->uncore;
+ struct intel_uncore *uncore = engine->uncore;
struct intel_vgpu_submission *s = &vgpu->submission;
u32 *regs = vgpu->gvt->engine_mmio_list.tlb_mmio_offset_list;
u32 cnt = vgpu->gvt->engine_mmio_list.tlb_mmio_offset_list_cnt;
@@ -357,13 +357,13 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
if (!regs)
return;
- if (WARN_ON(ring_id >= cnt))
+ if (drm_WARN_ON(&engine->i915->drm, engine->id >= cnt))
return;
- if (!test_and_clear_bit(ring_id, (void *)s->tlb_handle_pending))
+ if (!test_and_clear_bit(engine->id, (void *)s->tlb_handle_pending))
return;
- reg = _MMIO(regs[ring_id]);
+ reg = _MMIO(regs[engine->id]);
/* WaForceWakeRenderDuringMmioTLBInvalidate:skl
* we need to put a forcewake when invalidating RCS TLB caches,
@@ -372,30 +372,27 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
*/
fw = intel_uncore_forcewake_for_reg(uncore, reg,
FW_REG_READ | FW_REG_WRITE);
- if (ring_id == RCS0 && INTEL_GEN(dev_priv) >= 9)
+ if (engine->id == RCS0 && INTEL_GEN(engine->i915) >= 9)
fw |= FORCEWAKE_RENDER;
intel_uncore_forcewake_get(uncore, fw);
intel_uncore_write_fw(uncore, reg, 0x1);
- if (wait_for_atomic((intel_uncore_read_fw(uncore, reg) == 0), 50))
- gvt_vgpu_err("timeout in invalidate ring (%d) tlb\n", ring_id);
+ if (wait_for_atomic(intel_uncore_read_fw(uncore, reg) == 0, 50))
+ gvt_vgpu_err("timeout in invalidate ring %s tlb\n",
+ engine->name);
else
vgpu_vreg_t(vgpu, reg) = 0;
intel_uncore_forcewake_put(uncore, fw);
- gvt_dbg_core("invalidate TLB for ring %d\n", ring_id);
+ gvt_dbg_core("invalidate TLB for ring %s\n", engine->name);
}
static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
- int ring_id)
+ const struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv;
- i915_reg_t offset, l3_offset;
- u32 old_v, new_v;
-
u32 regs[] = {
[RCS0] = 0xc800,
[VCS0] = 0xc900,
@@ -403,36 +400,38 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
[BCS0] = 0xcc00,
[VECS0] = 0xcb00,
};
+ struct intel_uncore *uncore = engine->uncore;
+ i915_reg_t offset, l3_offset;
+ u32 old_v, new_v;
int i;
- dev_priv = pre ? pre->gvt->dev_priv : next->gvt->dev_priv;
- if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
+ if (drm_WARN_ON(&engine->i915->drm, engine->id >= ARRAY_SIZE(regs)))
return;
- if (ring_id == RCS0 && IS_GEN(dev_priv, 9))
+ if (engine->id == RCS0 && IS_GEN(engine->i915, 9))
return;
if (!pre && !gen9_render_mocs.initialized)
- load_render_mocs(dev_priv);
+ load_render_mocs(engine);
- offset.reg = regs[ring_id];
+ offset.reg = regs[engine->id];
for (i = 0; i < GEN9_MOCS_SIZE; i++) {
if (pre)
old_v = vgpu_vreg_t(pre, offset);
else
- old_v = gen9_render_mocs.control_table[ring_id][i];
+ old_v = gen9_render_mocs.control_table[engine->id][i];
if (next)
new_v = vgpu_vreg_t(next, offset);
else
- new_v = gen9_render_mocs.control_table[ring_id][i];
+ new_v = gen9_render_mocs.control_table[engine->id][i];
if (old_v != new_v)
- I915_WRITE_FW(offset, new_v);
+ intel_uncore_write_fw(uncore, offset, new_v);
offset.reg += 4;
}
- if (ring_id == RCS0) {
+ if (engine->id == RCS0) {
l3_offset.reg = 0xb020;
for (i = 0; i < GEN9_MOCS_SIZE / 2; i++) {
if (pre)
@@ -445,7 +444,7 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
new_v = gen9_render_mocs.l3cc_table[i];
if (old_v != new_v)
- I915_WRITE_FW(l3_offset, new_v);
+ intel_uncore_write_fw(uncore, l3_offset, new_v);
l3_offset.reg += 4;
}
@@ -467,38 +466,40 @@ bool is_inhibit_context(struct intel_context *ce)
/* Switch ring mmio values (context). */
static void switch_mmio(struct intel_vgpu *pre,
struct intel_vgpu *next,
- int ring_id)
+ const struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv;
+ struct intel_uncore *uncore = engine->uncore;
struct intel_vgpu_submission *s;
struct engine_mmio *mmio;
u32 old_v, new_v;
- dev_priv = pre ? pre->gvt->dev_priv : next->gvt->dev_priv;
- if (INTEL_GEN(dev_priv) >= 9)
- switch_mocs(pre, next, ring_id);
+ if (INTEL_GEN(engine->i915) >= 9)
+ switch_mocs(pre, next, engine);
- for (mmio = dev_priv->gvt->engine_mmio_list.mmio;
+ for (mmio = engine->i915->gvt->engine_mmio_list.mmio;
i915_mmio_reg_valid(mmio->reg); mmio++) {
- if (mmio->ring_id != ring_id)
+ if (mmio->id != engine->id)
continue;
/*
* No need to save or restore the mmio that is part of the context
* state image on gen9; it is initialized by an LRI command and
* saved/restored together with the context.
*/
- if (IS_GEN(dev_priv, 9) && mmio->in_context)
+ if (IS_GEN(engine->i915, 9) && mmio->in_context)
continue;
// save
if (pre) {
- vgpu_vreg_t(pre, mmio->reg) = I915_READ_FW(mmio->reg);
+ vgpu_vreg_t(pre, mmio->reg) =
+ intel_uncore_read_fw(uncore, mmio->reg);
if (mmio->mask)
vgpu_vreg_t(pre, mmio->reg) &=
- ~(mmio->mask << 16);
+ ~(mmio->mask << 16);
old_v = vgpu_vreg_t(pre, mmio->reg);
- } else
- old_v = mmio->value = I915_READ_FW(mmio->reg);
+ } else {
+ old_v = mmio->value =
+ intel_uncore_read_fw(uncore, mmio->reg);
+ }
// restore
if (next) {
@@ -509,12 +510,12 @@ static void switch_mmio(struct intel_vgpu *pre,
* itself.
*/
if (mmio->in_context &&
- !is_inhibit_context(s->shadow[ring_id]))
+ !is_inhibit_context(s->shadow[engine->id]))
continue;
if (mmio->mask)
new_v = vgpu_vreg_t(next, mmio->reg) |
- (mmio->mask << 16);
+ (mmio->mask << 16);
else
new_v = vgpu_vreg_t(next, mmio->reg);
} else {
@@ -526,7 +527,7 @@ static void switch_mmio(struct intel_vgpu *pre,
new_v = mmio->value;
}
- I915_WRITE_FW(mmio->reg, new_v);
+ intel_uncore_write_fw(uncore, mmio->reg, new_v);
trace_render_mmio(pre ? pre->id : 0,
next ? next->id : 0,
@@ -536,39 +537,37 @@ static void switch_mmio(struct intel_vgpu *pre,
}
if (next)
- handle_tlb_pending_event(next, ring_id);
+ handle_tlb_pending_event(next, engine);
}
/**
* intel_gvt_switch_render_mmio - switch mmio context of specific engine
* @pre: the last vGPU that own the engine
* @next: the vGPU to switch to
- * @ring_id: specify the engine
+ * @engine: the engine
*
* If pre is null indicates that host own the engine. If next is null
* indicates that we are switching to host workload.
*/
void intel_gvt_switch_mmio(struct intel_vgpu *pre,
- struct intel_vgpu *next, int ring_id)
+ struct intel_vgpu *next,
+ const struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv;
-
- if (WARN_ON(!pre && !next))
+ if (WARN(!pre && !next, "switch ring %s from host to HOST\n",
+ engine->name))
return;
- gvt_dbg_render("switch ring %d from %s to %s\n", ring_id,
+ gvt_dbg_render("switch ring %s from %s to %s\n", engine->name,
pre ? "vGPU" : "host", next ? "vGPU" : "HOST");
- dev_priv = pre ? pre->gvt->dev_priv : next->gvt->dev_priv;
-
/**
* We are using raw mmio access wrapper to improve the
* performance for batch mmio read/write, so we need to
* handle forcewake manually.
*/
- intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
- switch_mmio(pre, next, ring_id);
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL);
+ switch_mmio(pre, next, engine);
+ intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL);
}
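
As the comment above says, the _fw accessors skip the per-register forcewake bookkeeping, so the caller brackets a batch of accesses itself. The pattern this file now uses, in miniature (uncore and reg stand for any engine uncore and register; illustrative only):

	struct intel_uncore *uncore = engine->uncore;
	u32 val;

	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);

	/* raw accessors: no implicit forcewake per access */
	val = intel_uncore_read_fw(uncore, reg);
	intel_uncore_write_fw(uncore, reg, val);

	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);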
/**
@@ -580,7 +579,7 @@ void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt)
{
struct engine_mmio *mmio;
- if (INTEL_GEN(gvt->dev_priv) >= 9) {
+ if (INTEL_GEN(gvt->gt->i915) >= 9) {
gvt->engine_mmio_list.mmio = gen9_engine_mmio_list;
gvt->engine_mmio_list.tlb_mmio_offset_list = gen8_tlb_mmio_offset_list;
gvt->engine_mmio_list.tlb_mmio_offset_list_cnt = ARRAY_SIZE(gen8_tlb_mmio_offset_list);
@@ -595,7 +594,7 @@ void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt)
for (mmio = gvt->engine_mmio_list.mmio;
i915_mmio_reg_valid(mmio->reg); mmio++) {
if (mmio->in_context) {
- gvt->engine_mmio_list.ctx_mmio_count[mmio->ring_id]++;
+ gvt->engine_mmio_list.ctx_mmio_count[mmio->id]++;
intel_gvt_mmio_set_in_ctx(gvt, mmio->reg.reg);
}
}
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.h b/drivers/gpu/drm/i915/gvt/mmio_context.h
index f7eaa442403f..970704b18f23 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.h
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.h
@@ -37,7 +37,7 @@
#define __GVT_RENDER_H__
struct engine_mmio {
- int ring_id;
+ enum intel_engine_id id;
i915_reg_t reg;
u32 mask;
bool in_context;
@@ -45,7 +45,8 @@ struct engine_mmio {
};
void intel_gvt_switch_mmio(struct intel_vgpu *pre,
- struct intel_vgpu *next, int ring_id);
+ struct intel_vgpu *next,
+ const struct intel_engine_cs *engine);
void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt);
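
With engine_mmio.id now typed as enum intel_engine_id, a list entry keeps its initializer shape. A hypothetical gen9_engine_mmio_list entry for illustration (register and mask values are placeholders; the zeroed terminator is what i915_mmio_reg_valid() checks for in the loops above):

	static struct engine_mmio gen9_engine_mmio_list[] = {
		/* {id, reg, mask, in_context} */
		{RCS0, GFX_MODE_GEN7, 0xffff, false},
		{ /* termination */ }
	};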
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index 2369d4a9af94..036b74fe9298 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -39,8 +39,8 @@ static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu)
enum intel_engine_id i;
struct intel_engine_cs *engine;
- for_each_engine(engine, vgpu->gvt->dev_priv, i) {
- if (!list_empty(workload_q_head(vgpu, i)))
+ for_each_engine(engine, vgpu->gvt->gt, i) {
+ if (!list_empty(workload_q_head(vgpu, engine)))
return true;
}
@@ -152,8 +152,8 @@ static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
scheduler->need_reschedule = true;
/* still have uncompleted workload? */
- for_each_engine(engine, gvt->dev_priv, i) {
- if (scheduler->current_workload[i])
+ for_each_engine(engine, gvt->gt, i) {
+ if (scheduler->current_workload[engine->id])
return;
}
@@ -169,8 +169,8 @@ static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
scheduler->need_reschedule = false;
/* wake up workload dispatch thread */
- for_each_engine(engine, gvt->dev_priv, i)
- wake_up(&scheduler->waitq[i]);
+ for_each_engine(engine, gvt->gt, i)
+ wake_up(&scheduler->waitq[engine->id]);
}
static struct intel_vgpu *find_busy_vgpu(struct gvt_sched_data *sched_data)
@@ -444,9 +444,10 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
{
struct intel_gvt_workload_scheduler *scheduler =
&vgpu->gvt->scheduler;
- int ring_id;
struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
if (!vgpu_data->active)
return;
@@ -467,10 +468,10 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
intel_runtime_pm_get(&dev_priv->runtime_pm);
spin_lock_bh(&scheduler->mmio_context_lock);
- for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) {
- if (scheduler->engine_owner[ring_id] == vgpu) {
- intel_gvt_switch_mmio(vgpu, NULL, ring_id);
- scheduler->engine_owner[ring_id] = NULL;
+ for_each_engine(engine, vgpu->gvt->gt, id) {
+ if (scheduler->engine_owner[engine->id] == vgpu) {
+ intel_gvt_switch_mmio(vgpu, NULL, engine);
+ scheduler->engine_owner[engine->id] = NULL;
}
}
spin_unlock_bh(&scheduler->mmio_context_lock);
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index e8c0885df978..1c95bf8cbed0 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -84,7 +84,7 @@ static void update_shadow_pdps(struct intel_vgpu_workload *workload)
static void sr_oa_regs(struct intel_vgpu_workload *workload,
u32 *reg_state, bool save)
{
- struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
+ struct drm_i915_private *dev_priv = workload->vgpu->gvt->gt->i915;
u32 ctx_oactxctrl = dev_priv->perf.ctx_oactxctrl_offset;
u32 ctx_flexeu0 = dev_priv->perf.ctx_flexeu0_offset;
int i = 0;
@@ -98,7 +98,7 @@ static void sr_oa_regs(struct intel_vgpu_workload *workload,
i915_mmio_reg_offset(EU_PERF_CNTL6),
};
- if (workload->ring_id != RCS0)
+ if (workload->engine->id != RCS0)
return;
if (save) {
@@ -128,7 +128,6 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
struct intel_gvt *gvt = vgpu->gvt;
- int ring_id = workload->ring_id;
struct drm_i915_gem_object *ctx_obj =
workload->req->context->state->obj;
struct execlist_ring_context *shadow_ring_context;
@@ -154,7 +153,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
COPY_REG_MASKED(ctx_ctrl);
COPY_REG(ctx_timestamp);
- if (ring_id == RCS0) {
+ if (workload->engine->id == RCS0) {
COPY_REG(bb_per_ctx_ptr);
COPY_REG(rcs_indirect_ctx);
COPY_REG(rcs_indirect_ctx_offset);
@@ -175,14 +174,14 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
if (IS_RESTORE_INHIBIT(shadow_ring_context->ctx_ctrl.val))
return 0;
- gvt_dbg_sched("ring id %d workload lrca %x", ring_id,
- workload->ctx_desc.lrca);
-
- context_page_num = gvt->dev_priv->engine[ring_id]->context_size;
+ gvt_dbg_sched("ring %s workload lrca %x",
+ workload->engine->name,
+ workload->ctx_desc.lrca);
+ context_page_num = workload->engine->context_size;
context_page_num = context_page_num >> PAGE_SHIFT;
- if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS0)
+ if (IS_BROADWELL(gvt->gt->i915) && workload->engine->id == RCS0)
context_page_num = 19;
i = 2;
@@ -210,38 +209,43 @@ static inline bool is_gvt_request(struct i915_request *rq)
return intel_context_force_single_submission(rq->context);
}
-static void save_ring_hw_state(struct intel_vgpu *vgpu, int ring_id)
+static void save_ring_hw_state(struct intel_vgpu *vgpu,
+ const struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- u32 ring_base = dev_priv->engine[ring_id]->mmio_base;
+ struct intel_uncore *uncore = engine->uncore;
i915_reg_t reg;
- reg = RING_INSTDONE(ring_base);
- vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
- reg = RING_ACTHD(ring_base);
- vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
- reg = RING_ACTHD_UDW(ring_base);
- vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
+ reg = RING_INSTDONE(engine->mmio_base);
+ vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) =
+ intel_uncore_read(uncore, reg);
+
+ reg = RING_ACTHD(engine->mmio_base);
+ vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) =
+ intel_uncore_read(uncore, reg);
+
+ reg = RING_ACTHD_UDW(engine->mmio_base);
+ vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) =
+ intel_uncore_read(uncore, reg);
}
static int shadow_context_status_change(struct notifier_block *nb,
unsigned long action, void *data)
{
- struct i915_request *req = data;
+ struct i915_request *rq = data;
struct intel_gvt *gvt = container_of(nb, struct intel_gvt,
- shadow_ctx_notifier_block[req->engine->id]);
+ shadow_ctx_notifier_block[rq->engine->id]);
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
- enum intel_engine_id ring_id = req->engine->id;
+ enum intel_engine_id ring_id = rq->engine->id;
struct intel_vgpu_workload *workload;
unsigned long flags;
- if (!is_gvt_request(req)) {
+ if (!is_gvt_request(rq)) {
spin_lock_irqsave(&scheduler->mmio_context_lock, flags);
if (action == INTEL_CONTEXT_SCHEDULE_IN &&
scheduler->engine_owner[ring_id]) {
/* Switch ring from vGPU to host. */
intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
- NULL, ring_id);
+ NULL, rq->engine);
scheduler->engine_owner[ring_id] = NULL;
}
spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags);
@@ -259,7 +263,7 @@ static int shadow_context_status_change(struct notifier_block *nb,
if (workload->vgpu != scheduler->engine_owner[ring_id]) {
/* Switch ring from host to vGPU or vGPU to vGPU. */
intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
- workload->vgpu, ring_id);
+ workload->vgpu, rq->engine);
scheduler->engine_owner[ring_id] = workload->vgpu;
} else
gvt_dbg_sched("skip ring %d mmio switch for vgpu%d\n",
@@ -268,11 +272,11 @@ static int shadow_context_status_change(struct notifier_block *nb,
atomic_set(&workload->shadow_ctx_active, 1);
break;
case INTEL_CONTEXT_SCHEDULE_OUT:
- save_ring_hw_state(workload->vgpu, ring_id);
+ save_ring_hw_state(workload->vgpu, rq->engine);
atomic_set(&workload->shadow_ctx_active, 0);
break;
case INTEL_CONTEXT_SCHEDULE_PREEMPTED:
- save_ring_hw_state(workload->vgpu, ring_id);
+ save_ring_hw_state(workload->vgpu, rq->engine);
break;
default:
WARN_ON(1);
@@ -391,7 +395,7 @@ intel_gvt_workload_req_alloc(struct intel_vgpu_workload *workload)
if (workload->req)
return 0;
- rq = i915_request_create(s->shadow[workload->ring_id]);
+ rq = i915_request_create(s->shadow[workload->engine->id]);
if (IS_ERR(rq)) {
gvt_vgpu_err("fail to allocate gem request\n");
return PTR_ERR(rq);
@@ -420,15 +424,16 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
if (workload->shadow)
return 0;
- if (!test_and_set_bit(workload->ring_id, s->shadow_ctx_desc_updated))
- shadow_context_descriptor_update(s->shadow[workload->ring_id],
+ if (!test_and_set_bit(workload->engine->id, s->shadow_ctx_desc_updated))
+ shadow_context_descriptor_update(s->shadow[workload->engine->id],
workload);
ret = intel_gvt_scan_and_shadow_ringbuffer(workload);
if (ret)
return ret;
- if (workload->ring_id == RCS0 && workload->wa_ctx.indirect_ctx.size) {
+ if (workload->engine->id == RCS0 &&
+ workload->wa_ctx.indirect_ctx.size) {
ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
if (ret)
goto err_shadow;
@@ -436,6 +441,7 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
workload->shadow = true;
return 0;
+
err_shadow:
release_shadow_wa_ctx(&workload->wa_ctx);
return ret;
@@ -567,12 +573,8 @@ static int prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
static void update_vreg_in_ctx(struct intel_vgpu_workload *workload)
{
- struct intel_vgpu *vgpu = workload->vgpu;
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- u32 ring_base;
-
- ring_base = dev_priv->engine[workload->ring_id]->mmio_base;
- vgpu_vreg_t(vgpu, RING_START(ring_base)) = workload->rb_start;
+ vgpu_vreg_t(workload->vgpu, RING_START(workload->engine->mmio_base)) =
+ workload->rb_start;
}
static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
@@ -608,7 +610,6 @@ static int prepare_workload(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
struct intel_vgpu_submission *s = &vgpu->submission;
- int ring = workload->ring_id;
int ret = 0;
ret = intel_vgpu_pin_mm(workload->shadow_mm);
@@ -625,7 +626,7 @@ static int prepare_workload(struct intel_vgpu_workload *workload)
update_shadow_pdps(workload);
- set_context_ppgtt_from_shadow(workload, s->shadow[ring]);
+ set_context_ppgtt_from_shadow(workload, s->shadow[workload->engine->id]);
ret = intel_vgpu_sync_oos_pages(workload->vgpu);
if (ret) {
@@ -677,11 +678,10 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
struct i915_request *rq;
- int ring_id = workload->ring_id;
int ret;
- gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
- ring_id, workload);
+ gvt_dbg_sched("ring id %s prepare to dispatch workload %p\n",
+ workload->engine->name, workload);
mutex_lock(&vgpu->vgpu_lock);
@@ -710,8 +710,8 @@ out:
}
if (!IS_ERR_OR_NULL(workload->req)) {
- gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
- ring_id, workload->req);
+ gvt_dbg_sched("ring id %s submit workload to i915 %p\n",
+ workload->engine->name, workload->req);
i915_request_add(workload->req);
workload->dispatched = true;
}
@@ -722,8 +722,8 @@ err_req:
return ret;
}
-static struct intel_vgpu_workload *pick_next_workload(
- struct intel_gvt *gvt, int ring_id)
+static struct intel_vgpu_workload *
+pick_next_workload(struct intel_gvt *gvt, struct intel_engine_cs *engine)
{
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
struct intel_vgpu_workload *workload = NULL;
@@ -735,27 +735,27 @@ static struct intel_vgpu_workload *pick_next_workload(
* bail out
*/
if (!scheduler->current_vgpu) {
- gvt_dbg_sched("ring id %d stop - no current vgpu\n", ring_id);
+ gvt_dbg_sched("ring %s stop - no current vgpu\n", engine->name);
goto out;
}
if (scheduler->need_reschedule) {
- gvt_dbg_sched("ring id %d stop - will reschedule\n", ring_id);
+ gvt_dbg_sched("ring %s stop - will reschedule\n", engine->name);
goto out;
}
if (!scheduler->current_vgpu->active ||
- list_empty(workload_q_head(scheduler->current_vgpu, ring_id)))
+ list_empty(workload_q_head(scheduler->current_vgpu, engine)))
goto out;
/*
* still have current workload, maybe the workload dispatcher
* failed to submit it for some reason; resubmit it.
*/
- if (scheduler->current_workload[ring_id]) {
- workload = scheduler->current_workload[ring_id];
- gvt_dbg_sched("ring id %d still have current workload %p\n",
- ring_id, workload);
+ if (scheduler->current_workload[engine->id]) {
+ workload = scheduler->current_workload[engine->id];
+ gvt_dbg_sched("ring %s still have current workload %p\n",
+ engine->name, workload);
goto out;
}
@@ -765,13 +765,14 @@ static struct intel_vgpu_workload *pick_next_workload(
* will wait the current workload is finished when trying to
* schedule out a vgpu.
*/
- scheduler->current_workload[ring_id] = container_of(
- workload_q_head(scheduler->current_vgpu, ring_id)->next,
- struct intel_vgpu_workload, list);
+ scheduler->current_workload[engine->id] =
+ list_first_entry(workload_q_head(scheduler->current_vgpu,
+ engine),
+ struct intel_vgpu_workload, list);
- workload = scheduler->current_workload[ring_id];
+ workload = scheduler->current_workload[engine->id];
- gvt_dbg_sched("ring id %d pick new workload %p\n", ring_id, workload);
+ gvt_dbg_sched("ring %s pick new workload %p\n", engine->name, workload);
atomic_inc(&workload->vgpu->submission.running_workload_num);
out:
@@ -783,14 +784,12 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
{
struct i915_request *rq = workload->req;
struct intel_vgpu *vgpu = workload->vgpu;
- struct intel_gvt *gvt = vgpu->gvt;
struct drm_i915_gem_object *ctx_obj = rq->context->state->obj;
struct execlist_ring_context *shadow_ring_context;
struct page *page;
void *src;
unsigned long context_gpa, context_page_num;
int i;
- struct drm_i915_private *dev_priv = gvt->dev_priv;
u32 ring_base;
u32 head, tail;
u16 wrap_count;
@@ -811,14 +810,14 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
head = (wrap_count << RB_HEAD_WRAP_CNT_OFF) | tail;
- ring_base = dev_priv->engine[workload->ring_id]->mmio_base;
+ ring_base = rq->engine->mmio_base;
vgpu_vreg_t(vgpu, RING_TAIL(ring_base)) = tail;
vgpu_vreg_t(vgpu, RING_HEAD(ring_base)) = head;
context_page_num = rq->engine->context_size;
context_page_num = context_page_num >> PAGE_SHIFT;
- if (IS_BROADWELL(gvt->dev_priv) && rq->engine->id == RCS0)
+ if (IS_BROADWELL(rq->i915) && rq->engine->id == RCS0)
context_page_num = 19;
i = 2;
@@ -869,7 +868,7 @@ void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
intel_engine_mask_t engine_mask)
{
struct intel_vgpu_submission *s = &vgpu->submission;
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
struct intel_engine_cs *engine;
struct intel_vgpu_workload *pos, *n;
intel_engine_mask_t tmp;
@@ -966,54 +965,47 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
mutex_unlock(&vgpu->vgpu_lock);
}
-struct workload_thread_param {
- struct intel_gvt *gvt;
- int ring_id;
-};
-
-static int workload_thread(void *priv)
+static int workload_thread(void *arg)
{
- struct workload_thread_param *p = (struct workload_thread_param *)priv;
- struct intel_gvt *gvt = p->gvt;
- int ring_id = p->ring_id;
+ struct intel_engine_cs *engine = arg;
+ const bool need_force_wake = INTEL_GEN(engine->i915) >= 9;
+ struct intel_gvt *gvt = engine->i915->gvt;
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
struct intel_vgpu_workload *workload = NULL;
struct intel_vgpu *vgpu = NULL;
int ret;
- bool need_force_wake = (INTEL_GEN(gvt->dev_priv) >= 9);
DEFINE_WAIT_FUNC(wait, woken_wake_function);
- struct intel_runtime_pm *rpm = &gvt->dev_priv->runtime_pm;
-
- kfree(p);
- gvt_dbg_core("workload thread for ring %d started\n", ring_id);
+ gvt_dbg_core("workload thread for ring %s started\n", engine->name);
while (!kthread_should_stop()) {
- add_wait_queue(&scheduler->waitq[ring_id], &wait);
+ intel_wakeref_t wakeref;
+
+ add_wait_queue(&scheduler->waitq[engine->id], &wait);
do {
- workload = pick_next_workload(gvt, ring_id);
+ workload = pick_next_workload(gvt, engine);
if (workload)
break;
wait_woken(&wait, TASK_INTERRUPTIBLE,
MAX_SCHEDULE_TIMEOUT);
} while (!kthread_should_stop());
- remove_wait_queue(&scheduler->waitq[ring_id], &wait);
+ remove_wait_queue(&scheduler->waitq[engine->id], &wait);
if (!workload)
break;
- gvt_dbg_sched("ring id %d next workload %p vgpu %d\n",
- workload->ring_id, workload,
- workload->vgpu->id);
+ gvt_dbg_sched("ring %s next workload %p vgpu %d\n",
+ engine->name, workload,
+ workload->vgpu->id);
- intel_runtime_pm_get(rpm);
+ wakeref = intel_runtime_pm_get(engine->uncore->rpm);
- gvt_dbg_sched("ring id %d will dispatch workload %p\n",
- workload->ring_id, workload);
+ gvt_dbg_sched("ring %s will dispatch workload %p\n",
+ engine->name, workload);
if (need_force_wake)
- intel_uncore_forcewake_get(&gvt->dev_priv->uncore,
- FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(engine->uncore,
+ FORCEWAKE_ALL);
/*
* Update the vReg of the vGPU which submitted this
* workload. The vGPU may use these registers for checking
@@ -1030,21 +1022,21 @@ static int workload_thread(void *priv)
goto complete;
}
- gvt_dbg_sched("ring id %d wait workload %p\n",
- workload->ring_id, workload);
+ gvt_dbg_sched("ring %s wait workload %p\n",
+ engine->name, workload);
i915_request_wait(workload->req, 0, MAX_SCHEDULE_TIMEOUT);
complete:
gvt_dbg_sched("will complete workload %p, status: %d\n",
- workload, workload->status);
+ workload, workload->status);
- complete_current_workload(gvt, ring_id);
+ complete_current_workload(gvt, engine->id);
if (need_force_wake)
- intel_uncore_forcewake_put(&gvt->dev_priv->uncore,
- FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(engine->uncore,
+ FORCEWAKE_ALL);
- intel_runtime_pm_put_unchecked(rpm);
+ intel_runtime_pm_put(engine->uncore->rpm, wakeref);
if (ret && (vgpu_is_vm_unhealthy(ret)))
enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
}
@@ -1073,7 +1065,7 @@ void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt)
gvt_dbg_core("clean workload scheduler\n");
- for_each_engine(engine, gvt->dev_priv, i) {
+ for_each_engine(engine, gvt->gt, i) {
atomic_notifier_chain_unregister(
&engine->context_status_notifier,
&gvt->shadow_ctx_notifier_block[i]);
@@ -1084,7 +1076,6 @@ void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt)
int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
{
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
- struct workload_thread_param *param = NULL;
struct intel_engine_cs *engine;
enum intel_engine_id i;
int ret;
@@ -1093,20 +1084,11 @@ int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
init_waitqueue_head(&scheduler->workload_complete_wq);
- for_each_engine(engine, gvt->dev_priv, i) {
+ for_each_engine(engine, gvt->gt, i) {
init_waitqueue_head(&scheduler->waitq[i]);
- param = kzalloc(sizeof(*param), GFP_KERNEL);
- if (!param) {
- ret = -ENOMEM;
- goto err;
- }
-
- param->gvt = gvt;
- param->ring_id = i;
-
- scheduler->thread[i] = kthread_run(workload_thread, param,
- "gvt workload %d", i);
+ scheduler->thread[i] = kthread_run(workload_thread, engine,
+ "gvt:%s", engine->name);
if (IS_ERR(scheduler->thread[i])) {
gvt_err("fail to create workload thread\n");
ret = PTR_ERR(scheduler->thread[i]);
@@ -1118,11 +1100,11 @@ int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
atomic_notifier_chain_register(&engine->context_status_notifier,
&gvt->shadow_ctx_notifier_block[i]);
}
+
return 0;
+
err:
intel_gvt_clean_workload_scheduler(gvt);
- kfree(param);
- param = NULL;
return ret;
}
@@ -1160,7 +1142,7 @@ void intel_vgpu_clean_submission(struct intel_vgpu *vgpu)
intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);
i915_context_ppgtt_root_restore(s, i915_vm_to_ppgtt(s->shadow[0]->vm));
- for_each_engine(engine, vgpu->gvt->dev_priv, id)
+ for_each_engine(engine, vgpu->gvt->gt, id)
intel_context_unpin(s->shadow[id]);
kmem_cache_destroy(s->workloads);
@@ -1217,7 +1199,7 @@ i915_context_ppgtt_root_save(struct intel_vgpu_submission *s,
*/
int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
{
- struct drm_i915_private *i915 = vgpu->gvt->dev_priv;
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
struct intel_vgpu_submission *s = &vgpu->submission;
struct intel_engine_cs *engine;
struct i915_ppgtt *ppgtt;
@@ -1230,7 +1212,7 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
i915_context_ppgtt_root_save(s, ppgtt);
- for_each_engine(engine, i915, i) {
+ for_each_engine(engine, vgpu->gvt->gt, i) {
struct intel_context *ce;
INIT_LIST_HEAD(&s->workload_q_head[i]);
@@ -1283,7 +1265,7 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
out_shadow_ctx:
i915_context_ppgtt_root_restore(s, ppgtt);
- for_each_engine(engine, i915, i) {
+ for_each_engine(engine, vgpu->gvt->gt, i) {
if (IS_ERR(s->shadow[i]))
break;
@@ -1310,6 +1292,7 @@ int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
intel_engine_mask_t engine_mask,
unsigned int interface)
{
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
struct intel_vgpu_submission *s = &vgpu->submission;
const struct intel_vgpu_submission_ops *ops[] = {
[INTEL_VGPU_EXECLIST_SUBMISSION] =
@@ -1317,10 +1300,11 @@ int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
};
int ret;
- if (WARN_ON(interface >= ARRAY_SIZE(ops)))
+ if (drm_WARN_ON(&i915->drm, interface >= ARRAY_SIZE(ops)))
return -EINVAL;
- if (WARN_ON(interface == 0 && engine_mask != ALL_ENGINES))
+ if (drm_WARN_ON(&i915->drm,
+ interface == 0 && engine_mask != ALL_ENGINES))
return -EINVAL;
if (s->active)
@@ -1442,7 +1426,7 @@ static int prepare_mm(struct intel_vgpu_workload *workload)
/**
* intel_vgpu_create_workload - create a vGPU workload
* @vgpu: a vGPU
- * @ring_id: ring index
+ * @engine: the engine
* @desc: a guest context descriptor
*
* This function is called when creating a vGPU workload.
@@ -1453,14 +1437,14 @@ static int prepare_mm(struct intel_vgpu_workload *workload)
*
*/
struct intel_vgpu_workload *
-intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
+intel_vgpu_create_workload(struct intel_vgpu *vgpu,
+ const struct intel_engine_cs *engine,
struct execlist_ctx_descriptor_format *desc)
{
struct intel_vgpu_submission *s = &vgpu->submission;
- struct list_head *q = workload_q_head(vgpu, ring_id);
+ struct list_head *q = workload_q_head(vgpu, engine);
struct intel_vgpu_workload *last_workload = NULL;
struct intel_vgpu_workload *workload = NULL;
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
u64 ring_context_gpa;
u32 head, tail, start, ctl, ctx_ctl, per_ctx, indirect_ctx;
u32 guest_head;
@@ -1487,10 +1471,10 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
list_for_each_entry_reverse(last_workload, q, list) {
if (same_context(&last_workload->ctx_desc, desc)) {
- gvt_dbg_el("ring id %d cur workload == last\n",
- ring_id);
+ gvt_dbg_el("ring %s cur workload == last\n",
+ engine->name);
gvt_dbg_el("ctx head %x real head %lx\n", head,
- last_workload->rb_tail);
+ last_workload->rb_tail);
/*
* cannot use guest context head pointer here,
* as it might not be updated at this time
@@ -1500,7 +1484,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
}
}
- gvt_dbg_el("ring id %d begin a new workload\n", ring_id);
+ gvt_dbg_el("ring %s begin a new workload\n", engine->name);
/* record some ring buffer register values for scan and shadow */
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
@@ -1520,7 +1504,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
if (IS_ERR(workload))
return workload;
- workload->ring_id = ring_id;
+ workload->engine = engine;
workload->ctx_desc = *desc;
workload->ring_context_gpa = ring_context_gpa;
workload->rb_head = head;
@@ -1529,7 +1513,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
workload->rb_start = start;
workload->rb_ctl = ctl;
- if (ring_id == RCS0) {
+ if (engine->id == RCS0) {
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
RING_CTX_OFF(bb_per_ctx_ptr.val), &per_ctx, 4);
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
@@ -1567,8 +1551,8 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
}
}
- gvt_dbg_el("workload %p ring id %d head %x tail %x start %x ctl %x\n",
- workload, ring_id, head, tail, start, ctl);
+ gvt_dbg_el("workload %p ring %s head %x tail %x start %x ctl %x\n",
+ workload, engine->name, head, tail, start, ctl);
ret = prepare_mm(workload);
if (ret) {
@@ -1579,10 +1563,11 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
/* Only scan and shadow the first workload in the queue
* as there is only one pre-allocated buf-obj for shadow.
*/
- if (list_empty(workload_q_head(vgpu, ring_id))) {
- intel_runtime_pm_get(&dev_priv->runtime_pm);
- ret = intel_gvt_scan_and_shadow_workload(workload);
- intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
+ if (list_empty(q)) {
+ intel_wakeref_t wakeref;
+
+ with_intel_runtime_pm(engine->gt->uncore->rpm, wakeref)
+ ret = intel_gvt_scan_and_shadow_workload(workload);
}
if (ret) {
@@ -1602,7 +1587,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
void intel_vgpu_queue_workload(struct intel_vgpu_workload *workload)
{
list_add_tail(&workload->list,
- workload_q_head(workload->vgpu, workload->ring_id));
+ workload_q_head(workload->vgpu, workload->engine));
intel_gvt_kick_schedule(workload->vgpu->gvt);
- wake_up(&workload->vgpu->gvt->scheduler.waitq[workload->ring_id]);
+ wake_up(&workload->vgpu->gvt->scheduler.waitq[workload->engine->id]);
}
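
Annotation: the scan-and-shadow hunk above swaps an explicit intel_runtime_pm_get()/intel_runtime_pm_put_unchecked() pair for the with_intel_runtime_pm() helper, which ties the wakeref's lifetime to the guarded statement and so cannot leak it on an early return. The before/after shape, reduced from the hunk:

	/* Before: manual acquire/release bracketing the call. */
	intel_runtime_pm_get(&dev_priv->runtime_pm);
	ret = intel_gvt_scan_and_shadow_workload(workload);
	intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);

	/* After: the wakeref is held exactly for the guarded statement. */
	intel_wakeref_t wakeref;

	with_intel_runtime_pm(engine->gt->uncore->rpm, wakeref)
		ret = intel_gvt_scan_and_shadow_workload(workload);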
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
index c50d14a9ce85..bf7fc0ca4cb1 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -79,7 +79,7 @@ struct intel_shadow_wa_ctx {
struct intel_vgpu_workload {
struct intel_vgpu *vgpu;
- int ring_id;
+ const struct intel_engine_cs *engine;
struct i915_request *req;
/* if this workload has been dispatched to i915? */
bool dispatched;
@@ -129,8 +129,8 @@ struct intel_vgpu_shadow_bb {
bool ppgtt;
};
-#define workload_q_head(vgpu, ring_id) \
- (&(vgpu->submission.workload_q_head[ring_id]))
+#define workload_q_head(vgpu, e) \
+ (&(vgpu)->submission.workload_q_head[(e)->id])
void intel_vgpu_queue_workload(struct intel_vgpu_workload *workload);
@@ -155,7 +155,8 @@ extern const struct intel_vgpu_submission_ops
intel_vgpu_execlist_submission_ops;
struct intel_vgpu_workload *
-intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
+intel_vgpu_create_workload(struct intel_vgpu *vgpu,
+ const struct intel_engine_cs *engine,
struct execlist_ctx_descriptor_format *desc);
void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload);
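
Annotation: besides taking the engine pointer, the reworked workload_q_head() now parenthesizes both macro parameters. A tiny stand-alone C example of why that matters once a macro dereferences its arguments (all names here are hypothetical):

	#include <stdio.h>

	struct engine { int id; };
	struct vgpu { int q[4]; };

	/* Unparenthesized: breaks if the caller passes an expression. */
	#define Q_BAD(v, e)	(&v->q[e->id])
	/* Parenthesized, as in the patch: safe for any argument. */
	#define Q_GOOD(v, e)	(&(v)->q[(e)->id])

	int main(void)
	{
		struct vgpu g = { { 1, 2, 3, 4 } };
		struct engine e0 = { 0 }, *pe = &e0;

		/*
		 * Q_BAD(&g, pe + 0) would expand to &&g->q[pe + 0->id]
		 * and fail to compile; Q_GOOD handles it.
		 */
		printf("%d\n", *Q_GOOD(&g, pe + 0)); /* prints 1 */
		return 0;
	}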
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 487af6ea9972..78f14f04d2ea 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -37,6 +37,7 @@
void populate_pvinfo_page(struct intel_vgpu *vgpu)
{
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
/* setup the ballooning information */
vgpu_vreg64_t(vgpu, vgtif_reg(magic)) = VGT_MAGIC;
vgpu_vreg_t(vgpu, vgtif_reg(version_major)) = 1;
@@ -69,7 +70,7 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu)
vgpu_hidden_gmadr_base(vgpu), vgpu_hidden_sz(vgpu));
gvt_dbg_core("fence size %d\n", vgpu_fence_sz(vgpu));
- WARN_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
+ drm_WARN_ON(&i915->drm, sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
}
#define VGPU_MAX_WEIGHT 16
@@ -148,12 +149,12 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
gvt->types[i].avail_instance = min(low_avail / vgpu_types[i].low_mm,
high_avail / vgpu_types[i].high_mm);
- if (IS_GEN(gvt->dev_priv, 8))
+ if (IS_GEN(gvt->gt->i915, 8))
sprintf(gvt->types[i].name, "GVTg_V4_%s",
- vgpu_types[i].name);
- else if (IS_GEN(gvt->dev_priv, 9))
+ vgpu_types[i].name);
+ else if (IS_GEN(gvt->gt->i915, 9))
sprintf(gvt->types[i].name, "GVTg_V5_%s",
- vgpu_types[i].name);
+ vgpu_types[i].name);
gvt_dbg_core("type[%d]: %s avail %u low %u high %u fence %u weight %u res %s\n",
i, gvt->types[i].name,
@@ -271,10 +272,11 @@ void intel_gvt_release_vgpu(struct intel_vgpu *vgpu)
void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
{
struct intel_gvt *gvt = vgpu->gvt;
+ struct drm_i915_private *i915 = gvt->gt->i915;
mutex_lock(&vgpu->vgpu_lock);
- WARN(vgpu->active, "vGPU is still active!\n");
+ drm_WARN(&i915->drm, vgpu->active, "vGPU is still active!\n");
intel_gvt_debugfs_remove_vgpu(vgpu);
intel_vgpu_clean_sched_policy(vgpu);
@@ -426,9 +428,7 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
if (ret)
goto out_clean_sched_policy;
- /*TODO: add more platforms support */
- if (IS_SKYLAKE(gvt->dev_priv) || IS_KABYLAKE(gvt->dev_priv))
- ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_D);
+ ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_D);
if (ret)
goto out_clean_sched_policy;
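
Annotation: the WARN_ON() → drm_WARN_ON() conversions through gvt/ all follow one recipe: fetch the drm_i915_private at the top of the function and pass its embedded drm_device, so the warning splat names the specific GPU on multi-device systems. Reduced from the populate_pvinfo_page() hunk:

	/* Before: device-anonymous warning. */
	WARN_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);

	/* After: the backtrace is prefixed with the drm device identity. */
	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;

	drm_WARN_ON(&i915->drm, sizeof(struct vgt_if) != VGT_PVINFO_SIZE);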
diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index 992b00fc5745..c4048628188a 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -7,6 +7,7 @@
#include <linux/debugobjects.h>
#include "gt/intel_context.h"
+#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_ring.h"
@@ -452,6 +453,9 @@ static void enable_signaling(struct i915_active_fence *active)
{
struct dma_fence *fence;
+ if (unlikely(is_barrier(active)))
+ return;
+
fence = i915_active_fence_get(active);
if (!fence)
return;
@@ -460,26 +464,49 @@ static void enable_signaling(struct i915_active_fence *active)
dma_fence_put(fence);
}
-int i915_active_wait(struct i915_active *ref)
+static int flush_barrier(struct active_node *it)
{
- struct active_node *it, *n;
- int err = 0;
+ struct intel_engine_cs *engine;
- might_sleep();
+ if (likely(!is_barrier(&it->base)))
+ return 0;
- if (!i915_active_acquire_if_busy(ref))
+ engine = __barrier_to_engine(it);
+ smp_rmb(); /* serialise with add_active_barriers */
+ if (!is_barrier(&it->base))
return 0;
- /* Flush lazy signals */
+ return intel_engine_flush_barriers(engine);
+}
+
+static int flush_lazy_signals(struct i915_active *ref)
+{
+ struct active_node *it, *n;
+ int err = 0;
+
enable_signaling(&ref->excl);
rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
- if (is_barrier(&it->base)) /* unconnected idle barrier */
- continue;
+ err = flush_barrier(it); /* unconnected idle barrier? */
+ if (err)
+ break;
enable_signaling(&it->base);
}
- /* Any fence added after the wait begins will not be auto-signaled */
+ return err;
+}
+
+int i915_active_wait(struct i915_active *ref)
+{
+ int err;
+
+ might_sleep();
+
+ if (!i915_active_acquire_if_busy(ref))
+ return 0;
+
+ /* Any fence added after the wait begins will not be auto-signaled */
+ err = flush_lazy_signals(ref);
i915_active_release(ref);
if (err)
return err;
@@ -491,25 +518,81 @@ int i915_active_wait(struct i915_active *ref)
return 0;
}
-int i915_request_await_active(struct i915_request *rq, struct i915_active *ref)
+static int __await_active(struct i915_active_fence *active,
+ int (*fn)(void *arg, struct dma_fence *fence),
+ void *arg)
+{
+ struct dma_fence *fence;
+
+ if (is_barrier(active)) /* XXX flush the barrier? */
+ return 0;
+
+ fence = i915_active_fence_get(active);
+ if (fence) {
+ int err;
+
+ err = fn(arg, fence);
+ dma_fence_put(fence);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+
+static int await_active(struct i915_active *ref,
+ unsigned int flags,
+ int (*fn)(void *arg, struct dma_fence *fence),
+ void *arg)
{
int err = 0;
+ /* We must always wait for the exclusive fence! */
if (rcu_access_pointer(ref->excl.fence)) {
- struct dma_fence *fence;
-
- rcu_read_lock();
- fence = dma_fence_get_rcu_safe(&ref->excl.fence);
- rcu_read_unlock();
- if (fence) {
- err = i915_request_await_dma_fence(rq, fence);
- dma_fence_put(fence);
+ err = __await_active(&ref->excl, fn, arg);
+ if (err)
+ return err;
+ }
+
+ if (flags & I915_ACTIVE_AWAIT_ALL && i915_active_acquire_if_busy(ref)) {
+ struct active_node *it, *n;
+
+ rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
+ err = __await_active(&it->base, fn, arg);
+ if (err)
+ break;
}
+ i915_active_release(ref);
+ if (err)
+ return err;
}
- /* In the future we may choose to await on all fences */
+ return 0;
+}
+
+static int rq_await_fence(void *arg, struct dma_fence *fence)
+{
+ return i915_request_await_dma_fence(arg, fence);
+}
- return err;
+int i915_request_await_active(struct i915_request *rq,
+ struct i915_active *ref,
+ unsigned int flags)
+{
+ return await_active(ref, flags, rq_await_fence, rq);
+}
+
+static int sw_await_fence(void *arg, struct dma_fence *fence)
+{
+ return i915_sw_fence_await_dma_fence(arg, fence, 0,
+ GFP_NOWAIT | __GFP_NOWARN);
+}
+
+int i915_sw_fence_await_active(struct i915_sw_fence *fence,
+ struct i915_active *ref,
+ unsigned int flags)
+{
+ return await_active(ref, flags, sw_await_fence, fence);
}
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
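
Annotation: the await rework above folds two nearly identical walkers into one await_active() that takes a callback: rq_await_fence() and sw_await_fence() are thin adapters, and I915_ACTIVE_AWAIT_ALL opts in to the full rbtree walk rather than just the exclusive fence. A compilable stand-alone sketch of the same callback-injection shape (all names hypothetical):

	#include <stdio.h>

	struct fence { int id; };

	/* One traversal; behaviour is injected via fn, as in await_active(). */
	static int for_each_fence(struct fence *fences, int n,
				  int (*fn)(void *arg, struct fence *f),
				  void *arg)
	{
		for (int i = 0; i < n; i++) {
			int err = fn(arg, &fences[i]);
			if (err)
				return err;
		}
		return 0;
	}

	static int print_fence(void *arg, struct fence *f)
	{
		printf("%s fence %d\n", (const char *)arg, f->id);
		return 0;
	}

	int main(void)
	{
		struct fence f[] = { { 1 }, { 2 } };

		/* Adapters differ only in fn/arg, like the rq vs sw-fence pair. */
		return for_each_fence(f, 2, print_fence, "await");
	}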
diff --git a/drivers/gpu/drm/i915/i915_active.h b/drivers/gpu/drm/i915/i915_active.h
index 973ff0447c6c..b3282ae7913c 100644
--- a/drivers/gpu/drm/i915/i915_active.h
+++ b/drivers/gpu/drm/i915/i915_active.h
@@ -183,7 +183,13 @@ static inline bool i915_active_has_exclusive(struct i915_active *ref)
int i915_active_wait(struct i915_active *ref);
-int i915_request_await_active(struct i915_request *rq, struct i915_active *ref);
+int i915_sw_fence_await_active(struct i915_sw_fence *fence,
+ struct i915_active *ref,
+ unsigned int flags);
+int i915_request_await_active(struct i915_request *rq,
+ struct i915_active *ref,
+ unsigned int flags);
+#define I915_ACTIVE_AWAIT_ALL BIT(0)
int i915_active_acquire(struct i915_active *ref);
bool i915_active_acquire_if_busy(struct i915_active *ref);
diff --git a/drivers/gpu/drm/i915/i915_buddy.c b/drivers/gpu/drm/i915/i915_buddy.c
index 66883af64ca1..20babbdb297d 100644
--- a/drivers/gpu/drm/i915/i915_buddy.c
+++ b/drivers/gpu/drm/i915/i915_buddy.c
@@ -312,7 +312,8 @@ i915_buddy_alloc(struct i915_buddy_mm *mm, unsigned int order)
return block;
out_free:
- __i915_buddy_free(mm, block);
+ if (i != order)
+ __i915_buddy_free(mm, block);
return ERR_PTR(err);
}
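
Annotation: the i915_buddy_alloc() change is a one-line error-path fix. Reading the hunk, out_free is reached from the split-down loop, and only once at least one split has happened (i != order) does the function hold a block it produced itself; freeing unconditionally could hand a block back twice. The guarded unwind reads as:

	out_free:
		if (i != order) /* a split occurred, so 'block' is ours to free */
			__i915_buddy_free(mm, block);
		return ERR_PTR(err);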
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 8f2525e4ce0f..6ca797128aa1 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -996,220 +996,6 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
return ret;
}
-static int ilk_drpc_info(struct seq_file *m)
-{
- struct drm_i915_private *i915 = node_to_i915(m->private);
- struct intel_uncore *uncore = &i915->uncore;
- u32 rgvmodectl, rstdbyctl;
- u16 crstandvid;
-
- rgvmodectl = intel_uncore_read(uncore, MEMMODECTL);
- rstdbyctl = intel_uncore_read(uncore, RSTDBYCTL);
- crstandvid = intel_uncore_read16(uncore, CRSTANDVID);
-
- seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
- seq_printf(m, "Boost freq: %d\n",
- (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
- MEMMODE_BOOST_FREQ_SHIFT);
- seq_printf(m, "HW control enabled: %s\n",
- yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
- seq_printf(m, "SW control enabled: %s\n",
- yesno(rgvmodectl & MEMMODE_SWMODE_EN));
- seq_printf(m, "Gated voltage change: %s\n",
- yesno(rgvmodectl & MEMMODE_RCLK_GATE));
- seq_printf(m, "Starting frequency: P%d\n",
- (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
- seq_printf(m, "Max P-state: P%d\n",
- (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
- seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
- seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
- seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
- seq_printf(m, "Render standby enabled: %s\n",
- yesno(!(rstdbyctl & RCX_SW_EXIT)));
- seq_puts(m, "Current RS state: ");
- switch (rstdbyctl & RSX_STATUS_MASK) {
- case RSX_STATUS_ON:
- seq_puts(m, "on\n");
- break;
- case RSX_STATUS_RC1:
- seq_puts(m, "RC1\n");
- break;
- case RSX_STATUS_RC1E:
- seq_puts(m, "RC1E\n");
- break;
- case RSX_STATUS_RS1:
- seq_puts(m, "RS1\n");
- break;
- case RSX_STATUS_RS2:
- seq_puts(m, "RS2 (RC6)\n");
- break;
- case RSX_STATUS_RS3:
- seq_puts(m, "RC3 (RC6+)\n");
- break;
- default:
- seq_puts(m, "unknown\n");
- break;
- }
-
- return 0;
-}
-
-static int i915_forcewake_domains(struct seq_file *m, void *data)
-{
- struct drm_i915_private *i915 = node_to_i915(m->private);
- struct intel_uncore *uncore = &i915->uncore;
- struct intel_uncore_forcewake_domain *fw_domain;
- unsigned int tmp;
-
- seq_printf(m, "user.bypass_count = %u\n",
- uncore->user_forcewake_count);
-
- for_each_fw_domain(fw_domain, uncore, tmp)
- seq_printf(m, "%s.wake_count = %u\n",
- intel_uncore_forcewake_domain_to_str(fw_domain->id),
- READ_ONCE(fw_domain->wake_count));
-
- return 0;
-}
-
-static void print_rc6_res(struct seq_file *m,
- const char *title,
- const i915_reg_t reg)
-{
- struct drm_i915_private *i915 = node_to_i915(m->private);
- intel_wakeref_t wakeref;
-
- with_intel_runtime_pm(&i915->runtime_pm, wakeref)
- seq_printf(m, "%s %u (%llu us)\n", title,
- intel_uncore_read(&i915->uncore, reg),
- intel_rc6_residency_us(&i915->gt.rc6, reg));
-}
-
-static int vlv_drpc_info(struct seq_file *m)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- u32 rcctl1, pw_status;
-
- pw_status = I915_READ(VLV_GTLC_PW_STATUS);
- rcctl1 = I915_READ(GEN6_RC_CONTROL);
-
- seq_printf(m, "RC6 Enabled: %s\n",
- yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
- GEN6_RC_CTL_EI_MODE(1))));
- seq_printf(m, "Render Power Well: %s\n",
- (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
- seq_printf(m, "Media Power Well: %s\n",
- (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
-
- print_rc6_res(m, "Render RC6 residency since boot:", VLV_GT_RENDER_RC6);
- print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6);
-
- return i915_forcewake_domains(m, NULL);
-}
-
-static int gen6_drpc_info(struct seq_file *m)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- u32 gt_core_status, rcctl1, rc6vids = 0;
- u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;
-
- gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
- trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);
-
- rcctl1 = I915_READ(GEN6_RC_CONTROL);
- if (INTEL_GEN(dev_priv) >= 9) {
- gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE);
- gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
- }
-
- if (INTEL_GEN(dev_priv) <= 7)
- sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS,
- &rc6vids, NULL);
-
- seq_printf(m, "RC1e Enabled: %s\n",
- yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
- seq_printf(m, "RC6 Enabled: %s\n",
- yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
- if (INTEL_GEN(dev_priv) >= 9) {
- seq_printf(m, "Render Well Gating Enabled: %s\n",
- yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
- seq_printf(m, "Media Well Gating Enabled: %s\n",
- yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
- }
- seq_printf(m, "Deep RC6 Enabled: %s\n",
- yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
- seq_printf(m, "Deepest RC6 Enabled: %s\n",
- yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
- seq_puts(m, "Current RC state: ");
- switch (gt_core_status & GEN6_RCn_MASK) {
- case GEN6_RC0:
- if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
- seq_puts(m, "Core Power Down\n");
- else
- seq_puts(m, "on\n");
- break;
- case GEN6_RC3:
- seq_puts(m, "RC3\n");
- break;
- case GEN6_RC6:
- seq_puts(m, "RC6\n");
- break;
- case GEN6_RC7:
- seq_puts(m, "RC7\n");
- break;
- default:
- seq_puts(m, "Unknown\n");
- break;
- }
-
- seq_printf(m, "Core Power Down: %s\n",
- yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
- if (INTEL_GEN(dev_priv) >= 9) {
- seq_printf(m, "Render Power Well: %s\n",
- (gen9_powergate_status &
- GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
- seq_printf(m, "Media Power Well: %s\n",
- (gen9_powergate_status &
- GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
- }
-
- /* Not exactly sure what this is */
- print_rc6_res(m, "RC6 \"Locked to RPn\" residency since boot:",
- GEN6_GT_GFX_RC6_LOCKED);
- print_rc6_res(m, "RC6 residency since boot:", GEN6_GT_GFX_RC6);
- print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
- print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);
-
- if (INTEL_GEN(dev_priv) <= 7) {
- seq_printf(m, "RC6 voltage: %dmV\n",
- GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
- seq_printf(m, "RC6+ voltage: %dmV\n",
- GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
- seq_printf(m, "RC6++ voltage: %dmV\n",
- GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
- }
-
- return i915_forcewake_domains(m, NULL);
-}
-
-static int i915_drpc_info(struct seq_file *m, void *unused)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- intel_wakeref_t wakeref;
- int err = -ENODEV;
-
- with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- err = vlv_drpc_info(m);
- else if (INTEL_GEN(dev_priv) >= 6)
- err = gen6_drpc_info(m);
- else
- err = ilk_drpc_info(m);
- }
-
- return err;
-}
-
static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -2360,10 +2146,8 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_guc_stage_pool", i915_guc_stage_pool, 0},
{"i915_huc_load_status", i915_huc_load_status_info, 0},
{"i915_frequency_info", i915_frequency_info, 0},
- {"i915_drpc_info", i915_drpc_info, 0},
{"i915_ring_freq_table", i915_ring_freq_table, 0},
{"i915_context_status", i915_context_status, 0},
- {"i915_forcewake_domains", i915_forcewake_domains, 0},
{"i915_swizzle_info", i915_swizzle_info, 0},
{"i915_llc", i915_llc, 0},
{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index dba5fe1391e8..82d9df15b22b 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -44,7 +44,6 @@
#include <drm/drm_ioctl.h>
#include <drm/drm_irq.h>
#include <drm/drm_probe_helper.h>
-#include <drm/i915_drm.h>
#include "display/intel_acpi.h"
#include "display/intel_audio.h"
@@ -71,6 +70,7 @@
#include "i915_debugfs.h"
#include "i915_drv.h"
+#include "i915_ioc32.h"
#include "i915_irq.h"
#include "i915_memcpy.h"
#include "i915_perf.h"
@@ -80,6 +80,8 @@
#include "i915_sysfs.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
+#include "intel_dram.h"
+#include "intel_gvt.h"
#include "intel_memory_region.h"
#include "intel_pm.h"
#include "vlv_suspend.h"
@@ -238,8 +240,14 @@ static int i915_driver_modeset_probe_noirq(struct drm_i915_private *i915)
intel_csr_ucode_init(i915);
+ ret = intel_modeset_init_noirq(i915);
+ if (ret)
+ goto cleanup_vga_client;
+
return 0;
+cleanup_vga_client:
+ intel_vga_unregister(i915);
out:
return ret;
}
@@ -381,6 +389,7 @@ static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
pre |= IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0);
pre |= IS_BXT_REVID(dev_priv, 0, BXT_REVID_B_LAST);
pre |= IS_KBL_REVID(dev_priv, 0, KBL_REVID_A0);
+ pre |= IS_GLK_REVID(dev_priv, 0, GLK_REVID_A2);
if (pre) {
drm_err(&dev_priv->drm, "This is a pre-production stepping. "
@@ -459,7 +468,6 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
intel_init_display_hooks(dev_priv);
intel_init_clock_gating_hooks(dev_priv);
intel_init_audio_hooks(dev_priv);
- intel_display_crc_init(dev_priv);
intel_detect_preproduction_hw(dev_priv);
@@ -558,494 +566,6 @@ static void intel_sanitize_options(struct drm_i915_private *dev_priv)
intel_gvt_sanitize_options(dev_priv);
}
-#define DRAM_TYPE_STR(type) [INTEL_DRAM_ ## type] = #type
-
-static const char *intel_dram_type_str(enum intel_dram_type type)
-{
- static const char * const str[] = {
- DRAM_TYPE_STR(UNKNOWN),
- DRAM_TYPE_STR(DDR3),
- DRAM_TYPE_STR(DDR4),
- DRAM_TYPE_STR(LPDDR3),
- DRAM_TYPE_STR(LPDDR4),
- };
-
- if (type >= ARRAY_SIZE(str))
- type = INTEL_DRAM_UNKNOWN;
-
- return str[type];
-}
-
-#undef DRAM_TYPE_STR
-
-static int intel_dimm_num_devices(const struct dram_dimm_info *dimm)
-{
- return dimm->ranks * 64 / (dimm->width ?: 1);
-}
-
-/* Returns total GB for the whole DIMM */
-static int skl_get_dimm_size(u16 val)
-{
- return val & SKL_DRAM_SIZE_MASK;
-}
-
-static int skl_get_dimm_width(u16 val)
-{
- if (skl_get_dimm_size(val) == 0)
- return 0;
-
- switch (val & SKL_DRAM_WIDTH_MASK) {
- case SKL_DRAM_WIDTH_X8:
- case SKL_DRAM_WIDTH_X16:
- case SKL_DRAM_WIDTH_X32:
- val = (val & SKL_DRAM_WIDTH_MASK) >> SKL_DRAM_WIDTH_SHIFT;
- return 8 << val;
- default:
- MISSING_CASE(val);
- return 0;
- }
-}
-
-static int skl_get_dimm_ranks(u16 val)
-{
- if (skl_get_dimm_size(val) == 0)
- return 0;
-
- val = (val & SKL_DRAM_RANK_MASK) >> SKL_DRAM_RANK_SHIFT;
-
- return val + 1;
-}
-
-/* Returns total GB for the whole DIMM */
-static int cnl_get_dimm_size(u16 val)
-{
- return (val & CNL_DRAM_SIZE_MASK) / 2;
-}
-
-static int cnl_get_dimm_width(u16 val)
-{
- if (cnl_get_dimm_size(val) == 0)
- return 0;
-
- switch (val & CNL_DRAM_WIDTH_MASK) {
- case CNL_DRAM_WIDTH_X8:
- case CNL_DRAM_WIDTH_X16:
- case CNL_DRAM_WIDTH_X32:
- val = (val & CNL_DRAM_WIDTH_MASK) >> CNL_DRAM_WIDTH_SHIFT;
- return 8 << val;
- default:
- MISSING_CASE(val);
- return 0;
- }
-}
-
-static int cnl_get_dimm_ranks(u16 val)
-{
- if (cnl_get_dimm_size(val) == 0)
- return 0;
-
- val = (val & CNL_DRAM_RANK_MASK) >> CNL_DRAM_RANK_SHIFT;
-
- return val + 1;
-}
-
-static bool
-skl_is_16gb_dimm(const struct dram_dimm_info *dimm)
-{
- /* Convert total GB to Gb per DRAM device */
- return 8 * dimm->size / (intel_dimm_num_devices(dimm) ?: 1) == 16;
-}
-
-static void
-skl_dram_get_dimm_info(struct drm_i915_private *dev_priv,
- struct dram_dimm_info *dimm,
- int channel, char dimm_name, u16 val)
-{
- if (INTEL_GEN(dev_priv) >= 10) {
- dimm->size = cnl_get_dimm_size(val);
- dimm->width = cnl_get_dimm_width(val);
- dimm->ranks = cnl_get_dimm_ranks(val);
- } else {
- dimm->size = skl_get_dimm_size(val);
- dimm->width = skl_get_dimm_width(val);
- dimm->ranks = skl_get_dimm_ranks(val);
- }
-
- drm_dbg_kms(&dev_priv->drm,
- "CH%u DIMM %c size: %u GB, width: X%u, ranks: %u, 16Gb DIMMs: %s\n",
- channel, dimm_name, dimm->size, dimm->width, dimm->ranks,
- yesno(skl_is_16gb_dimm(dimm)));
-}
-
-static int
-skl_dram_get_channel_info(struct drm_i915_private *dev_priv,
- struct dram_channel_info *ch,
- int channel, u32 val)
-{
- skl_dram_get_dimm_info(dev_priv, &ch->dimm_l,
- channel, 'L', val & 0xffff);
- skl_dram_get_dimm_info(dev_priv, &ch->dimm_s,
- channel, 'S', val >> 16);
-
- if (ch->dimm_l.size == 0 && ch->dimm_s.size == 0) {
- drm_dbg_kms(&dev_priv->drm, "CH%u not populated\n", channel);
- return -EINVAL;
- }
-
- if (ch->dimm_l.ranks == 2 || ch->dimm_s.ranks == 2)
- ch->ranks = 2;
- else if (ch->dimm_l.ranks == 1 && ch->dimm_s.ranks == 1)
- ch->ranks = 2;
- else
- ch->ranks = 1;
-
- ch->is_16gb_dimm =
- skl_is_16gb_dimm(&ch->dimm_l) ||
- skl_is_16gb_dimm(&ch->dimm_s);
-
- drm_dbg_kms(&dev_priv->drm, "CH%u ranks: %u, 16Gb DIMMs: %s\n",
- channel, ch->ranks, yesno(ch->is_16gb_dimm));
-
- return 0;
-}
-
-static bool
-intel_is_dram_symmetric(const struct dram_channel_info *ch0,
- const struct dram_channel_info *ch1)
-{
- return !memcmp(ch0, ch1, sizeof(*ch0)) &&
- (ch0->dimm_s.size == 0 ||
- !memcmp(&ch0->dimm_l, &ch0->dimm_s, sizeof(ch0->dimm_l)));
-}
-
-static int
-skl_dram_get_channels_info(struct drm_i915_private *dev_priv)
-{
- struct dram_info *dram_info = &dev_priv->dram_info;
- struct dram_channel_info ch0 = {}, ch1 = {};
- u32 val;
- int ret;
-
- val = I915_READ(SKL_MAD_DIMM_CH0_0_0_0_MCHBAR_MCMAIN);
- ret = skl_dram_get_channel_info(dev_priv, &ch0, 0, val);
- if (ret == 0)
- dram_info->num_channels++;
-
- val = I915_READ(SKL_MAD_DIMM_CH1_0_0_0_MCHBAR_MCMAIN);
- ret = skl_dram_get_channel_info(dev_priv, &ch1, 1, val);
- if (ret == 0)
- dram_info->num_channels++;
-
- if (dram_info->num_channels == 0) {
- drm_info(&dev_priv->drm,
- "Number of memory channels is zero\n");
- return -EINVAL;
- }
-
- /*
- * If any of the channel is single rank channel, worst case output
- * will be same as if single rank memory, so consider single rank
- * memory.
- */
- if (ch0.ranks == 1 || ch1.ranks == 1)
- dram_info->ranks = 1;
- else
- dram_info->ranks = max(ch0.ranks, ch1.ranks);
-
- if (dram_info->ranks == 0) {
- drm_info(&dev_priv->drm,
- "couldn't get memory rank information\n");
- return -EINVAL;
- }
-
- dram_info->is_16gb_dimm = ch0.is_16gb_dimm || ch1.is_16gb_dimm;
-
- dram_info->symmetric_memory = intel_is_dram_symmetric(&ch0, &ch1);
-
- drm_dbg_kms(&dev_priv->drm, "Memory configuration is symmetric? %s\n",
- yesno(dram_info->symmetric_memory));
- return 0;
-}
-
-static enum intel_dram_type
-skl_get_dram_type(struct drm_i915_private *dev_priv)
-{
- u32 val;
-
- val = I915_READ(SKL_MAD_INTER_CHANNEL_0_0_0_MCHBAR_MCMAIN);
-
- switch (val & SKL_DRAM_DDR_TYPE_MASK) {
- case SKL_DRAM_DDR_TYPE_DDR3:
- return INTEL_DRAM_DDR3;
- case SKL_DRAM_DDR_TYPE_DDR4:
- return INTEL_DRAM_DDR4;
- case SKL_DRAM_DDR_TYPE_LPDDR3:
- return INTEL_DRAM_LPDDR3;
- case SKL_DRAM_DDR_TYPE_LPDDR4:
- return INTEL_DRAM_LPDDR4;
- default:
- MISSING_CASE(val);
- return INTEL_DRAM_UNKNOWN;
- }
-}
-
-static int
-skl_get_dram_info(struct drm_i915_private *dev_priv)
-{
- struct dram_info *dram_info = &dev_priv->dram_info;
- u32 mem_freq_khz, val;
- int ret;
-
- dram_info->type = skl_get_dram_type(dev_priv);
- drm_dbg_kms(&dev_priv->drm, "DRAM type: %s\n",
- intel_dram_type_str(dram_info->type));
-
- ret = skl_dram_get_channels_info(dev_priv);
- if (ret)
- return ret;
-
- val = I915_READ(SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU);
- mem_freq_khz = DIV_ROUND_UP((val & SKL_REQ_DATA_MASK) *
- SKL_MEMORY_FREQ_MULTIPLIER_HZ, 1000);
-
- dram_info->bandwidth_kbps = dram_info->num_channels *
- mem_freq_khz * 8;
-
- if (dram_info->bandwidth_kbps == 0) {
- drm_info(&dev_priv->drm,
- "Couldn't get system memory bandwidth\n");
- return -EINVAL;
- }
-
- dram_info->valid = true;
- return 0;
-}
-
-/* Returns Gb per DRAM device */
-static int bxt_get_dimm_size(u32 val)
-{
- switch (val & BXT_DRAM_SIZE_MASK) {
- case BXT_DRAM_SIZE_4GBIT:
- return 4;
- case BXT_DRAM_SIZE_6GBIT:
- return 6;
- case BXT_DRAM_SIZE_8GBIT:
- return 8;
- case BXT_DRAM_SIZE_12GBIT:
- return 12;
- case BXT_DRAM_SIZE_16GBIT:
- return 16;
- default:
- MISSING_CASE(val);
- return 0;
- }
-}
-
-static int bxt_get_dimm_width(u32 val)
-{
- if (!bxt_get_dimm_size(val))
- return 0;
-
- val = (val & BXT_DRAM_WIDTH_MASK) >> BXT_DRAM_WIDTH_SHIFT;
-
- return 8 << val;
-}
-
-static int bxt_get_dimm_ranks(u32 val)
-{
- if (!bxt_get_dimm_size(val))
- return 0;
-
- switch (val & BXT_DRAM_RANK_MASK) {
- case BXT_DRAM_RANK_SINGLE:
- return 1;
- case BXT_DRAM_RANK_DUAL:
- return 2;
- default:
- MISSING_CASE(val);
- return 0;
- }
-}
-
-static enum intel_dram_type bxt_get_dimm_type(u32 val)
-{
- if (!bxt_get_dimm_size(val))
- return INTEL_DRAM_UNKNOWN;
-
- switch (val & BXT_DRAM_TYPE_MASK) {
- case BXT_DRAM_TYPE_DDR3:
- return INTEL_DRAM_DDR3;
- case BXT_DRAM_TYPE_LPDDR3:
- return INTEL_DRAM_LPDDR3;
- case BXT_DRAM_TYPE_DDR4:
- return INTEL_DRAM_DDR4;
- case BXT_DRAM_TYPE_LPDDR4:
- return INTEL_DRAM_LPDDR4;
- default:
- MISSING_CASE(val);
- return INTEL_DRAM_UNKNOWN;
- }
-}
-
-static void bxt_get_dimm_info(struct dram_dimm_info *dimm,
- u32 val)
-{
- dimm->width = bxt_get_dimm_width(val);
- dimm->ranks = bxt_get_dimm_ranks(val);
-
- /*
- * Size in register is Gb per DRAM device. Convert to total
- * GB to match the way we report this for non-LP platforms.
- */
- dimm->size = bxt_get_dimm_size(val) * intel_dimm_num_devices(dimm) / 8;
-}
-
-static int
-bxt_get_dram_info(struct drm_i915_private *dev_priv)
-{
- struct dram_info *dram_info = &dev_priv->dram_info;
- u32 dram_channels;
- u32 mem_freq_khz, val;
- u8 num_active_channels;
- int i;
-
- val = I915_READ(BXT_P_CR_MC_BIOS_REQ_0_0_0);
- mem_freq_khz = DIV_ROUND_UP((val & BXT_REQ_DATA_MASK) *
- BXT_MEMORY_FREQ_MULTIPLIER_HZ, 1000);
-
- dram_channels = val & BXT_DRAM_CHANNEL_ACTIVE_MASK;
- num_active_channels = hweight32(dram_channels);
-
- /* Each active bit represents 4-byte channel */
- dram_info->bandwidth_kbps = (mem_freq_khz * num_active_channels * 4);
-
- if (dram_info->bandwidth_kbps == 0) {
- drm_info(&dev_priv->drm,
- "Couldn't get system memory bandwidth\n");
- return -EINVAL;
- }
-
- /*
- * Now read each DUNIT8/9/10/11 to check the rank of each dimms.
- */
- for (i = BXT_D_CR_DRP0_DUNIT_START; i <= BXT_D_CR_DRP0_DUNIT_END; i++) {
- struct dram_dimm_info dimm;
- enum intel_dram_type type;
-
- val = I915_READ(BXT_D_CR_DRP0_DUNIT(i));
- if (val == 0xFFFFFFFF)
- continue;
-
- dram_info->num_channels++;
-
- bxt_get_dimm_info(&dimm, val);
- type = bxt_get_dimm_type(val);
-
- drm_WARN_ON(&dev_priv->drm, type != INTEL_DRAM_UNKNOWN &&
- dram_info->type != INTEL_DRAM_UNKNOWN &&
- dram_info->type != type);
-
- drm_dbg_kms(&dev_priv->drm,
- "CH%u DIMM size: %u GB, width: X%u, ranks: %u, type: %s\n",
- i - BXT_D_CR_DRP0_DUNIT_START,
- dimm.size, dimm.width, dimm.ranks,
- intel_dram_type_str(type));
-
- /*
- * If any of the channel is single rank channel,
- * worst case output will be same as if single rank
- * memory, so consider single rank memory.
- */
- if (dram_info->ranks == 0)
- dram_info->ranks = dimm.ranks;
- else if (dimm.ranks == 1)
- dram_info->ranks = 1;
-
- if (type != INTEL_DRAM_UNKNOWN)
- dram_info->type = type;
- }
-
- if (dram_info->type == INTEL_DRAM_UNKNOWN ||
- dram_info->ranks == 0) {
- drm_info(&dev_priv->drm, "couldn't get memory information\n");
- return -EINVAL;
- }
-
- dram_info->valid = true;
- return 0;
-}
-
-static void
-intel_get_dram_info(struct drm_i915_private *dev_priv)
-{
- struct dram_info *dram_info = &dev_priv->dram_info;
- int ret;
-
- /*
- * Assume 16Gb DIMMs are present until proven otherwise.
- * This is only used for the level 0 watermark latency
- * w/a which does not apply to bxt/glk.
- */
- dram_info->is_16gb_dimm = !IS_GEN9_LP(dev_priv);
-
- if (INTEL_GEN(dev_priv) < 9 || !HAS_DISPLAY(dev_priv))
- return;
-
- if (IS_GEN9_LP(dev_priv))
- ret = bxt_get_dram_info(dev_priv);
- else
- ret = skl_get_dram_info(dev_priv);
- if (ret)
- return;
-
- drm_dbg_kms(&dev_priv->drm, "DRAM bandwidth: %u kBps, channels: %u\n",
- dram_info->bandwidth_kbps,
- dram_info->num_channels);
-
- drm_dbg_kms(&dev_priv->drm, "DRAM ranks: %u, 16Gb DIMMs: %s\n",
- dram_info->ranks, yesno(dram_info->is_16gb_dimm));
-}
-
-static u32 gen9_edram_size_mb(struct drm_i915_private *dev_priv, u32 cap)
-{
- static const u8 ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
- static const u8 sets[4] = { 1, 1, 2, 2 };
-
- return EDRAM_NUM_BANKS(cap) *
- ways[EDRAM_WAYS_IDX(cap)] *
- sets[EDRAM_SETS_IDX(cap)];
-}
-
-static void edram_detect(struct drm_i915_private *dev_priv)
-{
- u32 edram_cap = 0;
-
- if (!(IS_HASWELL(dev_priv) ||
- IS_BROADWELL(dev_priv) ||
- INTEL_GEN(dev_priv) >= 9))
- return;
-
- edram_cap = __raw_uncore_read32(&dev_priv->uncore, HSW_EDRAM_CAP);
-
- /* NB: We can't write IDICR yet because we don't have gt funcs set up */
-
- if (!(edram_cap & EDRAM_ENABLED))
- return;
-
- /*
- * The needed capability bits for size calculation are not there with
- * pre gen9 so return 128MB always.
- */
- if (INTEL_GEN(dev_priv) < 9)
- dev_priv->edram_size_mb = 128;
- else
- dev_priv->edram_size_mb =
- gen9_edram_size_mb(dev_priv, edram_cap);
-
- dev_info(dev_priv->drm.dev,
- "Found %uMB of eDRAM\n", dev_priv->edram_size_mb);
-}
-
/**
* i915_driver_hw_probe - setup state requiring device access
* @dev_priv: device private
@@ -1089,7 +609,7 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
intel_sanitize_options(dev_priv);
/* needs to be done before ggtt probe */
- edram_detect(dev_priv);
+ intel_dram_edram_detect(dev_priv);
i915_perf_init(dev_priv);
@@ -1191,7 +711,7 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
* Fill the dram structure to get the system raw bandwidth and
* dram info. This will be used for memory latency calculation.
*/
- intel_get_dram_info(dev_priv);
+ intel_dram_detect(dev_priv);
intel_bw_init_hw(dev_priv);
@@ -1240,12 +760,7 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
i915_gem_driver_register(dev_priv);
i915_pmu_register(dev_priv);
- /*
- * Notify a valid surface after modesetting,
- * when running inside a VM.
- */
- if (intel_vgpu_active(dev_priv))
- I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY);
+ intel_vgpu_register(dev_priv);
/* Reveal our presence to userspace */
if (drm_dev_register(dev, 0) == 0) {
@@ -1375,8 +890,6 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
return ERR_PTR(err);
}
- i915->drm.dev_private = i915;
-
i915->drm.pdev = pdev;
pci_set_drvdata(pdev, i915);
@@ -1454,7 +967,7 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
disable_rpm_wakeref_asserts(&i915->runtime_pm);
- i915_detect_vgpu(i915);
+ intel_vgpu_detect(i915);
ret = i915_driver_mmio_probe(i915);
if (ret < 0)
@@ -2245,7 +1758,7 @@ static const struct file_operations i915_driver_fops = {
.mmap = i915_gem_mmap,
.poll = drm_poll,
.read = drm_read,
- .compat_ioctl = i915_compat_ioctl,
+ .compat_ioctl = i915_ioc32_compat_ioctl,
.llseek = noop_llseek,
};
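
Annotation: i915_driver_modeset_probe_noirq() above grows a standard kernel unwind ladder: each failure jumps to a label that tears down only the steps that already succeeded, in reverse order. A generic sketch of the idiom (setup_a/setup_b/teardown_a are hypothetical stand-ins):

	struct device;
	int setup_a(struct device *dev);
	int setup_b(struct device *dev);
	void teardown_a(struct device *dev);

	static int probe(struct device *dev)
	{
		int ret;

		ret = setup_a(dev);
		if (ret)
			goto out;

		ret = setup_b(dev);
		if (ret)
			goto cleanup_a;	/* undo only what already succeeded */

		return 0;

	cleanup_a:
		teardown_a(dev);
	out:
		return ret;
	}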
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index ea13fc0b409b..1f5b9a584f71 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -59,7 +59,6 @@
#include <drm/drm_connector.h>
#include <drm/i915_mei_hdcp_interface.h>
-#include "i915_fixed.h"
#include "i915_params.h"
#include "i915_reg.h"
#include "i915_utils.h"
@@ -105,18 +104,23 @@
#include "intel_region_lmem.h"
-#include "intel_gvt.h"
-
/* General customization:
*/
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20200225"
-#define DRIVER_TIMESTAMP 1582656081
+#define DRIVER_DATE "20200313"
+#define DRIVER_TIMESTAMP 1584144591
struct drm_i915_gem_object;
+/*
+ * The code assumes that the hpd_pins below have consecutive values and
+ * starting with HPD_PORT_A, the HPD pin associated with any port can be
+ * retrieved by adding the corresponding port (or phy) enum value to
+ * HPD_PORT_A in most cases. For example:
+ * HPD_PORT_C = HPD_PORT_A + PHY_C - PHY_A
+ */
enum hpd_pin {
HPD_NONE = 0,
HPD_TV = HPD_NONE, /* TV is known to be unreliable */
@@ -732,14 +736,6 @@ enum intel_ddb_partitioning {
INTEL_DDB_PART_5_6, /* IVB+ */
};
-struct intel_wm_level {
- bool enable;
- u32 pri_val;
- u32 spr_val;
- u32 cur_val;
- u32 fbc_val;
-};
-
struct ilk_wm_values {
u32 wm_pipe[3];
u32 wm_lp[3];
@@ -798,56 +794,6 @@ static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1,
return false;
}
-struct skl_wm_level {
- u16 min_ddb_alloc;
- u16 plane_res_b;
- u8 plane_res_l;
- bool plane_en;
- bool ignore_lines;
-};
-
-/* Stores plane specific WM parameters */
-struct skl_wm_params {
- bool x_tiled, y_tiled;
- bool rc_surface;
- bool is_planar;
- u32 width;
- u8 cpp;
- u32 plane_pixel_rate;
- u32 y_min_scanlines;
- u32 plane_bytes_per_line;
- uint_fixed_16_16_t plane_blocks_per_line;
- uint_fixed_16_16_t y_tile_minimum;
- u32 linetime_us;
- u32 dbuf_block_size;
-};
-
-enum intel_pipe_crc_source {
- INTEL_PIPE_CRC_SOURCE_NONE,
- INTEL_PIPE_CRC_SOURCE_PLANE1,
- INTEL_PIPE_CRC_SOURCE_PLANE2,
- INTEL_PIPE_CRC_SOURCE_PLANE3,
- INTEL_PIPE_CRC_SOURCE_PLANE4,
- INTEL_PIPE_CRC_SOURCE_PLANE5,
- INTEL_PIPE_CRC_SOURCE_PLANE6,
- INTEL_PIPE_CRC_SOURCE_PLANE7,
- INTEL_PIPE_CRC_SOURCE_PIPE,
- /* TV/DP on pre-gen5/vlv can't use the pipe source. */
- INTEL_PIPE_CRC_SOURCE_TV,
- INTEL_PIPE_CRC_SOURCE_DP_B,
- INTEL_PIPE_CRC_SOURCE_DP_C,
- INTEL_PIPE_CRC_SOURCE_DP_D,
- INTEL_PIPE_CRC_SOURCE_AUTO,
- INTEL_PIPE_CRC_SOURCE_MAX,
-};
-
-#define INTEL_PIPE_CRC_ENTRIES_NR 128
-struct intel_pipe_crc {
- spinlock_t lock;
- int skipped;
- enum intel_pipe_crc_source source;
-};
-
struct i915_frontbuffer_tracking {
spinlock_t lock;
@@ -865,13 +811,6 @@ struct i915_virtual_gpu {
u32 caps;
};
-/* used in computing the new watermarks state */
-struct intel_wm_config {
- unsigned int num_pipes_active;
- bool sprites_enabled;
- bool sprites_scaled;
-};
-
struct intel_cdclk_config {
unsigned int cdclk, vco, ref, bypass;
u8 voltage_level;
@@ -1043,21 +982,24 @@ struct drm_i915_private {
struct intel_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
struct intel_crtc *pipe_to_crtc_mapping[I915_MAX_PIPES];
-#ifdef CONFIG_DEBUG_FS
- struct intel_pipe_crc pipe_crc[I915_MAX_PIPES];
-#endif
+ /**
+ * dpll and cdclk state is protected by connection_mutex
+ * dpll.lock serializes intel_{prepare,enable,disable}_shared_dpll.
+ * Must be global rather than per dpll, because on some platforms plls
+ * share registers.
+ */
+ struct {
+ struct mutex lock;
- /* dpll and cdclk state is protected by connection_mutex */
- int num_shared_dpll;
- struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
- const struct intel_dpll_mgr *dpll_mgr;
+ int num_shared_dpll;
+ struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
+ const struct intel_dpll_mgr *mgr;
- /*
- * dpll_lock serializes intel_{prepare,enable,disable}_shared_dpll.
- * Must be global rather than per dpll, because on some platforms
- * plls share registers.
- */
- struct mutex dpll_lock;
+ struct {
+ int nssc;
+ int ssc;
+ } ref_clks;
+ } dpll;
struct list_head global_obj_list;
@@ -1078,8 +1020,6 @@ struct drm_i915_private {
struct work_struct free_work;
} atomic_helper;
- u16 orig_clock;
-
bool mchbar_need_disable;
struct intel_l3_parity l3_parity;
@@ -1274,16 +1214,6 @@ struct drm_i915_private {
*/
};
-struct dram_dimm_info {
- u8 size, width, ranks;
-};
-
-struct dram_channel_info {
- struct dram_dimm_info dimm_l, dimm_s;
- u8 ranks;
- bool is_16gb_dimm;
-};
-
static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
{
return container_of(dev, struct drm_i915_private, drm);
@@ -1554,6 +1484,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define GLK_REVID_A0 0x0
#define GLK_REVID_A1 0x1
+#define GLK_REVID_A2 0x2
+#define GLK_REVID_B0 0x3
#define IS_GLK_REVID(dev_priv, since, until) \
(IS_GEMINILAKE(dev_priv) && IS_REVID(dev_priv, since, until))
@@ -1737,11 +1669,6 @@ intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *dev_priv)
}
/* i915_drv.c */
-#ifdef CONFIG_COMPAT
-long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
-#else
-#define i915_compat_ioctl NULL
-#endif
extern const struct dev_pm_ops i915_pm_ops;
int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
@@ -1750,16 +1677,6 @@ void i915_driver_remove(struct drm_i915_private *i915);
int i915_resume_switcheroo(struct drm_i915_private *i915);
int i915_suspend_switcheroo(struct drm_i915_private *i915, pm_message_t state);
-static inline bool intel_gvt_active(struct drm_i915_private *dev_priv)
-{
- return dev_priv->gvt;
-}
-
-static inline bool intel_vgpu_active(struct drm_i915_private *dev_priv)
-{
- return dev_priv->vgpu.active;
-}
-
int i915_getparam_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
@@ -1824,12 +1741,6 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv);
-static inline int __must_check
-i915_mutex_lock_interruptible(struct drm_device *dev)
-{
- return mutex_lock_interruptible(&dev->struct_mutex);
-}
-
int i915_gem_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
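
Annotation: the new comment block in i915_drv.h documents an invariant the driver leans on: hpd_pin values track the port/phy enums, so a pin can be computed by offsetting from HPD_PORT_A. A stand-alone sketch of that arithmetic (enum values hypothetical, mirroring the documented relation HPD_PORT_C = HPD_PORT_A + PHY_C - PHY_A):

	#include <assert.h>

	enum phy { PHY_A, PHY_B, PHY_C };
	enum hpd_pin { HPD_PORT_A = 3, HPD_PORT_B, HPD_PORT_C };

	static enum hpd_pin phy_to_hpd_pin(enum phy phy)
	{
		/* Valid only while both enums stay consecutive and aligned. */
		return HPD_PORT_A + (phy - PHY_A);
	}

	int main(void)
	{
		assert(phy_to_hpd_pin(PHY_C) == HPD_PORT_C);
		return 0;
	}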
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index d92cf966fa3f..ca5420012a22 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -26,7 +26,6 @@
*/
#include <drm/drm_vma_manager.h>
-#include <drm/i915_drm.h>
#include <linux/dma-fence-array.h>
#include <linux/kthread.h>
#include <linux/dma-resv.h>
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 0697bedebeef..4518b9b35c3d 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -26,8 +26,6 @@
*
*/
-#include <drm/i915_drm.h>
-
#include "gem/i915_gem_context.h"
#include "gt/intel_gt_requests.h"
@@ -292,7 +290,8 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
GEM_BUG_ON(!drm_mm_node_allocated(node));
vma = container_of(node, typeof(*vma), node);
- /* If we are using coloring to insert guard pages between
+ /*
+ * If we are using coloring to insert guard pages between
* different cache domains within the address space, we have
* to check whether the objects on either side of our range
* abutt and conflict. If they are in conflict, then we evict
@@ -309,22 +308,18 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
}
}
- if (flags & PIN_NONBLOCK &&
- (i915_vma_is_pinned(vma) || i915_vma_is_active(vma))) {
+ if (i915_vma_is_pinned(vma)) {
ret = -ENOSPC;
break;
}
- /* Overlap of objects in the same batch? */
- if (i915_vma_is_pinned(vma)) {
+ if (flags & PIN_NONBLOCK && i915_vma_is_active(vma)) {
ret = -ENOSPC;
- if (vma->exec_flags &&
- *vma->exec_flags & EXEC_OBJECT_PINNED)
- ret = -EINVAL;
break;
}
- /* Never show fear in the face of dragons!
+ /*
+ * Never show fear in the face of dragons!
*
* We cannot directly remove this node from within this
* iterator and as with i915_gem_evict_something() we employ
diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.c b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
index 049cd3785347..d152b648c73c 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence_reg.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
@@ -21,10 +21,9 @@
* IN THE SOFTWARE.
*/
-#include <drm/i915_drm.h>
-
#include "i915_drv.h"
#include "i915_scatterlist.h"
+#include "i915_pvinfo.h"
#include "i915_vgpu.h"
/**
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index e7834fa1e0ac..cb43381b0d37 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -15,8 +15,6 @@
#include <asm/set_memory.h>
#include <asm/smp.h>
-#include <drm/i915_drm.h>
-
#include "display/intel_frontbuffer.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_requests.h"
diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
index c1007245f46d..8e45ca3d2ede 100644
--- a/drivers/gpu/drm/i915/i915_ioc32.c
+++ b/drivers/gpu/drm/i915/i915_ioc32.c
@@ -28,9 +28,10 @@
*/
#include <linux/compat.h>
-#include <drm/i915_drm.h>
#include <drm/drm_ioctl.h>
+
#include "i915_drv.h"
+#include "i915_ioc32.h"
struct drm_i915_getparam32 {
s32 param;
@@ -67,7 +68,7 @@ static drm_ioctl_compat_t *i915_compat_ioctls[] = {
};
/**
- * i915_compat_ioctl - handle the mistakes of the past
+ * i915_ioc32_compat_ioctl - handle the mistakes of the past
* @filp: the file pointer
* @cmd: the ioctl command (and encoded flags)
* @arg: the ioctl argument (from userspace)
@@ -75,7 +76,7 @@ static drm_ioctl_compat_t *i915_compat_ioctls[] = {
* Called whenever a 32-bit process running under a 64-bit kernel
* performs an ioctl on /dev/dri/card<n>.
*/
-long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+long i915_ioc32_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
unsigned int nr = DRM_IOCTL_NR(cmd);
drm_ioctl_compat_t *fn = NULL;
diff --git a/drivers/gpu/drm/i915/i915_ioc32.h b/drivers/gpu/drm/i915/i915_ioc32.h
new file mode 100644
index 000000000000..40dcd55ca213
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_ioc32.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef __I915_IOC32_H__
+#define __I915_IOC32_H__
+
+#ifdef CONFIG_COMPAT
+struct file;
+long i915_ioc32_compat_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg);
+#else
+#define i915_ioc32_compat_ioctl NULL
+#endif
+
+#endif /* __I915_IOC32_H__ */
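
Annotation: the new i915_ioc32.h shows the usual compat-ioctl plumbing: with CONFIG_COMPAT disabled, the symbol collapses to NULL, so the .compat_ioctl file-operations slot can be assigned unconditionally. How the fops side consumes it, reduced from the i915_drv.c hunk above:

	/*
	 * With CONFIG_COMPAT=n, i915_ioc32_compat_ioctl is #defined to NULL,
	 * so this initializer needs no #ifdef at the use site.
	 */
	static const struct file_operations i915_driver_fops = {
		/* other handlers elided */
		.compat_ioctl = i915_ioc32_compat_ioctl,
		.llseek = noop_llseek,
	};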
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 5d1b11346145..9f0653cf0510 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -34,7 +34,6 @@
#include <drm/drm_drv.h>
#include <drm/drm_irq.h>
-#include <drm/i915_drm.h>
#include "display/intel_display_types.h"
#include "display/intel_fifo_underrun.h"
@@ -169,6 +168,14 @@ static const u32 hpd_tgp[HPD_NUM_PINS] = {
[HPD_PORT_I] = SDE_TC_HOTPLUG_ICP(PORT_TC6),
};
+static void
+intel_handle_vblank(struct drm_i915_private *dev_priv, enum pipe pipe)
+{
+ struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+
+ drm_crtc_handle_vblank(&crtc->base);
+}
+
void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
i915_reg_t iir, i915_reg_t ier)
{
@@ -1210,8 +1217,8 @@ static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
u32 crc2, u32 crc3,
u32 crc4)
{
- struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+ struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc;
u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 };
trace_intel_pipe_crc(crtc, crcs);
@@ -1374,7 +1381,7 @@ static void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv,
for_each_pipe(dev_priv, pipe) {
if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
- drm_handle_vblank(&dev_priv->drm, pipe);
+ intel_handle_vblank(dev_priv, pipe);
if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
i9xx_pipe_crc_irq_handler(dev_priv, pipe);
@@ -1392,7 +1399,7 @@ static void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv,
for_each_pipe(dev_priv, pipe) {
if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
- drm_handle_vblank(&dev_priv->drm, pipe);
+ intel_handle_vblank(dev_priv, pipe);
if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
blc_event = true;
@@ -1416,7 +1423,7 @@ static void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv,
for_each_pipe(dev_priv, pipe) {
if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
- drm_handle_vblank(&dev_priv->drm, pipe);
+ intel_handle_vblank(dev_priv, pipe);
if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
blc_event = true;
@@ -1442,7 +1449,7 @@ static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
for_each_pipe(dev_priv, pipe) {
if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
- drm_handle_vblank(&dev_priv->drm, pipe);
+ intel_handle_vblank(dev_priv, pipe);
if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
i9xx_pipe_crc_irq_handler(dev_priv, pipe);
@@ -1749,11 +1756,12 @@ static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
if (pch_iir & SDE_POISON)
drm_err(&dev_priv->drm, "PCH poison interrupt\n");
- if (pch_iir & SDE_FDI_MASK)
+ if (pch_iir & SDE_FDI_MASK) {
for_each_pipe(dev_priv, pipe)
drm_dbg(&dev_priv->drm, " pipe %c FDI IIR: 0x%08x\n",
pipe_name(pipe),
I915_READ(FDI_RX_IIR(pipe)));
+ }
if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
drm_dbg(&dev_priv->drm, "PCH transcoder CRC done interrupt\n");
@@ -1833,11 +1841,12 @@ static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
drm_dbg(&dev_priv->drm, "Audio CP change interrupt\n");
- if (pch_iir & SDE_FDI_MASK_CPT)
+ if (pch_iir & SDE_FDI_MASK_CPT) {
for_each_pipe(dev_priv, pipe)
drm_dbg(&dev_priv->drm, " pipe %c FDI IIR: 0x%08x\n",
pipe_name(pipe),
I915_READ(FDI_RX_IIR(pipe)));
+ }
if (pch_iir & SDE_ERROR_CPT)
cpt_serr_int_handler(dev_priv);
@@ -1978,7 +1987,7 @@ static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
for_each_pipe(dev_priv, pipe) {
if (de_iir & DE_PIPE_VBLANK(pipe))
- drm_handle_vblank(&dev_priv->drm, pipe);
+ intel_handle_vblank(dev_priv, pipe);
if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
@@ -2031,7 +2040,7 @@ static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
for_each_pipe(dev_priv, pipe) {
if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)))
- drm_handle_vblank(&dev_priv->drm, pipe);
+ intel_handle_vblank(dev_priv, pipe);
}
/* check event from PCH */
@@ -2344,7 +2353,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);
if (iir & GEN8_PIPE_VBLANK)
- drm_handle_vblank(&dev_priv->drm, pipe);
+ intel_handle_vblank(dev_priv, pipe);
if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
hsw_pipe_crc_irq_handler(dev_priv, pipe);
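
Annotation: two smaller cleanups ride along in i915_irq.c: the drm_handle_vblank() call sites are funnelled through the new intel_handle_vblank() helper (per-crtc rather than per-index), and the FDI debug loops gain braces. The brace change, reduced, since a multi-line macro loop under an unbraced if is easy to misedit:

	/* Before: legal, but the loop body's extent is implicit. */
	if (pch_iir & SDE_FDI_MASK)
		for_each_pipe(dev_priv, pipe)
			drm_dbg(&dev_priv->drm, " pipe %c FDI IIR: 0x%08x\n",
				pipe_name(pipe), I915_READ(FDI_RX_IIR(pipe)));

	/* After: the scope is explicit. */
	if (pch_iir & SDE_FDI_MASK) {
		for_each_pipe(dev_priv, pipe)
			drm_dbg(&dev_priv->drm, " pipe %c FDI IIR: 0x%08x\n",
				pipe_name(pipe), I915_READ(FDI_RX_IIR(pipe)));
	}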
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 1d678aa7d420..2c80a0194c80 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -26,6 +26,7 @@
#include <linux/vga_switcheroo.h>
#include <drm/drm_drv.h>
+#include <drm/i915_pciids.h>
#include "display/intel_fbdev.h"
@@ -822,7 +823,6 @@ static const struct intel_device_info tgl_info = {
GEN12_FEATURES,
PLATFORM(INTEL_TIGERLAKE),
.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
- .require_force_probe = 1,
.display.has_modular_fia = 1,
.engine_mask =
BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2),
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 79391d92ab7e..1b074bb4a7fe 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -1405,8 +1405,10 @@ static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
/*
* Unset exclusive_stream first, it will be checked while disabling
* the metric set on gen8+.
+ *
+ * See i915_oa_init_reg_state() and lrc_configure_all_contexts()
*/
- perf->exclusive_stream = NULL;
+ WRITE_ONCE(perf->exclusive_stream, NULL);
perf->ops.disable_metric_set(stream);
free_oa_buffer(stream);
@@ -2200,7 +2202,9 @@ static int gen8_modify_self(struct intel_context *ce,
struct i915_request *rq;
int err;
+ intel_engine_pm_get(ce->engine);
rq = i915_request_create(ce);
+ intel_engine_pm_put(ce->engine);
if (IS_ERR(rq))
return PTR_ERR(rq);
@@ -2869,7 +2873,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
goto err_oa_buf_alloc;
stream->ops = &i915_oa_stream_ops;
- perf->exclusive_stream = stream;
+ WRITE_ONCE(perf->exclusive_stream, stream);
ret = i915_perf_stream_enable_sync(stream);
if (ret) {
@@ -2889,7 +2893,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
return 0;
err_enable:
- perf->exclusive_stream = NULL;
+ WRITE_ONCE(perf->exclusive_stream, NULL);
perf->ops.disable_metric_set(stream);
free_oa_buffer(stream);
@@ -2915,12 +2919,11 @@ void i915_oa_init_reg_state(const struct intel_context *ce,
{
struct i915_perf_stream *stream;
- /* perf.exclusive_stream serialised by lrc_configure_all_contexts() */
-
if (engine->class != RENDER_CLASS)
return;
- stream = engine->i915->perf.exclusive_stream;
+ /* perf.exclusive_stream serialised by lrc_configure_all_contexts() */
+ stream = READ_ONCE(engine->i915->perf.exclusive_stream);
/*
* For gen12, only CTX_R_PWR_CLK_STATE needs update, but the caller
* is already doing that, so nothing to be done for gen12 here.
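
Annotation: perf->exclusive_stream is now written with WRITE_ONCE() and sampled with READ_ONCE(): i915_oa_init_reg_state() reads it without holding the perf lock (relying on lrc_configure_all_contexts() for serialisation, per the retained comment), so the accessors mark the intentionally lock-free access and keep the compiler from tearing or re-loading the pointer. The paired accesses, reduced from the hunks:

	/* Publish under the perf lock; readers may observe it at any time. */
	WRITE_ONCE(perf->exclusive_stream, stream);

	/* Lock-free reader: a single, non-torn load of the pointer. */
	stream = READ_ONCE(engine->i915->perf.exclusive_stream);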
diff --git a/drivers/gpu/drm/i915/i915_pmu.h b/drivers/gpu/drm/i915/i915_pmu.h
index f1d6cad0d7d5..941f0c14037c 100644
--- a/drivers/gpu/drm/i915/i915_pmu.h
+++ b/drivers/gpu/drm/i915/i915_pmu.h
@@ -10,7 +10,7 @@
#include <linux/hrtimer.h>
#include <linux/perf_event.h>
#include <linux/spinlock_types.h>
-#include <drm/i915_drm.h>
+#include <uapi/drm/i915_drm.h>
struct drm_i915_private;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index f45b5e86ec63..309cb7d96b35 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -3285,6 +3285,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
/* Framebuffer compression for Ivybridge */
#define IVB_FBC_RT_BASE _MMIO(0x7020)
+#define IVB_FBC_RT_BASE_UPPER _MMIO(0x7024)
#define IPS_CTL _MMIO(0x43408)
#define IPS_ENABLE (1 << 31)
@@ -4860,16 +4861,6 @@ enum {
#define _PP_STATUS 0x61200
#define PP_STATUS(pps_idx) _MMIO_PPS(pps_idx, _PP_STATUS)
#define PP_ON REG_BIT(31)
-
-#define _PP_CONTROL_1 0xc7204
-#define _PP_CONTROL_2 0xc7304
-#define ICP_PP_CONTROL(x) _MMIO(((x) == 1) ? _PP_CONTROL_1 : \
- _PP_CONTROL_2)
-#define POWER_CYCLE_DELAY_MASK REG_GENMASK(8, 4)
-#define VDD_OVERRIDE_FORCE REG_BIT(3)
-#define BACKLIGHT_ENABLE REG_BIT(2)
-#define PWR_DOWN_ON_RESET REG_BIT(1)
-#define PWR_STATE_TARGET REG_BIT(0)
/*
* Indicates that all dependencies of the panel are on:
*
@@ -5880,7 +5871,6 @@ enum {
#define _PIPEAGCMAX 0x70010
#define _PIPEBGCMAX 0x71010
-#define PIPEGCMAX_RGB_MASK REG_GENMASK(15, 0)
#define PIPEGCMAX(pipe, i) _MMIO_PIPE2(pipe, _PIPEAGCMAX + (i) * 4)
#define _PIPE_MISC_A 0x70030
@@ -5889,6 +5879,7 @@ enum {
#define PIPEMISC_YUV420_MODE_FULL_BLEND (1 << 26) /* glk+ */
#define PIPEMISC_HDR_MODE_PRECISION (1 << 23) /* icl+ */
#define PIPEMISC_OUTPUT_COLORSPACE_YUV (1 << 11)
+#define PIPEMISC_PIXEL_ROUNDING_TRUNC REG_BIT(8) /* tgl+ */
#define PIPEMISC_DITHER_BPC_MASK (7 << 5)
#define PIPEMISC_DITHER_8_BPC (0 << 5)
#define PIPEMISC_DITHER_10_BPC (1 << 5)
@@ -9149,14 +9140,19 @@ enum {
#define THROTTLE_12_5 (7 << 2)
#define DISABLE_EARLY_EOT (1 << 1)
-#define GEN7_ROW_CHICKEN2 _MMIO(0xe4f4)
-#define GEN12_DISABLE_EARLY_READ BIT(14)
+#define GEN7_ROW_CHICKEN2 _MMIO(0xe4f4)
+#define GEN12_DISABLE_EARLY_READ REG_BIT(14)
+#define GEN12_PUSH_CONST_DEREF_HOLD_DIS REG_BIT(8)
#define GEN7_ROW_CHICKEN2_GT2 _MMIO(0xf4f4)
#define DOP_CLOCK_GATING_DISABLE (1 << 0)
#define PUSH_CONSTANT_DEREF_DISABLE (1 << 8)
#define GEN11_TDL_CLOCK_GATING_FIX_DISABLE (1 << 1)
+#define GEN9_ROW_CHICKEN4 _MMIO(0xe48c)
+#define GEN12_DISABLE_TDL_PUSH REG_BIT(9)
+#define GEN11_DIS_PICK_2ND_EU REG_BIT(7)
+
#define HSW_ROW_CHICKEN3 _MMIO(0xe49c)
#define HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE (1 << 6)
@@ -9257,6 +9253,10 @@ enum {
#define AUD_CONFIG_PIXEL_CLOCK_HDMI_74250 (7 << 16)
#define AUD_CONFIG_PIXEL_CLOCK_HDMI_148352 (8 << 16)
#define AUD_CONFIG_PIXEL_CLOCK_HDMI_148500 (9 << 16)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_296703 (10 << 16)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_297000 (11 << 16)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_593407 (12 << 16)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_594000 (13 << 16)
#define AUD_CONFIG_DISABLE_NCTS (1 << 3)
/* HSW Audio */
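
Annotation: the GEN7_ROW_CHICKEN2 block migrates from open-coded BIT() to REG_BIT(); the REG_* helpers are typed for u32 registers and, for constant arguments, validate the bit index at build time, which plain BIT() does not. Reduced before/after from the hunk:

	/* Before */
	#define GEN12_DISABLE_EARLY_READ	BIT(14)

	/* After: checked against the 32-bit register width at compile time. */
	#define GEN12_DISABLE_EARLY_READ	REG_BIT(14)
	#define GEN12_PUSH_CONST_DEREF_HOLD_DIS	REG_BIT(8)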
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index e5a55801f753..c0df71d7d0ff 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -290,7 +290,7 @@ bool i915_request_retire(struct i915_request *rq)
spin_unlock_irq(&rq->lock);
remove_from_client(rq);
- list_del_rcu(&rq->link);
+ __list_del_entry(&rq->link); /* poison neither prev/next (RCU walks) */
intel_context_exit(rq->context);
intel_context_unpin(rq->context);
@@ -363,6 +363,50 @@ __await_execution(struct i915_request *rq,
return 0;
}
+static bool fatal_error(int error)
+{
+ switch (error) {
+ case 0: /* not an error! */
+ case -EAGAIN: /* innocent victim of a GT reset (__i915_request_reset) */
+ case -ETIMEDOUT: /* waiting for Godot (timer_i915_sw_fence_wake) */
+ return false;
+ default:
+ return true;
+ }
+}
+
+void __i915_request_skip(struct i915_request *rq)
+{
+ GEM_BUG_ON(!fatal_error(rq->fence.error));
+
+ if (rq->infix == rq->postfix)
+ return;
+
+ /*
+ * As this request likely depends on state from the lost
+ * context, clear out all the user operations leaving the
+ * breadcrumb at the end (so we get the fence notifications).
+ */
+ __i915_request_fill(rq, 0);
+ rq->infix = rq->postfix;
+}
+
+void i915_request_set_error_once(struct i915_request *rq, int error)
+{
+ int old;
+
+ GEM_BUG_ON(!IS_ERR_VALUE((long)error));
+
+ if (i915_request_signaled(rq))
+ return;
+
+ old = READ_ONCE(rq->fence.error);
+ do {
+ if (fatal_error(old))
+ return;
+ } while (!try_cmpxchg(&rq->fence.error, &old, error));
+}
+
bool __i915_request_submit(struct i915_request *request)
{
struct intel_engine_cs *engine = request->engine;
@@ -392,8 +436,10 @@ bool __i915_request_submit(struct i915_request *request)
if (i915_request_completed(request))
goto xfer;
- if (intel_context_is_banned(request->context))
- i915_request_skip(request, -EIO);
+ if (unlikely(intel_context_is_banned(request->context)))
+ i915_request_set_error_once(request, -EIO);
+ if (unlikely(fatal_error(request->fence.error)))
+ __i915_request_skip(request);
/*
* Are we using semaphores when the gpu is already saturated?
@@ -519,7 +565,7 @@ submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
trace_i915_request_submit(request);
if (unlikely(fence->error))
- i915_request_skip(request, fence->error);
+ i915_request_set_error_once(request, fence->error);
/*
* We need to serialize use of the submit_request() callback
@@ -542,19 +588,31 @@ submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
return NOTIFY_DONE;
}
+static void irq_semaphore_cb(struct irq_work *wrk)
+{
+ struct i915_request *rq =
+ container_of(wrk, typeof(*rq), semaphore_work);
+
+ i915_schedule_bump_priority(rq, I915_PRIORITY_NOSEMAPHORE);
+ i915_request_put(rq);
+}
+
static int __i915_sw_fence_call
semaphore_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
- struct i915_request *request =
- container_of(fence, typeof(*request), semaphore);
+ struct i915_request *rq = container_of(fence, typeof(*rq), semaphore);
switch (state) {
case FENCE_COMPLETE:
- i915_schedule_bump_priority(request, I915_PRIORITY_NOSEMAPHORE);
+ if (!(READ_ONCE(rq->sched.attr.priority) & I915_PRIORITY_NOSEMAPHORE)) {
+ i915_request_get(rq);
+ init_irq_work(&rq->semaphore_work, irq_semaphore_cb);
+ irq_work_queue(&rq->semaphore_work);
+ }
break;
case FENCE_FREE:
- i915_request_put(request);
+ i915_request_put(rq);
break;
}
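Editor's note: the FENCE_COMPLETE arm above defers the priority bump through an irq_work rather than doing it directly in the fence callback, which can run in hard-irq context with the fence's spinlock held. A minimal sketch of that deferral pattern, with hypothetical names and payload (not the i915 code itself):

#include <linux/irq_work.h>
#include <linux/kernel.h>
#include <linux/printk.h>

struct deferred_boost {
	struct irq_work work;
	int priority;	/* payload the callback acts on */
};

static void boost_cb(struct irq_work *wrk)
{
	struct deferred_boost *b = container_of(wrk, typeof(*b), work);

	/* Runs shortly afterwards, outside the fence callback, where it
	 * is safe to take locks the signaling path must avoid. */
	pr_info("boosting to priority %d\n", b->priority);
}

static void kick_boost(struct deferred_boost *b)
{
	init_irq_work(&b->work, boost_cb);
	irq_work_queue(&b->work);	/* queues a self-IPI and returns */
}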
@@ -691,6 +749,7 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
RCU_INIT_POINTER(rq->timeline, tl);
RCU_INIT_POINTER(rq->hwsp_cacheline, tl->hwsp_cacheline);
rq->hwsp_seqno = tl->hwsp_seqno;
+ GEM_BUG_ON(i915_request_completed(rq));
rq->rcustate = get_state_synchronize_rcu(); /* acts as smp_mb() */
@@ -791,8 +850,8 @@ i915_request_await_start(struct i915_request *rq, struct i915_request *signal)
struct dma_fence *fence;
int err;
- GEM_BUG_ON(i915_request_timeline(rq) ==
- rcu_access_pointer(signal->timeline));
+ if (i915_request_timeline(rq) == rcu_access_pointer(signal->timeline))
+ return 0;
if (i915_request_started(signal))
return 0;
@@ -836,7 +895,7 @@ i915_request_await_start(struct i915_request *rq, struct i915_request *signal)
return 0;
err = 0;
- if (intel_timeline_sync_is_later(i915_request_timeline(rq), fence))
+ if (!intel_timeline_sync_is_later(i915_request_timeline(rq), fence))
err = i915_sw_fence_await_dma_fence(&rq->submit,
fence, 0,
I915_FENCE_GFP);
@@ -860,7 +919,7 @@ already_busywaiting(struct i915_request *rq)
*
* See the are-we-too-late? check in __i915_request_submit().
*/
- return rq->sched.semaphores | rq->engine->saturated;
+ return rq->sched.semaphores | READ_ONCE(rq->engine->saturated);
}
static int
@@ -918,6 +977,8 @@ emit_semaphore_wait(struct i915_request *to,
struct i915_request *from,
gfp_t gfp)
{
+ const intel_engine_mask_t mask = READ_ONCE(from->engine)->mask;
+
if (!intel_context_use_semaphores(to->context))
goto await_fence;
@@ -925,7 +986,7 @@ emit_semaphore_wait(struct i915_request *to,
goto await_fence;
/* Just emit the first semaphore we see as request space is limited. */
- if (already_busywaiting(to) & from->engine->mask)
+ if (already_busywaiting(to) & mask)
goto await_fence;
if (i915_request_await_start(to, from) < 0)
@@ -938,7 +999,7 @@ emit_semaphore_wait(struct i915_request *to,
if (__emit_semaphore_wait(to, from, from->fence.seqno))
goto await_fence;
- to->sched.semaphores |= from->engine->mask;
+ to->sched.semaphores |= mask;
to->sched.flags |= I915_SCHED_HAS_SEMAPHORE_CHAIN;
return 0;
@@ -1081,14 +1142,45 @@ __i915_request_await_execution(struct i915_request *to,
&from->fence))
return 0;
- /* Ensure both start together [after all semaphores in signal] */
- if (intel_engine_has_semaphores(to->engine))
- err = __emit_semaphore_wait(to, from, from->fence.seqno - 1);
- else
- err = i915_request_await_start(to, from);
+ /*
+ * Wait until the start of this request.
+ *
+ * The execution cb fires when we submit the request to HW. But in
+ * many cases this may be long before the request itself is ready to
+ * run (consider that we submit 2 requests for the same context, where
+ * the request of interest is behind an indefinite spinner). So we hook
+ * up to both to reduce our queues and keep the execution lag minimised
+ * in the worst case, though we hope that the await_start is elided.
+ */
+ err = i915_request_await_start(to, from);
if (err < 0)
return err;
+ /*
+ * Ensure both start together [after all semaphores in signal]
+ *
+ * Now that we are queued to the HW at roughly the same time (thanks
+ * to the execute cb) and are ready to run at roughly the same time
+ * (thanks to the await start), our signaler may still be indefinitely
+ * delayed by waiting on a semaphore from a remote engine. If our
+ * signaler depends on a semaphore, so indirectly do we, and we do not
+ * want to start our payload until our signaler also starts theirs.
+ * So we wait.
+ *
+ * However, there is also a second condition for which we need to wait
+ * for the precise start of the signaler. Consider that the signaler
+ * was submitted in a chain of requests following another context
+ * (with just an ordinary intra-engine fence dependency between the
+ * two). In this case the signaler is queued to HW, but not for
+ * immediate execution, and so we must wait until it reaches the
+ * active slot.
+ */
+ if (intel_engine_has_semaphores(to->engine)) {
+ err = __emit_semaphore_wait(to, from, from->fence.seqno - 1);
+ if (err < 0)
+ return err;
+ }
+
/* Couple the dependency tree for PI on this exposed to->fence */
if (to->engine->schedule) {
err = i915_sched_node_add_dependency(&to->sched, &from->sched);
@@ -1209,23 +1301,6 @@ i915_request_await_object(struct i915_request *to,
return ret;
}
-void i915_request_skip(struct i915_request *rq, int error)
-{
- GEM_BUG_ON(!IS_ERR_VALUE((long)error));
- dma_fence_set_error(&rq->fence, error);
-
- if (rq->infix == rq->postfix)
- return;
-
- /*
- * As this request likely depends on state from the lost
- * context, clear out all the user operations leaving the
- * breadcrumb at the end (so we get the fence notifications).
- */
- __i915_request_fill(rq, 0);
- rq->infix = rq->postfix;
-}
-
static struct i915_request *
__i915_request_add_to_timeline(struct i915_request *rq)
{
@@ -1255,7 +1330,17 @@ __i915_request_add_to_timeline(struct i915_request *rq)
prev = to_request(__i915_active_fence_set(&timeline->last_request,
&rq->fence));
if (prev && !i915_request_completed(prev)) {
- if (is_power_of_2(prev->engine->mask | rq->engine->mask))
+ /*
+ * The requests are supposed to be kept in order. However,
+ * we need to be wary in case the timeline->last_request
+ * is used as a barrier for external modification to this
+ * context.
+ */
+ GEM_BUG_ON(prev->context == rq->context &&
+ i915_seqno_passed(prev->fence.seqno,
+ rq->fence.seqno));
+
+ if (is_power_of_2(READ_ONCE(prev->engine)->mask | rq->engine->mask))
i915_sw_fence_await_sw_fence(&rq->submit,
&prev->submit,
&rq->submitq);
@@ -1329,9 +1414,9 @@ void __i915_request_queue(struct i915_request *rq,
* decide whether to preempt the entire chain so that it is ready to
* run at the earliest possible convenience.
*/
- i915_sw_fence_commit(&rq->semaphore);
if (attr && rq->engine->schedule)
rq->engine->schedule(rq, attr);
+ i915_sw_fence_commit(&rq->semaphore);
i915_sw_fence_commit(&rq->submit);
}
@@ -1339,39 +1424,23 @@ void i915_request_add(struct i915_request *rq)
{
struct intel_timeline * const tl = i915_request_timeline(rq);
struct i915_sched_attr attr = {};
- struct i915_request *prev;
+ struct i915_gem_context *ctx;
lockdep_assert_held(&tl->mutex);
lockdep_unpin_lock(&tl->mutex, rq->cookie);
trace_i915_request_add(rq);
+ __i915_request_commit(rq);
- prev = __i915_request_commit(rq);
-
- if (rcu_access_pointer(rq->context->gem_context))
- attr = i915_request_gem_context(rq)->sched;
+ /* XXX placeholder for selftests */
+ rcu_read_lock();
+ ctx = rcu_dereference(rq->context->gem_context);
+ if (ctx)
+ attr = ctx->sched;
+ rcu_read_unlock();
- /*
- * Boost actual workloads past semaphores!
- *
- * With semaphores we spin on one engine waiting for another,
- * simply to reduce the latency of starting our work when
- * the signaler completes. However, if there is any other
- * work that we could be doing on this engine instead, that
- * is better utilisation and will reduce the overall duration
- * of the current work. To avoid PI boosting a semaphore
- * far in the distance past over useful work, we keep a history
- * of any semaphore use along our dependency chain.
- */
if (!(rq->sched.flags & I915_SCHED_HAS_SEMAPHORE_CHAIN))
attr.priority |= I915_PRIORITY_NOSEMAPHORE;
-
- /*
- * Boost priorities to new clients (new request flows).
- *
- * Allow interactive/synchronous clients to jump ahead of
- * the bulk clients. (FQ_CODEL)
- */
if (list_empty(&rq->sched.signalers_list))
attr.priority |= I915_PRIORITY_WAIT;
@@ -1379,32 +1448,10 @@ void i915_request_add(struct i915_request *rq)
__i915_request_queue(rq, &attr);
local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
- /*
- * In typical scenarios, we do not expect the previous request on
- * the timeline to be still tracked by timeline->last_request if it
- * has been completed. If the completed request is still here, that
- * implies that request retirement is a long way behind submission,
- * suggesting that we haven't been retiring frequently enough from
- * the combination of retire-before-alloc, waiters and the background
- * retirement worker. So if the last request on this timeline was
- * already completed, do a catch up pass, flushing the retirement queue
- * up to this client. Since we have now moved the heaviest operations
- * during retirement onto secondary workers, such as freeing objects
- * or contexts, retiring a bunch of requests is mostly list management
- * (and cache misses), and so we should not be overly penalizing this
- * client by performing excess work, though we may still performing
- * work on behalf of others -- but instead we should benefit from
- * improved resource management. (Well, that's the theory at least.)
- */
- if (prev &&
- i915_request_completed(prev) &&
- rcu_access_pointer(prev->timeline) == tl)
- i915_request_retire_upto(prev);
-
mutex_unlock(&tl->mutex);
}
-static unsigned long local_clock_us(unsigned int *cpu)
+static unsigned long local_clock_ns(unsigned int *cpu)
{
unsigned long t;
@@ -1421,7 +1468,7 @@ static unsigned long local_clock_us(unsigned int *cpu)
* stop busywaiting, see busywait_stop().
*/
*cpu = get_cpu();
- t = local_clock() >> 10;
+ t = local_clock();
put_cpu();
return t;
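Editor's note on the unit change here: the old code approximated microseconds by shifting nanoseconds right by 10, i.e. dividing by 1024 instead of 1000 (for example, 2,048,000 ns >> 10 = 2000, about 2.4% short of the true 2048 us). Keeping the raw nanosecond value from local_clock() drops both the shift and that rounding error, and lets the busywait timeout be expressed directly in nanoseconds.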
@@ -1431,15 +1478,15 @@ static bool busywait_stop(unsigned long timeout, unsigned int cpu)
{
unsigned int this_cpu;
- if (time_after(local_clock_us(&this_cpu), timeout))
+ if (time_after(local_clock_ns(&this_cpu), timeout))
return true;
return this_cpu != cpu;
}
-static bool __i915_spin_request(const struct i915_request * const rq,
- int state, unsigned long timeout_us)
+static bool __i915_spin_request(const struct i915_request * const rq, int state)
{
+ unsigned long timeout_ns;
unsigned int cpu;
/*
@@ -1467,7 +1514,8 @@ static bool __i915_spin_request(const struct i915_request * const rq,
* takes to sleep on a request, on the order of a microsecond.
*/
- timeout_us += local_clock_us(&cpu);
+ timeout_ns = READ_ONCE(rq->engine->props.max_busywait_duration_ns);
+ timeout_ns += local_clock_ns(&cpu);
do {
if (i915_request_completed(rq))
return true;
@@ -1475,7 +1523,7 @@ static bool __i915_spin_request(const struct i915_request * const rq,
if (signal_pending_state(state, current))
break;
- if (busywait_stop(timeout_us, cpu))
+ if (busywait_stop(timeout_ns, cpu))
break;
cpu_relax();
@@ -1561,8 +1609,8 @@ long i915_request_wait(struct i915_request *rq,
* completion. That requires having a good predictor for the request
* duration, which we currently lack.
*/
- if (IS_ACTIVE(CONFIG_DRM_I915_SPIN_REQUEST) &&
- __i915_spin_request(rq, state, CONFIG_DRM_I915_SPIN_REQUEST)) {
+ if (IS_ACTIVE(CONFIG_DRM_I915_MAX_REQUEST_BUSYWAIT) &&
+ __i915_spin_request(rq, state)) {
dma_fence_signal(&rq->fence);
goto out;
}
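Editor's note: for context on the switch from CONFIG_DRM_I915_SPIN_REQUEST to the per-engine max_busywait_duration_ns above, the wait first polls for a small, clock-bounded window before arming interrupt-driven signaling. A self-contained sketch of such a bounded poll (illustrative names; the real loop also checks for pending signals and CPU migration, as seen in busywait_stop()):

#include <linux/ktime.h>

static bool spin_until_done(const bool *done, u64 timeout_ns)
{
	u64 end = ktime_get_ns() + timeout_ns;

	do {
		if (READ_ONCE(*done))
			return true;	/* completed inside the busywait window */
		cpu_relax();
	} while (ktime_get_ns() < end);

	return false;	/* caller falls back to sleeping on the fence */
}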
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index da8420f03232..3c552bfea67a 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -26,6 +26,7 @@
#define I915_REQUEST_H
#include <linux/dma-fence.h>
+#include <linux/irq_work.h>
#include <linux/lockdep.h>
#include "gem/i915_gem_context_types.h"
@@ -208,6 +209,7 @@ struct i915_request {
};
struct list_head execute_cb;
struct i915_sw_fence semaphore;
+ struct irq_work semaphore_work;
/*
* A list of everyone we wait upon, and everyone who waits upon us.
@@ -303,6 +305,9 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp);
struct i915_request * __must_check
i915_request_create(struct intel_context *ce);
+void i915_request_set_error_once(struct i915_request *rq, int error);
+void __i915_request_skip(struct i915_request *rq);
+
struct i915_request *__i915_request_commit(struct i915_request *request);
void __i915_request_queue(struct i915_request *rq,
const struct i915_sched_attr *attr);
@@ -352,8 +357,6 @@ void i915_request_add(struct i915_request *rq);
bool __i915_request_submit(struct i915_request *request);
void i915_request_submit(struct i915_request *request);
-void i915_request_skip(struct i915_request *request, int error);
-
void __i915_request_unsubmit(struct i915_request *request);
void i915_request_unsubmit(struct i915_request *request);
@@ -395,7 +398,9 @@ static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
static inline u32 __hwsp_seqno(const struct i915_request *rq)
{
- return READ_ONCE(*rq->hwsp_seqno);
+ const u32 *hwsp = READ_ONCE(rq->hwsp_seqno);
+
+ return READ_ONCE(*hwsp);
}
/**
@@ -509,7 +514,8 @@ static inline bool i915_request_completed(const struct i915_request *rq)
static inline void i915_request_mark_complete(struct i915_request *rq)
{
- rq->hwsp_seqno = (u32 *)&rq->fence.seqno; /* decouple from HWSP */
+ WRITE_ONCE(rq->hwsp_seqno, /* decouple from HWSP */
+ (u32 *)&rq->fence.seqno);
}
static inline bool i915_request_has_waitboost(const struct i915_request *rq)
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index be770f2419b1..68b06a7ba667 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -209,6 +209,8 @@ static void kick_submission(struct intel_engine_cs *engine,
if (!inflight)
goto unlock;
+ engine->execlists.queue_priority_hint = prio;
+
/*
* If we are already the currently executing context, don't
* bother evaluating if we should preempt ourselves.
@@ -216,7 +218,6 @@ static void kick_submission(struct intel_engine_cs *engine,
if (inflight->context == rq->context)
goto unlock;
- engine->execlists.queue_priority_hint = prio;
if (need_preempt(prio, rq_prio(inflight)))
tasklet_hi_schedule(&engine->execlists.tasklet);
@@ -227,10 +228,10 @@ unlock:
static void __i915_schedule(struct i915_sched_node *node,
const struct i915_sched_attr *attr)
{
+ const int prio = max(attr->priority, node->attr.priority);
struct intel_engine_cs *engine;
struct i915_dependency *dep, *p;
struct i915_dependency stack;
- const int prio = attr->priority;
struct sched_cache cache;
LIST_HEAD(dfs);
@@ -238,9 +239,6 @@ static void __i915_schedule(struct i915_sched_node *node,
lockdep_assert_held(&schedule_lock);
GEM_BUG_ON(prio == I915_PRIORITY_INVALID);
- if (prio <= READ_ONCE(node->attr.priority))
- return;
-
if (node_signaled(node))
return;
@@ -324,7 +322,7 @@ static void __i915_schedule(struct i915_sched_node *node,
GEM_BUG_ON(node_to_request(node)->engine != engine);
- node->attr.priority = prio;
+ WRITE_ONCE(node->attr.priority, prio);
/*
* Once the request is ready, it will be placed into the
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 8812cdd9007f..ed2be3489f8e 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -24,8 +24,6 @@
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
-#include <drm/i915_drm.h>
-
#include "display/intel_fbc.h"
#include "display/intel_gmbus.h"
#include "display/intel_vga.h"
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index c14d762bd652..45d32ef42787 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -32,6 +32,7 @@
#include "gt/intel_rc6.h"
#include "gt/intel_rps.h"
+#include "gt/sysfs_engines.h"
#include "i915_drv.h"
#include "i915_sysfs.h"
@@ -606,6 +607,8 @@ void i915_setup_sysfs(struct drm_i915_private *dev_priv)
drm_err(&dev_priv->drm, "RPS sysfs setup failed\n");
i915_setup_error_capture(kdev);
+
+ intel_engines_add_sysfs(dev_priv);
}
void i915_teardown_sysfs(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/i915_utils.c b/drivers/gpu/drm/i915/i915_utils.c
index 632d6953c78d..029854ae65fc 100644
--- a/drivers/gpu/drm/i915/i915_utils.c
+++ b/drivers/gpu/drm/i915/i915_utils.c
@@ -8,7 +8,6 @@
#include "i915_drv.h"
#include "i915_utils.h"
-#define FDO_BUG_URL "https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs"
#define FDO_BUG_MSG "Please file a bug on drm/i915; see " FDO_BUG_URL " for details."
void
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index b0ade76bec90..03a73d2bd50d 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -34,6 +34,8 @@
struct drm_i915_private;
struct timer_list;
+#define FDO_BUG_URL "https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs"
+
#undef WARN_ON
/* Many gcc seem to no see through this and fall over :( */
#if 0
@@ -100,12 +102,24 @@ bool i915_error_injected(void);
typeof(max) max__ = (max); \
(void)(&start__ == &size__); \
(void)(&start__ == &max__); \
- start__ > max__ || size__ > max__ - start__; \
+ start__ >= max__ || size__ > max__ - start__; \
})
#define range_overflows_t(type, start, size, max) \
range_overflows((type)(start), (type)(size), (type)(max))
+#define range_overflows_end(start, size, max) ({ \
+ typeof(start) start__ = (start); \
+ typeof(size) size__ = (size); \
+ typeof(max) max__ = (max); \
+ (void)(&start__ == &size__); \
+ (void)(&start__ == &max__); \
+ start__ > max__ || size__ > max__ - start__; \
+})
+
+#define range_overflows_end_t(type, start, size, max) \
+ range_overflows_end((type)(start), (type)(size), (type)(max))
+
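Editor's note: the tightening of range_overflows() to start__ >= max__ and the new range_overflows_end() variant differ only at the boundary: the former treats a range that merely begins at max as invalid, while the latter allows a range to end exactly at max. A user-space restatement (illustrative, not the kernel macros) makes the boundary concrete:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static bool range_overflows(uint64_t start, uint64_t size, uint64_t max)
{
	return start >= max || size > max - start;
}

static bool range_overflows_end(uint64_t start, uint64_t size, uint64_t max)
{
	return start > max || size > max - start;
}

int main(void)
{
	assert(range_overflows(16, 0, 16));       /* start may not reach max */
	assert(!range_overflows_end(16, 0, 16));  /* but a range may end at max */
	assert(range_overflows_end(8, 9, 16));    /* 8 + 9 overruns max */
	assert(!range_overflows(8, 8, 16));       /* 8 + 8 fits exactly */
	return 0;
}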
/* Note we don't consider signbits :| */
#define overflows_type(x, T) \
(sizeof(x) > sizeof(T) && (x) >> BITS_PER_TYPE(T))
@@ -234,6 +248,11 @@ static inline u64 ptr_to_u64(const void *ptr)
__idx; \
})
+static inline bool is_power_of_2_u64(u64 n)
+{
+ return (n != 0 && ((n & (n - 1)) == 0));
+}
+
static inline void __list_del_many(struct list_head *head,
struct list_head *first)
{
@@ -241,6 +260,12 @@ static inline void __list_del_many(struct list_head *head,
WRITE_ONCE(head->next, first);
}
+static inline int list_is_last_rcu(const struct list_head *list,
+ const struct list_head *head)
+{
+ return READ_ONCE(list->next) == head;
+}
+
/*
* Wait until the work is finally complete, even if it tries to postpone
* by requeueing itself. Note, that if the worker never cancels itself,
diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c
index 4afe21662266..70fca72f5162 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.c
+++ b/drivers/gpu/drm/i915/i915_vgpu.c
@@ -21,6 +21,8 @@
* SOFTWARE.
*/
+#include "i915_drv.h"
+#include "i915_pvinfo.h"
#include "i915_vgpu.h"
/**
@@ -51,13 +53,13 @@
*/
/**
- * i915_detect_vgpu - detect virtual GPU
+ * intel_vgpu_detect - detect virtual GPU
* @dev_priv: i915 device private
*
* This function is called at the initialization stage, to detect whether
* running on a vGPU.
*/
-void i915_detect_vgpu(struct drm_i915_private *dev_priv)
+void intel_vgpu_detect(struct drm_i915_private *dev_priv)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
u64 magic;
@@ -102,11 +104,36 @@ out:
pci_iounmap(pdev, shared_area);
}
+void intel_vgpu_register(struct drm_i915_private *i915)
+{
+ /*
+ * Notify a valid surface after modesetting, when running inside a VM.
+ */
+ if (intel_vgpu_active(i915))
+ intel_uncore_write(&i915->uncore, vgtif_reg(display_ready),
+ VGT_DRV_DISPLAY_READY);
+}
+
+bool intel_vgpu_active(struct drm_i915_private *dev_priv)
+{
+ return dev_priv->vgpu.active;
+}
+
bool intel_vgpu_has_full_ppgtt(struct drm_i915_private *dev_priv)
{
return dev_priv->vgpu.caps & VGT_CAPS_FULL_PPGTT;
}
+bool intel_vgpu_has_hwsp_emulation(struct drm_i915_private *dev_priv)
+{
+ return dev_priv->vgpu.caps & VGT_CAPS_HWSP_EMULATION;
+}
+
+bool intel_vgpu_has_huge_gtt(struct drm_i915_private *dev_priv)
+{
+ return dev_priv->vgpu.caps & VGT_CAPS_HUGE_GTT;
+}
+
struct _balloon_info_ {
/*
* There are up to 2 regions per mappable/unmappable graphic
diff --git a/drivers/gpu/drm/i915/i915_vgpu.h b/drivers/gpu/drm/i915/i915_vgpu.h
index 8b3663dad193..ffbb77d08048 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.h
+++ b/drivers/gpu/drm/i915/i915_vgpu.h
@@ -24,24 +24,17 @@
#ifndef _I915_VGPU_H_
#define _I915_VGPU_H_
-#include "i915_drv.h"
-#include "i915_pvinfo.h"
+#include <linux/types.h>
-void i915_detect_vgpu(struct drm_i915_private *dev_priv);
+struct drm_i915_private;
+struct i915_ggtt;
-bool intel_vgpu_has_full_ppgtt(struct drm_i915_private *dev_priv);
-
-static inline bool
-intel_vgpu_has_hwsp_emulation(struct drm_i915_private *dev_priv)
-{
- return dev_priv->vgpu.caps & VGT_CAPS_HWSP_EMULATION;
-}
-
-static inline bool
-intel_vgpu_has_huge_gtt(struct drm_i915_private *dev_priv)
-{
- return dev_priv->vgpu.caps & VGT_CAPS_HUGE_GTT;
-}
+void intel_vgpu_detect(struct drm_i915_private *i915);
+bool intel_vgpu_active(struct drm_i915_private *i915);
+void intel_vgpu_register(struct drm_i915_private *i915);
+bool intel_vgpu_has_full_ppgtt(struct drm_i915_private *i915);
+bool intel_vgpu_has_hwsp_emulation(struct drm_i915_private *i915);
+bool intel_vgpu_has_huge_gtt(struct drm_i915_private *i915);
int intel_vgt_balloon(struct i915_ggtt *ggtt);
void intel_vgt_deballoon(struct i915_ggtt *ggtt);
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 298ca4316e65..5b3efb43a8ef 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -641,7 +641,6 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
u64 start, end;
int ret;
- GEM_BUG_ON(i915_vma_is_closed(vma));
GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
@@ -1174,7 +1173,7 @@ int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq)
GEM_BUG_ON(!i915_vma_is_pinned(vma));
/* Wait for the vma to be bound before we start! */
- err = i915_request_await_active(rq, &vma->active);
+ err = i915_request_await_active(rq, &vma->active, 0);
if (err)
return err;
diff --git a/drivers/gpu/drm/i915/i915_vma_types.h b/drivers/gpu/drm/i915/i915_vma_types.h
index e0942efd5236..63831cdb7402 100644
--- a/drivers/gpu/drm/i915/i915_vma_types.h
+++ b/drivers/gpu/drm/i915/i915_vma_types.h
@@ -273,21 +273,10 @@ struct i915_vma {
struct rb_node obj_node;
struct hlist_node obj_hash;
- /** This vma's place in the execbuf reservation list */
- struct list_head exec_link;
- struct list_head reloc_link;
-
/** This vma's place in the eviction list */
struct list_head evict_link;
struct list_head closed_link;
-
- /**
- * Used for performing relocations during execbuffer insertion.
- */
- unsigned int *exec_flags;
- struct hlist_node exec_node;
- u32 exec_handle;
};
#endif
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index 8e99ad097830..d7fe12734db8 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -23,6 +23,7 @@
*/
#include <drm/drm_print.h>
+#include <drm/i915_pciids.h>
#include "display/intel_cdclk.h"
#include "intel_device_info.h"
diff --git a/drivers/gpu/drm/i915/intel_dram.c b/drivers/gpu/drm/i915/intel_dram.c
new file mode 100644
index 000000000000..6b922efb1d7c
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dram.c
@@ -0,0 +1,500 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "intel_dram.h"
+
+struct dram_dimm_info {
+ u8 size, width, ranks;
+};
+
+struct dram_channel_info {
+ struct dram_dimm_info dimm_l, dimm_s;
+ u8 ranks;
+ bool is_16gb_dimm;
+};
+
+#define DRAM_TYPE_STR(type) [INTEL_DRAM_ ## type] = #type
+
+static const char *intel_dram_type_str(enum intel_dram_type type)
+{
+ static const char * const str[] = {
+ DRAM_TYPE_STR(UNKNOWN),
+ DRAM_TYPE_STR(DDR3),
+ DRAM_TYPE_STR(DDR4),
+ DRAM_TYPE_STR(LPDDR3),
+ DRAM_TYPE_STR(LPDDR4),
+ };
+
+ if (type >= ARRAY_SIZE(str))
+ type = INTEL_DRAM_UNKNOWN;
+
+ return str[type];
+}
+
+#undef DRAM_TYPE_STR
+
+static int intel_dimm_num_devices(const struct dram_dimm_info *dimm)
+{
+ return dimm->ranks * 64 / (dimm->width ?: 1);
+}
+
+/* Returns total GB for the whole DIMM */
+static int skl_get_dimm_size(u16 val)
+{
+ return val & SKL_DRAM_SIZE_MASK;
+}
+
+static int skl_get_dimm_width(u16 val)
+{
+ if (skl_get_dimm_size(val) == 0)
+ return 0;
+
+ switch (val & SKL_DRAM_WIDTH_MASK) {
+ case SKL_DRAM_WIDTH_X8:
+ case SKL_DRAM_WIDTH_X16:
+ case SKL_DRAM_WIDTH_X32:
+ val = (val & SKL_DRAM_WIDTH_MASK) >> SKL_DRAM_WIDTH_SHIFT;
+ return 8 << val;
+ default:
+ MISSING_CASE(val);
+ return 0;
+ }
+}
+
+static int skl_get_dimm_ranks(u16 val)
+{
+ if (skl_get_dimm_size(val) == 0)
+ return 0;
+
+ val = (val & SKL_DRAM_RANK_MASK) >> SKL_DRAM_RANK_SHIFT;
+
+ return val + 1;
+}
+
+/* Returns total GB for the whole DIMM */
+static int cnl_get_dimm_size(u16 val)
+{
+ return (val & CNL_DRAM_SIZE_MASK) / 2;
+}
+
+static int cnl_get_dimm_width(u16 val)
+{
+ if (cnl_get_dimm_size(val) == 0)
+ return 0;
+
+ switch (val & CNL_DRAM_WIDTH_MASK) {
+ case CNL_DRAM_WIDTH_X8:
+ case CNL_DRAM_WIDTH_X16:
+ case CNL_DRAM_WIDTH_X32:
+ val = (val & CNL_DRAM_WIDTH_MASK) >> CNL_DRAM_WIDTH_SHIFT;
+ return 8 << val;
+ default:
+ MISSING_CASE(val);
+ return 0;
+ }
+}
+
+static int cnl_get_dimm_ranks(u16 val)
+{
+ if (cnl_get_dimm_size(val) == 0)
+ return 0;
+
+ val = (val & CNL_DRAM_RANK_MASK) >> CNL_DRAM_RANK_SHIFT;
+
+ return val + 1;
+}
+
+static bool
+skl_is_16gb_dimm(const struct dram_dimm_info *dimm)
+{
+ /* Convert total GB to Gb per DRAM device */
+ return 8 * dimm->size / (intel_dimm_num_devices(dimm) ?: 1) == 16;
+}
+
+static void
+skl_dram_get_dimm_info(struct drm_i915_private *i915,
+ struct dram_dimm_info *dimm,
+ int channel, char dimm_name, u16 val)
+{
+ if (INTEL_GEN(i915) >= 10) {
+ dimm->size = cnl_get_dimm_size(val);
+ dimm->width = cnl_get_dimm_width(val);
+ dimm->ranks = cnl_get_dimm_ranks(val);
+ } else {
+ dimm->size = skl_get_dimm_size(val);
+ dimm->width = skl_get_dimm_width(val);
+ dimm->ranks = skl_get_dimm_ranks(val);
+ }
+
+ drm_dbg_kms(&i915->drm,
+ "CH%u DIMM %c size: %u GB, width: X%u, ranks: %u, 16Gb DIMMs: %s\n",
+ channel, dimm_name, dimm->size, dimm->width, dimm->ranks,
+ yesno(skl_is_16gb_dimm(dimm)));
+}
+
+static int
+skl_dram_get_channel_info(struct drm_i915_private *i915,
+ struct dram_channel_info *ch,
+ int channel, u32 val)
+{
+ skl_dram_get_dimm_info(i915, &ch->dimm_l,
+ channel, 'L', val & 0xffff);
+ skl_dram_get_dimm_info(i915, &ch->dimm_s,
+ channel, 'S', val >> 16);
+
+ if (ch->dimm_l.size == 0 && ch->dimm_s.size == 0) {
+ drm_dbg_kms(&i915->drm, "CH%u not populated\n", channel);
+ return -EINVAL;
+ }
+
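+ /* Two single-rank DIMMs on one channel add up to a dual-rank channel */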
+ if (ch->dimm_l.ranks == 2 || ch->dimm_s.ranks == 2)
+ ch->ranks = 2;
+ else if (ch->dimm_l.ranks == 1 && ch->dimm_s.ranks == 1)
+ ch->ranks = 2;
+ else
+ ch->ranks = 1;
+
+ ch->is_16gb_dimm = skl_is_16gb_dimm(&ch->dimm_l) ||
+ skl_is_16gb_dimm(&ch->dimm_s);
+
+ drm_dbg_kms(&i915->drm, "CH%u ranks: %u, 16Gb DIMMs: %s\n",
+ channel, ch->ranks, yesno(ch->is_16gb_dimm));
+
+ return 0;
+}
+
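Editor's note on the register layout assumed above: each SKL_MAD_DIMM_CH* register packs two 16-bit DIMM descriptors, slot L in the low half and slot S in the high half. With a made-up value:

u32 val = 0x00020008;	/* hypothetical channel register contents */
u16 dimm_l = val & 0xffff;	/* 0x0008, decoded via the 'L' call above */
u16 dimm_s = val >> 16;	/* 0x0002, decoded via the 'S' call above */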
+static bool
+intel_is_dram_symmetric(const struct dram_channel_info *ch0,
+ const struct dram_channel_info *ch1)
+{
+ return !memcmp(ch0, ch1, sizeof(*ch0)) &&
+ (ch0->dimm_s.size == 0 ||
+ !memcmp(&ch0->dimm_l, &ch0->dimm_s, sizeof(ch0->dimm_l)));
+}
+
+static int
+skl_dram_get_channels_info(struct drm_i915_private *i915)
+{
+ struct dram_info *dram_info = &i915->dram_info;
+ struct dram_channel_info ch0 = {}, ch1 = {};
+ u32 val;
+ int ret;
+
+ val = intel_uncore_read(&i915->uncore,
+ SKL_MAD_DIMM_CH0_0_0_0_MCHBAR_MCMAIN);
+ ret = skl_dram_get_channel_info(i915, &ch0, 0, val);
+ if (ret == 0)
+ dram_info->num_channels++;
+
+ val = intel_uncore_read(&i915->uncore,
+ SKL_MAD_DIMM_CH1_0_0_0_MCHBAR_MCMAIN);
+ ret = skl_dram_get_channel_info(i915, &ch1, 1, val);
+ if (ret == 0)
+ dram_info->num_channels++;
+
+ if (dram_info->num_channels == 0) {
+ drm_info(&i915->drm, "Number of memory channels is zero\n");
+ return -EINVAL;
+ }
+
+ /*
+ * If either channel is a single rank channel, the worst case
+ * output will be the same as for single rank memory, so treat
+ * the whole configuration as single rank.
+ */
+ if (ch0.ranks == 1 || ch1.ranks == 1)
+ dram_info->ranks = 1;
+ else
+ dram_info->ranks = max(ch0.ranks, ch1.ranks);
+
+ if (dram_info->ranks == 0) {
+ drm_info(&i915->drm, "couldn't get memory rank information\n");
+ return -EINVAL;
+ }
+
+ dram_info->is_16gb_dimm = ch0.is_16gb_dimm || ch1.is_16gb_dimm;
+
+ dram_info->symmetric_memory = intel_is_dram_symmetric(&ch0, &ch1);
+
+ drm_dbg_kms(&i915->drm, "Memory configuration is symmetric? %s\n",
+ yesno(dram_info->symmetric_memory));
+
+ return 0;
+}
+
+static enum intel_dram_type
+skl_get_dram_type(struct drm_i915_private *i915)
+{
+ u32 val;
+
+ val = intel_uncore_read(&i915->uncore,
+ SKL_MAD_INTER_CHANNEL_0_0_0_MCHBAR_MCMAIN);
+
+ switch (val & SKL_DRAM_DDR_TYPE_MASK) {
+ case SKL_DRAM_DDR_TYPE_DDR3:
+ return INTEL_DRAM_DDR3;
+ case SKL_DRAM_DDR_TYPE_DDR4:
+ return INTEL_DRAM_DDR4;
+ case SKL_DRAM_DDR_TYPE_LPDDR3:
+ return INTEL_DRAM_LPDDR3;
+ case SKL_DRAM_DDR_TYPE_LPDDR4:
+ return INTEL_DRAM_LPDDR4;
+ default:
+ MISSING_CASE(val);
+ return INTEL_DRAM_UNKNOWN;
+ }
+}
+
+static int
+skl_get_dram_info(struct drm_i915_private *i915)
+{
+ struct dram_info *dram_info = &i915->dram_info;
+ u32 mem_freq_khz, val;
+ int ret;
+
+ dram_info->type = skl_get_dram_type(i915);
+ drm_dbg_kms(&i915->drm, "DRAM type: %s\n",
+ intel_dram_type_str(dram_info->type));
+
+ ret = skl_dram_get_channels_info(i915);
+ if (ret)
+ return ret;
+
+ val = intel_uncore_read(&i915->uncore,
+ SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU);
+ mem_freq_khz = DIV_ROUND_UP((val & SKL_REQ_DATA_MASK) *
+ SKL_MEMORY_FREQ_MULTIPLIER_HZ, 1000);
+
+ dram_info->bandwidth_kbps = dram_info->num_channels *
+ mem_freq_khz * 8;
+
+ if (dram_info->bandwidth_kbps == 0) {
+ drm_info(&i915->drm,
+ "Couldn't get system memory bandwidth\n");
+ return -EINVAL;
+ }
+
+ dram_info->valid = true;
+ return 0;
+}
+
+/* Returns Gb per DRAM device */
+static int bxt_get_dimm_size(u32 val)
+{
+ switch (val & BXT_DRAM_SIZE_MASK) {
+ case BXT_DRAM_SIZE_4GBIT:
+ return 4;
+ case BXT_DRAM_SIZE_6GBIT:
+ return 6;
+ case BXT_DRAM_SIZE_8GBIT:
+ return 8;
+ case BXT_DRAM_SIZE_12GBIT:
+ return 12;
+ case BXT_DRAM_SIZE_16GBIT:
+ return 16;
+ default:
+ MISSING_CASE(val);
+ return 0;
+ }
+}
+
+static int bxt_get_dimm_width(u32 val)
+{
+ if (!bxt_get_dimm_size(val))
+ return 0;
+
+ val = (val & BXT_DRAM_WIDTH_MASK) >> BXT_DRAM_WIDTH_SHIFT;
+
+ return 8 << val;
+}
+
+static int bxt_get_dimm_ranks(u32 val)
+{
+ if (!bxt_get_dimm_size(val))
+ return 0;
+
+ switch (val & BXT_DRAM_RANK_MASK) {
+ case BXT_DRAM_RANK_SINGLE:
+ return 1;
+ case BXT_DRAM_RANK_DUAL:
+ return 2;
+ default:
+ MISSING_CASE(val);
+ return 0;
+ }
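Editor's note: i915_request_set_error_once() is a lock-free "sticky error" update: a benign status may be replaced, but once a fatal error lands it is final. A minimal user-space sketch of the same compare-and-swap idiom, using C11 atomics and made-up names (error_is_fatal, set_error_once):

#include <errno.h>
#include <stdatomic.h>
#include <stdbool.h>

/* Hypothetical sketch: record an error on a status word exactly once,
 * never downgrading a fatal error that another thread already set. */
static bool error_is_fatal(int error)
{
	return error != 0 && error != -EAGAIN && error != -ETIMEDOUT;
}

static void set_error_once(_Atomic int *status, int error)
{
	int old = atomic_load_explicit(status, memory_order_relaxed);

	do {
		if (error_is_fatal(old))
			return;	/* fatal errors are sticky */
	} while (!atomic_compare_exchange_weak(status, &old, error));
}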
+}
+
+static enum intel_dram_type bxt_get_dimm_type(u32 val)
+{
+ if (!bxt_get_dimm_size(val))
+ return INTEL_DRAM_UNKNOWN;
+
+ switch (val & BXT_DRAM_TYPE_MASK) {
+ case BXT_DRAM_TYPE_DDR3:
+ return INTEL_DRAM_DDR3;
+ case BXT_DRAM_TYPE_LPDDR3:
+ return INTEL_DRAM_LPDDR3;
+ case BXT_DRAM_TYPE_DDR4:
+ return INTEL_DRAM_DDR4;
+ case BXT_DRAM_TYPE_LPDDR4:
+ return INTEL_DRAM_LPDDR4;
+ default:
+ MISSING_CASE(val);
+ return INTEL_DRAM_UNKNOWN;
+ }
+}
+
+static void bxt_get_dimm_info(struct dram_dimm_info *dimm, u32 val)
+{
+ dimm->width = bxt_get_dimm_width(val);
+ dimm->ranks = bxt_get_dimm_ranks(val);
+
+ /*
+ * Size in register is Gb per DRAM device. Convert to total
+ * GB to match the way we report this for non-LP platforms.
+ */
+ dimm->size = bxt_get_dimm_size(val) * intel_dimm_num_devices(dimm) / 8;
+}
+
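Editor's note, a worked example of the conversion above with illustrative numbers: a dual-rank x16 DIMM built from 8 Gbit devices gives intel_dimm_num_devices() = 2 * 64 / 16 = 8, so dimm->size = 8 Gbit * 8 devices / 8 bits-per-byte = 8 GB, matching the total-GB convention used on the non-LP platforms.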
+static int bxt_get_dram_info(struct drm_i915_private *i915)
+{
+ struct dram_info *dram_info = &i915->dram_info;
+ u32 dram_channels;
+ u32 mem_freq_khz, val;
+ u8 num_active_channels;
+ int i;
+
+ val = intel_uncore_read(&i915->uncore, BXT_P_CR_MC_BIOS_REQ_0_0_0);
+ mem_freq_khz = DIV_ROUND_UP((val & BXT_REQ_DATA_MASK) *
+ BXT_MEMORY_FREQ_MULTIPLIER_HZ, 1000);
+
+ dram_channels = val & BXT_DRAM_CHANNEL_ACTIVE_MASK;
+ num_active_channels = hweight32(dram_channels);
+
+ /* Each active bit represents 4-byte channel */
+ dram_info->bandwidth_kbps = (mem_freq_khz * num_active_channels * 4);
+
+ if (dram_info->bandwidth_kbps == 0) {
+ drm_info(&i915->drm,
+ "Couldn't get system memory bandwidth\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Now read each DUNIT8/9/10/11 to check the rank of each DIMM.
+ */
+ for (i = BXT_D_CR_DRP0_DUNIT_START; i <= BXT_D_CR_DRP0_DUNIT_END; i++) {
+ struct dram_dimm_info dimm;
+ enum intel_dram_type type;
+
+ val = intel_uncore_read(&i915->uncore, BXT_D_CR_DRP0_DUNIT(i));
+ if (val == 0xFFFFFFFF)
+ continue;
+
+ dram_info->num_channels++;
+
+ bxt_get_dimm_info(&dimm, val);
+ type = bxt_get_dimm_type(val);
+
+ drm_WARN_ON(&i915->drm, type != INTEL_DRAM_UNKNOWN &&
+ dram_info->type != INTEL_DRAM_UNKNOWN &&
+ dram_info->type != type);
+
+ drm_dbg_kms(&i915->drm,
+ "CH%u DIMM size: %u GB, width: X%u, ranks: %u, type: %s\n",
+ i - BXT_D_CR_DRP0_DUNIT_START,
+ dimm.size, dimm.width, dimm.ranks,
+ intel_dram_type_str(type));
+
+ /*
+ * If any channel is single rank, the worst case
+ * output is the same as for single rank memory,
+ * so treat the whole configuration as single rank.
+ */
+ if (dram_info->ranks == 0)
+ dram_info->ranks = dimm.ranks;
+ else if (dimm.ranks == 1)
+ dram_info->ranks = 1;
+
+ if (type != INTEL_DRAM_UNKNOWN)
+ dram_info->type = type;
+ }
+
+ if (dram_info->type == INTEL_DRAM_UNKNOWN || dram_info->ranks == 0) {
+ drm_info(&i915->drm, "couldn't get memory information\n");
+ return -EINVAL;
+ }
+
+ dram_info->valid = true;
+
+ return 0;
+}
+
+void intel_dram_detect(struct drm_i915_private *i915)
+{
+ struct dram_info *dram_info = &i915->dram_info;
+ int ret;
+
+ /*
+ * Assume 16Gb DIMMs are present until proven otherwise.
+ * This is only used for the level 0 watermark latency
+ * w/a which does not apply to bxt/glk.
+ */
+ dram_info->is_16gb_dimm = !IS_GEN9_LP(i915);
+
+ if (INTEL_GEN(i915) < 9 || !HAS_DISPLAY(i915))
+ return;
+
+ if (IS_GEN9_LP(i915))
+ ret = bxt_get_dram_info(i915);
+ else
+ ret = skl_get_dram_info(i915);
+ if (ret)
+ return;
+
+ drm_dbg_kms(&i915->drm, "DRAM bandwidth: %u kBps, channels: %u\n",
+ dram_info->bandwidth_kbps, dram_info->num_channels);
+
+ drm_dbg_kms(&i915->drm, "DRAM ranks: %u, 16Gb DIMMs: %s\n",
+ dram_info->ranks, yesno(dram_info->is_16gb_dimm));
+}
+
+static u32 gen9_edram_size_mb(struct drm_i915_private *i915, u32 cap)
+{
+ static const u8 ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
+ static const u8 sets[4] = { 1, 1, 2, 2 };
+
+ return EDRAM_NUM_BANKS(cap) *
+ ways[EDRAM_WAYS_IDX(cap)] *
+ sets[EDRAM_SETS_IDX(cap)];
+}
+
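Editor's note, a worked example of the table lookup above: a capability word decoding to 4 banks, way index 1 and set index 0 yields 4 * ways[1] * sets[0] = 4 * 8 * 1 = 32, i.e. 32MB of eDRAM (the field values here are made up; the real ones come from HSW_EDRAM_CAP).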
+void intel_dram_edram_detect(struct drm_i915_private *i915)
+{
+ u32 edram_cap = 0;
+
+ if (!(IS_HASWELL(i915) || IS_BROADWELL(i915) || INTEL_GEN(i915) >= 9))
+ return;
+
+ edram_cap = __raw_uncore_read32(&i915->uncore, HSW_EDRAM_CAP);
+
+ /* NB: We can't write IDICR yet because we don't have gt funcs set up */
+
+ if (!(edram_cap & EDRAM_ENABLED))
+ return;
+
+ /*
+ * The capability bits needed for the size calculation are not
+ * present before gen9, so always report 128MB there.
+ */
+ if (INTEL_GEN(i915) < 9)
+ i915->edram_size_mb = 128;
+ else
+ i915->edram_size_mb = gen9_edram_size_mb(i915, edram_cap);
+
+ dev_info(i915->drm.dev,
+ "Found %uMB of eDRAM\n", i915->edram_size_mb);
+}
diff --git a/drivers/gpu/drm/i915/intel_dram.h b/drivers/gpu/drm/i915/intel_dram.h
new file mode 100644
index 000000000000..4ba13c13162c
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dram.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef __INTEL_DRAM_H__
+#define __INTEL_DRAM_H__
+
+struct drm_i915_private;
+
+void intel_dram_edram_detect(struct drm_i915_private *i915);
+void intel_dram_detect(struct drm_i915_private *i915);
+
+#endif /* __INTEL_DRAM_H__ */
diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c
index e73fd752adef..21b91313cc5d 100644
--- a/drivers/gpu/drm/i915/intel_gvt.c
+++ b/drivers/gpu/drm/i915/intel_gvt.c
@@ -22,6 +22,7 @@
*/
#include "i915_drv.h"
+#include "i915_vgpu.h"
#include "intel_gvt.h"
/**
@@ -124,6 +125,11 @@ bail:
return 0;
}
+static inline bool intel_gvt_active(struct drm_i915_private *dev_priv)
+{
+ return dev_priv->gvt;
+}
+
/**
* intel_gvt_driver_remove - cleanup GVT components when i915 driver is
* unbinding
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 22aa205793e5..8375054ba27d 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -40,12 +40,36 @@
#include "gt/intel_llc.h"
#include "i915_drv.h"
+#include "i915_fixed.h"
#include "i915_irq.h"
#include "i915_trace.h"
#include "intel_pm.h"
#include "intel_sideband.h"
#include "../../../platform/x86/intel_ips.h"
+/* Stores plane specific WM parameters */
+struct skl_wm_params {
+ bool x_tiled, y_tiled;
+ bool rc_surface;
+ bool is_planar;
+ u32 width;
+ u8 cpp;
+ u32 plane_pixel_rate;
+ u32 y_min_scanlines;
+ u32 plane_bytes_per_line;
+ uint_fixed_16_16_t plane_blocks_per_line;
+ uint_fixed_16_16_t y_tile_minimum;
+ u32 linetime_us;
+ u32 dbuf_block_size;
+};
+
+/* used in computing the new watermarks state */
+struct intel_wm_config {
+ unsigned int num_pipes_active;
+ bool sprites_enabled;
+ bool sprites_scaled;
+};
+
static void gen9_init_clock_gating(struct drm_i915_private *dev_priv)
{
if (HAS_LLC(dev_priv)) {
@@ -128,16 +152,6 @@ static void glk_init_clock_gating(struct drm_i915_private *dev_priv)
*/
I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
PWM1_GATING_DIS | PWM2_GATING_DIS);
-
- /* WaDDIIOTimeout:glk */
- if (IS_GLK_REVID(dev_priv, 0, GLK_REVID_A1)) {
- u32 val = I915_READ(CHICKEN_MISC_2);
- val &= ~(GLK_CL0_PWR_DOWN |
- GLK_CL1_PWR_DOWN |
- GLK_CL2_PWR_DOWN);
- I915_WRITE(CHICKEN_MISC_2, val);
- }
-
}
static void pnv_get_mem_freq(struct drm_i915_private *dev_priv)
@@ -2776,7 +2790,7 @@ static bool ilk_validate_wm_level(int level,
}
static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
- const struct intel_crtc *intel_crtc,
+ const struct intel_crtc *crtc,
int level,
struct intel_crtc_state *crtc_state,
const struct intel_plane_state *pristate,
@@ -3107,7 +3121,7 @@ static bool ilk_validate_pipe_wm(const struct drm_i915_private *dev_priv,
static int ilk_compute_pipe_wm(struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct intel_pipe_wm *pipe_wm;
struct intel_plane *plane;
const struct intel_plane_state *plane_state;
@@ -3147,7 +3161,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *crtc_state)
usable_level = 0;
memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm));
- ilk_compute_wm_level(dev_priv, intel_crtc, 0, crtc_state,
+ ilk_compute_wm_level(dev_priv, crtc, 0, crtc_state,
pristate, sprstate, curstate, &pipe_wm->wm[0]);
if (!ilk_validate_pipe_wm(dev_priv, pipe_wm))
@@ -3158,7 +3172,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *crtc_state)
for (level = 1; level <= usable_level; level++) {
struct intel_wm_level *wm = &pipe_wm->wm[level];
- ilk_compute_wm_level(dev_priv, intel_crtc, level, crtc_state,
+ ilk_compute_wm_level(dev_priv, crtc, level, crtc_state,
pristate, sprstate, curstate, wm);
/*
@@ -3843,7 +3857,7 @@ static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv)
}
static u8 skl_compute_dbuf_slices(const struct intel_crtc_state *crtc_state,
- u32 active_pipes);
+ u8 active_pipes);
static void
skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
@@ -4184,50 +4198,51 @@ static const struct dbuf_slice_conf_entry icl_allowed_dbufs[] =
{
.active_pipes = BIT(PIPE_A),
.dbuf_mask = {
- [PIPE_A] = BIT(DBUF_S1)
- }
+ [PIPE_A] = BIT(DBUF_S1),
+ },
},
{
.active_pipes = BIT(PIPE_B),
.dbuf_mask = {
- [PIPE_B] = BIT(DBUF_S1)
- }
+ [PIPE_B] = BIT(DBUF_S1),
+ },
},
{
.active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
.dbuf_mask = {
[PIPE_A] = BIT(DBUF_S1),
- [PIPE_B] = BIT(DBUF_S2)
- }
+ [PIPE_B] = BIT(DBUF_S2),
+ },
},
{
.active_pipes = BIT(PIPE_C),
.dbuf_mask = {
- [PIPE_C] = BIT(DBUF_S2)
- }
+ [PIPE_C] = BIT(DBUF_S2),
+ },
},
{
.active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
.dbuf_mask = {
[PIPE_A] = BIT(DBUF_S1),
- [PIPE_C] = BIT(DBUF_S2)
- }
+ [PIPE_C] = BIT(DBUF_S2),
+ },
},
{
.active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
.dbuf_mask = {
[PIPE_B] = BIT(DBUF_S1),
- [PIPE_C] = BIT(DBUF_S2)
- }
+ [PIPE_C] = BIT(DBUF_S2),
+ },
},
{
.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
.dbuf_mask = {
[PIPE_A] = BIT(DBUF_S1),
[PIPE_B] = BIT(DBUF_S1),
- [PIPE_C] = BIT(DBUF_S2)
- }
+ [PIPE_C] = BIT(DBUF_S2),
+ },
},
+ {}
};
/*
@@ -4246,100 +4261,100 @@ static const struct dbuf_slice_conf_entry tgl_allowed_dbufs[] =
{
.active_pipes = BIT(PIPE_A),
.dbuf_mask = {
- [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2)
- }
+ [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ },
},
{
.active_pipes = BIT(PIPE_B),
.dbuf_mask = {
- [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2)
- }
+ [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ },
},
{
.active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
.dbuf_mask = {
[PIPE_A] = BIT(DBUF_S2),
- [PIPE_B] = BIT(DBUF_S1)
- }
+ [PIPE_B] = BIT(DBUF_S1),
+ },
},
{
.active_pipes = BIT(PIPE_C),
.dbuf_mask = {
- [PIPE_C] = BIT(DBUF_S2) | BIT(DBUF_S1)
- }
+ [PIPE_C] = BIT(DBUF_S2) | BIT(DBUF_S1),
+ },
},
{
.active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
.dbuf_mask = {
[PIPE_A] = BIT(DBUF_S1),
- [PIPE_C] = BIT(DBUF_S2)
- }
+ [PIPE_C] = BIT(DBUF_S2),
+ },
},
{
.active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
.dbuf_mask = {
[PIPE_B] = BIT(DBUF_S1),
- [PIPE_C] = BIT(DBUF_S2)
- }
+ [PIPE_C] = BIT(DBUF_S2),
+ },
},
{
.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
.dbuf_mask = {
[PIPE_A] = BIT(DBUF_S1),
[PIPE_B] = BIT(DBUF_S1),
- [PIPE_C] = BIT(DBUF_S2)
- }
+ [PIPE_C] = BIT(DBUF_S2),
+ },
},
{
.active_pipes = BIT(PIPE_D),
.dbuf_mask = {
- [PIPE_D] = BIT(DBUF_S2) | BIT(DBUF_S1)
- }
+ [PIPE_D] = BIT(DBUF_S2) | BIT(DBUF_S1),
+ },
},
{
.active_pipes = BIT(PIPE_A) | BIT(PIPE_D),
.dbuf_mask = {
[PIPE_A] = BIT(DBUF_S1),
- [PIPE_D] = BIT(DBUF_S2)
- }
+ [PIPE_D] = BIT(DBUF_S2),
+ },
},
{
.active_pipes = BIT(PIPE_B) | BIT(PIPE_D),
.dbuf_mask = {
[PIPE_B] = BIT(DBUF_S1),
- [PIPE_D] = BIT(DBUF_S2)
- }
+ [PIPE_D] = BIT(DBUF_S2),
+ },
},
{
.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_D),
.dbuf_mask = {
[PIPE_A] = BIT(DBUF_S1),
[PIPE_B] = BIT(DBUF_S1),
- [PIPE_D] = BIT(DBUF_S2)
- }
+ [PIPE_D] = BIT(DBUF_S2),
+ },
},
{
.active_pipes = BIT(PIPE_C) | BIT(PIPE_D),
.dbuf_mask = {
[PIPE_C] = BIT(DBUF_S1),
- [PIPE_D] = BIT(DBUF_S2)
- }
+ [PIPE_D] = BIT(DBUF_S2),
+ },
},
{
.active_pipes = BIT(PIPE_A) | BIT(PIPE_C) | BIT(PIPE_D),
.dbuf_mask = {
[PIPE_A] = BIT(DBUF_S1),
[PIPE_C] = BIT(DBUF_S2),
- [PIPE_D] = BIT(DBUF_S2)
- }
+ [PIPE_D] = BIT(DBUF_S2),
+ },
},
{
.active_pipes = BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
.dbuf_mask = {
[PIPE_B] = BIT(DBUF_S1),
[PIPE_C] = BIT(DBUF_S2),
- [PIPE_D] = BIT(DBUF_S2)
- }
+ [PIPE_D] = BIT(DBUF_S2),
+ },
},
{
.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
@@ -4347,19 +4362,18 @@ static const struct dbuf_slice_conf_entry tgl_allowed_dbufs[] =
[PIPE_A] = BIT(DBUF_S1),
[PIPE_B] = BIT(DBUF_S1),
[PIPE_C] = BIT(DBUF_S2),
- [PIPE_D] = BIT(DBUF_S2)
- }
+ [PIPE_D] = BIT(DBUF_S2),
+ },
},
+ {}
};
-static u8 compute_dbuf_slices(enum pipe pipe,
- u32 active_pipes,
- const struct dbuf_slice_conf_entry *dbuf_slices,
- int size)
+static u8 compute_dbuf_slices(enum pipe pipe, u8 active_pipes,
+ const struct dbuf_slice_conf_entry *dbuf_slices)
{
int i;
- for (i = 0; i < size; i++) {
+ for (i = 0; dbuf_slices[i].active_pipes != 0; i++) {
if (dbuf_slices[i].active_pipes == active_pipes)
return dbuf_slices[i].dbuf_mask[pipe];
}
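Editor's note: with the explicit size parameter gone, the icl/tgl tables rely on their zero-initialised {} terminators, so the loop condition must test for that sentinel (the original "i < dbuf_slices[i].active_pipes" only terminated by coincidence of the table ordering). A standalone sketch of the sentinel-terminated lookup, with a hypothetical helper name but the same shape as compute_dbuf_slices() after the fix:

static u8 lookup_dbuf_mask(const struct dbuf_slice_conf_entry *table,
			   enum pipe pipe, u8 active_pipes)
{
	int i;

	for (i = 0; table[i].active_pipes != 0; i++) {	/* stop at the {} sentinel */
		if (table[i].active_pipes == active_pipes)
			return table[i].dbuf_mask[pipe];
	}

	return 0;	/* combination not in the table */
}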
@@ -4371,8 +4385,7 @@ static u8 compute_dbuf_slices(enum pipe pipe,
* returns correspondent DBuf slice mask as stated in BSpec for particular
* platform.
*/
-static u32 icl_compute_dbuf_slices(enum pipe pipe,
- u32 active_pipes)
+static u8 icl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes)
{
/*
* FIXME: For ICL this is still a bit unclear as prev BSpec revision
@@ -4386,32 +4399,25 @@ static u32 icl_compute_dbuf_slices(enum pipe pipe,
* still here - we will need it once those additional constraints
* pop up.
*/
- return compute_dbuf_slices(pipe, active_pipes,
- icl_allowed_dbufs,
- ARRAY_SIZE(icl_allowed_dbufs));
+ return compute_dbuf_slices(pipe, active_pipes, icl_allowed_dbufs);
}
-static u32 tgl_compute_dbuf_slices(enum pipe pipe,
- u32 active_pipes)
+static u8 tgl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes)
{
- return compute_dbuf_slices(pipe, active_pipes,
- tgl_allowed_dbufs,
- ARRAY_SIZE(tgl_allowed_dbufs));
+ return compute_dbuf_slices(pipe, active_pipes, tgl_allowed_dbufs);
}
static u8 skl_compute_dbuf_slices(const struct intel_crtc_state *crtc_state,
- u32 active_pipes)
+ u8 active_pipes)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
if (IS_GEN(dev_priv, 12))
- return tgl_compute_dbuf_slices(pipe,
- active_pipes);
+ return tgl_compute_dbuf_slices(pipe, active_pipes);
else if (IS_GEN(dev_priv, 11))
- return icl_compute_dbuf_slices(pipe,
- active_pipes);
+ return icl_compute_dbuf_slices(pipe, active_pipes);
/*
* For anything else just return one slice for now.
* Should be extended for other platforms.
@@ -4470,14 +4476,10 @@ skl_get_total_relative_data_rate(struct intel_crtc_state *crtc_state,
u64 *plane_data_rate,
u64 *uv_plane_data_rate)
{
- struct drm_atomic_state *state = crtc_state->uapi.state;
struct intel_plane *plane;
const struct intel_plane_state *plane_state;
u64 total_data_rate = 0;
- if (WARN_ON(!state))
- return 0;
-
/* Calculate and cache data rate for each plane */
intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) {
enum plane_id plane_id = plane->id;
@@ -4505,9 +4507,6 @@ icl_get_total_relative_data_rate(struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state;
u64 total_data_rate = 0;
- if (WARN_ON(!crtc_state->uapi.state))
- return 0;
-
/* Calculate and cache data rate for each plane */
intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) {
enum plane_id plane_id = plane->id;
@@ -4548,10 +4547,8 @@ icl_get_total_relative_data_rate(struct intel_crtc_state *crtc_state,
static int
skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state)
{
- struct drm_atomic_state *state = crtc_state->uapi.state;
- struct drm_crtc *crtc = crtc_state->uapi.crtc;
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct skl_ddb_entry *alloc = &crtc_state->wm.skl.ddb;
u16 alloc_size, start = 0;
u16 total[I915_MAX_PLANES] = {};
@@ -4568,9 +4565,6 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state)
memset(crtc_state->wm.skl.plane_ddb_y, 0, sizeof(crtc_state->wm.skl.plane_ddb_y));
memset(crtc_state->wm.skl.plane_ddb_uv, 0, sizeof(crtc_state->wm.skl.plane_ddb_uv));
- if (drm_WARN_ON(&dev_priv->drm, !state))
- return 0;
-
if (!crtc_state->hw.active) {
alloc->start = alloc->end = 0;
return 0;
@@ -4609,7 +4603,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state)
*/
for (level = ilk_wm_max_level(dev_priv); level >= 0; level--) {
blocks = 0;
- for_each_plane_id_on_crtc(intel_crtc, plane_id) {
+ for_each_plane_id_on_crtc(crtc, plane_id) {
const struct skl_plane_wm *wm =
&crtc_state->wm.skl.optimal.planes[plane_id];
@@ -4646,7 +4640,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state)
* watermark level, plus an extra share of the leftover blocks
* proportional to its relative data rate.
*/
- for_each_plane_id_on_crtc(intel_crtc, plane_id) {
+ for_each_plane_id_on_crtc(crtc, plane_id) {
const struct skl_plane_wm *wm =
&crtc_state->wm.skl.optimal.planes[plane_id];
u64 rate;
@@ -4685,7 +4679,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state)
/* Set the actual DDB start/end points for each plane */
start = alloc->start;
- for_each_plane_id_on_crtc(intel_crtc, plane_id) {
+ for_each_plane_id_on_crtc(crtc, plane_id) {
struct skl_ddb_entry *plane_alloc =
&crtc_state->wm.skl.plane_ddb_y[plane_id];
struct skl_ddb_entry *uv_plane_alloc =
@@ -4719,7 +4713,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state)
* that aren't actually possible.
*/
for (level++; level <= ilk_wm_max_level(dev_priv); level++) {
- for_each_plane_id_on_crtc(intel_crtc, plane_id) {
+ for_each_plane_id_on_crtc(crtc, plane_id) {
struct skl_plane_wm *wm =
&crtc_state->wm.skl.optimal.planes[plane_id];
@@ -4756,7 +4750,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state)
* Go back and disable the transition watermark if it turns out we
* don't have enough DDB blocks for it.
*/
- for_each_plane_id_on_crtc(intel_crtc, plane_id) {
+ for_each_plane_id_on_crtc(crtc, plane_id) {
struct skl_plane_wm *wm =
&crtc_state->wm.skl.optimal.planes[plane_id];
@@ -5126,21 +5120,30 @@ static void skl_compute_transition_wm(const struct intel_crtc_state *crtc_state,
{
struct drm_device *dev = crtc_state->uapi.crtc->dev;
const struct drm_i915_private *dev_priv = to_i915(dev);
- u16 trans_min, trans_y_tile_min;
- const u16 trans_amount = 10; /* This is configurable amount */
+ u16 trans_min, trans_amount, trans_y_tile_min;
u16 wm0_sel_res_b, trans_offset_b, res_blocks;
- /* Transition WM are not recommended by HW team for GEN9 */
- if (INTEL_GEN(dev_priv) <= 9)
- return;
-
/* Transition WM don't make any sense if ipc is disabled */
if (!dev_priv->ipc_enabled)
return;
- trans_min = 14;
+ /*
+ * WaDisableTWM:skl,kbl,cfl,bxt
+ * Transition WM are not recommended by HW team for GEN9
+ */
+ if (IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv))
+ return;
+
if (INTEL_GEN(dev_priv) >= 11)
trans_min = 4;
+ else
+ trans_min = 14;
+
+ /* Display WA #1140: glk,cnl */
+ if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
+ trans_amount = 0;
+ else
+ trans_amount = 10; /* This is a configurable amount */
trans_offset_b = trans_min + trans_amount;
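Editor's note, plugging numbers into the branches above: gen11+ takes trans_min = 4 with trans_amount = 10, while GLK and CNL take trans_min = 14 with trans_amount = 0 under Display WA #1140, so every platform that reaches this point currently computes trans_offset_b = 14 blocks.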
@@ -5167,7 +5170,6 @@ static void skl_compute_transition_wm(const struct intel_crtc_state *crtc_state,
/* WA BUG:1938466 add one block for non y-tile planes */
if (IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_A0))
res_blocks += 1;
-
}
/*
@@ -5410,8 +5412,12 @@ static bool skl_plane_wm_equals(struct drm_i915_private *dev_priv,
int level, max_level = ilk_wm_max_level(dev_priv);
for (level = 0; level <= max_level; level++) {
- if (!skl_wm_level_equals(&wm1->wm[level], &wm2->wm[level]) ||
- !skl_wm_level_equals(&wm1->uv_wm[level], &wm2->uv_wm[level]))
+ /*
+ * We don't check uv_wm as the hardware doesn't actually
+ * use it. It only gets used for calculating the required
+ * ddb allocation.
+ */
+ if (!skl_wm_level_equals(&wm1->wm[level], &wm2->wm[level]))
return false;
}
@@ -5768,16 +5774,24 @@ skl_compute_wm(struct intel_atomic_state *state)
ret = skl_build_pipe_wm(new_crtc_state);
if (ret)
return ret;
-
- ret = skl_wm_add_affected_planes(state, crtc);
- if (ret)
- return ret;
}
ret = skl_compute_ddb(state);
if (ret)
return ret;
+ /*
+ * skl_compute_ddb() will have adjusted the final watermarks
+ * based on how much ddb is available. Now we can actually
+ * check if the final watermarks changed.
+ */
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ ret = skl_wm_add_affected_planes(state, crtc);
+ if (ret)
+ return ret;
+ }
+
skl_print_wm_changes(state);
return 0;
@@ -6812,21 +6826,6 @@ static void icl_init_clock_gating(struct drm_i915_private *dev_priv)
I915_WRITE(GEN10_DFR_RATIO_EN_AND_CHICKEN,
I915_READ(GEN10_DFR_RATIO_EN_AND_CHICKEN) & ~DFR_DISABLE);
- /* WaEnable32PlaneMode:icl */
- I915_WRITE(GEN9_CSFE_CHICKEN1_RCS,
- _MASKED_BIT_ENABLE(GEN11_ENABLE_32_PLANE_MODE));
-
- /*
- * Wa_1408615072:icl,ehl (vsunit)
- * Wa_1407596294:icl,ehl (hsunit)
- */
- intel_uncore_rmw(&dev_priv->uncore, UNSLICE_UNIT_LEVEL_CLKGATE,
- 0, VSUNIT_CLKGATE_DIS | HSUNIT_CLKGATE_DIS);
-
- /* Wa_1407352427:icl,ehl */
- intel_uncore_rmw(&dev_priv->uncore, UNSLICE_UNIT_LEVEL_CLKGATE2,
- 0, PSDUNIT_CLKGATE_DIS);
-
/*Wa_14010594013:icl, ehl */
intel_uncore_rmw(&dev_priv->uncore, GEN8_CHICKEN_DCPR_1,
0, CNL_DELAY_PMRSP);
@@ -6837,10 +6836,6 @@ static void tgl_init_clock_gating(struct drm_i915_private *dev_priv)
u32 vd_pg_enable = 0;
unsigned int i;
- /* Wa_1408615072:tgl */
- intel_uncore_rmw(&dev_priv->uncore, UNSLICE_UNIT_LEVEL_CLKGATE2,
- 0, VSUNIT_CLKGATE_DIS_TGL);
-
/* This is not a WA. Enable VD HCP & MFX_ENC powergate */
for (i = 0; i < I915_MAX_VCS; i++) {
if (HAS_ENGINE(dev_priv, _VCS(i)))
diff --git a/drivers/gpu/drm/i915/selftests/i915_active.c b/drivers/gpu/drm/i915/selftests/i915_active.c
index ef572a0c2566..68bbb1580162 100644
--- a/drivers/gpu/drm/i915/selftests/i915_active.c
+++ b/drivers/gpu/drm/i915/selftests/i915_active.c
@@ -201,11 +201,57 @@ static int live_active_retire(void *arg)
return err;
}
+static int live_active_barrier(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct intel_engine_cs *engine;
+ struct live_active *active;
+ int err = 0;
+
+ /* Check that we get a callback when requests retire upon waiting */
+
+ active = __live_alloc(i915);
+ if (!active)
+ return -ENOMEM;
+
+ err = i915_active_acquire(&active->base);
+ if (err)
+ goto out;
+
+ for_each_uabi_engine(engine, i915) {
+ err = i915_active_acquire_preallocate_barrier(&active->base,
+ engine);
+ if (err)
+ break;
+
+ i915_active_acquire_barrier(&active->base);
+ }
+
+ i915_active_release(&active->base);
+
+ if (err == 0)
+ err = i915_active_wait(&active->base);
+
+ if (err == 0 && !READ_ONCE(active->retired)) {
+ pr_err("i915_active not retired after flushing barriers!\n");
+ err = -EINVAL;
+ }
+
+out:
+ __live_put(active);
+
+ if (igt_flush_test(i915))
+ err = -EIO;
+
+ return err;
+}
+
int i915_active_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(live_active_wait),
SUBTEST(live_active_retire),
+ SUBTEST(live_active_barrier),
};
if (intel_gt_is_wedged(&i915->gt))
@@ -265,28 +311,40 @@ static void spin_unlock_wait(spinlock_t *lock)
spin_unlock_irq(lock);
}
+static void active_flush(struct i915_active *ref,
+ struct i915_active_fence *active)
+{
+ struct dma_fence *fence;
+
+ fence = xchg(__active_fence_slot(active), NULL);
+ if (!fence)
+ return;
+
+ spin_lock_irq(fence->lock);
+ __list_del_entry(&active->cb.node);
+ spin_unlock_irq(fence->lock); /* serialise with fence->cb_list */
+ atomic_dec(&ref->count);
+
+ GEM_BUG_ON(!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags));
+}
+
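Editor's note: active_flush() above uses the claim-by-xchg idiom: whichever path swaps the fence pointer to NULL first owns the cleanup, so the list removal and refcount drop happen exactly once. In miniature (illustrative name):

static struct dma_fence *claim_fence(struct dma_fence **slot)
{
	/* Atomically take ownership; NULL means another path already
	 * claimed the fence (or it was never set). */
	return xchg(slot, NULL);
}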
void i915_active_unlock_wait(struct i915_active *ref)
{
if (i915_active_acquire_if_busy(ref)) {
struct active_node *it, *n;
+ /* Wait for all active callbacks */
rcu_read_lock();
- rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
- struct dma_fence *f;
-
- /* Wait for all active callbacks */
- f = rcu_dereference(it->base.fence);
- if (f)
- spin_unlock_wait(f->lock);
- }
+ active_flush(ref, &ref->excl);
+ rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node)
+ active_flush(ref, &it->base);
rcu_read_unlock();
i915_active_release(ref);
}
/* And wait for the retire callback */
- spin_lock_irq(&ref->tree_lock);
- spin_unlock_irq(&ref->tree_lock);
+ spin_unlock_wait(&ref->tree_lock);
/* ... which may have been on a thread instead */
flush_work(&ref->work);
diff --git a/drivers/gpu/drm/i915/selftests/i915_buddy.c b/drivers/gpu/drm/i915/selftests/i915_buddy.c
index 1b856bae67b5..939a6caebb03 100644
--- a/drivers/gpu/drm/i915/selftests/i915_buddy.c
+++ b/drivers/gpu/drm/i915/selftests/i915_buddy.c
@@ -298,10 +298,12 @@ static void igt_mm_config(u64 *size, u64 *chunk_size)
static int igt_buddy_alloc_smoke(void *arg)
{
struct i915_buddy_mm mm;
- int max_order;
+ IGT_TIMEOUT(end_time);
+ I915_RND_STATE(prng);
u64 chunk_size;
u64 mm_size;
- int err;
+ int *order;
+ int err, i;
igt_mm_config(&mm_size, &chunk_size);
@@ -313,10 +315,16 @@ static int igt_buddy_alloc_smoke(void *arg)
return err;
}
- for (max_order = mm.max_order; max_order >= 0; max_order--) {
+ order = i915_random_order(mm.max_order + 1, &prng);
+ if (!order)
+ goto out_fini;
+
+ for (i = 0; i <= mm.max_order; ++i) {
struct i915_buddy_block *block;
- int order;
+ int max_order = order[i];
+ bool timeout = false;
LIST_HEAD(blocks);
+ int order;
u64 total;
err = igt_check_mm(&mm);
@@ -360,6 +368,11 @@ retry:
}
total += i915_buddy_block_size(&mm, block);
+
+ if (__igt_timeout(end_time, NULL)) {
+ timeout = true;
+ break;
+ }
} while (total < mm.size);
if (!err)
@@ -373,7 +386,7 @@ retry:
pr_err("post-mm check failed\n");
}
- if (err)
+ if (err || timeout)
break;
cond_resched();
@@ -382,6 +395,8 @@ retry:
if (err == -ENOMEM)
err = 0;
+ kfree(order);
+out_fini:
i915_buddy_fini(&mm);
return err;
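
The loop above now visits every order exactly once, in a random permutation, and stops when the selftest time budget expires rather than treating that as a failure. A rough sketch of that shape, with exercise_one_order() as a hypothetical stand-in for the allocation body and a jiffies deadline standing in for IGT_TIMEOUT:

unsigned long end = jiffies + HZ;	/* stand-in for IGT_TIMEOUT */
int *order, i, err = 0;

order = i915_random_order(max_order + 1, &prng);	/* kmalloc'ed permutation */
if (!order)
	return -ENOMEM;

for (i = 0; i <= max_order; i++) {
	err = exercise_one_order(order[i]);	/* hypothetical per-order body */
	if (err)
		break;
	if (time_after(jiffies, end))
		break;	/* budget spent: stop early, do not report failure */
}

kfree(order);
return err;
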
diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
index 34138c7bdd15..0a953bfc0585 100644
--- a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
@@ -43,6 +43,7 @@ selftest(reset, intel_reset_live_selftests)
selftest(memory_region, intel_memory_region_live_selftests)
selftest(hangcheck, intel_hangcheck_live_selftests)
selftest(execlists, intel_execlists_live_selftests)
+selftest(ring_submission, intel_ring_submission_live_selftests)
selftest(perf, i915_perf_live_selftests)
/* Here be dragons: keep last to run last! */
selftest(late_gt_pm, intel_gt_pm_late_selftests)
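
For context, this header is an X-macro list: it is included multiple times with different definitions of selftest() to stamp out both identifiers and the dispatch table, so a one-line addition here is all a new suite needs. A simplified sketch of that pattern (the struct layout is illustrative, not the driver's exact one):

#define selftest(name, func) live_##name,
enum {
#include "i915_live_selftests.h"
};
#undef selftest

#define selftest(name, func) { .name = #name, .live = func },
static const struct {
	const char *name;
	int (*live)(struct drm_i915_private *i915);
} live_selftests[] = {
#include "i915_live_selftests.h"
};
#undef selftest
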
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c
index e8a58fe49c39..9ad4ab088466 100644
--- a/drivers/gpu/drm/i915/selftests/igt_spinner.c
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c
@@ -183,7 +183,7 @@ igt_spinner_create_request(struct igt_spinner *spin,
cancel_rq:
if (err) {
- i915_request_skip(rq, err);
+ i915_request_set_error_once(rq, err);
i915_request_add(rq);
}
unpin_hws:
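
i915_request_skip() is replaced here by i915_request_set_error_once(), so only the first error recorded against the request's fence sticks and a later cancellation cannot overwrite the original failure reason. A plausible sketch of the set-once semantics (not the driver's exact implementation), using a cmpxchg loop:

static void set_error_once(int *slot, int error)
{
	int old = READ_ONCE(*slot);

	do {
		if (old)	/* an error is already recorded; keep it */
			return;
	} while (!try_cmpxchg(slot, &old, error));
}
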
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 3b8986983afc..754d0eb6beaa 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -144,7 +144,6 @@ struct drm_i915_private *mock_gem_device(void)
goto put_device;
}
i915->drm.pdev = pdev;
- i915->drm.dev_private = i915;
intel_runtime_pm_init_early(&i915->runtime_pm);
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index b8f6bac6341c..c6119e4c169a 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -1498,13 +1498,16 @@ struct drm_dp_desc {
int drm_dp_read_desc(struct drm_dp_aux *aux, struct drm_dp_desc *desc,
bool is_branch);
+u32 drm_dp_get_edid_quirks(const struct edid *edid);
/**
* enum drm_dp_quirk - Display Port sink/branch device specific quirks
*
 * Display Port sink and branch devices in the wild have a variety of bugs; try
 * to collect them here. The quirks are shared, but it's up to the drivers to
- * implement workarounds for them.
+ * implement workarounds for them. Note that because some devices have
+ * unreliable OUIs, the EDID of sinks should also be checked for quirks using
+ * drm_dp_get_edid_quirks().
*/
enum drm_dp_quirk {
/**
@@ -1535,19 +1538,31 @@ enum drm_dp_quirk {
* The DSC caps can be read from the physical aux instead.
*/
DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD,
+ /**
+ * @DP_QUIRK_FORCE_DPCD_BACKLIGHT:
+ *
+ * The device is telling the truth when it says that it uses DPCD
+ * backlight controls, even if the system's firmware disagrees. This
+ * quirk should be checked against both the ident and panel EDID.
+ * When present, the driver should honor the DPCD backlight
+ * capabilities advertised.
+ */
+ DP_QUIRK_FORCE_DPCD_BACKLIGHT,
};
/**
* drm_dp_has_quirk() - does the DP device have a specific quirk
 * @desc: Device descriptor filled by drm_dp_read_desc()
+ * @edid_quirks: Optional quirk bitmask filled by drm_dp_get_edid_quirks()
* @quirk: Quirk to query for
*
* Return true if DP device identified by @desc has @quirk.
*/
static inline bool
-drm_dp_has_quirk(const struct drm_dp_desc *desc, enum drm_dp_quirk quirk)
+drm_dp_has_quirk(const struct drm_dp_desc *desc, u32 edid_quirks,
+ enum drm_dp_quirk quirk)
{
- return desc->quirks & BIT(quirk);
+ return (desc->quirks | edid_quirks) & BIT(quirk);
}
#ifdef CONFIG_DRM_DP_CEC
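
With the extra parameter, the effective quirk set is the union of the OUI-based descriptor quirks and the EDID-derived quirks, so callers fetch both up front. A sketch of a caller after this change (variable names are illustrative):

struct drm_dp_desc desc;
u32 edid_quirks;

drm_dp_read_desc(aux, &desc, drm_dp_is_branch(dpcd));
edid_quirks = drm_dp_get_edid_quirks(edid);

if (drm_dp_has_quirk(&desc, edid_quirks, DP_QUIRK_FORCE_DPCD_BACKLIGHT))
	/* trust the panel's DPCD backlight caps over VBT/ACPI */
	use_dpcd_backlight = true;
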
diff --git a/include/drm/i915_mei_hdcp_interface.h b/include/drm/i915_mei_hdcp_interface.h
index 4d48de8890ca..702f613243bb 100644
--- a/include/drm/i915_mei_hdcp_interface.h
+++ b/include/drm/i915_mei_hdcp_interface.h
@@ -12,7 +12,6 @@
#include <linux/mutex.h>
#include <linux/device.h>
#include <drm/drm_hdcp.h>
-#include <drm/i915_drm.h>
/**
* enum hdcp_port_type - HDCP port implementation type defined by ME FW
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 829c0a48577f..2813e579b480 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -1619,6 +1619,27 @@ struct drm_i915_gem_context_param {
* By default, new contexts allow persistence.
*/
#define I915_CONTEXT_PARAM_PERSISTENCE 0xb
+
+/*
+ * I915_CONTEXT_PARAM_RINGSIZE:
+ *
+ * Sets the size of the CS ringbuffer to use for logical ring contexts. This
+ * limits how many batches can be queued to HW before the caller is blocked
+ * due to lack of space for more commands.
+ *
+ * This can only reliably be set prior to first use, i.e. during
+ * construction. At any later point, the current execution must be flushed as
+ * the ring can only be changed while the context is idle. Note that the
+ * ringsize can be specified as a constructor property (see
+ * I915_CONTEXT_CREATE_EXT_SETPARAM), but it can also be set later if required.
+ *
+ * Only applies to the current set of engines and is lost when those engines
+ * are replaced by a new mapping (see I915_CONTEXT_PARAM_ENGINES).
+ *
+ * Must be between 4 and 512 KiB, in multiples of the page size (4 KiB).
+ * The default is 16 KiB.
+ */
+#define I915_CONTEXT_PARAM_RINGSIZE 0xc
/* Must be kept compact -- no holes and well documented */
__u64 value;
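
A hypothetical userspace sketch of setting the new parameter on a freshly created context, before any submission, per the constraints documented above (drmIoctl() is the usual libdrm wrapper):

struct drm_i915_gem_context_param p = {
	.ctx_id = ctx_id,	/* from DRM_IOCTL_I915_GEM_CONTEXT_CREATE */
	.param = I915_CONTEXT_PARAM_RINGSIZE,
	.value = 64 * 1024,	/* page-sized multiple within [4 KiB, 512 KiB] */
};

if (drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p))
	perror("SETPARAM(RINGSIZE)");	/* kernels without this param reject it */
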