Diffstat (limited to 'drivers/gpu/drm/i915/intel_guc.c')
 drivers/gpu/drm/i915/intel_guc.c | 152 +++++++++++++++++++++++++++++++------
 1 file changed, 137 insertions(+), 15 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_guc.c b/drivers/gpu/drm/i915/intel_guc.c
index 10037c0fdf95..3c6bf5a34c3c 100644
--- a/drivers/gpu/drm/i915/intel_guc.c
+++ b/drivers/gpu/drm/i915/intel_guc.c
@@ -23,6 +23,7 @@
*/
#include "intel_guc.h"
+#include "intel_guc_submission.h"
#include "i915_drv.h"
static void gen8_guc_raise_irq(struct intel_guc *guc)
@@ -60,6 +61,7 @@ void intel_guc_init_send_regs(struct intel_guc *guc)
void intel_guc_init_early(struct intel_guc *guc)
{
+ intel_guc_fw_init_early(guc);
intel_guc_ct_init_early(&guc->ct);
mutex_init(&guc->send_mutex);
@@ -67,6 +69,114 @@ void intel_guc_init_early(struct intel_guc *guc)
guc->notify = gen8_guc_raise_irq;
}
+int intel_guc_init_wq(struct intel_guc *guc)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+
+ /*
+ * The GuC log buffer flush work item has to do register access to
+ * send the ack to GuC, and this work item, if not synced before
+ * suspend, can potentially get executed after the GFX device is
+ * suspended.
+ * By marking the WQ as freezable, we don't have to bother with
+ * flushing this work item from the suspend hooks; any pending
+ * work item will either be executed before the suspend or
+ * scheduled later on resume. This way the handling of the work
+ * item can be kept the same between system suspend & rpm suspend.
+ */
+ guc->log.runtime.flush_wq = alloc_ordered_workqueue("i915-guc_log",
+ WQ_HIGHPRI | WQ_FREEZABLE);
+ if (!guc->log.runtime.flush_wq)
+ return -ENOMEM;
+
+ /*
+ * Even though both sending a GuC action and adding a new workitem to
+ * the GuC workqueue are serialized (each with its own locking), since
+ * we're using multiple engines, it's possible that we're going to
+ * issue a preempt request with two (or more - each for a different
+ * engine) workitems in the GuC queue. In this situation, GuC may
+ * submit all of them, which will make us very confused.
+ * Our preemption contexts may even already be complete - before we
+ * even had the chance to send the preempt action to GuC! Rather
+ * than introducing yet another lock, we can just use an ordered
+ * workqueue to make sure we're always sending a single preemption
+ * request with a single workitem.
+ */
+ if (HAS_LOGICAL_RING_PREEMPTION(dev_priv) &&
+ USES_GUC_SUBMISSION(dev_priv)) {
+ guc->preempt_wq = alloc_ordered_workqueue("i915-guc_preempt",
+ WQ_HIGHPRI);
+ if (!guc->preempt_wq) {
+ destroy_workqueue(guc->log.runtime.flush_wq);
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
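
The ordered workqueue allocated above is what enforces "one preemption request in flight" without an extra lock: an ordered workqueue has max_active == 1, so work items run strictly one after another. A minimal sketch of how a per-engine preemption request might be funnelled through guc->preempt_wq; the guc_preempt_work structure and both function names here are illustrative, not the exact symbols used by the submission code:

#include <linux/workqueue.h>

/* Illustrative only: names do not match the real submission code. */
struct guc_preempt_work {
        struct work_struct work;
        struct intel_guc *guc;
        struct intel_engine_cs *engine;
};

static void inject_preempt_context(struct work_struct *work)
{
        struct guc_preempt_work *preempt =
                container_of(work, typeof(*preempt), work);

        /* Build and send the preemption request for preempt->engine. */
}

static void queue_preempt_request(struct guc_preempt_work *preempt)
{
        INIT_WORK(&preempt->work, inject_preempt_context);

        /*
         * Because preempt_wq is ordered (max_active == 1), work items
         * execute strictly one at a time, so GuC only ever sees a
         * single outstanding preemption request.
         */
        queue_work(preempt->guc->preempt_wq, &preempt->work);
}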
+
+void intel_guc_fini_wq(struct intel_guc *guc)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+
+ if (HAS_LOGICAL_RING_PREEMPTION(dev_priv) &&
+ USES_GUC_SUBMISSION(dev_priv))
+ destroy_workqueue(guc->preempt_wq);
+
+ destroy_workqueue(guc->log.runtime.flush_wq);
+}
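
On the log side, WQ_FREEZABLE is what lets the suspend hooks ignore the flush work entirely: the freezer either runs a pending item before suspend or defers it until resume. A rough sketch of the producer side of that queue, with hypothetical function names (the real work item lives in the GuC log code):

#include <linux/workqueue.h>

/* Hypothetical work item; names are illustrative. */
static void guc_log_flush_work_fn(struct work_struct *work)
{
        /* Capture the log buffer and ack GuC with a register write. */
}

static DECLARE_WORK(guc_log_flush_work, guc_log_flush_work_fn);

static void guc_log_flush_irq_handler(struct intel_guc *guc)
{
        /*
         * WQ_FREEZABLE means this either runs before the freezer
         * kicks in or is deferred until resume - never against a
         * suspended device.
         */
        queue_work(guc->log.runtime.flush_wq, &guc_log_flush_work);
}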
+
+static int guc_shared_data_create(struct intel_guc *guc)
+{
+ struct i915_vma *vma;
+ void *vaddr;
+
+ vma = intel_guc_allocate_vma(guc, PAGE_SIZE);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
+ if (IS_ERR(vaddr)) {
+ i915_vma_unpin_and_release(&vma);
+ return PTR_ERR(vaddr);
+ }
+
+ guc->shared_data = vma;
+ guc->shared_data_vaddr = vaddr;
+
+ return 0;
+}
+
+static void guc_shared_data_destroy(struct intel_guc *guc)
+{
+ i915_gem_object_unpin_map(guc->shared_data->obj);
+ i915_vma_unpin_and_release(&guc->shared_data);
+}
+
+int intel_guc_init(struct intel_guc *guc)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ int ret;
+
+ ret = guc_shared_data_create(guc);
+ if (ret)
+ return ret;
+ GEM_BUG_ON(!guc->shared_data);
+
+ /* We need to notify the guc whenever we change the GGTT */
+ i915_ggtt_enable_guc(dev_priv);
+
+ return 0;
+}
+
+void intel_guc_fini(struct intel_guc *guc)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+
+ i915_ggtt_disable_guc(dev_priv);
+ guc_shared_data_destroy(guc);
+}
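
intel_guc_init()/intel_guc_fini() now own a dedicated shared page, replacing the old trick of pointing GuC at the first page of the kernel context image (see the suspend/resume hunks below). A sketch of the two views of that page; the dword layout shown is hypothetical:

/*
 * Sketch only: the CPU reaches the page through the pinned WB
 * mapping, while GuC is handed its GGTT offset.
 */
static void shared_data_example(struct intel_guc *guc)
{
        u32 *vaddr = guc->shared_data_vaddr;
        u32 ggtt_offset = guc_ggtt_offset(guc->shared_data);

        vaddr[0] = 0;   /* CPU-side write through the kmap */

        /*
         * GuC-side: the same page is addressed by ggtt_offset, e.g.
         * as data[2] of the ENTER/EXIT_S_STATE actions below.
         */
        (void)ggtt_offset;
}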
+
static u32 get_gt_type(struct drm_i915_private *dev_priv)
{
/* XXX: GT type based on PCI device ID? field seems unused by fw */
@@ -127,7 +237,7 @@ void intel_guc_init_params(struct intel_guc *guc)
}
/* If GuC submission is enabled, set up additional parameters here */
- if (i915_modparams.enable_guc_submission) {
+ if (USES_GUC_SUBMISSION(dev_priv)) {
u32 ads = guc_ggtt_offset(guc->ads_vma) >> PAGE_SHIFT;
u32 pgs = guc_ggtt_offset(dev_priv->guc.stage_desc_pool);
u32 ctx_in_16 = GUC_MAX_STAGE_DESCRIPTORS / 16;
@@ -230,8 +340,7 @@ int intel_guc_sample_forcewake(struct intel_guc *guc)
action[0] = INTEL_GUC_ACTION_SAMPLE_FORCEWAKE;
/* WaRsDisableCoarsePowerGating:skl,bxt */
- if (!intel_rc6_enabled() ||
- NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
+ if (!HAS_RC6(dev_priv) || NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
action[1] = 0;
else
/* bit 0 and 1 are for Render and Media domain separately */
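
Every host-to-GuC request in this file follows the same shape: data[0] carries the action opcode and the remaining dwords are action-specific arguments. Mirroring the two-dword SAMPLE_FORCEWAKE request above:

/* Shape of a host-to-GuC action, following the code above. */
static int sample_forcewake_example(struct intel_guc *guc, u32 domains)
{
        u32 action[2];

        action[0] = INTEL_GUC_ACTION_SAMPLE_FORCEWAKE;  /* opcode */
        action[1] = domains;    /* bit 0: render, bit 1: media */

        return intel_guc_send(guc, action, ARRAY_SIZE(action));
}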
@@ -268,7 +377,6 @@ int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset)
int intel_guc_suspend(struct drm_i915_private *dev_priv)
{
struct intel_guc *guc = &dev_priv->guc;
- struct i915_gem_context *ctx;
u32 data[3];
if (guc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS)
@@ -276,14 +384,33 @@ int intel_guc_suspend(struct drm_i915_private *dev_priv)
gen9_disable_guc_interrupts(dev_priv);
- ctx = dev_priv->kernel_context;
-
data[0] = INTEL_GUC_ACTION_ENTER_S_STATE;
/* any value greater than GUC_POWER_D0 */
data[1] = GUC_POWER_D1;
- /* first page is shared data with GuC */
- data[2] = guc_ggtt_offset(ctx->engine[RCS].state) +
- LRC_GUCSHR_PN * PAGE_SIZE;
+ data[2] = guc_ggtt_offset(guc->shared_data);
+
+ return intel_guc_send(guc, data, ARRAY_SIZE(data));
+}
+
+/**
+ * intel_guc_reset_engine() - ask GuC to reset an engine
+ * @guc: intel_guc structure
+ * @engine: engine to be reset
+ */
+int intel_guc_reset_engine(struct intel_guc *guc,
+ struct intel_engine_cs *engine)
+{
+ u32 data[7];
+
+ GEM_BUG_ON(!guc->execbuf_client);
+
+ data[0] = INTEL_GUC_ACTION_REQUEST_ENGINE_RESET;
+ data[1] = engine->guc_id;
+ data[2] = 0;
+ data[3] = 0;
+ data[4] = 0;
+ data[5] = guc->execbuf_client->stage_id;
+ data[6] = guc_ggtt_offset(guc->shared_data);
return intel_guc_send(guc, data, ARRAY_SIZE(data));
}
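
A hypothetical caller sketch for the new per-engine reset action; the actual reset plumbing that decides between a GuC-based and a direct engine reset is elided, and the guard and error code here are assumptions:

/* Illustrative only: guards before asking GuC to reset one engine. */
static int guc_reset_engine_example(struct intel_engine_cs *engine)
{
        struct drm_i915_private *dev_priv = engine->i915;
        struct intel_guc *guc = &dev_priv->guc;

        if (!USES_GUC_SUBMISSION(dev_priv))
                return -ENODEV;

        return intel_guc_reset_engine(guc, engine);
}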
@@ -295,7 +422,6 @@ int intel_guc_suspend(struct drm_i915_private *dev_priv)
int intel_guc_resume(struct drm_i915_private *dev_priv)
{
struct intel_guc *guc = &dev_priv->guc;
- struct i915_gem_context *ctx;
u32 data[3];
if (guc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS)
@@ -304,13 +430,9 @@ int intel_guc_resume(struct drm_i915_private *dev_priv)
if (i915_modparams.guc_log_level >= 0)
gen9_enable_guc_interrupts(dev_priv);
- ctx = dev_priv->kernel_context;
-
data[0] = INTEL_GUC_ACTION_EXIT_S_STATE;
data[1] = GUC_POWER_D0;
- /* first page is shared data with GuC */
- data[2] = guc_ggtt_offset(ctx->engine[RCS].state) +
- LRC_GUCSHR_PN * PAGE_SIZE;
+ data[2] = guc_ggtt_offset(guc->shared_data);
return intel_guc_send(guc, data, ARRAY_SIZE(data));
}