author		Rodrigo Vivi <rodrigo.vivi@intel.com>	2019-09-06 09:52:43 -0700
committer	Rodrigo Vivi <rodrigo.vivi@intel.com>	2019-09-06 09:52:44 -0700
commit		0ac072cced08390b9b31fe3c56c364c3ef2f6d6a
tree		817268a6d1cc3ad071a753461aff873864ae5a1c /drivers/gpu/drm/i915/gvt
parent		578d2342ec702e5fb8a77983fabb3754ae3e9660
parent		4a5322560aa235efa84c0aa34c00e5749a0792fd
Merge tag 'gvt-next-fixes-2019-09-06' of https://github.com/intel/gvt-linux into drm-intel-next-fixes
gvt-next-fixes-2019-09-06
- Fix guest context head pointer update for hang (Xiaolin)
- Fix guest context ring state for reset (Weinan)
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
From: Zhenyu Wang <zhenyuw@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190906054255.GC3458@zhen-hp.sh.intel.com
Diffstat (limited to 'drivers/gpu/drm/i915/gvt')
-rw-r--r--	drivers/gpu/drm/i915/gvt/scheduler.c	45
1 file changed, 32 insertions(+), 13 deletions(-)
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 1a28e3666951..37f502ca95ad 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -568,6 +568,16 @@ static int prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 	return 0;
 }
 
+static void update_vreg_in_ctx(struct intel_vgpu_workload *workload)
+{
+	struct intel_vgpu *vgpu = workload->vgpu;
+	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	u32 ring_base;
+
+	ring_base = dev_priv->engine[workload->ring_id]->mmio_base;
+	vgpu_vreg_t(vgpu, RING_START(ring_base)) = workload->rb_start;
+}
+
 static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
 {
 	struct intel_vgpu *vgpu = workload->vgpu;
@@ -1016,6 +1026,13 @@ static int workload_thread(void *priv)
 		if (need_force_wake)
 			intel_uncore_forcewake_get(&gvt->dev_priv->uncore,
 					FORCEWAKE_ALL);
+		/*
+		 * Update the vReg of the vGPU which submitted this
+		 * workload. The vGPU may use these registers for checking
+		 * the context state. The value comes from GPU commands
+		 * in this workload.
+		 */
+		update_vreg_in_ctx(workload);
 
 		ret = dispatch_workload(workload);
 
@@ -1438,9 +1455,6 @@ static int prepare_mm(struct intel_vgpu_workload *workload)
 #define same_context(a, b) (((a)->context_id == (b)->context_id) && \
 		((a)->lrca == (b)->lrca))
 
-#define get_last_workload(q) \
-	(list_empty(q) ? NULL : container_of(q->prev, \
-	struct intel_vgpu_workload, list))
 /**
  * intel_vgpu_create_workload - create a vGPU workload
  * @vgpu: a vGPU
@@ -1460,7 +1474,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
 {
 	struct intel_vgpu_submission *s = &vgpu->submission;
 	struct list_head *q = workload_q_head(vgpu, ring_id);
-	struct intel_vgpu_workload *last_workload = get_last_workload(q);
+	struct intel_vgpu_workload *last_workload = NULL;
 	struct intel_vgpu_workload *workload = NULL;
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 	u64 ring_context_gpa;
@@ -1486,15 +1500,20 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
 	head &= RB_HEAD_OFF_MASK;
 	tail &= RB_TAIL_OFF_MASK;
 
-	if (last_workload && same_context(&last_workload->ctx_desc, desc)) {
-		gvt_dbg_el("ring id %d cur workload == last\n", ring_id);
-		gvt_dbg_el("ctx head %x real head %lx\n", head,
-				last_workload->rb_tail);
-		/*
-		 * cannot use guest context head pointer here,
-		 * as it might not be updated at this time
-		 */
-		head = last_workload->rb_tail;
+	list_for_each_entry_reverse(last_workload, q, list) {
+
+		if (same_context(&last_workload->ctx_desc, desc)) {
+			gvt_dbg_el("ring id %d cur workload == last\n",
+				   ring_id);
+			gvt_dbg_el("ctx head %x real head %lx\n", head,
+				   last_workload->rb_tail);
+			/*
+			 * cannot use guest context head pointer here,
+			 * as it might not be updated at this time
+			 */
+			head = last_workload->rb_tail;
+			break;
+		}
 	}
 
 	gvt_dbg_el("ring id %d begin a new workload\n", ring_id);
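
For readers less familiar with the kernel's intrusive list API, below is a minimal, self-contained userspace sketch of the pattern the head-pointer fix adopts: instead of unconditionally taking the queue's tail entry (the removed get_last_workload() macro), the lookup walks the queue newest-first and resumes from the rb_tail of the most recent workload that belongs to the same context. Every type and helper here is a simplified stand-in written for illustration, not the kernel's real definitions.

/* Minimal userspace model of the reverse queue scan above. */
#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *prev, *next; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Walk backwards (tail to head), i.e. newest submission first. */
#define list_for_each_entry_reverse(pos, head, member)			\
	for (pos = container_of((head)->prev, typeof(*pos), member);	\
	     &pos->member != (head);					\
	     pos = container_of(pos->member.prev, typeof(*pos), member))

struct workload {
	unsigned int context_id;	/* stand-in for ctx_desc */
	unsigned long rb_tail;		/* ring tail recorded at submit */
	struct list_head list;
};

static void list_add_tail(struct list_head *n, struct list_head *head)
{
	n->prev = head->prev;
	n->next = head;
	head->prev->next = n;
	head->prev = n;
}

int main(void)
{
	struct list_head q = { &q, &q };
	struct workload a = { .context_id = 1, .rb_tail = 0x100 };
	struct workload b = { .context_id = 2, .rb_tail = 0x200 };
	struct workload c = { .context_id = 1, .rb_tail = 0x300 };
	struct workload *pos;
	unsigned long head = 0;	/* guest-reported head, possibly stale */

	list_add_tail(&a.list, &q);
	list_add_tail(&b.list, &q);
	list_add_tail(&c.list, &q);

	/*
	 * Find the newest queued workload of context 1; its rb_tail is
	 * the trustworthy head for the next workload of that context.
	 */
	list_for_each_entry_reverse(pos, &q, list) {
		if (pos->context_id == 1) {
			head = pos->rb_tail;
			break;
		}
	}
	printf("resumed head = 0x%lx\n", head);	/* prints 0x300, from c */
	return 0;
}

Compared with the old tail-only macro, the reverse walk still finds the newest matching workload first, but it no longer misses a match when workloads from several contexts are interleaved on the same ring queue, which is the situation that left the guest context head pointer stale and caused the hang.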