path: root/drivers/gpu/drm/i915/i915_gem_context.c
author     Ben Widawsky <benjamin.widawsky@intel.com>   2015-03-19 12:53:28 +0000
committer  Daniel Vetter <daniel.vetter@ffwll.ch>       2015-03-20 11:48:18 +0100
commit     563222a745012cc2bb20c5d5cbff1c1ec4832c05 (patch)
tree       25b4d8d86ef196ffd2f209b3c2171abef0aa4076 /drivers/gpu/drm/i915/i915_gem_context.c
parent     678d96fbb3b5995a2fdff2bca5e1ab4a40b7e968 (diff)
drm/i915: Track page table reload need
This patch was formerly known as "Force pd restore when PDEs change, gen6-7." I had to change the name because it is needed for GEN8 too.

The real issue this is trying to solve is when a new object is mapped into the current address space. The GPU does not snoop the new mapping, so we must do the gen specific action to reload the page tables.

GEN8 and GEN7 do differ in the way they load page tables for the RCS. GEN8 does so with the context restore, while GEN7 requires the proper load commands in the command streamer. Non-render is similar for both.

Caveat for GEN7:
The docs say you cannot change the PDEs of a currently running context. We never map new PDEs of a running context, and expect them to be present - so I think this is okay. (We can unmap, but this should also be okay since we only unmap unreferenced objects that the GPU shouldn't be trying to va->pa xlate.) The MI_SET_CONTEXT command does have a flag to signal that even if the context is the same, force a reload. It's unclear exactly what this does, but I have a hunch it's the right thing to do.

The logic assumes that we always emit a context switch after mapping new PDEs, and before we submit a batch. This is the case today, and has been the case since the inception of hardware contexts. A note in the comment lets the user know.

It's not just for gen8: if the current context's mappings change, we need a context reload on the next switch.

v2: Rebased after ppgtt clean up patches. Split the warning for aliasing and true ppgtt options. And do not break aliasing ppgtt, where to->ppgtt is always null.

v3: Invalidate PPGTT TLBs inside alloc_va_range.

v4: Rename ppgtt_invalidate_tlbs to mark_tlbs_dirty and move pd_dirty_rings from i915_address_space to i915_hw_ppgtt. Fixes the case when neither ctx->ppgtt nor aliasing_ppgtt exists.

v5: Removed references to teardown_va_range.

v6: Updated needs_pd_load_pre/post.

v7: Fix pd_dirty_rings check in needs_pd_load_post, and update/move comment about updated PDEs to object_pin/bind (Mika).

Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
Signed-off-by: Michel Thierry <michel.thierry@intel.com> (v2+)
Reviewed-by: Mika Kuoppala <mika.kuoppala@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
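For orientation, the pd_dirty_rings bookkeeping referenced in v4 boils down to a per-ppgtt bitmask with one bit per ring: marking it dirty when new PDEs are allocated, and clearing a ring's bit once that ring has reloaded its page directories. The following is a minimal user-space sketch of that idea only; the struct, the ring count, and the helper names are stand-ins, not the kernel's own code (the real mark_tlbs_dirty lives in i915_gem_gtt.c).

    #include <stdbool.h>
    #include <stdio.h>

    #define NUM_RINGS 5                         /* illustrative ring count */
    #define ALL_RINGS ((1UL << NUM_RINGS) - 1)

    /* Stand-in for struct i915_hw_ppgtt: only the dirty-ring mask matters here. */
    struct hw_ppgtt {
            unsigned long pd_dirty_rings;
    };

    /* Roughly what mark_tlbs_dirty does: after new PDEs are allocated in
     * alloc_va_range, flag every ring so the next context switch on that
     * ring knows it must reload the page directories. */
    static void mark_tlbs_dirty(struct hw_ppgtt *ppgtt)
    {
            ppgtt->pd_dirty_rings = ALL_RINGS;
    }

    /* Roughly what the switch path does per ring: a PD load (or a forced
     * context restore on the render ring) clears that ring's bit, so clean
     * rings can keep skipping redundant switches. */
    static bool consume_pd_dirty(struct hw_ppgtt *ppgtt, int ring_id)
    {
            bool dirty = ppgtt->pd_dirty_rings & (1UL << ring_id);
            ppgtt->pd_dirty_rings &= ~(1UL << ring_id);
            return dirty;
    }

    int main(void)
    {
            struct hw_ppgtt ppgtt = { 0 };

            mark_tlbs_dirty(&ppgtt);                        /* new object mapped */
            printf("ring 0 dirty: %d\n", consume_pd_dirty(&ppgtt, 0)); /* 1 */
            printf("ring 0 dirty: %d\n", consume_pd_dirty(&ppgtt, 0)); /* 0: already reloaded */
            return 0;
    }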
Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem_context.c')
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c  26
1 file changed, 20 insertions, 6 deletions
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index b6ea85dcf9e6..dd9ab36a039b 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -573,8 +573,20 @@ static inline bool should_skip_switch(struct intel_engine_cs *ring,
struct intel_context *from,
struct intel_context *to)
{
- if (from == to && !to->remap_slice)
- return true;
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+
+ if (to->remap_slice)
+ return false;
+
+ if (to->ppgtt) {
+ if (from == to && !test_bit(ring->id,
+ &to->ppgtt->pd_dirty_rings))
+ return true;
+ } else if (dev_priv->mm.aliasing_ppgtt) {
+ if (from == to && !test_bit(ring->id,
+ &dev_priv->mm.aliasing_ppgtt->pd_dirty_rings))
+ return true;
+ }
return false;
}
@@ -610,10 +622,7 @@ needs_pd_load_post(struct intel_engine_cs *ring, struct intel_context *to)
if (ring != &dev_priv->ring[RCS])
return false;
- if (!to->legacy_hw_ctx.initialized)
- return true;
-
- if (i915_gem_context_is_default(to))
+ if (to->ppgtt->pd_dirty_rings)
return true;
return false;
@@ -664,6 +673,9 @@ static int do_switch(struct intel_engine_cs *ring,
ret = to->ppgtt->switch_mm(to->ppgtt, ring);
if (ret)
goto unpin_out;
+
+ /* Doing a PD load always reloads the page dirs */
+ clear_bit(ring->id, &to->ppgtt->pd_dirty_rings);
}
if (ring != &dev_priv->ring[RCS]) {
@@ -696,6 +708,8 @@ static int do_switch(struct intel_engine_cs *ring,
if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to))
hw_flags |= MI_RESTORE_INHIBIT;
+ else if (to->ppgtt && test_and_clear_bit(ring->id, &to->ppgtt->pd_dirty_rings))
+ hw_flags |= MI_FORCE_RESTORE;
ret = mi_set_context(ring, to, hw_flags);
if (ret)
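Reading the last hunk on its own, the MI_SET_CONTEXT flag choice after this patch can be restated as the small helper below. This is a simplified sketch under assumed types (the struct, flag values, and bitmask in place of test_and_clear_bit are illustrative), not a drop-in replacement for do_switch():

    #include <stdbool.h>

    #define MI_RESTORE_INHIBIT (1U << 0)   /* stand-in values, not the hardware bits */
    #define MI_FORCE_RESTORE   (1U << 1)

    struct ctx_state {
            bool initialized;   /* to->legacy_hw_ctx.initialized */
            bool is_default;    /* i915_gem_context_is_default(to) */
            bool has_ppgtt;     /* to->ppgtt != NULL */
    };

    /* dirty_rings stands in for to->ppgtt->pd_dirty_rings. An uninitialized or
     * default context still gets MI_RESTORE_INHIBIT; otherwise, if this ring's
     * page directories were marked dirty, MI_FORCE_RESTORE makes the context
     * restore reload them even when switching to the same context. */
    unsigned int pick_hw_flags(const struct ctx_state *to,
                               unsigned long *dirty_rings, int ring_id)
    {
            unsigned int hw_flags = 0;

            if (!to->initialized || to->is_default) {
                    hw_flags |= MI_RESTORE_INHIBIT;
            } else if (to->has_ppgtt && (*dirty_rings & (1UL << ring_id))) {
                    *dirty_rings &= ~(1UL << ring_id);  /* test_and_clear_bit equivalent */
                    hw_flags |= MI_FORCE_RESTORE;
            }

            return hw_flags;
    }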