Diffstat (limited to 'drivers/gpu/drm/i915/gt')
21 files changed, 97 insertions, 65 deletions
diff --git a/drivers/gpu/drm/i915/gt/gen2_engine_cs.c b/drivers/gpu/drm/i915/gt/gen2_engine_cs.c
index 8fe0499308ff..4904d0f4162c 100644
--- a/drivers/gpu/drm/i915/gt/gen2_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/gen2_engine_cs.c
@@ -169,7 +169,7 @@ static u32 *__gen2_emit_breadcrumb(struct i915_request *rq, u32 *cs,
 	return cs;
 }
 
-u32 *gen3_emit_breadcrumb(struct i915_request *rq, u32 *cs)
+u32 *gen2_emit_breadcrumb(struct i915_request *rq, u32 *cs)
 {
 	return __gen2_emit_breadcrumb(rq, cs, 16, 8);
 }
@@ -248,7 +248,7 @@ int i830_emit_bb_start(struct i915_request *rq,
 	return 0;
 }
 
-int gen3_emit_bb_start(struct i915_request *rq,
+int gen2_emit_bb_start(struct i915_request *rq,
 		       u64 offset, u32 len,
 		       unsigned int dispatch_flags)
 {
@@ -292,29 +292,12 @@ int gen4_emit_bb_start(struct i915_request *rq,
 
 void gen2_irq_enable(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *i915 = engine->i915;
-
-	i915->irq_mask &= ~engine->irq_enable_mask;
-	intel_uncore_write16(&i915->uncore, GEN2_IMR, i915->irq_mask);
-	ENGINE_POSTING_READ16(engine, RING_IMR);
-}
-
-void gen2_irq_disable(struct intel_engine_cs *engine)
-{
-	struct drm_i915_private *i915 = engine->i915;
-
-	i915->irq_mask |= engine->irq_enable_mask;
-	intel_uncore_write16(&i915->uncore, GEN2_IMR, i915->irq_mask);
-}
-
-void gen3_irq_enable(struct intel_engine_cs *engine)
-{
 	engine->i915->irq_mask &= ~engine->irq_enable_mask;
 	intel_uncore_write(engine->uncore, GEN2_IMR, engine->i915->irq_mask);
 	intel_uncore_posting_read_fw(engine->uncore, GEN2_IMR);
 }
 
-void gen3_irq_disable(struct intel_engine_cs *engine)
+void gen2_irq_disable(struct intel_engine_cs *engine)
 {
 	engine->i915->irq_mask |= engine->irq_enable_mask;
 	intel_uncore_write(engine->uncore, GEN2_IMR, engine->i915->irq_mask);
diff --git a/drivers/gpu/drm/i915/gt/gen2_engine_cs.h b/drivers/gpu/drm/i915/gt/gen2_engine_cs.h
index a5cd64a65c9e..7b37560fc356 100644
--- a/drivers/gpu/drm/i915/gt/gen2_engine_cs.h
+++ b/drivers/gpu/drm/i915/gt/gen2_engine_cs.h
@@ -15,13 +15,13 @@ int gen2_emit_flush(struct i915_request *rq, u32 mode);
 int gen4_emit_flush_rcs(struct i915_request *rq, u32 mode);
 int gen4_emit_flush_vcs(struct i915_request *rq, u32 mode);
 
-u32 *gen3_emit_breadcrumb(struct i915_request *rq, u32 *cs);
+u32 *gen2_emit_breadcrumb(struct i915_request *rq, u32 *cs);
 u32 *gen5_emit_breadcrumb(struct i915_request *rq, u32 *cs);
 
 int i830_emit_bb_start(struct i915_request *rq,
 		       u64 offset, u32 len,
 		       unsigned int dispatch_flags);
-int gen3_emit_bb_start(struct i915_request *rq,
+int gen2_emit_bb_start(struct i915_request *rq,
 		       u64 offset, u32 len,
 		       unsigned int dispatch_flags);
 int gen4_emit_bb_start(struct i915_request *rq,
@@ -30,8 +30,6 @@ int gen4_emit_bb_start(struct i915_request *rq,
 
 void gen2_irq_enable(struct intel_engine_cs *engine);
 void gen2_irq_disable(struct intel_engine_cs *engine);
-void gen3_irq_enable(struct intel_engine_cs *engine);
-void gen3_irq_disable(struct intel_engine_cs *engine);
 void gen5_irq_enable(struct intel_engine_cs *engine);
 void gen5_irq_disable(struct intel_engine_cs *engine);
 
diff --git a/drivers/gpu/drm/i915/gt/gen7_renderclear.c b/drivers/gpu/drm/i915/gt/gen7_renderclear.c
index d38b914d1206..6e89112f68ae 100644
--- a/drivers/gpu/drm/i915/gt/gen7_renderclear.c
+++ b/drivers/gpu/drm/i915/gt/gen7_renderclear.c
@@ -399,7 +399,8 @@ static void emit_batch(struct i915_vma * const vma,
 	batch_add(&cmds, MI_LOAD_REGISTER_IMM(2));
 	batch_add(&cmds, i915_mmio_reg_offset(CACHE_MODE_0_GEN7));
 	batch_add(&cmds, 0xffff0000 |
-		  ((IS_IVB_GT1(i915) || IS_VALLEYVIEW(i915)) ?
+		  (((IS_IVYBRIDGE(i915) && INTEL_INFO(i915)->gt == 1) ||
+		    IS_VALLEYVIEW(i915)) ?
 		   HIZ_RAW_STALL_OPT_DISABLE : 0));
 	batch_add(&cmds, i915_mmio_reg_offset(CACHE_MODE_1));
diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
index 20b9b04ec1e0..cc866773ba6f 100644
--- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
@@ -70,7 +70,7 @@ static void __intel_breadcrumbs_disarm_irq(struct intel_breadcrumbs *b)
 	if (!--b->irq_enabled)
 		b->irq_disable(b);
 
-	WRITE_ONCE(b->irq_armed, 0);
+	WRITE_ONCE(b->irq_armed, NULL);
 	intel_gt_pm_put_async(b->irq_engine->gt, wakeref);
 }
 
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_regs.h b/drivers/gpu/drm/i915/gt/intel_engine_regs.h
index a8eac59e3779..1c4784cb296c 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_regs.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_regs.h
@@ -15,6 +15,7 @@
 #define HEAD_WRAP_COUNT		0xFFE00000
 #define HEAD_WRAP_ONE		0x00200000
 #define HEAD_ADDR		0x001FFFFC
+#define HEAD_WAIT_I8XX		(1 << 0) /* gen2, PRBx_HEAD */
 #define RING_START(base)	_MMIO((base) + 0x38)
 #define RING_CTL(base)		_MMIO((base) + 0x3c)
 #define RING_CTL_SIZE(size)	((size) - PAGE_SIZE) /* in bytes -> pages */
@@ -26,7 +27,6 @@
 #define RING_VALID_MASK		0x00000001
 #define RING_VALID		0x00000001
 #define RING_INVALID		0x00000000
-#define RING_WAIT_I8XX		(1 << 0) /* gen2, PRBx_HEAD */
 #define RING_WAIT		(1 << 11) /* gen3+, PRBx_CTL */
 #define RING_WAIT_SEMAPHORE	(1 << 10) /* gen6+ */
 #define RING_SYNC_0(base)	_MMIO((base) + 0x40)
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index bb29f361110e..c4a351ebf395 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -185,7 +185,7 @@ int intel_gt_init_hw(struct intel_gt *gt)
 
 	if (IS_HASWELL(i915))
 		intel_uncore_write(uncore, HSW_MI_PREDICATE_RESULT_2,
-				   IS_HASWELL_GT3(i915) ?
+				   INTEL_INFO(i915)->gt == 3 ?
 				   LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
 
 	/* Apply the GT workarounds... */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
index ad4c51f18d3a..1240d44eeb85 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_irq.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
@@ -452,10 +452,10 @@ void gen8_gt_irq_reset(struct intel_gt *gt)
 {
 	struct intel_uncore *uncore = gt->uncore;
 
-	GEN8_IRQ_RESET_NDX(uncore, GT, 0);
-	GEN8_IRQ_RESET_NDX(uncore, GT, 1);
-	GEN8_IRQ_RESET_NDX(uncore, GT, 2);
-	GEN8_IRQ_RESET_NDX(uncore, GT, 3);
+	gen2_irq_reset(uncore, GEN8_GT_IRQ_REGS(0));
+	gen2_irq_reset(uncore, GEN8_GT_IRQ_REGS(1));
+	gen2_irq_reset(uncore, GEN8_GT_IRQ_REGS(2));
+	gen2_irq_reset(uncore, GEN8_GT_IRQ_REGS(3));
 }
 
 void gen8_gt_irq_postinstall(struct intel_gt *gt)
@@ -476,14 +476,14 @@ void gen8_gt_irq_postinstall(struct intel_gt *gt)
 
 	gt->pm_ier = 0x0;
 	gt->pm_imr = ~gt->pm_ier;
-	GEN8_IRQ_INIT_NDX(uncore, GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
-	GEN8_IRQ_INIT_NDX(uncore, GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
+	gen2_irq_init(uncore, GEN8_GT_IRQ_REGS(0), ~gt_interrupts[0], gt_interrupts[0]);
+	gen2_irq_init(uncore, GEN8_GT_IRQ_REGS(1), ~gt_interrupts[1], gt_interrupts[1]);
 	/*
 	 * RPS interrupts will get enabled/disabled on demand when RPS itself
 	 * is enabled/disabled. Same wil be the case for GuC interrupts.
 	 */
-	GEN8_IRQ_INIT_NDX(uncore, GT, 2, gt->pm_imr, gt->pm_ier);
-	GEN8_IRQ_INIT_NDX(uncore, GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
+	gen2_irq_init(uncore, GEN8_GT_IRQ_REGS(2), gt->pm_imr, gt->pm_ier);
+	gen2_irq_init(uncore, GEN8_GT_IRQ_REGS(3), ~gt_interrupts[3], gt_interrupts[3]);
 }
 
 static void gen5_gt_update_irq(struct intel_gt *gt,
@@ -514,9 +514,9 @@ void gen5_gt_irq_reset(struct intel_gt *gt)
 {
 	struct intel_uncore *uncore = gt->uncore;
 
-	GEN3_IRQ_RESET(uncore, GT);
+	gen2_irq_reset(uncore, GT_IRQ_REGS);
 	if (GRAPHICS_VER(gt->i915) >= 6)
-		GEN3_IRQ_RESET(uncore, GEN6_PM);
+		gen2_irq_reset(uncore, GEN6_PM_IRQ_REGS);
 }
 
 void gen5_gt_irq_postinstall(struct intel_gt *gt)
@@ -538,7 +538,7 @@ void gen5_gt_irq_postinstall(struct intel_gt *gt)
 	else
 		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
 
-	GEN3_IRQ_INIT(uncore, GT, gt->gt_imr, gt_irqs);
+	gen2_irq_init(uncore, GT_IRQ_REGS, gt->gt_imr, gt_irqs);
 
 	if (GRAPHICS_VER(gt->i915) >= 6) {
 		/*
@@ -551,6 +551,6 @@ void gen5_gt_irq_postinstall(struct intel_gt *gt)
 		}
 
 		gt->pm_imr = 0xffffffff;
-		GEN3_IRQ_INIT(uncore, GEN6_PM, gt->pm_imr, pm_irqs);
+		gen2_irq_init(uncore, GEN6_PM_IRQ_REGS, gt->pm_imr, pm_irqs);
 	}
 }
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.h b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
index 911fd0160221..6f25c747bc29 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
@@ -35,7 +35,7 @@ static inline void __intel_gt_pm_get(struct intel_gt *gt)
 static inline intel_wakeref_t intel_gt_pm_get_if_awake(struct intel_gt *gt)
 {
 	if (!intel_wakeref_get_if_active(&gt->wakeref))
-		return 0;
+		return NULL;
 
 	return intel_wakeref_track(&gt->wakeref);
 }
@@ -73,7 +73,7 @@ static inline void intel_gt_pm_put_async(struct intel_gt *gt, intel_wakeref_t ha
 }
 
 #define with_intel_gt_pm(gt, wf) \
-	for (wf = intel_gt_pm_get(gt); wf; intel_gt_pm_put(gt, wf), wf = 0)
+	for ((wf) = intel_gt_pm_get(gt); (wf); intel_gt_pm_put((gt), (wf)), (wf) = NULL)
 
 /**
  * with_intel_gt_pm_if_awake - if GT is PM awake, get a reference to prevent
@@ -84,7 +84,7 @@ static inline void intel_gt_pm_put_async(struct intel_gt *gt, intel_wakeref_t ha
  * @wf: pointer to a temporary wakeref.
 */
 #define with_intel_gt_pm_if_awake(gt, wf) \
-	for (wf = intel_gt_pm_get_if_awake(gt); wf; intel_gt_pm_put_async(gt, wf), wf = 0)
+	for ((wf) = intel_gt_pm_get_if_awake(gt); (wf); intel_gt_pm_put_async((gt), (wf)), (wf) = NULL)
 
 static inline int intel_gt_pm_wait_for_idle(struct intel_gt *gt)
 {
@@ -105,9 +105,13 @@ int intel_gt_runtime_resume(struct intel_gt *gt);
 
 ktime_t intel_gt_get_awake_time(const struct intel_gt *gt);
 
+#define INTEL_WAKEREF_MOCK_GT ERR_PTR(-ENODEV)
+
 static inline bool is_mock_gt(const struct intel_gt *gt)
 {
-	return I915_SELFTEST_ONLY(gt->awake == -ENODEV);
+	BUILD_BUG_ON(INTEL_WAKEREF_DEF == INTEL_WAKEREF_MOCK_GT);
+
+	return I915_SELFTEST_ONLY(gt->awake == INTEL_WAKEREF_MOCK_GT);
 }
 
 #endif /* INTEL_GT_PM_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
index 8d08b38874ef..b635aa2820d9 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
@@ -431,7 +431,7 @@ static int llc_show(struct seq_file *m, void *data)
 		max_gpu_freq /= GEN9_FREQ_SCALER;
 	}
 
-	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
+	seq_puts(m, "GPU freq (MHz)\tEffective GPU freq (MHz)\tEffective Ring freq (MHz)\n");
 
 	wakeref = intel_runtime_pm_get(gt->uncore->rpm);
 	for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_regs.h b/drivers/gpu/drm/i915/gt/intel_gt_regs.h
index 57a3c83d3655..6dba65e54cdb 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_regs.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_regs.h
@@ -432,6 +432,7 @@
 #define XEHPG_INSTDONE_GEOM_SVG		MCR_REG(0x666c)
 
 #define CACHE_MODE_0_GEN7		_MMIO(0x7000) /* IVB+ */
+#define   DISABLE_REPACKING_FOR_COMPRESSION	REG_BIT(15) /* jsl+ */
 #define   RC_OP_FLUSH_ENABLE		(1 << 0)
 #define   HIZ_RAW_STALL_OPT_DISABLE	(1 << 2)
 #define CACHE_MODE_1			_MMIO(0x7004) /* IVB+ */
@@ -1472,6 +1473,10 @@
 					  GEN6_PM_RP_DOWN_THRESHOLD | \
 					  GEN6_PM_RP_DOWN_TIMEOUT)
 
+#define GEN6_PM_IRQ_REGS		I915_IRQ_REGS(GEN6_PMIMR, \
+						      GEN6_PMIER, \
+						      GEN6_PMIIR)
+
 #define GEN7_GT_SCRATCH(i)		_MMIO(0x4f100 + (i) * 4)
 #define GEN7_GT_SCRATCH_REG_NUM		8
 
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 7bd5d2c29056..51847a846002 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -820,8 +820,10 @@ static bool ctx_needs_runalone(const struct intel_context *ce)
 	bool ctx_is_protected = false;
 
 	/*
-	 * On MTL and newer platforms, protected contexts require setting
-	 * the LRC run-alone bit or else the encryption will not happen.
+	 * Wa_14019159160 - Case 2.
+	 * On some platforms, protected contexts require setting
+	 * the LRC run-alone bit or else the encryption/decryption will not happen.
+	 * NOTE: Case 2 only applies to PXP use-case of said workaround.
 	 */
 	if (GRAPHICS_VER_FULL(ce->engine->i915) >= IP_VER(12, 70) &&
 	    (ce->engine->class == COMPUTE_CLASS || ce->engine->class == RENDER_CLASS)) {
@@ -850,6 +852,7 @@ static void init_common_regs(u32 * const regs,
 	if (GRAPHICS_VER(engine->i915) < 11)
 		ctl |= _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT |
 					   CTX_CTRL_RS_CTX_ENABLE);
+	/* Wa_14019159160 - Case 2.*/
 	if (ctx_needs_runalone(ce))
 		ctl |= _MASKED_BIT_ENABLE(GEN12_CTX_CTRL_RUNALONE_MODE);
 	regs[CTX_CONTEXT_CONTROL] = ctl;
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index 8f1ea95471ef..f42f21632306 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -1233,7 +1233,7 @@ void intel_gt_reset(struct intel_gt *gt,
 	}
 
 	if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
-		intel_runtime_pm_disable_interrupts(gt->i915);
+		intel_irq_suspend(gt->i915);
 
 	if (do_reset(gt, stalled_mask)) {
 		gt_err(gt, "Failed to reset chip\n");
@@ -1241,7 +1241,7 @@ void intel_gt_reset(struct intel_gt *gt,
 	}
 
 	if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
-		intel_runtime_pm_enable_interrupts(gt->i915);
+		intel_irq_resume(gt->i915);
 
 	intel_overlay_reset(gt->i915);
 
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index 72277bc8322e..32f3b52a183a 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -192,6 +192,7 @@ static bool stop_ring(struct intel_engine_cs *engine)
 static int xcs_resume(struct intel_engine_cs *engine)
 {
 	struct intel_ring *ring = engine->legacy.ring;
+	ktime_t kt;
 
 	ENGINE_TRACE(engine, "ring:{HEAD:%04x, TAIL:%04x}\n",
 		     ring->head, ring->tail);
@@ -230,9 +231,27 @@ static int xcs_resume(struct intel_engine_cs *engine)
 	set_pp_dir(engine);
 
 	/* First wake the ring up to an empty/idle ring */
-	ENGINE_WRITE_FW(engine, RING_HEAD, ring->head);
+	for ((kt) = ktime_get() + (2 * NSEC_PER_MSEC);
+	     ktime_before(ktime_get(), (kt)); cpu_relax()) {
+		/*
+		 * In case of resets fails because engine resumes from
+		 * incorrect RING_HEAD and then GPU may be then fed
+		 * to invalid instrcutions, which may lead to unrecoverable
+		 * hang. So at first write doesn't succeed then try again.
+		 */
+		ENGINE_WRITE_FW(engine, RING_HEAD, ring->head);
+		if (ENGINE_READ_FW(engine, RING_HEAD) == ring->head)
+			break;
+	}
+
 	ENGINE_WRITE_FW(engine, RING_TAIL, ring->head);
-	ENGINE_POSTING_READ(engine, RING_TAIL);
+	if (ENGINE_READ_FW(engine, RING_HEAD) != ENGINE_READ_FW(engine, RING_TAIL)) {
+		ENGINE_TRACE(engine, "failed to reset empty ring: [%x, %x]: %x\n",
+			     ENGINE_READ_FW(engine, RING_HEAD),
+			     ENGINE_READ_FW(engine, RING_TAIL),
+			     ring->head);
+		goto err;
+	}
 
 	ENGINE_WRITE_FW(engine, RING_CTL,
 			RING_CTL_SIZE(ring->size) | RING_VALID);
@@ -241,12 +260,16 @@ static int xcs_resume(struct intel_engine_cs *engine)
 	if (__intel_wait_for_register_fw(engine->uncore,
 					 RING_CTL(engine->mmio_base),
 					 RING_VALID, RING_VALID,
-					 5000, 0, NULL))
+					 5000, 0, NULL)) {
+		ENGINE_TRACE(engine, "failed to restart\n");
 		goto err;
+	}
 
-	if (GRAPHICS_VER(engine->i915) > 2)
+	if (GRAPHICS_VER(engine->i915) > 2) {
 		ENGINE_WRITE_FW(engine, RING_MI_MODE,
 				_MASKED_BIT_DISABLE(STOP_RING));
+		ENGINE_POSTING_READ(engine, RING_MI_MODE);
+	}
 
 	/* Now awake, let it get started */
 	if (ring->tail != ring->head) {
@@ -1090,9 +1113,6 @@ static void setup_irq(struct intel_engine_cs *engine)
 	} else if (GRAPHICS_VER(i915) >= 5) {
 		engine->irq_enable = gen5_irq_enable;
 		engine->irq_disable = gen5_irq_disable;
-	} else if (GRAPHICS_VER(i915) >= 3) {
-		engine->irq_enable = gen3_irq_enable;
-		engine->irq_disable = gen3_irq_disable;
 	} else {
 		engine->irq_enable = gen2_irq_enable;
 		engine->irq_disable = gen2_irq_disable;
@@ -1146,7 +1166,7 @@ static void setup_common(struct intel_engine_cs *engine)
 	 * equivalent to our next initial bread so we can elide
 	 * engine->emit_init_breadcrumb().
 	 */
-	engine->emit_fini_breadcrumb = gen3_emit_breadcrumb;
+	engine->emit_fini_breadcrumb = gen2_emit_breadcrumb;
 	if (GRAPHICS_VER(i915) == 5)
 		engine->emit_fini_breadcrumb = gen5_emit_breadcrumb;
 
@@ -1159,7 +1179,7 @@ static void setup_common(struct intel_engine_cs *engine)
 	else if (IS_I830(i915) || IS_I845G(i915))
 		engine->emit_bb_start = i830_emit_bb_start;
 	else
-		engine->emit_bb_start = gen3_emit_bb_start;
+		engine->emit_bb_start = gen2_emit_bb_start;
 }
 
 static void setup_rcs(struct intel_engine_cs *engine)
diff --git a/drivers/gpu/drm/i915/gt/intel_tlb.c b/drivers/gpu/drm/i915/gt/intel_tlb.c
index 756e9ebbc725..2487768bc230 100644
--- a/drivers/gpu/drm/i915/gt/intel_tlb.c
+++ b/drivers/gpu/drm/i915/gt/intel_tlb.c
@@ -122,7 +122,7 @@ void intel_gt_invalidate_tlb_full(struct intel_gt *gt, u32 seqno)
 {
 	intel_wakeref_t wakeref;
 
-	if (I915_SELFTEST_ONLY(gt->awake == -ENODEV))
+	if (is_mock_gt(gt))
 		return;
 
 	if (intel_gt_is_wedged(gt))
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index e539a656cfc3..570c91878189 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -418,7 +418,7 @@ static void bdw_ctx_workarounds_init(struct intel_engine_cs *engine,
 		  /* WaForceContextSaveRestoreNonCoherent:bdw */
 		  HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
 		  /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
-		  (IS_BROADWELL_GT3(i915) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
+		  (INTEL_INFO(i915)->gt == 3 ? HDC_FENCE_DEST_SLM_DISABLE : 0));
 }
 
 static void chv_ctx_workarounds_init(struct intel_engine_cs *engine,
@@ -2299,6 +2299,15 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
 			     GEN8_RC_SEMA_IDLE_MSG_DISABLE);
 	}
 
+	if (IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) {
+		/*
+		 * "Disable Repacking for Compression (masked R/W access)
+		 *  before rendering compressed surfaces for display."
+		 */
+		wa_masked_en(wal, CACHE_MODE_0_GEN7,
+			     DISABLE_REPACKING_FOR_COMPRESSION);
+	}
+
 	if (GRAPHICS_VER(i915) == 11) {
 		/* This is not an Wa. Enable for better image quality */
 		wa_masked_en(wal,
@@ -2537,7 +2546,7 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
 			    GEN7_FF_DS_SCHED_HW);
 
 	/* WaDisablePSDDualDispatchEnable:ivb */
-	if (IS_IVB_GT1(i915))
+	if (INTEL_INFO(i915)->gt == 1)
 		wa_masked_en(wal, GEN7_HALF_SLICE_CHICKEN1,
 			     GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE);
 
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
index 097fc6bd1285..5949ff0b0161 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
@@ -239,8 +239,16 @@ static u32 guc_ctl_debug_flags(struct intel_guc *guc)
 
 static u32 guc_ctl_feature_flags(struct intel_guc *guc)
 {
+	struct intel_gt *gt = guc_to_gt(guc);
 	u32 flags = 0;
 
+	/*
+	 * Enable PXP GuC autoteardown flow.
+	 * NB: MTL does things differently.
+	 */
+	if (HAS_PXP(gt->i915) && !IS_METEORLAKE(gt->i915))
+		flags |= GUC_CTL_ENABLE_GUC_PXP_CTL;
+
 	if (!intel_guc_submission_is_used(guc))
 		flags |= GUC_CTL_DISABLE_SCHEDULER;
 
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
index 23f54c84cbab..fe53e8eccf4b 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
@@ -145,7 +145,7 @@ static inline bool guc_load_done(struct intel_uncore *uncore, u32 *status, bool
 * an end user should hit the timeout is in case of extreme thermal throttling.
 * And a system that is that hot during boot is probably dead anyway!
 */
-#if defined(CONFIG_DRM_I915_DEBUG_GEM)
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
 #define GUC_LOAD_RETRY_LIMIT 20
 #else
 #define GUC_LOAD_RETRY_LIMIT 3
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
index 263c9c3f6a03..4ce6e2332a63 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
@@ -105,6 +105,7 @@
 #define GUC_WA_ENABLE_TSC_CHECK_ON_RC6	BIT(22)
 
 #define GUC_CTL_FEATURE			2
+#define GUC_CTL_ENABLE_GUC_PXP_CTL	BIT(1)
 #define GUC_CTL_ENABLE_SLPC		BIT(2)
 #define GUC_CTL_DISABLE_SCHEDULER	BIT(14)
 
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
index bf16351c9349..222c95f62156 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
@@ -14,7 +14,7 @@
 #include "intel_guc_log.h"
 #include "intel_guc_print.h"
 
-#if defined(CONFIG_DRM_I915_DEBUG_GUC)
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GUC)
 #define GUC_LOG_DEFAULT_CRASH_BUFFER_SIZE	SZ_2M
 #define GUC_LOG_DEFAULT_DEBUG_BUFFER_SIZE	SZ_16M
 #define GUC_LOG_DEFAULT_CAPTURE_BUFFER_SIZE	SZ_1M
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index ed979847187f..9ede6f240d79 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -1339,7 +1339,7 @@ static ktime_t guc_engine_busyness(struct intel_engine_cs *engine, ktime_t *now)
 	 * start_gt_clk is derived from GuC state. To get a consistent
 	 * view of activity, we query the GuC state only if gt is awake.
 	 */
-	wakeref = in_reset ? 0 : intel_gt_pm_get_if_awake(gt);
+	wakeref = in_reset ? NULL : intel_gt_pm_get_if_awake(gt);
 	if (wakeref) {
 		stats_saved = *stats;
 		gt_stamp_saved = guc->timestamp.gt_stamp;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.c b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
index 2d9152eb7282..d7ac31c3254c 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
@@ -455,7 +455,7 @@ static const char *auth_mode_string(struct intel_huc *huc,
 * an end user should hit the timeout is in case of extreme thermal throttling.
 * And a system that is that hot during boot is probably dead anyway!
 */
-#if defined(CONFIG_DRM_I915_DEBUG_GEM)
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
#define HUC_LOAD_RETRY_LIMIT 20
 #else
 #define HUC_LOAD_RETRY_LIMIT 3