Diffstat (limited to 'drivers/gpu/drm/i915/i915_drv.c')
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 184
1 file changed, 120 insertions(+), 64 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 2cf10d17acfb..173d0095e3b2 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -48,6 +48,7 @@
 #include "i915_drv.h"
 #include "i915_trace.h"
+#include "i915_pmu.h"
 #include "i915_vgpu.h"
 #include "intel_drv.h"
 #include "intel_uc.h"
@@ -321,7 +322,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
 		value = USES_PPGTT(dev_priv);
 		break;
 	case I915_PARAM_HAS_SEMAPHORES:
-		value = i915_modparams.semaphores;
+		value = HAS_LEGACY_SEMAPHORES(dev_priv);
 		break;
 	case I915_PARAM_HAS_SECURE_BATCHES:
 		value = capable(CAP_SYS_ADMIN);
@@ -371,10 +372,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
 		if (dev_priv->engine[RCS] && dev_priv->engine[RCS]->schedule) {
 			value |= I915_SCHEDULER_CAP_ENABLED;
 			value |= I915_SCHEDULER_CAP_PRIORITY;
-
-			if (INTEL_INFO(dev_priv)->has_logical_ring_preemption &&
-			    i915_modparams.enable_execlists &&
-			    !i915_modparams.enable_guc_submission)
+			if (HAS_LOGICAL_RING_PREEMPTION(dev_priv))
 				value |= I915_SCHEDULER_CAP_PREEMPTION;
 		}
 		break;
@@ -407,6 +405,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
 		 */
 		value = 1;
 		break;
+	case I915_PARAM_HAS_CONTEXT_ISOLATION:
+		value = intel_engines_has_context_isolation(dev_priv);
+		break;
 	case I915_PARAM_SLICE_MASK:
 		value = INTEL_INFO(dev_priv)->sseu.slice_mask;
 		if (!value)
@@ -417,6 +418,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
 		if (!value)
 			return -ENODEV;
 		break;
+	case I915_PARAM_CS_TIMESTAMP_FREQUENCY:
+		value = 1000 * INTEL_INFO(dev_priv)->cs_timestamp_frequency_khz;
+		break;
 	default:
 		DRM_DEBUG("Unknown parameter %d\n", param->param);
 		return -EINVAL;
@@ -613,10 +617,12 @@ static void i915_gem_fini(struct drm_i915_private *dev_priv)
 	mutex_lock(&dev_priv->drm.struct_mutex);
 	intel_uc_fini_hw(dev_priv);
+	intel_uc_fini(dev_priv);
 	i915_gem_cleanup_engines(dev_priv);
 	i915_gem_contexts_fini(dev_priv);
 	mutex_unlock(&dev_priv->drm.struct_mutex);

+	intel_uc_fini_wq(dev_priv);
 	i915_gem_cleanup_userptr(dev_priv);

 	i915_gem_drain_freed_objects(dev_priv);
@@ -677,7 +683,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
 	if (ret)
 		goto cleanup_uc;

-	intel_modeset_gem_init(dev);
+	intel_setup_overlay(dev_priv);

 	if (INTEL_INFO(dev_priv)->num_pipes == 0)
 		return 0;
@@ -689,8 +695,6 @@ static int i915_load_modeset_init(struct drm_device *dev)
 	/* Only enable hotplug handling once the fbdev is fully set up. */
 	intel_hpd_init(dev_priv);

-	drm_kms_helper_poll_init(dev);
-
 	return 0;

 cleanup_gem:
@@ -724,7 +728,7 @@ static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
 	if (!ap)
 		return -ENOMEM;

-	ap->ranges[0].base = ggtt->mappable_base;
+	ap->ranges[0].base = ggtt->gmadr.start;
 	ap->ranges[0].size = ggtt->mappable_end;

 	primary =
@@ -838,6 +842,11 @@ static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
  * We don't keep the workarounds for pre-production hardware, so we expect our
  * driver to fail on these machines in one way or another. A little warning on
  * dmesg may help both the user and the bug triagers.
+ *
+ * Our policy for removing pre-production workarounds is to keep the
+ * current gen workarounds as a guide to the bring-up of the next gen
+ * (workarounds have a habit of persisting!). Anything older than that
+ * should be removed along with the complications they introduce.
  */
 static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
 {
@@ -892,7 +901,6 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
 	mutex_init(&dev_priv->backlight_lock);
 	spin_lock_init(&dev_priv->uncore.lock);
-	spin_lock_init(&dev_priv->mm.object_stat_lock);
 	mutex_init(&dev_priv->sb_lock);
 	mutex_init(&dev_priv->modeset_restore_lock);
 	mutex_init(&dev_priv->av_mutex);
@@ -923,12 +931,8 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,

 	intel_display_crc_init(dev_priv);

-	intel_device_info_dump(dev_priv);
-
 	intel_detect_preproduction_hw(dev_priv);

-	i915_perf_init(dev_priv);
-
 	return 0;

 err_irq:
@@ -945,7 +949,6 @@ err_engines:
  */
 static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv)
 {
-	i915_perf_fini(dev_priv);
 	i915_gem_load_cleanup(dev_priv);
 	intel_irq_fini(dev_priv);
 	i915_workqueues_cleanup(dev_priv);
@@ -1048,10 +1051,6 @@ static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv)

 static void intel_sanitize_options(struct drm_i915_private *dev_priv)
 {
-	i915_modparams.enable_execlists =
-		intel_sanitize_enable_execlists(dev_priv,
-						i915_modparams.enable_execlists);
-
 	/*
	 * i915.enable_ppgtt is read-only, so do an early pass to validate the
	 * user's requested state against the hardware/driver capabilities. We
@@ -1063,11 +1062,6 @@ static void intel_sanitize_options(struct drm_i915_private *dev_priv)
 					    i915_modparams.enable_ppgtt);
 	DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915_modparams.enable_ppgtt);

-	i915_modparams.semaphores =
-		intel_sanitize_semaphores(dev_priv, i915_modparams.semaphores);
-	DRM_DEBUG_DRIVER("use GPU semaphores? %s\n",
-			 yesno(i915_modparams.semaphores));
-
 	intel_uc_sanitize_options(dev_priv);

 	intel_gvt_sanitize_options(dev_priv);
@@ -1088,10 +1082,12 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
 	if (i915_inject_load_failure())
 		return -ENODEV;

-	intel_device_info_runtime_init(dev_priv);
+	intel_device_info_runtime_init(mkwrite_device_info(dev_priv));

 	intel_sanitize_options(dev_priv);

+	i915_perf_init(dev_priv);
+
 	ret = i915_ggtt_probe_hw(dev_priv);
 	if (ret)
 		return ret;
@@ -1197,6 +1193,8 @@ static void i915_driver_cleanup_hw(struct drm_i915_private *dev_priv)
 {
 	struct pci_dev *pdev = dev_priv->drm.pdev;

+	i915_perf_fini(dev_priv);
+
 	if (pdev->msi_enabled)
 		pci_disable_msi(pdev);

@@ -1215,7 +1213,8 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
 {
 	struct drm_device *dev = &dev_priv->drm;

-	i915_gem_shrinker_init(dev_priv);
+	i915_gem_shrinker_register(dev_priv);
+	i915_pmu_register(dev_priv);

 	/*
	 * Notify a valid surface after modesetting,
@@ -1254,6 +1253,13 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
	 * cannot run before the connectors are registered.
	 */
 	intel_fbdev_initial_config_async(dev);
+
+	/*
+	 * We need to coordinate the hotplugs with the asynchronous fbdev
+	 * configuration, for which we use the fbdev->async_cookie.
+	 */
+	if (INTEL_INFO(dev_priv)->num_pipes)
+		drm_kms_helper_poll_init(dev);
 }

 /**
@@ -1265,17 +1271,40 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv)
 	intel_fbdev_unregister(dev_priv);
 	intel_audio_deinit(dev_priv);

+	/*
+	 * After flushing the fbdev (incl. a late async config which will
+	 * have delayed queuing of a hotplug event), then flush the hotplug
+	 * events.
+	 */
+	drm_kms_helper_poll_fini(&dev_priv->drm);
+
 	intel_gpu_ips_teardown();
 	acpi_video_unregister();
 	intel_opregion_unregister(dev_priv);

 	i915_perf_unregister(dev_priv);
+	i915_pmu_unregister(dev_priv);

 	i915_teardown_sysfs(dev_priv);
 	i915_guc_log_unregister(dev_priv);
 	drm_dev_unregister(&dev_priv->drm);

-	i915_gem_shrinker_cleanup(dev_priv);
+	i915_gem_shrinker_unregister(dev_priv);
+}
+
+static void i915_welcome_messages(struct drm_i915_private *dev_priv)
+{
+	if (drm_debug & DRM_UT_DRIVER) {
+		struct drm_printer p = drm_debug_printer("i915 device info:");
+
+		intel_device_info_dump(&dev_priv->info, &p);
+		intel_device_info_dump_runtime(&dev_priv->info, &p);
+	}
+
+	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
+		DRM_INFO("DRM_I915_DEBUG enabled\n");
+	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+		DRM_INFO("DRM_I915_DEBUG_GEM enabled\n");
 }

 /**
@@ -1363,13 +1392,10 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)

 	intel_init_ipc(dev_priv);

-	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
-		DRM_INFO("DRM_I915_DEBUG enabled\n");
-	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
-		DRM_INFO("DRM_I915_DEBUG_GEM enabled\n");
-
 	intel_runtime_pm_put(dev_priv);

+	i915_welcome_messages(dev_priv);
+
 	return 0;

 out_cleanup_hw:
@@ -1682,8 +1708,6 @@ static int i915_drm_resume(struct drm_device *dev)

 	intel_csr_ucode_resume(dev_priv);

-	i915_gem_resume(dev_priv);
-
 	i915_restore_state(dev_priv);
 	intel_pps_unlock_regs_wa(dev_priv);
 	intel_opregion_setup(dev_priv);
@@ -1704,14 +1728,7 @@ static int i915_drm_resume(struct drm_device *dev)

 	drm_mode_config_reset(dev);

-	mutex_lock(&dev->struct_mutex);
-	if (i915_gem_init_hw(dev_priv)) {
-		DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
-		i915_gem_set_wedged(dev_priv);
-	}
-	mutex_unlock(&dev->struct_mutex);
-
-	intel_guc_resume(dev_priv);
+	i915_gem_resume(dev_priv);

 	intel_modeset_init_hw(dev);
 	intel_init_clock_gating(dev_priv);
@@ -1745,8 +1762,6 @@ static int i915_drm_resume(struct drm_device *dev)

 	intel_opregion_notify_adapter(dev_priv, PCI_D0);

-	intel_autoenable_gt_powersave(dev_priv);
-
 	enable_rpm_wakeref_asserts(dev_priv);

 	return 0;
@@ -1827,6 +1842,8 @@ static int i915_drm_resume_early(struct drm_device *dev)
 	if (IS_GEN9_LP(dev_priv) ||
 	    !(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload))
 		intel_power_domains_init_hw(dev_priv, true);
+	else
+		intel_display_set_init_power(dev_priv, true);

 	i915_gem_sanitize(dev_priv);

@@ -1874,7 +1891,9 @@ void i915_reset(struct drm_i915_private *i915, unsigned int flags)
 {
 	struct i915_gpu_error *error = &i915->gpu_error;
 	int ret;
+	int i;

+	might_sleep();
 	lockdep_assert_held(&i915->drm.struct_mutex);
 	GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &error->flags));

@@ -1892,22 +1911,30 @@ void i915_reset(struct drm_i915_private *i915, unsigned int flags)
 	disable_irq(i915->drm.irq);
 	ret = i915_gem_reset_prepare(i915);
 	if (ret) {
-		DRM_ERROR("GPU recovery failed\n");
+		dev_err(i915->drm.dev, "GPU recovery failed\n");
 		intel_gpu_reset(i915, ALL_ENGINES);
-		goto error;
+		goto taint;
 	}

-	ret = intel_gpu_reset(i915, ALL_ENGINES);
-	if (ret) {
-		if (ret != -ENODEV)
-			DRM_ERROR("Failed to reset chip: %i\n", ret);
+	if (!intel_has_gpu_reset(i915)) {
+		if (i915_modparams.reset)
+			dev_err(i915->drm.dev, "GPU reset not supported\n");
 		else
 			DRM_DEBUG_DRIVER("GPU reset disabled\n");
 		goto error;
 	}

-	i915_gem_reset(i915);
-	intel_overlay_reset(i915);
+	for (i = 0; i < 3; i++) {
+		ret = intel_gpu_reset(i915, ALL_ENGINES);
+		if (ret == 0)
+			break;
+
+		msleep(100);
+	}
+	if (ret) {
+		dev_err(i915->drm.dev, "Failed to reset chip\n");
+		goto taint;
+	}

 	/* Ok, now get things going again... */

@@ -1921,6 +1948,9 @@ void i915_reset(struct drm_i915_private *i915, unsigned int flags)
 		goto error;
 	}

+	i915_gem_reset(i915);
+	intel_overlay_reset(i915);
+
 	/*
	 * Next we need to restore the context, but we don't use those
	 * yet either...
@@ -1946,12 +1976,32 @@ wakeup:
 	wake_up_bit(&error->flags, I915_RESET_HANDOFF);
 	return;

+taint:
+	/*
+	 * History tells us that if we cannot reset the GPU now, we
+	 * never will. This then impacts everything that is run
+	 * subsequently. On failing the reset, we mark the driver
+	 * as wedged, preventing further execution on the GPU.
+	 * We also want to go one step further and add a taint to the
+	 * kernel so that any subsequent faults can be traced back to
+	 * this failure. This is important for CI, where if the
+	 * GPU/driver fails we would like to reboot and restart testing
+	 * rather than continue on into oblivion. For everyone else,
+	 * the system should still plod along, but they have been warned!
+	 */
+	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
 error:
 	i915_gem_set_wedged(i915);
 	i915_gem_retire_requests(i915);
 	goto finish;
 }

+static inline int intel_gt_reset_engine(struct drm_i915_private *dev_priv,
+					struct intel_engine_cs *engine)
+{
+	return intel_gpu_reset(dev_priv, intel_engine_flag(engine));
+}
+
 /**
  * i915_reset_engine - reset GPU engine to recover from a hang
  * @engine: engine to reset
@@ -1973,23 +2023,27 @@ int i915_reset_engine(struct intel_engine_cs *engine, unsigned int flags)

 	GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &error->flags));

+	active_request = i915_gem_reset_prepare_engine(engine);
+	if (IS_ERR_OR_NULL(active_request)) {
+		/* Either the previous reset failed, or we pardon the reset. */
+		ret = PTR_ERR(active_request);
+		goto out;
+	}
+
 	if (!(flags & I915_RESET_QUIET)) {
 		dev_notice(engine->i915->drm.dev,
 			   "Resetting %s after gpu hang\n", engine->name);
 	}
 	error->reset_engine_count[engine->id]++;

-	active_request = i915_gem_reset_prepare_engine(engine);
-	if (IS_ERR(active_request)) {
-		DRM_DEBUG_DRIVER("Previous reset failed, promote to full reset\n");
-		ret = PTR_ERR(active_request);
-		goto out;
-	}
-
-	ret = intel_gpu_reset(engine->i915, intel_engine_flag(engine));
+	if (!engine->i915->guc.execbuf_client)
+		ret = intel_gt_reset_engine(engine->i915, engine);
+	else
+		ret = intel_guc_reset_engine(&engine->i915->guc, engine);
 	if (ret) {
 		/* If we fail here, we expect to fallback to a global reset */
-		DRM_DEBUG_DRIVER("Failed to reset %s, ret=%d\n",
+		DRM_DEBUG_DRIVER("%sFailed to reset %s, ret=%d\n",
+				 engine->i915->guc.execbuf_client ? "GuC " : "",
 				 engine->name, ret);
 		goto out;
 	}
@@ -2504,7 +2558,7 @@ static int intel_runtime_suspend(struct device *kdev)
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	int ret;

-	if (WARN_ON_ONCE(!(dev_priv->gt_pm.rc6.enabled && intel_rc6_enabled())))
+	if (WARN_ON_ONCE(!(dev_priv->gt_pm.rc6.enabled && HAS_RC6(dev_priv))))
 		return -ENODEV;

 	if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv)))
@@ -2524,6 +2578,8 @@ static int intel_runtime_suspend(struct device *kdev)

 	intel_runtime_pm_disable_interrupts(dev_priv);

+	intel_uncore_suspend(dev_priv);
+
 	ret = 0;
 	if (IS_GEN9_LP(dev_priv)) {
 		bxt_display_core_uninit(dev_priv);
@@ -2536,6 +2592,8 @@ static int intel_runtime_suspend(struct device *kdev)
 	if (ret) {
 		DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);

+		intel_uncore_runtime_resume(dev_priv);
+
 		intel_runtime_pm_enable_interrupts(dev_priv);

 		enable_rpm_wakeref_asserts(dev_priv);
@@ -2543,8 +2601,6 @@ static int intel_runtime_suspend(struct device *kdev)
 		return ret;
 	}

-	intel_uncore_suspend(dev_priv);
-
 	enable_rpm_wakeref_asserts(dev_priv);

 	WARN_ON_ONCE(atomic_read(&dev_priv->runtime_pm.wakeref_count));