Diffstat (limited to 'drivers/gpu/drm/xe/xe_device.c')
 drivers/gpu/drm/xe/xe_device.c | 264 ++++++++++++++++++++++++++++-----------
 1 file changed, 193 insertions(+), 71 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
index 4e1839b483a0..00191227bc95 100644
--- a/drivers/gpu/drm/xe/xe_device.c
+++ b/drivers/gpu/drm/xe/xe_device.c
@@ -49,7 +49,11 @@
#include "xe_pat.h"
#include "xe_pcode.h"
#include "xe_pm.h"
+#include "xe_pmu.h"
+#include "xe_pxp.h"
#include "xe_query.h"
+#include "xe_shrinker.h"
+#include "xe_survivability_mode.h"
#include "xe_sriov.h"
#include "xe_tile.h"
#include "xe_ttm_stolen_mgr.h"
@@ -232,12 +236,117 @@ static long xe_drm_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
#define xe_drm_compat_ioctl NULL
#endif
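+
+/* Keep a drm_device reference for as long as a barrier VMA exists */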
+static void barrier_open(struct vm_area_struct *vma)
+{
+ drm_dev_get(vma->vm_private_data);
+}
+
+static void barrier_close(struct vm_area_struct *vma)
+{
+ drm_dev_put(vma->vm_private_data);
+}
+
+static void barrier_release_dummy_page(struct drm_device *dev, void *res)
+{
+ struct page *dummy_page = (struct page *)res;
+
+ __free_page(dummy_page);
+}
+
+static vm_fault_t barrier_fault(struct vm_fault *vmf)
+{
+ struct drm_device *dev = vmf->vma->vm_private_data;
+ struct vm_area_struct *vma = vmf->vma;
+ vm_fault_t ret = VM_FAULT_NOPAGE;
+ pgprot_t prot;
+ int idx;
+
+ prot = vm_get_page_prot(vma->vm_flags);
+
+ if (drm_dev_enter(dev, &idx)) {
+ unsigned long pfn;
+
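+ /*
+ * As the name suggests, this is the offset of the last doorbell
+ * page in BAR0; a write landing there has no other effect than
+ * draining prior posted writes, i.e. it acts as a PCI barrier.
+ */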
+#define LAST_DB_PAGE_OFFSET 0x7ff001
+ pfn = PHYS_PFN(pci_resource_start(to_pci_dev(dev->dev), 0) +
+ LAST_DB_PAGE_OFFSET);
+ ret = vmf_insert_pfn_prot(vma, vma->vm_start, pfn,
+ pgprot_noncached(prot));
+ drm_dev_exit(idx);
+ } else {
+ struct page *page;
+
+ /*
+ * Device has been unplugged: allocate a new dummy page and map
+ * all the VA range in this VMA to it.
+ */
+ page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!page)
+ return VM_FAULT_OOM;
+
+ /* Set the page to be freed using drmm release action */
+ if (drmm_add_action_or_reset(dev, barrier_release_dummy_page, page))
+ return VM_FAULT_OOM;
+
+ ret = vmf_insert_pfn_prot(vma, vma->vm_start, page_to_pfn(page),
+ prot);
+ }
+
+ return ret;
+}
+
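+/* The barrier VMA is populated lazily, on first touch, via barrier_fault() */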
+static const struct vm_operations_struct vm_ops_barrier = {
+ .open = barrier_open,
+ .close = barrier_close,
+ .fault = barrier_fault,
+};
+
+static int xe_pci_barrier_mmap(struct file *filp,
+ struct vm_area_struct *vma)
+{
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->minor->dev;
+ struct xe_device *xe = to_xe_device(dev);
+
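+ /* The barrier page lives in BAR0, so this is for discrete devices only */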
+ if (!IS_DGFX(xe))
+ return -EINVAL;
+
+ if (vma->vm_end - vma->vm_start > SZ_4K)
+ return -EINVAL;
+
+ if (is_cow_mapping(vma->vm_flags))
+ return -EINVAL;
+
+ if (vma->vm_flags & (VM_READ | VM_EXEC))
+ return -EINVAL;
+
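+ /* Write-only, raw-PFN I/O mapping: no read/exec, no expansion, no coredump */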
+ vm_flags_clear(vma, VM_MAYREAD | VM_MAYEXEC);
+ vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_IO);
+ vma->vm_ops = &vm_ops_barrier;
+ vma->vm_private_data = dev;
+ drm_dev_get(vma->vm_private_data);
+
+ return 0;
+}
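+
+/*
+ * Illustrative userspace sketch. The exact uAPI macro name is an
+ * assumption here; only the kernel-side XE_PCI_BARRIER_MMAP_OFFSET
+ * check below is visible in this patch:
+ *
+ *   ptr = mmap(NULL, 4096, PROT_WRITE, MAP_SHARED, drm_fd,
+ *              XE_PCI_BARRIER_MMAP_OFFSET);
+ *   *(volatile uint32_t *)ptr = 0;  -- the write drains posted writes
+ */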
+
+static int xe_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->minor->dev;
+
+ if (drm_dev_is_unplugged(dev))
+ return -ENODEV;
+
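+ /* Route fake offsets to their handlers; everything else is GEM mmap */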
+ switch (vma->vm_pgoff) {
+ case XE_PCI_BARRIER_MMAP_OFFSET >> XE_PTE_SHIFT:
+ return xe_pci_barrier_mmap(filp, vma);
+ }
+
+ return drm_gem_mmap(filp, vma);
+}
+
static const struct file_operations xe_driver_fops = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release_noglobal,
.unlocked_ioctl = xe_drm_ioctl,
- .mmap = drm_gem_mmap,
+ .mmap = xe_mmap,
.poll = drm_poll,
.read = drm_read,
.compat_ioctl = xe_drm_compat_ioctl,
@@ -280,6 +389,8 @@ static void xe_device_destroy(struct drm_device *dev, void *dummy)
{
struct xe_device *xe = to_xe_device(dev);
+ xe_bo_dev_fini(&xe->bo_device);
+
if (xe->preempt_fence_wq)
destroy_workqueue(xe->preempt_fence_wq);
@@ -289,6 +400,9 @@ static void xe_device_destroy(struct drm_device *dev, void *dummy)
if (xe->unordered_wq)
destroy_workqueue(xe->unordered_wq);
+ if (!IS_ERR_OR_NULL(xe->mem.shrinker))
+ xe_shrinker_destroy(xe->mem.shrinker);
+
if (xe->destroy_wq)
destroy_workqueue(xe->destroy_wq);
@@ -317,10 +431,15 @@ struct xe_device *xe_device_create(struct pci_dev *pdev,
if (WARN_ON(err))
goto err;
+ xe_bo_dev_init(&xe->bo_device);
err = drmm_add_action_or_reset(&xe->drm, xe_device_destroy, NULL);
if (err)
goto err;
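+
+ /* Failures from here unwind via drmm: xe_device_destroy() above already
+ * tolerates an ERR_PTR or NULL shrinker on this early exit. */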
+ xe->mem.shrinker = xe_shrinker_create(xe);
+ if (IS_ERR(xe->mem.shrinker))
+ return ERR_CAST(xe->mem.shrinker);
+
xe->info.devid = pdev->device;
xe->info.revid = pdev->revision;
xe->info.force_execlist = xe_modparam.force_execlist;
@@ -553,7 +672,7 @@ static int wait_for_lmem_ready(struct xe_device *xe)
}
ALLOW_ERROR_INJECTION(wait_for_lmem_ready, ERRNO); /* See xe_pci_probe() */
-static void update_device_info(struct xe_device *xe)
+static void sriov_update_device_info(struct xe_device *xe)
{
/* disable features that are not available/applicable to VFs */
if (IS_SRIOV_VF(xe)) {
@@ -578,17 +697,29 @@ int xe_device_probe_early(struct xe_device *xe)
{
int err;
- err = xe_mmio_init(xe);
+ err = xe_mmio_probe_early(xe);
if (err)
return err;
xe_sriov_probe_early(xe);
- update_device_info(xe);
+ sriov_update_device_info(xe);
err = xe_pcode_probe_early(xe);
- if (err)
- return err;
+ if (err) {
+ int save_err = err;
+
+ /*
+ * Try to leave the device in survivability mode if possible,
+ * but still return the previous error for error propagation.
+ */
+ err = xe_survivability_mode_enable(xe);
+ if (err)
+ return err;
+
+ return save_err;
+ }
err = wait_for_lmem_ready(xe);
if (err)
@@ -623,6 +754,7 @@ static int probe_has_flat_ccs(struct xe_device *xe)
"Flat CCS has been disabled in bios, May lead to performance impact");
xe_force_wake_put(gt_to_fw(gt), fw_ref);
+
return 0;
}
@@ -631,7 +763,6 @@ int xe_device_probe(struct xe_device *xe)
struct xe_tile *tile;
struct xe_gt *gt;
int err;
- u8 last_gt;
u8 id;
xe_pat_init_early(xe);
@@ -641,9 +772,6 @@ int xe_device_probe(struct xe_device *xe)
return err;
xe->info.mem_region_mask = 1;
- err = xe_display_init_nommio(xe);
- if (err)
- return err;
err = xe_set_dma_info(xe);
if (err)
@@ -653,7 +781,9 @@ int xe_device_probe(struct xe_device *xe)
if (err)
return err;
- xe_ttm_sys_mgr_init(xe);
+ err = xe_ttm_sys_mgr_init(xe);
+ if (err)
+ return err;
for_each_gt(gt, xe, id) {
err = xe_gt_init_early(gt);
@@ -695,34 +825,32 @@ int xe_device_probe(struct xe_device *xe)
err = xe_devcoredump_init(xe);
if (err)
return err;
- err = devm_add_action_or_reset(xe->drm.dev, xe_driver_flr_fini, xe);
- if (err)
- return err;
- err = xe_display_init_noirq(xe);
+ /*
+ * From here on, if a step fails, make sure a Driver-FLR is triggered.
+ */
+ err = devm_add_action_or_reset(xe->drm.dev, xe_driver_flr_fini, xe);
if (err)
return err;
- err = xe_irq_install(xe);
- if (err)
- goto err;
-
err = probe_has_flat_ccs(xe);
if (err)
- goto err;
+ return err;
err = xe_vram_probe(xe);
if (err)
- goto err;
+ return err;
for_each_tile(tile, xe, id) {
err = xe_tile_init_noalloc(tile);
if (err)
- goto err;
+ return err;
}
/* Allocate and map stolen after potential VRAM resize */
- xe_ttm_stolen_mgr_init(xe);
+ err = xe_ttm_stolen_mgr_init(xe);
+ if (err)
+ return err;
/*
* Now that GT is initialized (TTM in particular),
@@ -730,39 +858,61 @@ int xe_device_probe(struct xe_device *xe)
* This is the reason the first allocation needs to be done
* inside display.
*/
- err = xe_display_init_noaccel(xe);
+ err = xe_display_init_early(xe);
if (err)
- goto err;
+ return err;
- for_each_gt(gt, xe, id) {
- last_gt = id;
+ for_each_tile(tile, xe, id) {
+ err = xe_tile_init(tile);
+ if (err)
+ return err;
+ }
+ err = xe_irq_install(xe);
+ if (err)
+ return err;
+
+ for_each_gt(gt, xe, id) {
err = xe_gt_init(gt);
if (err)
- goto err_fini_gt;
+ return err;
}
- xe_heci_gsc_init(xe);
+ err = xe_heci_gsc_init(xe);
+ if (err)
+ return err;
err = xe_oa_init(xe);
if (err)
- goto err_fini_gt;
+ return err;
err = xe_display_init(xe);
if (err)
- goto err_fini_oa;
+ return err;
+
+ err = xe_pxp_init(xe);
+ if (err)
+ return err;
err = drm_dev_register(&xe->drm, 0);
if (err)
- goto err_fini_display;
+ return err;
xe_display_register(xe);
- xe_oa_register(xe);
+ err = xe_oa_register(xe);
+ if (err)
+ goto err_unregister_display;
+
+ err = xe_pmu_register(&xe->pmu);
+ if (err)
+ goto err_unregister_display;
xe_debugfs_register(xe);
- xe_hwmon_register(xe);
+ err = xe_hwmon_register(xe);
+ if (err)
+ goto err_unregister_display;
for_each_gt(gt, xe, id)
xe_gt_sanitize_freq(gt);
@@ -771,50 +921,17 @@ int xe_device_probe(struct xe_device *xe)
return devm_add_action_or_reset(xe->drm.dev, xe_device_sanitize, xe);
-err_fini_display:
- xe_display_driver_remove(xe);
-
-err_fini_oa:
- xe_oa_fini(xe);
-
-err_fini_gt:
- for_each_gt(gt, xe, id) {
- if (id < last_gt)
- xe_gt_remove(gt);
- else
- break;
- }
+err_unregister_display:
+ xe_display_unregister(xe);
-err:
- xe_display_fini(xe);
return err;
}
-static void xe_device_remove_display(struct xe_device *xe)
+void xe_device_remove(struct xe_device *xe)
{
xe_display_unregister(xe);
drm_dev_unplug(&xe->drm);
- xe_display_driver_remove(xe);
-}
-
-void xe_device_remove(struct xe_device *xe)
-{
- struct xe_gt *gt;
- u8 id;
-
- xe_oa_unregister(xe);
-
- xe_device_remove_display(xe);
-
- xe_display_fini(xe);
-
- xe_oa_fini(xe);
-
- xe_heci_gsc_fini(xe);
-
- for_each_gt(gt, xe, id)
- xe_gt_remove(gt);
}
void xe_device_shutdown(struct xe_device *xe)
@@ -1003,7 +1120,8 @@ static void xe_device_wedged_fini(struct drm_device *drm, void *arg)
* re-probe (unbind + bind).
* In this state every IOCTL will be blocked so the GT cannot be used.
* In general it will be called upon any critical error such as gt reset
- * failure or guc loading failure.
+ * failure or guc loading failure. Userspace will be notified of this state
+ * through a device wedged uevent.
* If xe.wedged module parameter is set to 2, this function will be called
* on every single execution timeout (a.k.a. GPU hang) right after devcoredump
* snapshot capture. In this mode, GT reset won't be attempted so the state of
@@ -1033,6 +1151,10 @@ void xe_device_declare_wedged(struct xe_device *xe)
"IOCTLs and executions are blocked. Only a rebind may clear the failure\n"
"Please file a _new_ bug report at https://gitlab.freedesktop.org/drm/xe/kernel/issues/new\n",
dev_name(xe->drm.dev));
+
+ /* Notify userspace of wedged device */
+ drm_dev_wedged_event(&xe->drm,
+ DRM_WEDGE_RECOVERY_REBIND | DRM_WEDGE_RECOVERY_BUS_RESET);
}
for_each_gt(gt, xe, id)