summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/drm/xe/xe_mmio.h
diff options
context:
space:
mode:
authorMatthew Brost <matthew.brost@intel.com>2023-03-30 17:31:57 -0400
committerRodrigo Vivi <rodrigo.vivi@intel.com>2023-12-12 14:05:48 -0500
commitdd08ebf6c3525a7ea2186e636df064ea47281987 (patch)
tree1d4f1667a09c9bfd36a98746d88858aecc677fe0 /drivers/gpu/drm/xe/xe_mmio.h
parenta60501d7c2d3e70b3545b9b96576628e369d8e85 (diff)
downloadlinux-stable-dd08ebf6c3525a7ea2186e636df064ea47281987.tar.gz
linux-stable-dd08ebf6c3525a7ea2186e636df064ea47281987.tar.bz2
linux-stable-dd08ebf6c3525a7ea2186e636df064ea47281987.zip
drm/xe: Introduce a new DRM driver for Intel GPUs
Xe, is a new driver for Intel GPUs that supports both integrated and discrete platforms starting with Tiger Lake (first Intel Xe Architecture). The code is at a stage where it is already functional and has experimental support for multiple platforms starting from Tiger Lake, with initial support implemented in Mesa (for Iris and Anv, our OpenGL and Vulkan drivers), as well as in NEO (for OpenCL and Level0). The new Xe driver leverages a lot from i915. As for display, the intent is to share the display code with the i915 driver so that there is maximum reuse there. But it is not added in this patch. This initial work is a collaboration of many people and unfortunately the big squashed patch won't fully honor the proper credits. But let's get some git quick stats so we can at least try to preserve some of the credits: Co-developed-by: Matthew Brost <matthew.brost@intel.com> Co-developed-by: Matthew Auld <matthew.auld@intel.com> Co-developed-by: Matt Roper <matthew.d.roper@intel.com> Co-developed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com> Co-developed-by: Francois Dugast <francois.dugast@intel.com> Co-developed-by: Lucas De Marchi <lucas.demarchi@intel.com> Co-developed-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com> Co-developed-by: Philippe Lecluse <philippe.lecluse@intel.com> Co-developed-by: Nirmoy Das <nirmoy.das@intel.com> Co-developed-by: Jani Nikula <jani.nikula@intel.com> Co-developed-by: José Roberto de Souza <jose.souza@intel.com> Co-developed-by: Rodrigo Vivi <rodrigo.vivi@intel.com> Co-developed-by: Dave Airlie <airlied@redhat.com> Co-developed-by: Faith Ekstrand <faith.ekstrand@collabora.com> Co-developed-by: Daniel Vetter <daniel.vetter@ffwll.ch> Co-developed-by: Mauro Carvalho Chehab <mchehab@kernel.org> Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com> Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Diffstat (limited to 'drivers/gpu/drm/xe/xe_mmio.h')
-rw-r--r--drivers/gpu/drm/xe/xe_mmio.h110
1 file changed, 110 insertions, 0 deletions
diff --git a/drivers/gpu/drm/xe/xe_mmio.h b/drivers/gpu/drm/xe/xe_mmio.h
new file mode 100644
index 000000000000..09d24467096f
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_mmio.h
@@ -0,0 +1,110 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef _XE_MMIO_H_
+#define _XE_MMIO_H_
+
+#include <linux/delay.h>
+
+#include "xe_gt_types.h"
+
+/*
+ * FIXME: This header has been deemed evil and we need to kill it. Temporarily
+ * including so we can use 'wait_for' and unblock initial development. A
+ * follow-up should replace 'wait_for' with a sane version and drop including
+ * this header.
+ */
+#include "i915_utils.h"
+
+struct drm_device;
+struct drm_file;
+struct xe_device;
+
+int xe_mmio_init(struct xe_device *xe);
+
+static inline u8 xe_mmio_read8(struct xe_gt *gt, u32 reg)
+{
+ if (reg < gt->mmio.adj_limit)
+ reg += gt->mmio.adj_offset;
+
+ return readb(gt->mmio.regs + reg);
+}
+
+static inline void xe_mmio_write32(struct xe_gt *gt,
+ u32 reg, u32 val)
+{
+ if (reg < gt->mmio.adj_limit)
+ reg += gt->mmio.adj_offset;
+
+ writel(val, gt->mmio.regs + reg);
+}
+
+static inline u32 xe_mmio_read32(struct xe_gt *gt, u32 reg)
+{
+ if (reg < gt->mmio.adj_limit)
+ reg += gt->mmio.adj_offset;
+
+ return readl(gt->mmio.regs + reg);
+}
+
+static inline u32 xe_mmio_rmw32(struct xe_gt *gt, u32 reg, u32 mask,
+ u32 val)
+{
+ u32 old, reg_val;
+
+ old = xe_mmio_read32(gt, reg);
+ reg_val = (old & mask) | val;
+ xe_mmio_write32(gt, reg, reg_val);
+
+ return old;
+}
+
+static inline void xe_mmio_write64(struct xe_gt *gt,
+ u32 reg, u64 val)
+{
+ if (reg < gt->mmio.adj_limit)
+ reg += gt->mmio.adj_offset;
+
+ writeq(val, gt->mmio.regs + reg);
+}
+
+static inline u64 xe_mmio_read64(struct xe_gt *gt, u32 reg)
+{
+ if (reg < gt->mmio.adj_limit)
+ reg += gt->mmio.adj_offset;
+
+ return readq(gt->mmio.regs + reg);
+}
+
+static inline int xe_mmio_write32_and_verify(struct xe_gt *gt,
+ u32 reg, u32 val,
+ u32 mask, u32 eval)
+{
+ u32 reg_val;
+
+ xe_mmio_write32(gt, reg, val);
+ reg_val = xe_mmio_read32(gt, reg);
+
+ return (reg_val & mask) != eval ? -EINVAL : 0;
+}
+
+static inline int xe_mmio_wait32(struct xe_gt *gt,
+ u32 reg, u32 val,
+ u32 mask, u32 timeout_ms)
+{
+ return wait_for((xe_mmio_read32(gt, reg) & mask) == val,
+ timeout_ms);
+}
+
+int xe_mmio_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+
+static inline bool xe_mmio_in_range(const struct xe_mmio_range *range, u32 reg)
+{
+ return range && reg >= range->start && reg <= range->end;
+}
+
+int xe_mmio_probe_vram(struct xe_device *xe);
+
+#endif