author     Matt Roper <matthew.d.roper@intel.com>   2023-06-01 14:52:19 -0700
committer  Rodrigo Vivi <rodrigo.vivi@intel.com>    2023-12-19 18:34:11 -0500
commit     3b0d4a5579968f1c42044142a4997bab9fe7ffed (patch)
tree       c8351f2fe7c3e997bc555ce935158b76a4f5a305 /drivers/gpu/drm/xe/xe_mmio.h
parent     3643e6371542cc4782d3700f07130c9d250666d8 (diff)
drm/xe: Move register MMIO into xe_tile
Each tile has its own register region in the BAR, containing instances of
all registers for the platform. In contrast, the multiple GTs within a tile
share the same MMIO space; there's just a small subset of registers (the GSI
registers) which have multiple copies at different offsets (0x0 for the
primary GT, 0x380000 for the media GT).

Move the register MMIO region size/pointers to the tile structure, leaving
just the GSI offset information in the GT structure.

Reviewed-by: Lucas De Marchi <lucas.demarchi@intel.com>
Link: https://lore.kernel.org/r/20230601215244.678611-7-matthew.d.roper@intel.com
Signed-off-by: Matt Roper <matthew.d.roper@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
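To make the split concrete, here is a rough sketch of which MMIO state becomes per-tile and which stays per-GT after this patch. It is illustrative only: the field names mirror the helpers in the diff below, but these are not the actual xe_tile/xe_gt definitions from xe_device_types.h and xe_gt_types.h.

/* Illustrative sketch only -- not the real struct layout. */
struct tile_mmio_sketch {
        size_t size;            /* size of this tile's register region in the BAR          */
        void __iomem *regs;     /* ioremapped base; every read/write resolves against it   */
};

struct gt_mmio_sketch {
        u32 adj_limit;          /* register offsets below this are per-GT GSI copies       */
        u32 adj_offset;         /* GSI offset: 0x0 for the primary GT, 0x380000 for media  */
};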
Diffstat (limited to 'drivers/gpu/drm/xe/xe_mmio.h')
-rw-r--r--  drivers/gpu/drm/xe/xe_mmio.h | 21 ++++++++++++++++-----
1 file changed, 16 insertions(+), 5 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_mmio.h b/drivers/gpu/drm/xe/xe_mmio.h
index da91729a3854..0ba7aa790f0b 100644
--- a/drivers/gpu/drm/xe/xe_mmio.h
+++ b/drivers/gpu/drm/xe/xe_mmio.h
@@ -10,6 +10,7 @@
 #include <linux/io-64-nonatomic-lo-hi.h>
 
 #include "regs/xe_reg_defs.h"
+#include "xe_device_types.h"
 #include "xe_gt_types.h"
 
 struct drm_device;
@@ -22,27 +23,33 @@ int xe_mmio_init(struct xe_device *xe);
 
 static inline u8 xe_mmio_read8(struct xe_gt *gt, struct xe_reg reg)
 {
+        struct xe_tile *tile = gt_to_tile(gt);
+
         if (reg.addr < gt->mmio.adj_limit)
                 reg.addr += gt->mmio.adj_offset;
 
-        return readb(gt->mmio.regs + reg.addr);
+        return readb(tile->mmio.regs + reg.addr);
 }
 
 static inline void xe_mmio_write32(struct xe_gt *gt,
                                    struct xe_reg reg, u32 val)
 {
+        struct xe_tile *tile = gt_to_tile(gt);
+
         if (reg.addr < gt->mmio.adj_limit)
                 reg.addr += gt->mmio.adj_offset;
 
-        writel(val, gt->mmio.regs + reg.addr);
+        writel(val, tile->mmio.regs + reg.addr);
 }
 
 static inline u32 xe_mmio_read32(struct xe_gt *gt, struct xe_reg reg)
 {
+        struct xe_tile *tile = gt_to_tile(gt);
+
         if (reg.addr < gt->mmio.adj_limit)
                 reg.addr += gt->mmio.adj_offset;
 
-        return readl(gt->mmio.regs + reg.addr);
+        return readl(tile->mmio.regs + reg.addr);
 }
 
 static inline u32 xe_mmio_rmw32(struct xe_gt *gt, struct xe_reg reg, u32 clr,
@@ -60,18 +67,22 @@ static inline u32 xe_mmio_rmw32(struct xe_gt *gt, struct xe_reg reg, u32 clr,
 static inline void xe_mmio_write64(struct xe_gt *gt,
                                    struct xe_reg reg, u64 val)
 {
+        struct xe_tile *tile = gt_to_tile(gt);
+
         if (reg.addr < gt->mmio.adj_limit)
                 reg.addr += gt->mmio.adj_offset;
 
-        writeq(val, gt->mmio.regs + reg.addr);
+        writeq(val, tile->mmio.regs + reg.addr);
 }
 
 static inline u64 xe_mmio_read64(struct xe_gt *gt, struct xe_reg reg)
 {
+        struct xe_tile *tile = gt_to_tile(gt);
+
         if (reg.addr < gt->mmio.adj_limit)
                 reg.addr += gt->mmio.adj_offset;
 
-        return readq(gt->mmio.regs + reg.addr);
+        return readq(tile->mmio.regs + reg.addr);
 }
 
 static inline int xe_mmio_write32_and_verify(struct xe_gt *gt,
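As a usage note, the address arithmetic these helpers perform can be modelled in a few lines of plain C. This is a toy model only: the 0x380000 media-GT GSI offset comes from the commit message, while the adj_limit value (0x10000) and the register offset (0x1234) are made-up placeholders, not values taken from the driver.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Toy model of the GSI adjustment done in xe_mmio_read32() and friends. */
struct toy_gt {
        uint32_t adj_limit;     /* placeholder; the real limit is platform-specific  */
        uint32_t adj_offset;    /* 0x0 for the primary GT, 0x380000 for the media GT */
};

/* Returns the offset that would be applied to the tile's register mapping. */
static uint32_t toy_resolve(const struct toy_gt *gt, uint32_t addr)
{
        if (addr < gt->adj_limit)
                addr += gt->adj_offset;
        return addr;
}

int main(void)
{
        struct toy_gt primary = { .adj_limit = 0x10000, .adj_offset = 0x0 };
        struct toy_gt media   = { .adj_limit = 0x10000, .adj_offset = 0x380000 };

        /* A made-up GSI register at offset 0x1234. */
        printf("primary GT -> tile regs + 0x%" PRIx32 "\n", toy_resolve(&primary, 0x1234)); /* 0x1234   */
        printf("media GT   -> tile regs + 0x%" PRIx32 "\n", toy_resolve(&media, 0x1234));   /* 0x381234 */
        return 0;
}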