author    Thomas Zimmermann <tzimmermann@suse.de>    2023-06-01 11:31:39 +0200
committer Thomas Zimmermann <tzimmermann@suse.de>    2023-06-01 11:31:39 +0200
commit    33675759a5fa150fb2815089fefe8e5d039354a6 (patch)
tree      b12943b1ecac1c31aa7559663027502aab7f4f98 /drivers
parent    54df4868fb728bebbb0fb7c3f187eba383e922b5 (diff)
parent    85d712f033d23bb56a373e29465470c036532d46 (diff)
Merge drm/drm-next into drm-misc-next
Backmerging from drm-next to get commit e24e6d695377 ("drm/i915/display: Implement fb_mmap callback function").
Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
Diffstat (limited to 'drivers')
-rw-r--r--drivers/block/nbd.c4
-rw-r--r--drivers/block/rnbd/rnbd-proto.h2
-rw-r--r--drivers/block/ublk_drv.c2
-rw-r--r--drivers/cxl/core/pci.c1
-rw-r--r--drivers/firewire/net.c21
-rw-r--r--drivers/firmware/sysfb_simplefb.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c46
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nv.c22
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc21.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c65
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h1
-rw-r--r--drivers/gpu/drm/amd/pm/amdgpu_dpm.c25
-rw-r--r--drivers/gpu/drm/ast/ast_main.c9
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c16
-rw-r--r--drivers/gpu/drm/drm_mipi_dsi.c2
-rw-r--r--drivers/gpu/drm/i915/Kconfig12
-rw-r--r--drivers/gpu/drm/i915/Makefile6
-rw-r--r--drivers/gpu/drm/i915/display/intel_atomic_plane.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.c5
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpt.c14
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb.c9
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbdev.c38
-rw-r--r--drivers/gpu/drm/i915/display/intel_plane_initial.c4
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_domain.c61
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c15
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_mman.c148
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_mman.h2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object.c60
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object.h10
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object_types.h68
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_pages.c5
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_region.h4
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_shmem.c9
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_shrinker.c2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_stolen.c12
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_ttm.h3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c13
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/huge_pages.c108
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c18
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c2
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c2
-rw-r--r--drivers/gpu/drm/i915/gt/gen6_ppgtt.c10
-rw-r--r--drivers/gpu/drm/i915/gt/gen8_ppgtt.c84
-rw-r--r--drivers/gpu/drm/i915/gt/gen8_ppgtt.h3
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context.c5
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context.h8
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context_types.h2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_cs.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_types.h1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_user.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ggtt.c84
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_irq.c25
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_pm.c4
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c5
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_regs.h14
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c35
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gtt.c47
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gtt.h36
-rw-r--r--drivers/gpu/drm/i915/gt/intel_lrc.c4
-rw-r--r--drivers/gpu/drm/i915/gt/intel_migrate.c51
-rw-r--r--drivers/gpu/drm/i915/gt/intel_migrate.h13
-rw-r--r--drivers/gpu/drm/i915/gt/intel_mocs.c70
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ppgtt.c4
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rc6.c167
-rw-r--r--drivers/gpu/drm/i915/gt/intel_workarounds.c56
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_engine_pm.c3
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_migrate.c47
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_mocs.c3
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_reset.c8
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_slpc.c42
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_timeline.c2
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_tlb.c4
-rw-r--r--drivers/gpu/drm/i915/gt/uc/abi/guc_errors_abi.h1
-rw-r--r--drivers/gpu/drm/i915/gt/uc/guc_capture_fwif.h20
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c23
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.h1
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_gsc_proxy.c424
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_gsc_proxy.h18
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c76
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.h17
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c102
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.h27
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc.c7
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc.h1
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c36
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c269
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_capture.h3
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c59
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h6
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c12
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h11
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c40
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h1
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_slpc_types.h1
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c75
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc.c16
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc.h1
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c238
-rw-r--r--drivers/gpu/drm/i915/gvt/aperture_gm.c2
-rw-r--r--drivers/gpu/drm/i915/i915_active.h14
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c53
-rw-r--r--drivers/gpu/drm/i915/i915_drm_client.c6
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h462
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c27
-rw-r--r--drivers/gpu/drm/i915/i915_getparam.c7
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c153
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.h10
-rw-r--r--drivers/gpu/drm/i915/i915_hwmon.c87
-rw-r--r--drivers/gpu/drm/i915/i915_hwmon.h7
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c17
-rw-r--r--drivers/gpu/drm/i915/i915_pci.c87
-rw-r--r--drivers/gpu/drm/i915/i915_perf.c1
-rw-r--r--drivers/gpu/drm/i915/i915_perf_oa_regs.h4
-rw-r--r--drivers/gpu/drm/i915/i915_pmu.c290
-rw-r--r--drivers/gpu/drm/i915/i915_pmu.h28
-rw-r--r--drivers/gpu/drm/i915/i915_request.h52
-rw-r--r--drivers/gpu/drm/i915/i915_scatterlist.h9
-rw-r--r--drivers/gpu/drm/i915/i915_utils.h2
-rw-r--r--drivers/gpu/drm/i915/i915_vma.c16
-rw-r--r--drivers/gpu/drm/i915/i915_vma.h4
-rw-r--r--drivers/gpu/drm/i915/i915_vma_resource.h46
-rw-r--r--drivers/gpu/drm/i915/i915_vma_types.h2
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.h5
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp.c102
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp.h2
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_43.h24
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c6
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.c444
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.h43
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_pm.c3
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_regs.h27
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_session.c25
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_tee.c2
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_types.h24
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem.c5
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_evict.c8
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_gtt.c15
-rw-r--r--drivers/gpu/drm/i915/selftests/igt_live_test.c47
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_memory_region.c4
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_gem_device.c9
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_gtt.c8
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/if0012.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/uoutp.c2
-rw-r--r--drivers/gpu/drm/scheduler/sched_main.c2
-rw-r--r--drivers/hwmon/k10temp.c1
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c3
-rw-r--r--drivers/media/platform/nxp/imx8-isi/imx8-isi-core.c6
-rw-r--r--drivers/media/platform/nxp/imx8-isi/imx8-isi-hw.c41
-rw-r--r--drivers/media/platform/renesas/rcar-vin/rcar-dma.c21
-rw-r--r--drivers/misc/mei/Kconfig2
-rw-r--r--drivers/misc/mei/Makefile1
-rw-r--r--drivers/misc/mei/gsc_proxy/Kconfig14
-rw-r--r--drivers/misc/mei/gsc_proxy/Makefile7
-rw-r--r--drivers/misc/mei/gsc_proxy/mei_gsc_proxy.c208
-rw-r--r--drivers/net/bonding/bond_netlink.c7
-rw-r--r--drivers/net/bonding/bond_options.c8
-rw-r--r--drivers/net/ethernet/google/gve/gve_main.c13
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed.c2
-rw-r--r--drivers/net/ethernet/mscc/vsc7514_regs.c18
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4.h1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c5
-rw-r--r--drivers/net/ipvlan/ipvlan_core.c6
-rw-r--r--drivers/net/mdio/mdio-mvusb.c11
-rw-r--r--drivers/net/pcs/pcs-xpcs.c2
-rw-r--r--drivers/net/phy/bcm-phy-lib.h5
-rw-r--r--drivers/net/phy/bcm7xxx.c2
-rw-r--r--drivers/net/tap.c4
-rw-r--r--drivers/platform/mellanox/mlxbf-tmfifo.c11
-rw-r--r--drivers/platform/x86/hp/hp-wmi.c1
-rw-r--r--drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c6
-rw-r--r--drivers/platform/x86/intel_scu_pcidrv.c1
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c24
-rw-r--r--drivers/platform/x86/touchscreen_dmi.c41
-rw-r--r--drivers/ufs/core/ufshcd.c10
-rw-r--r--drivers/video/fbdev/68328fb.c12
-rw-r--r--drivers/video/fbdev/arcfb.c15
-rw-r--r--drivers/video/fbdev/atmel_lcdfb.c2
-rw-r--r--drivers/video/fbdev/cg14.c2
-rw-r--r--drivers/video/fbdev/controlfb.c34
-rw-r--r--drivers/video/fbdev/core/modedb.c5
-rw-r--r--drivers/video/fbdev/g364fb.c6
-rw-r--r--drivers/video/fbdev/hgafb.c36
-rw-r--r--drivers/video/fbdev/hpfb.c8
-rw-r--r--drivers/video/fbdev/imsttfb.c15
-rw-r--r--drivers/video/fbdev/macfb.c10
-rw-r--r--drivers/video/fbdev/maxinefb.c2
-rw-r--r--drivers/video/fbdev/p9100.c4
-rw-r--r--drivers/video/fbdev/platinumfb.c30
-rw-r--r--drivers/video/fbdev/sa1100fb.c32
-rw-r--r--drivers/video/fbdev/stifb.c1
-rw-r--r--drivers/video/fbdev/valkyriefb.c14
-rw-r--r--drivers/video/fbdev/vfb.c10
201 files changed, 4655 insertions, 1488 deletions
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 9c35c958f2c8..65ecde3e2a5b 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -1666,7 +1666,7 @@ static int nbd_dev_dbg_init(struct nbd_device *nbd)
return -EIO;
dir = debugfs_create_dir(nbd_name(nbd), nbd_dbg_dir);
- if (!dir) {
+ if (IS_ERR(dir)) {
dev_err(nbd_to_dev(nbd), "Failed to create debugfs dir for '%s'\n",
nbd_name(nbd));
return -EIO;
@@ -1692,7 +1692,7 @@ static int nbd_dbg_init(void)
struct dentry *dbg_dir;
dbg_dir = debugfs_create_dir("nbd", NULL);
- if (!dbg_dir)
+ if (IS_ERR(dbg_dir))
return -EIO;
nbd_dbg_dir = dbg_dir;
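Both nbd hunks above hinge on the same kernel convention: debugfs_create_dir() reports failure through an encoded error pointer, never NULL, so a NULL check silently passes on failure. Below is a minimal user-space sketch of that ERR_PTR/IS_ERR convention; the helpers are re-implemented here purely for illustration.

#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	/* error pointers occupy the top 4095 values of the address space */
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	void *dir = ERR_PTR(-12); /* what a failing debugfs call hands back */

	if (!dir)
		printf("NULL check fires\n"); /* never taken: dir is non-NULL */
	if (IS_ERR(dir))
		printf("IS_ERR catches error %ld\n", PTR_ERR(dir)); /* taken */
	return 0;
}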
diff --git a/drivers/block/rnbd/rnbd-proto.h b/drivers/block/rnbd/rnbd-proto.h
index ea7ac8bca63c..da1d0542d7e2 100644
--- a/drivers/block/rnbd/rnbd-proto.h
+++ b/drivers/block/rnbd/rnbd-proto.h
@@ -241,7 +241,7 @@ static inline blk_opf_t rnbd_to_bio_flags(u32 rnbd_opf)
bio_opf = REQ_OP_WRITE;
break;
case RNBD_OP_FLUSH:
- bio_opf = REQ_OP_FLUSH | REQ_PREFLUSH;
+ bio_opf = REQ_OP_WRITE | REQ_PREFLUSH;
break;
case RNBD_OP_DISCARD:
bio_opf = REQ_OP_DISCARD;
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index c7331f519750..c7ed5d69e9ee 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -1281,7 +1281,7 @@ static inline int ublk_check_cmd_op(u32 cmd_op)
{
u32 ioc_type = _IOC_TYPE(cmd_op);
- if (IS_ENABLED(CONFIG_BLKDEV_UBLK_LEGACY_OPCODES) && ioc_type != 'u')
+ if (!IS_ENABLED(CONFIG_BLKDEV_UBLK_LEGACY_OPCODES) && ioc_type != 'u')
return -EOPNOTSUPP;
if (ioc_type != 'u' && ioc_type != 0)
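The ublk change above fixes an inverted predicate: the old test rejected legacy (non-'u') opcodes exactly when CONFIG_BLKDEV_UBLK_LEGACY_OPCODES was enabled, the opposite of the intent. A hedged sketch of the corrected gating, with the config option reduced to a plain bool and the errno value inlined:

#include <stdbool.h>
#include <stdio.h>

static int check_cmd_op(bool legacy_opcodes_enabled, char ioc_type)
{
	/* legacy (non-'u') opcodes are only rejected when support is off */
	if (!legacy_opcodes_enabled && ioc_type != 'u')
		return -95; /* -EOPNOTSUPP */
	return 0;
}

int main(void)
{
	printf("%d\n", check_cmd_op(true, 0));  /* 0: legacy opcode accepted */
	printf("%d\n", check_cmd_op(false, 0)); /* -95: legacy support is off */
	return 0;
}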
diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c
index bdbd907884ce..f332fe7af92b 100644
--- a/drivers/cxl/core/pci.c
+++ b/drivers/cxl/core/pci.c
@@ -571,6 +571,7 @@ void read_cdat_data(struct cxl_port *port)
/* Don't leave table data allocated on error */
devm_kfree(dev, cdat_table);
dev_err(dev, "CDAT data read error\n");
+ return;
}
port->cdat.table = cdat_table + sizeof(__le32);
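The one-line cxl fix above closes a subtle error path: the table was freed on a failed read, but control then fell through to the assignment that publishes it. A sketch of the hazard, using hypothetical stand-ins for the devm-managed kernel helpers:

#include <stdio.h>
#include <stdlib.h>

struct port { char *table; };

static int read_blob(char *buf, size_t len)
{
	(void)buf; (void)len;
	return -5; /* simulate an -EIO read failure */
}

static void read_data(struct port *port)
{
	size_t len = 64;
	char *table = malloc(len);

	if (!table)
		return;
	if (read_blob(table, len)) {
		free(table); /* don't leave table data allocated on error */
		fprintf(stderr, "data read error\n");
		return;      /* without this, the publish below dangles */
	}
	port->table = table + sizeof(int); /* skip the leading header word */
}

int main(void)
{
	struct port port = { 0 };

	read_data(&port);
	printf("table %s published\n", port.table ? "was" : "was not");
	return 0;
}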
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
index af22be84034b..538bd677c254 100644
--- a/drivers/firewire/net.c
+++ b/drivers/firewire/net.c
@@ -706,21 +706,22 @@ static void fwnet_receive_packet(struct fw_card *card, struct fw_request *r,
int rcode;
if (destination == IEEE1394_ALL_NODES) {
- kfree(r);
-
- return;
- }
-
- if (offset != dev->handler.offset)
+ // Although the response to the broadcast packet is not necessarily required, the
+ // fw_send_response() function should still be called to maintain the reference
+ // counting of the object. In this case, the call simply decrements the
+ // reference count, releasing the object as a result.
+ rcode = RCODE_COMPLETE;
+ } else if (offset != dev->handler.offset) {
rcode = RCODE_ADDRESS_ERROR;
- else if (tcode != TCODE_WRITE_BLOCK_REQUEST)
+ } else if (tcode != TCODE_WRITE_BLOCK_REQUEST) {
rcode = RCODE_TYPE_ERROR;
- else if (fwnet_incoming_packet(dev, payload, length,
- source, generation, false) != 0) {
+ } else if (fwnet_incoming_packet(dev, payload, length,
+ source, generation, false) != 0) {
dev_err(&dev->netdev->dev, "incoming packet failure\n");
rcode = RCODE_CONFLICT_ERROR;
- } else
+ } else {
rcode = RCODE_COMPLETE;
+ }
fw_send_response(card, r, rcode);
}
diff --git a/drivers/firmware/sysfb_simplefb.c b/drivers/firmware/sysfb_simplefb.c
index 82c64cb9f531..74363ed7501f 100644
--- a/drivers/firmware/sysfb_simplefb.c
+++ b/drivers/firmware/sysfb_simplefb.c
@@ -51,7 +51,8 @@ __init bool sysfb_parse_mode(const struct screen_info *si,
*
* It's not easily possible to fix this in struct screen_info,
* as this could break UAPI. The best solution is to compute
- * bits_per_pixel here and ignore lfb_depth. In the loop below,
+ * bits_per_pixel from the color bits, reserved bits and
+ * reported lfb_depth, whichever is highest. In the loop below,
* ignore simplefb formats with alpha bits, as EFI and VESA
* don't specify alpha channels.
*/
@@ -60,6 +61,7 @@ __init bool sysfb_parse_mode(const struct screen_info *si,
si->green_size + si->green_pos,
si->blue_size + si->blue_pos),
si->rsvd_size + si->rsvd_pos);
+ bits_per_pixel = max_t(u32, bits_per_pixel, si->lfb_depth);
} else {
bits_per_pixel = si->lfb_depth;
}
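The sysfb change above stops trusting the channel geometry alone: bits_per_pixel is now the maximum of the computed color/reserved-bit extent and the firmware-reported lfb_depth. A worked sketch with hypothetical screen_info values, where the channels only account for 30 bits of a 32-bit pixel:

#include <stdio.h>

static unsigned int max_u32(unsigned int a, unsigned int b)
{
	return a > b ? a : b;
}

int main(void)
{
	/* size/position pairs, as in struct screen_info (illustrative mode) */
	unsigned int red_size = 10, red_pos = 20;
	unsigned int green_size = 10, green_pos = 10;
	unsigned int blue_size = 10, blue_pos = 0;
	unsigned int rsvd_size = 0, rsvd_pos = 0;
	unsigned int lfb_depth = 32; /* firmware-reported depth */

	unsigned int bpp = max_u32(max_u32(red_size + red_pos,
					   max_u32(green_size + green_pos,
						   blue_size + blue_pos)),
				   rsvd_size + rsvd_pos);

	printf("from channels: %u\n", bpp);  /* 30: too small */
	bpp = max_u32(bpp, lfb_depth);       /* the added clamp */
	printf("bits_per_pixel: %u\n", bpp); /* 32 */
	return 0;
}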
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 981a9cfb63b5..5c7d40873ee2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -3757,6 +3757,12 @@ int amdgpu_device_init(struct amdgpu_device *adev,
adev->have_atomics_support = ((struct amd_sriov_msg_pf2vf_info *)
adev->virt.fw_reserve.p_pf2vf)->pcie_atomic_ops_support_flags ==
(PCI_EXP_DEVCAP2_ATOMIC_COMP32 | PCI_EXP_DEVCAP2_ATOMIC_COMP64);
+ /* APUs from gfx9 onwards don't rely on PCIe atomics; their internal
+ * path natively supports atomics, so set have_atomics_support to true.
+ */
+ else if ((adev->flags & AMD_IS_APU) &&
+ (adev->ip_versions[GC_HWIP][0] > IP_VERSION(9, 0, 0)))
+ adev->have_atomics_support = true;
else
adev->have_atomics_support =
!pci_enable_atomic_ops_to_root(adev->pdev,
@@ -4506,7 +4512,11 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
dev_info(adev->dev, "recover vram bo from shadow start\n");
mutex_lock(&adev->shadow_list_lock);
list_for_each_entry(vmbo, &adev->shadow_list, shadow_list) {
- shadow = &vmbo->bo;
+ /* If vm is compute context or adev is APU, shadow will be NULL */
+ if (!vmbo->shadow)
+ continue;
+ shadow = vmbo->shadow;
+
/* No need to recover an evicted BO */
if (shadow->tbo.resource->mem_type != TTM_PL_TT ||
shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 9d3a0542c996..f3f541ba0aca 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -687,9 +687,11 @@ int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *r
if (r)
return r;
- r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0);
- if (r)
- goto late_fini;
+ if (adev->gfx.cp_ecc_error_irq.funcs) {
+ r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0);
+ if (r)
+ goto late_fini;
+ }
} else {
amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index a9da0486467a..f5c376276984 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -1315,13 +1315,6 @@ static int gfx_v11_0_sw_init(void *handle)
if (r)
return r;
- /* ECC error */
- r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
- GFX_11_0_0__SRCID__CP_ECC_ERROR,
- &adev->gfx.cp_ecc_error_irq);
- if (r)
- return r;
-
/* FED error */
r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GFX,
GFX_11_0_0__SRCID__RLC_GC_FED_INTERRUPT,
@@ -4444,7 +4437,6 @@ static int gfx_v11_0_hw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int r;
- amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
@@ -5897,36 +5889,6 @@ static void gfx_v11_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev
}
}
-#define CP_ME1_PIPE_INST_ADDR_INTERVAL 0x1
-#define SET_ECC_ME_PIPE_STATE(reg_addr, state) \
- do { \
- uint32_t tmp = RREG32_SOC15_IP(GC, reg_addr); \
- tmp = REG_SET_FIELD(tmp, CP_ME1_PIPE0_INT_CNTL, CP_ECC_ERROR_INT_ENABLE, state); \
- WREG32_SOC15_IP(GC, reg_addr, tmp); \
- } while (0)
-
-static int gfx_v11_0_set_cp_ecc_error_state(struct amdgpu_device *adev,
- struct amdgpu_irq_src *source,
- unsigned type,
- enum amdgpu_interrupt_state state)
-{
- uint32_t ecc_irq_state = 0;
- uint32_t pipe0_int_cntl_addr = 0;
- int i = 0;
-
- ecc_irq_state = (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0;
-
- pipe0_int_cntl_addr = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE0_INT_CNTL);
-
- WREG32_FIELD15_PREREG(GC, 0, CP_INT_CNTL_RING0, CP_ECC_ERROR_INT_ENABLE, ecc_irq_state);
-
- for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++)
- SET_ECC_ME_PIPE_STATE(pipe0_int_cntl_addr + i * CP_ME1_PIPE_INST_ADDR_INTERVAL,
- ecc_irq_state);
-
- return 0;
-}
-
static int gfx_v11_0_set_eop_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned type,
@@ -6341,11 +6303,6 @@ static const struct amdgpu_irq_src_funcs gfx_v11_0_priv_inst_irq_funcs = {
.process = gfx_v11_0_priv_inst_irq,
};
-static const struct amdgpu_irq_src_funcs gfx_v11_0_cp_ecc_error_irq_funcs = {
- .set = gfx_v11_0_set_cp_ecc_error_state,
- .process = amdgpu_gfx_cp_ecc_error_irq,
-};
-
static const struct amdgpu_irq_src_funcs gfx_v11_0_rlc_gc_fed_irq_funcs = {
.process = gfx_v11_0_rlc_gc_fed_irq,
};
@@ -6361,9 +6318,6 @@ static void gfx_v11_0_set_irq_funcs(struct amdgpu_device *adev)
adev->gfx.priv_inst_irq.num_types = 1;
adev->gfx.priv_inst_irq.funcs = &gfx_v11_0_priv_inst_irq_funcs;
- adev->gfx.cp_ecc_error_irq.num_types = 1; /* CP ECC error */
- adev->gfx.cp_ecc_error_irq.funcs = &gfx_v11_0_cp_ecc_error_irq_funcs;
-
adev->gfx.rlc_gc_fed_irq.num_types = 1; /* 0x80 FED error */
adev->gfx.rlc_gc_fed_irq.funcs = &gfx_v11_0_rlc_gc_fed_irq_funcs;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index adbcd8127c82..f46d4b18a3fa 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -3764,7 +3764,8 @@ static int gfx_v9_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0);
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
+ amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
index c55e09432e26..1c2292cc5f2c 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
@@ -54,6 +54,7 @@ static int jpeg_v3_0_early_init(void *handle)
switch (adev->ip_versions[UVD_HWIP][0]) {
case IP_VERSION(3, 1, 1):
+ case IP_VERSION(3, 1, 2):
break;
default:
harvest = RREG32_SOC15(JPEG, 0, mmCC_UVD_HARVESTING);
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
index 98c826f1f89b..0fb6013441f0 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c
@@ -98,6 +98,16 @@ static const struct amdgpu_video_codecs nv_video_codecs_decode =
};
/* Sienna Cichlid */
+static const struct amdgpu_video_codec_info sc_video_codecs_encode_array[] = {
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2160, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 7680, 4352, 0)},
+};
+
+static const struct amdgpu_video_codecs sc_video_codecs_encode = {
+ .codec_count = ARRAY_SIZE(sc_video_codecs_encode_array),
+ .codec_array = sc_video_codecs_encode_array,
+};
+
static const struct amdgpu_video_codec_info sc_video_codecs_decode_array_vcn0[] =
{
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
@@ -136,8 +146,8 @@ static const struct amdgpu_video_codecs sc_video_codecs_decode_vcn1 =
/* SRIOV Sienna Cichlid, not const since data is controlled by host */
static struct amdgpu_video_codec_info sriov_sc_video_codecs_encode_array[] =
{
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2160, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 7680, 4352, 0)},
};
static struct amdgpu_video_codec_info sriov_sc_video_codecs_decode_array_vcn0[] =
@@ -237,12 +247,12 @@ static int nv_query_video_codecs(struct amdgpu_device *adev, bool encode,
} else {
if (adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0) {
if (encode)
- *codecs = &nv_video_codecs_encode;
+ *codecs = &sc_video_codecs_encode;
else
*codecs = &sc_video_codecs_decode_vcn1;
} else {
if (encode)
- *codecs = &nv_video_codecs_encode;
+ *codecs = &sc_video_codecs_encode;
else
*codecs = &sc_video_codecs_decode_vcn0;
}
@@ -251,14 +261,14 @@ static int nv_query_video_codecs(struct amdgpu_device *adev, bool encode,
case IP_VERSION(3, 0, 16):
case IP_VERSION(3, 0, 2):
if (encode)
- *codecs = &nv_video_codecs_encode;
+ *codecs = &sc_video_codecs_encode;
else
*codecs = &sc_video_codecs_decode_vcn0;
return 0;
case IP_VERSION(3, 1, 1):
case IP_VERSION(3, 1, 2):
if (encode)
- *codecs = &nv_video_codecs_encode;
+ *codecs = &sc_video_codecs_encode;
else
*codecs = &yc_video_codecs_decode;
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index b3cc04dd8653..9295ac7edd56 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -1917,9 +1917,11 @@ static int sdma_v4_0_hw_fini(void *handle)
return 0;
}
- for (i = 0; i < adev->sdma.num_instances; i++) {
- amdgpu_irq_put(adev, &adev->sdma.ecc_irq,
- AMDGPU_SDMA_IRQ_INSTANCE0 + i);
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA)) {
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ amdgpu_irq_put(adev, &adev->sdma.ecc_irq,
+ AMDGPU_SDMA_IRQ_INSTANCE0 + i);
+ }
}
sdma_v4_0_ctx_switch_enable(adev, false);
diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c
index 744be2a05623..d77162536514 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc21.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc21.c
@@ -711,7 +711,7 @@ static int soc21_common_early_init(void *handle)
AMD_PG_SUPPORT_VCN_DPG |
AMD_PG_SUPPORT_GFX_PG |
AMD_PG_SUPPORT_JPEG;
- adev->external_rev_id = adev->rev_id + 0x1;
+ adev->external_rev_id = adev->rev_id + 0x80;
break;
default:
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c
index 40c488b26901..cc3fe9cac5b5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c
@@ -423,3 +423,68 @@ void dcn314_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool
PERF_TRACE();
}
+static void apply_symclk_on_tx_off_wa(struct dc_link *link)
+{
+ /* There are use cases where SYMCLK is referenced by OTG. For instance,
+ * for a TMDS signal, OTG relies on SYMCLK even if the TX video output is
+ * off. However, the current link interface powers off the PHY when
+ * disabling link output, which also turns off the SYMCLK generated by
+ * the PHY. The workaround is to identify the case where SYMCLK is still
+ * in use by OTG when we power off the PHY. When this is detected, we
+ * temporarily power the PHY back on and move its SYMCLK state to
+ * SYMCLK_ON_TX_OFF by calling the program_pix_clk interface. When OTG is
+ * disabled, we then power off the PHY by calling disable link output
+ * again.
+ *
+ * In future dcn generations, we plan to rework the transmitter control
+ * interface so that we have an option to set the SYMCLK_ON_TX_OFF
+ * state in one step, without this workaround.
+ */
+
+ struct dc *dc = link->ctx->dc;
+ struct pipe_ctx *pipe_ctx = NULL;
+ uint8_t i;
+
+ if (link->phy_state.symclk_ref_cnts.otg > 0) {
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe_ctx->stream && pipe_ctx->stream->link == link && pipe_ctx->top_pipe == NULL) {
+ pipe_ctx->clock_source->funcs->program_pix_clk(
+ pipe_ctx->clock_source,
+ &pipe_ctx->stream_res.pix_clk_params,
+ dc->link_srv->dp_get_encoding_format(
+ &pipe_ctx->link_config.dp_link_settings),
+ &pipe_ctx->pll_settings);
+ link->phy_state.symclk_state = SYMCLK_ON_TX_OFF;
+ break;
+ }
+ }
+ }
+}
+
+void dcn314_disable_link_output(struct dc_link *link,
+ const struct link_resource *link_res,
+ enum signal_type signal)
+{
+ struct dc *dc = link->ctx->dc;
+ const struct link_hwss *link_hwss = get_link_hwss(link, link_res);
+ struct dmcu *dmcu = dc->res_pool->dmcu;
+
+ if (signal == SIGNAL_TYPE_EDP &&
+ link->dc->hwss.edp_backlight_control)
+ link->dc->hwss.edp_backlight_control(link, false);
+ else if (dmcu != NULL && dmcu->funcs->lock_phy)
+ dmcu->funcs->lock_phy(dmcu);
+
+ link_hwss->disable_link_output(link, link_res, signal);
+ link->phy_state.symclk_state = SYMCLK_OFF_TX_OFF;
+ /*
+ * Add the logic to extract BOTH power up and power down sequences
+ * from enable/disable link output and only call edp panel control
+ * in enable_link_dp and disable_link_dp once.
+ */
+ if (dmcu != NULL && dmcu->funcs->lock_phy)
+ dmcu->funcs->unlock_phy(dmcu);
+ dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY);
+
+ apply_symclk_on_tx_off_wa(link);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h
index c786d5e6a428..6d0b62503caa 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h
@@ -45,4 +45,6 @@ void dcn314_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool
void dcn314_dpp_root_clock_control(struct dce_hwseq *hws, unsigned int dpp_inst, bool clock_on);
+void dcn314_disable_link_output(struct dc_link *link, const struct link_resource *link_res, enum signal_type signal);
+
#endif /* __DC_HWSS_DCN314_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c
index 5267e901a35c..a588f46b166f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c
@@ -105,7 +105,7 @@ static const struct hw_sequencer_funcs dcn314_funcs = {
.enable_lvds_link_output = dce110_enable_lvds_link_output,
.enable_tmds_link_output = dce110_enable_tmds_link_output,
.enable_dp_link_output = dce110_enable_dp_link_output,
- .disable_link_output = dce110_disable_link_output,
+ .disable_link_output = dcn314_disable_link_output,
.z10_restore = dcn31_z10_restore,
.z10_save_init = dcn31_z10_save_init,
.set_disp_pattern_generator = dcn30_set_disp_pattern_generator,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
index 13c7e7394b1c..d75248b6cae9 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
@@ -810,7 +810,8 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
v->SwathHeightY[k],
v->SwathHeightC[k],
TWait,
- v->DRAMSpeedPerState[mode_lib->vba.VoltageLevel] <= MEM_STROBE_FREQ_MHZ ?
+ (v->DRAMSpeedPerState[mode_lib->vba.VoltageLevel] <= MEM_STROBE_FREQ_MHZ ||
+ v->DCFCLKPerState[mode_lib->vba.VoltageLevel] <= MIN_DCFCLK_FREQ_MHZ) ?
mode_lib->vba.ip.min_prefetch_in_strobe_us : 0,
/* Output */
&v->DSTXAfterScaler[k],
@@ -3310,7 +3311,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
v->swath_width_chroma_ub_this_state[k],
v->SwathHeightYThisState[k],
v->SwathHeightCThisState[k], v->TWait,
- v->DRAMSpeedPerState[i] <= MEM_STROBE_FREQ_MHZ ?
+ (v->DRAMSpeedPerState[i] <= MEM_STROBE_FREQ_MHZ || v->DCFCLKState[i][j] <= MIN_DCFCLK_FREQ_MHZ) ?
mode_lib->vba.ip.min_prefetch_in_strobe_us : 0,
/* Output */
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h
index 500b3dd6052d..d98e36a9a09c 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h
@@ -53,6 +53,7 @@
#define BPP_BLENDED_PIPE 0xffffffff
#define MEM_STROBE_FREQ_MHZ 1600
+#define MIN_DCFCLK_FREQ_MHZ 200
#define MEM_STROBE_MAX_DELIVERY_TIME_US 60.0
struct display_mode_lib;
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
index 300e156b924f..078aaaa53162 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
@@ -36,6 +36,8 @@
#define amdgpu_dpm_enable_bapm(adev, e) \
((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e)))
+#define amdgpu_dpm_is_legacy_dpm(adev) ((adev)->powerplay.pp_handle == (adev))
+
int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
@@ -1460,15 +1462,24 @@ int amdgpu_dpm_get_smu_prv_buf_details(struct amdgpu_device *adev,
int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev)
{
- struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
- struct smu_context *smu = adev->powerplay.pp_handle;
+ if (is_support_sw_smu(adev)) {
+ struct smu_context *smu = adev->powerplay.pp_handle;
- if ((is_support_sw_smu(adev) && smu->od_enabled) ||
- (is_support_sw_smu(adev) && smu->is_apu) ||
- (!is_support_sw_smu(adev) && hwmgr->od_enabled))
- return true;
+ return (smu->od_enabled || smu->is_apu);
+ } else {
+ struct pp_hwmgr *hwmgr;
- return false;
+ /*
+ * dpm on some legacy ASICs doesn't carry an od_enabled member,
+ * as its pp_handle is cast directly from adev.
+ */
+ if (amdgpu_dpm_is_legacy_dpm(adev))
+ return false;
+
+ hwmgr = (struct pp_hwmgr *)adev->powerplay.pp_handle;
+
+ return hwmgr->od_enabled;
+ }
}
int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev,
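The new amdgpu_dpm_is_legacy_dpm() macro above leans on a pointer-identity trick: legacy powerplay code stores the adev pointer itself as pp_handle, so comparing the two identifies a handle that has no od_enabled member to dereference. A hedged user-space sketch of the same idea, with illustrative stand-in types:

#include <stdbool.h>
#include <stdio.h>

struct hwmgr { bool od_enabled; };

struct device {
	void *pp_handle; /* a struct hwmgr *, or the device itself on legacy */
};

static bool is_legacy_dpm(struct device *dev)
{
	return dev->pp_handle == dev;
}

static bool od_supported(struct device *dev)
{
	if (is_legacy_dpm(dev))
		return false; /* no od_enabled member behind this handle */
	return ((struct hwmgr *)dev->pp_handle)->od_enabled;
}

int main(void)
{
	struct hwmgr mgr = { .od_enabled = true };
	struct device modern = { .pp_handle = &mgr };
	struct device legacy;

	legacy.pp_handle = &legacy;
	printf("%d %d\n", od_supported(&modern), od_supported(&legacy)); /* 1 0 */
	return 0;
}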
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index 794ffd4a29c5..f32ce29edba7 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -425,11 +425,12 @@ struct ast_device *ast_device_create(const struct drm_driver *drv,
return ERR_PTR(-EIO);
/*
- * If we don't have IO space at all, use MMIO now and
- * assume the chip has MMIO enabled by default (rev 0x20
- * and higher).
+ * After AST2500, MMIO is enabled by default, and it should be used
+ * to stay compatible with Arm platforms.
*/
- if (!(pci_resource_flags(pdev, 2) & IORESOURCE_IO)) {
+ if (pdev->revision >= 0x40) {
+ ast->ioregs = ast->regs + AST_IO_MM_OFFSET;
+ } else if (!(pci_resource_flags(pdev, 2) & IORESOURCE_IO)) {
drm_info(dev, "platform has no IO space, trying MMIO\n");
ast->ioregs = ast->regs + AST_IO_MM_OFFSET;
}
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 933fc78c373b..f0e9549b6bd7 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -641,19 +641,27 @@ static void drm_fb_helper_damage(struct drm_fb_helper *helper, u32 x, u32 y,
static void drm_fb_helper_memory_range_to_clip(struct fb_info *info, off_t off, size_t len,
struct drm_rect *clip)
{
+ u32 line_length = info->fix.line_length;
+ u32 fb_height = info->var.yres;
off_t end = off + len;
u32 x1 = 0;
- u32 y1 = off / info->fix.line_length;
+ u32 y1 = off / line_length;
u32 x2 = info->var.xres;
- u32 y2 = DIV_ROUND_UP(end, info->fix.line_length);
+ u32 y2 = DIV_ROUND_UP(end, line_length);
+
+ /* Don't allow either bound beyond the bottom of the display area */
+ if (y1 > fb_height)
+ y1 = fb_height;
+ if (y2 > fb_height)
+ y2 = fb_height;
if ((y2 - y1) == 1) {
/*
* We've only written to a single scanline. Try to reduce
* the number of horizontal pixels that need an update.
*/
- off_t bit_off = (off % info->fix.line_length) * 8;
- off_t bit_end = (end % info->fix.line_length) * 8;
+ off_t bit_off = (off % line_length) * 8;
+ off_t bit_end = (end % line_length) * 8;
x1 = bit_off / info->var.bits_per_pixel;
x2 = DIV_ROUND_UP(bit_end, info->var.bits_per_pixel);
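The drm_fb_helper hunk above converts a written byte range into a scanline clip rectangle and, new in this change, clamps both row bounds to the visible height, so writes into padding past the last scanline can no longer produce a damage rectangle below the display area. A small sketch of the row computation, with illustrative geometry:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long line_length = 4096; /* bytes per scanline */
	unsigned long yres = 768;         /* visible scanlines */
	unsigned long off = 4096UL * 800; /* a write past the visible area */
	unsigned long end = off + 64;

	unsigned long y1 = off / line_length;
	unsigned long y2 = DIV_ROUND_UP(end, line_length);

	/* the added clamp: never extend the clip below the display bottom */
	if (y1 > yres)
		y1 = yres;
	if (y2 > yres)
		y2 = yres;

	printf("clip rows [%lu, %lu)\n", y1, y2); /* [768, 768): empty */
	return 0;
}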
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
index 295382cd09b0..3fd6c733ff4e 100644
--- a/drivers/gpu/drm/drm_mipi_dsi.c
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -221,7 +221,7 @@ mipi_dsi_device_register_full(struct mipi_dsi_host *host,
return dsi;
}
- dsi->dev.of_node = info->node;
+ device_set_node(&dsi->dev, of_fwnode_handle(info->node));
dsi->channel = info->channel;
strlcpy(dsi->name, info->type, sizeof(dsi->name));
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index 06a0ca157e89..e4f4d2e3fdfe 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -62,10 +62,11 @@ config DRM_I915_FORCE_PROBE
This is the default value for the i915.force_probe module
parameter. Using the module parameter overrides this option.
- Force probe the i915 for Intel graphics devices that are
- recognized but not properly supported by this kernel version. It is
- recommended to upgrade to a kernel version with proper support as soon
- as it is available.
+ Force probe the i915 driver for Intel graphics devices that are
+ recognized but not properly supported by this kernel version. Force
+ probing an unsupported device taints the kernel. It is recommended to
+ upgrade to a kernel version with proper support as soon as it is
+ available.
It can also be used to block the probe of recognized and fully
supported devices.
@@ -75,7 +76,8 @@ config DRM_I915_FORCE_PROBE
Use "<pci-id>[,<pci-id>,...]" to force probe the i915 for listed
devices. For example, "4500" or "4500,4571".
- Use "*" to force probe the driver for all known devices.
+ Use "*" to force probe the driver for all known devices. Not
+ recommended.
Use "!" right before the ID to block the probe of the device. For
example, "4500,!4571" forces the probe of 4500 and blocks the probe of
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 97b0d4ae221a..c58d7b193664 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -194,6 +194,7 @@ i915-y += \
# general-purpose microcontroller (GuC) support
i915-y += \
gt/uc/intel_gsc_fw.o \
+ gt/uc/intel_gsc_proxy.o \
gt/uc/intel_gsc_uc.o \
gt/uc/intel_gsc_uc_heci_cmd_submit.o\
gt/uc/intel_guc.o \
@@ -338,6 +339,7 @@ i915-y += \
i915-$(CONFIG_DRM_I915_PXP) += \
pxp/intel_pxp_cmd.o \
pxp/intel_pxp_debugfs.o \
+ pxp/intel_pxp_gsccs.o \
pxp/intel_pxp_irq.o \
pxp/intel_pxp_pm.o \
pxp/intel_pxp_session.o
@@ -373,7 +375,7 @@ obj-$(CONFIG_DRM_I915_GVT_KVMGT) += kvmgt.o
#
# Enable locally for CONFIG_DRM_I915_WERROR=y. See also scripts/Makefile.build
ifdef CONFIG_DRM_I915_WERROR
- cmd_checkdoc = $(srctree)/scripts/kernel-doc -none $<
+ cmd_checkdoc = $(srctree)/scripts/kernel-doc -none -Werror $<
endif
# header test
@@ -388,7 +390,7 @@ always-$(CONFIG_DRM_I915_WERROR) += \
quiet_cmd_hdrtest = HDRTEST $(patsubst %.hdrtest,%.h,$@)
cmd_hdrtest = $(CC) $(filter-out $(CFLAGS_GCOV), $(c_flags)) -S -o /dev/null -x c /dev/null -include $<; \
- $(srctree)/scripts/kernel-doc -none $<; touch $@
+ $(srctree)/scripts/kernel-doc -none -Werror $<; touch $@
$(obj)/%.hdrtest: $(src)/%.h FORCE
$(call if_changed_dep,hdrtest)
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
index 40de9f0f171b..f33164b10292 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
@@ -1028,7 +1028,7 @@ intel_prepare_plane_fb(struct drm_plane *_plane,
int ret;
if (old_obj) {
- const struct intel_crtc_state *crtc_state =
+ const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state,
to_intel_crtc(old_plane_state->hw.crtc));
@@ -1043,7 +1043,7 @@ intel_prepare_plane_fb(struct drm_plane *_plane,
* This should only fail upon a hung GPU, in which case we
* can safely continue.
*/
- if (intel_crtc_needs_modeset(crtc_state)) {
+ if (new_crtc_state && intel_crtc_needs_modeset(new_crtc_state)) {
ret = i915_sw_fence_await_reservation(&state->commit_ready,
old_obj->base.resv,
false, 0,
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index f0bace9d98a1..529ee22be872 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -1601,6 +1601,11 @@ int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
pipe_config->dsc.slice_count =
drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
true);
+ if (!pipe_config->dsc.slice_count) {
+ drm_dbg_kms(&dev_priv->drm, "Unsupported Slice Count %d\n",
+ pipe_config->dsc.slice_count);
+ return -EINVAL;
+ }
} else {
u16 dsc_max_output_bpp = 0;
u8 dsc_dp_slice_count;
diff --git a/drivers/gpu/drm/i915/display/intel_dpt.c b/drivers/gpu/drm/i915/display/intel_dpt.c
index b8027392144d..7c5fddb203ba 100644
--- a/drivers/gpu/drm/i915/display/intel_dpt.c
+++ b/drivers/gpu/drm/i915/display/intel_dpt.c
@@ -43,24 +43,24 @@ static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
static void dpt_insert_page(struct i915_address_space *vm,
dma_addr_t addr,
u64 offset,
- enum i915_cache_level level,
+ unsigned int pat_index,
u32 flags)
{
struct i915_dpt *dpt = i915_vm_to_dpt(vm);
gen8_pte_t __iomem *base = dpt->iomem;
gen8_set_pte(base + offset / I915_GTT_PAGE_SIZE,
- vm->pte_encode(addr, level, flags));
+ vm->pte_encode(addr, pat_index, flags));
}
static void dpt_insert_entries(struct i915_address_space *vm,
struct i915_vma_resource *vma_res,
- enum i915_cache_level level,
+ unsigned int pat_index,
u32 flags)
{
struct i915_dpt *dpt = i915_vm_to_dpt(vm);
gen8_pte_t __iomem *base = dpt->iomem;
- const gen8_pte_t pte_encode = vm->pte_encode(0, level, flags);
+ const gen8_pte_t pte_encode = vm->pte_encode(0, pat_index, flags);
struct sgt_iter sgt_iter;
dma_addr_t addr;
int i;
@@ -83,7 +83,7 @@ static void dpt_clear_range(struct i915_address_space *vm,
static void dpt_bind_vma(struct i915_address_space *vm,
struct i915_vm_pt_stash *stash,
struct i915_vma_resource *vma_res,
- enum i915_cache_level cache_level,
+ unsigned int pat_index,
u32 flags)
{
u32 pte_flags;
@@ -98,7 +98,7 @@ static void dpt_bind_vma(struct i915_address_space *vm,
if (vma_res->bi.lmem)
pte_flags |= PTE_LM;
- vm->insert_entries(vm, vma_res, cache_level, pte_flags);
+ vm->insert_entries(vm, vma_res, pat_index, pte_flags);
vma_res->page_sizes_gtt = I915_GTT_PAGE_SIZE;
@@ -300,7 +300,7 @@ intel_dpt_create(struct intel_framebuffer *fb)
vm->vma_ops.bind_vma = dpt_bind_vma;
vm->vma_ops.unbind_vma = dpt_unbind_vma;
- vm->pte_encode = gen8_ggtt_pte_encode;
+ vm->pte_encode = vm->gt->ggtt->vm.pte_encode;
dpt->obj = dpt_obj;
dpt->obj->is_dpt = true;
diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c
index e5f637897b5e..c004f08fcfe1 100644
--- a/drivers/gpu/drm/i915/display/intel_fb.c
+++ b/drivers/gpu/drm/i915/display/intel_fb.c
@@ -1190,7 +1190,8 @@ bool intel_fb_needs_pot_stride_remap(const struct intel_framebuffer *fb)
{
struct drm_i915_private *i915 = to_i915(fb->base.dev);
- return IS_ALDERLAKE_P(i915) && intel_fb_uses_dpt(&fb->base);
+ return (IS_ALDERLAKE_P(i915) || DISPLAY_VER(i915) >= 14) &&
+ intel_fb_uses_dpt(&fb->base);
}
static int intel_fb_pitch(const struct intel_framebuffer *fb, int color_plane, unsigned int rotation)
@@ -1326,9 +1327,10 @@ plane_view_scanout_stride(const struct intel_framebuffer *fb, int color_plane,
unsigned int tile_width,
unsigned int src_stride_tiles, unsigned int dst_stride_tiles)
{
+ struct drm_i915_private *i915 = to_i915(fb->base.dev);
unsigned int stride_tiles;
- if (IS_ALDERLAKE_P(to_i915(fb->base.dev)))
+ if (IS_ALDERLAKE_P(i915) || DISPLAY_VER(i915) >= 14)
stride_tiles = src_stride_tiles;
else
stride_tiles = dst_stride_tiles;
@@ -1522,7 +1524,8 @@ static void intel_fb_view_init(struct drm_i915_private *i915, struct intel_fb_vi
memset(view, 0, sizeof(*view));
view->gtt.type = view_type;
- if (view_type == I915_GTT_VIEW_REMAPPED && IS_ALDERLAKE_P(i915))
+ if (view_type == I915_GTT_VIEW_REMAPPED &&
+ (IS_ALDERLAKE_P(i915) || DISPLAY_VER(i915) >= 14))
view->gtt.remapped.plane_alignment = SZ_2M / PAGE_SIZE;
}
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c
index 673bcdfb7ff6..aab1ae74a8f7 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.c
@@ -40,8 +40,10 @@
#include <drm/drm_crtc.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include "gem/i915_gem_lmem.h"
+#include "gem/i915_gem_mman.h"
#include "i915_drv.h"
#include "intel_display_types.h"
@@ -67,6 +69,11 @@ struct intel_fbdev {
struct mutex hpd_lock;
};
+static struct intel_fbdev *to_intel_fbdev(struct drm_fb_helper *fb_helper)
+{
+ return container_of(fb_helper, struct intel_fbdev, helper);
+}
+
static struct intel_frontbuffer *to_frontbuffer(struct intel_fbdev *ifbdev)
{
return ifbdev->fb->frontbuffer;
@@ -79,9 +86,7 @@ static void intel_fbdev_invalidate(struct intel_fbdev *ifbdev)
static int intel_fbdev_set_par(struct fb_info *info)
{
- struct drm_fb_helper *fb_helper = info->par;
- struct intel_fbdev *ifbdev =
- container_of(fb_helper, struct intel_fbdev, helper);
+ struct intel_fbdev *ifbdev = to_intel_fbdev(info->par);
int ret;
ret = drm_fb_helper_set_par(info);
@@ -93,9 +98,7 @@ static int intel_fbdev_set_par(struct fb_info *info)
static int intel_fbdev_blank(int blank, struct fb_info *info)
{
- struct drm_fb_helper *fb_helper = info->par;
- struct intel_fbdev *ifbdev =
- container_of(fb_helper, struct intel_fbdev, helper);
+ struct intel_fbdev *ifbdev = to_intel_fbdev(info->par);
int ret;
ret = drm_fb_helper_blank(blank, info);
@@ -108,9 +111,7 @@ static int intel_fbdev_blank(int blank, struct fb_info *info)
static int intel_fbdev_pan_display(struct fb_var_screeninfo *var,
struct fb_info *info)
{
- struct drm_fb_helper *fb_helper = info->par;
- struct intel_fbdev *ifbdev =
- container_of(fb_helper, struct intel_fbdev, helper);
+ struct intel_fbdev *ifbdev = to_intel_fbdev(info->par);
int ret;
ret = drm_fb_helper_pan_display(var, info);
@@ -120,6 +121,15 @@ static int intel_fbdev_pan_display(struct fb_var_screeninfo *var,
return ret;
}
+static int intel_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma)
+{
+ struct intel_fbdev *fbdev = to_intel_fbdev(info->par);
+ struct drm_gem_object *bo = drm_gem_fb_get_obj(&fbdev->fb->base, 0);
+ struct drm_i915_gem_object *obj = to_intel_bo(bo);
+
+ return i915_gem_fb_mmap(obj, vma);
+}
+
static const struct fb_ops intelfb_ops = {
.owner = THIS_MODULE,
DRM_FB_HELPER_DEFAULT_OPS,
@@ -131,13 +141,13 @@ static const struct fb_ops intelfb_ops = {
.fb_imageblit = drm_fb_helper_cfb_imageblit,
.fb_pan_display = intel_fbdev_pan_display,
.fb_blank = intel_fbdev_blank,
+ .fb_mmap = intel_fbdev_mmap,
};
static int intelfb_alloc(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
- struct intel_fbdev *ifbdev =
- container_of(helper, struct intel_fbdev, helper);
+ struct intel_fbdev *ifbdev = to_intel_fbdev(helper);
struct drm_framebuffer *fb;
struct drm_device *dev = helper->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -163,7 +173,8 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
obj = ERR_PTR(-ENODEV);
if (HAS_LMEM(dev_priv)) {
obj = i915_gem_object_create_lmem(dev_priv, size,
- I915_BO_ALLOC_CONTIGUOUS);
+ I915_BO_ALLOC_CONTIGUOUS |
+ I915_BO_ALLOC_USER);
} else {
/*
* If the FB is too big, just don't use it since fbdev is not very
@@ -193,8 +204,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
static int intelfb_create(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
- struct intel_fbdev *ifbdev =
- container_of(helper, struct intel_fbdev, helper);
+ struct intel_fbdev *ifbdev = to_intel_fbdev(helper);
struct intel_framebuffer *intel_fb = ifbdev->fb;
struct drm_device *dev = helper->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
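The intel_fbdev hunks above wire the new .fb_mmap callback to i915_gem_fb_mmap(), which is the commit this backmerge exists to pick up. From user space, the effect is that the fbdev framebuffer can be mapped directly; a hedged usage sketch, assuming the i915 fbdev console is exposed as /dev/fb0:

#include <fcntl.h>
#include <linux/fb.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	struct fb_fix_screeninfo fix;
	int fd = open("/dev/fb0", O_RDWR);

	if (fd < 0 || ioctl(fd, FBIOGET_FSCREENINFO, &fix) < 0)
		return 1;

	/* this mmap lands in intel_fbdev_mmap() -> i915_gem_fb_mmap() */
	void *fb = mmap(NULL, fix.smem_len, PROT_READ | PROT_WRITE,
			MAP_SHARED, fd, 0);
	if (fb == MAP_FAILED)
		return 1;

	memset(fb, 0xff, fix.line_length); /* paint the first scanline white */
	munmap(fb, fix.smem_len);
	close(fd);
	return 0;
}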
diff --git a/drivers/gpu/drm/i915/display/intel_plane_initial.c b/drivers/gpu/drm/i915/display/intel_plane_initial.c
index bb6ea7de5c61..736072a8b2b0 100644
--- a/drivers/gpu/drm/i915/display/intel_plane_initial.c
+++ b/drivers/gpu/drm/i915/display/intel_plane_initial.c
@@ -110,7 +110,9 @@ initial_plane_vma(struct drm_i915_private *i915,
size * 2 > i915->dsm.usable_size)
return NULL;
- obj = i915_gem_object_create_region_at(mem, phys_base, size, 0);
+ obj = i915_gem_object_create_region_at(mem, phys_base, size,
+ I915_BO_ALLOC_USER |
+ I915_BO_PREALLOC);
if (IS_ERR(obj))
return NULL;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
index d2d5a24301b2..dfaaa8b66ac3 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
@@ -27,8 +27,15 @@ static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj)
if (IS_DGFX(i915))
return false;
- return !(obj->cache_level == I915_CACHE_NONE ||
- obj->cache_level == I915_CACHE_WT);
+ /*
+ * For objects created by userspace through GEM_CREATE with pat_index
+ * set by set_pat extension, i915_gem_object_has_cache_level() will
+ * always return true, because the coherency of such an object is managed
+ * by userspace. Otherwise the call here would fall back to checking
+ * whether the object is un-cached or write-through.
+ */
+ return !(i915_gem_object_has_cache_level(obj, I915_CACHE_NONE) ||
+ i915_gem_object_has_cache_level(obj, I915_CACHE_WT));
}
bool i915_gem_cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
@@ -267,7 +274,13 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
{
int ret;
- if (obj->cache_level == cache_level)
+ /*
+ * For objects created by userspace through GEM_CREATE with pat_index
+ * set by set_pat extension, simply return 0 here without touching
+ * the cache setting, because such objects should have an immutable
+ * cache setting by design and is always managed by userspace.
+ */
+ if (i915_gem_object_has_cache_level(obj, cache_level))
return 0;
ret = i915_gem_object_wait(obj,
@@ -278,10 +291,8 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
return ret;
/* Always invalidate stale cachelines */
- if (obj->cache_level != cache_level) {
- i915_gem_object_set_cache_coherency(obj, cache_level);
- obj->cache_dirty = true;
- }
+ i915_gem_object_set_cache_coherency(obj, cache_level);
+ obj->cache_dirty = true;
/* The cache-level will be applied when each vma is rebound. */
return i915_gem_object_unbind(obj,
@@ -306,20 +317,22 @@ int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
goto out;
}
- switch (obj->cache_level) {
- case I915_CACHE_LLC:
- case I915_CACHE_L3_LLC:
- args->caching = I915_CACHING_CACHED;
- break;
+ /*
+ * This ioctl should be disabled for the objects with pat_index
+ * set by user space.
+ */
+ if (obj->pat_set_by_user) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
- case I915_CACHE_WT:
+ if (i915_gem_object_has_cache_level(obj, I915_CACHE_LLC) ||
+ i915_gem_object_has_cache_level(obj, I915_CACHE_L3_LLC))
+ args->caching = I915_CACHING_CACHED;
+ else if (i915_gem_object_has_cache_level(obj, I915_CACHE_WT))
args->caching = I915_CACHING_DISPLAY;
- break;
-
- default:
+ else
args->caching = I915_CACHING_NONE;
- break;
- }
out:
rcu_read_unlock();
return err;
@@ -337,6 +350,9 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
if (IS_DGFX(i915))
return -ENODEV;
+ if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70))
+ return -EOPNOTSUPP;
+
switch (args->caching) {
case I915_CACHING_NONE:
level = I915_CACHE_NONE;
@@ -365,6 +381,15 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
return -ENOENT;
/*
+ * This ioctl should be disabled for the objects with pat_index
+ * set by user space.
+ */
+ if (obj->pat_set_by_user) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ /*
* The caching mode of proxy object is handled by its generator, and
* not allowed to be changed by userspace.
*/
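The i915_gem_domain changes above are part of a wider switch from a per-object cache_level to a hardware PAT index: the object now stores pat_index, cache-level queries go through i915_gem_object_has_cache_level(), and objects whose PAT index was set directly by user space bypass kernel-side coherency checks entirely. A hedged sketch of that lookup-and-bypass shape; the table values are illustrative, not real PAT encodings:

#include <stdbool.h>
#include <stdio.h>

enum cache_level { CACHE_NONE, CACHE_LLC, CACHE_L3_LLC, CACHE_WT, MAX_CACHE_LEVEL };

/* per-platform translation table, analogous to cachelevel_to_pat */
static const unsigned int cachelevel_to_pat[MAX_CACHE_LEVEL] = {
	[CACHE_NONE] = 3, [CACHE_LLC] = 0, [CACHE_L3_LLC] = 0, [CACHE_WT] = 2,
};

struct gem_object {
	unsigned int pat_index;
	bool pat_set_by_user;
};

static bool has_cache_level(const struct gem_object *obj, enum cache_level lvl)
{
	if (obj->pat_set_by_user)
		return true; /* coherency is userspace-managed; skip the check */
	return obj->pat_index == cachelevel_to_pat[lvl];
}

int main(void)
{
	struct gem_object obj = { .pat_index = 3, .pat_set_by_user = false };

	printf("%d\n", has_cache_level(&obj, CACHE_NONE)); /* 1: table match */
	obj.pat_set_by_user = true;
	printf("%d\n", has_cache_level(&obj, CACHE_WT));   /* 1: user-managed */
	return 0;
}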
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 3aeede6aee4d..5fb459ea4294 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -640,9 +640,15 @@ static inline int use_cpu_reloc(const struct reloc_cache *cache,
if (DBG_FORCE_RELOC == FORCE_GTT_RELOC)
return false;
+ /*
+ * For objects created by userspace through GEM_CREATE with pat_index
+ * set by set_pat extension, i915_gem_object_has_cache_level() always
+ * returns true; otherwise the call would fall back to checking whether
+ * the object is un-cached.
+ */
return (cache->has_llc ||
obj->cache_dirty ||
- obj->cache_level != I915_CACHE_NONE);
+ !i915_gem_object_has_cache_level(obj, I915_CACHE_NONE));
}
static int eb_reserve_vma(struct i915_execbuffer *eb,
@@ -1324,7 +1330,10 @@ static void *reloc_iomap(struct i915_vma *batch,
if (drm_mm_node_allocated(&cache->node)) {
ggtt->vm.insert_page(&ggtt->vm,
i915_gem_object_get_dma_address(obj, page),
- offset, I915_CACHE_NONE, 0);
+ offset,
+ i915_gem_get_pat_index(ggtt->vm.i915,
+ I915_CACHE_NONE),
+ 0);
} else {
offset += page << PAGE_SHIFT;
}
@@ -1464,7 +1473,7 @@ eb_relocate_entry(struct i915_execbuffer *eb,
reloc_cache_unmap(&eb->reloc_cache);
mutex_lock(&vma->vm->mutex);
err = i915_vma_bind(target->vma,
- target->vma->obj->cache_level,
+ target->vma->obj->pat_index,
PIN_GLOBAL, NULL, NULL);
mutex_unlock(&vma->vm->mutex);
reloc_cache_remap(&eb->reloc_cache, ev->vma->obj);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index d3c1dee16af2..4e7a838ab7bd 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -383,7 +383,16 @@ retry:
}
/* Access to snoopable pages through the GTT is incoherent. */
- if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(i915)) {
+ /*
+ * For objects created by userspace through GEM_CREATE with pat_index
+ * set by set_pat extension, coherency is managed by userspace, make
+ * sure we don't fail handling the vm fault by calling
+ * i915_gem_object_has_cache_level(), which always returns true for such
+ * objects. Otherwise this helper function would fall back to checking
+ * whether the object is un-cached.
+ */
+ if (!(i915_gem_object_has_cache_level(obj, I915_CACHE_NONE) ||
+ HAS_LLC(i915))) {
ret = -EFAULT;
goto err_unpin;
}
@@ -927,53 +936,15 @@ static struct file *mmap_singleton(struct drm_i915_private *i915)
return file;
}
-/*
- * This overcomes the limitation in drm_gem_mmap's assignment of a
- * drm_gem_object as the vma->vm_private_data. Since we need to
- * be able to resolve multiple mmap offsets which could be tied
- * to a single gem object.
- */
-int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+static int
+i915_gem_object_mmap(struct drm_i915_gem_object *obj,
+ struct i915_mmap_offset *mmo,
+ struct vm_area_struct *vma)
{
- struct drm_vma_offset_node *node;
- struct drm_file *priv = filp->private_data;
- struct drm_device *dev = priv->minor->dev;
- struct drm_i915_gem_object *obj = NULL;
- struct i915_mmap_offset *mmo = NULL;
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ struct drm_device *dev = &i915->drm;
struct file *anon;
- if (drm_dev_is_unplugged(dev))
- return -ENODEV;
-
- rcu_read_lock();
- drm_vma_offset_lock_lookup(dev->vma_offset_manager);
- node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
- vma->vm_pgoff,
- vma_pages(vma));
- if (node && drm_vma_node_is_allowed(node, priv)) {
- /*
- * Skip 0-refcnted objects as it is in the process of being
- * destroyed and will be invalid when the vma manager lock
- * is released.
- */
- if (!node->driver_private) {
- mmo = container_of(node, struct i915_mmap_offset, vma_node);
- obj = i915_gem_object_get_rcu(mmo->obj);
-
- GEM_BUG_ON(obj && obj->ops->mmap_ops);
- } else {
- obj = i915_gem_object_get_rcu
- (container_of(node, struct drm_i915_gem_object,
- base.vma_node));
-
- GEM_BUG_ON(obj && !obj->ops->mmap_ops);
- }
- }
- drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
- rcu_read_unlock();
- if (!obj)
- return node ? -EACCES : -EINVAL;
-
if (i915_gem_object_is_readonly(obj)) {
if (vma->vm_flags & VM_WRITE) {
i915_gem_object_put(obj);
@@ -1005,7 +976,7 @@ int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
if (obj->ops->mmap_ops) {
vma->vm_page_prot = pgprot_decrypted(vm_get_page_prot(vma->vm_flags));
vma->vm_ops = obj->ops->mmap_ops;
- vma->vm_private_data = node->driver_private;
+ vma->vm_private_data = obj->base.vma_node.driver_private;
return 0;
}
@@ -1043,6 +1014,91 @@ int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
return 0;
}
+/*
+ * This overcomes the limitation in drm_gem_mmap's assignment of a
+ * drm_gem_object as the vma->vm_private_data. Since we need to
+ * be able to resolve multiple mmap offsets which could be tied
+ * to a single gem object.
+ */
+int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct drm_vma_offset_node *node;
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->minor->dev;
+ struct drm_i915_gem_object *obj = NULL;
+ struct i915_mmap_offset *mmo = NULL;
+
+ if (drm_dev_is_unplugged(dev))
+ return -ENODEV;
+
+ rcu_read_lock();
+ drm_vma_offset_lock_lookup(dev->vma_offset_manager);
+ node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
+ vma->vm_pgoff,
+ vma_pages(vma));
+ if (node && drm_vma_node_is_allowed(node, priv)) {
+ /*
+ * Skip 0-refcnted objects as it is in the process of being
+ * destroyed and will be invalid when the vma manager lock
+ * is released.
+ */
+ if (!node->driver_private) {
+ mmo = container_of(node, struct i915_mmap_offset, vma_node);
+ obj = i915_gem_object_get_rcu(mmo->obj);
+
+ GEM_BUG_ON(obj && obj->ops->mmap_ops);
+ } else {
+ obj = i915_gem_object_get_rcu
+ (container_of(node, struct drm_i915_gem_object,
+ base.vma_node));
+
+ GEM_BUG_ON(obj && !obj->ops->mmap_ops);
+ }
+ }
+ drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
+ rcu_read_unlock();
+ if (!obj)
+ return node ? -EACCES : -EINVAL;
+
+ return i915_gem_object_mmap(obj, mmo, vma);
+}
+
+int i915_gem_fb_mmap(struct drm_i915_gem_object *obj, struct vm_area_struct *vma)
+{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ struct drm_device *dev = &i915->drm;
+ struct i915_mmap_offset *mmo = NULL;
+ enum i915_mmap_type mmap_type;
+ struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
+
+ if (drm_dev_is_unplugged(dev))
+ return -ENODEV;
+
+ /* handle ttm object */
+ if (obj->ops->mmap_ops) {
+ /*
+ * The TTM fault handler, ttm_bo_vm_fault_reserved(), uses the fake
+ * offset to calculate the page offset, so set that up.
+ */
+ vma->vm_pgoff += drm_vma_node_start(&obj->base.vma_node);
+ } else {
+ /* handle stolen and smem objects */
+ mmap_type = i915_ggtt_has_aperture(ggtt) ? I915_MMAP_TYPE_GTT : I915_MMAP_TYPE_WC;
+ mmo = mmap_offset_attach(obj, mmap_type, NULL);
+ if (!mmo)
+ return -ENODEV;
+ }
+
+ /*
+ * When we install vm_ops for mmap we are too late for
+ * the vm_ops->open() which would have increased the ref_count of
+ * this obj; it still gets decreased by the vm_ops->close().
+ * Take an extra reference here to balance that out.
+ */
+ obj = i915_gem_object_get(obj);
+ return i915_gem_object_mmap(obj, mmo, vma);
+}
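+
+/*
+ * Illustrative caller (not part of this patch): a fbdev fb_mmap
+ * implementation would look up the GEM object backing the framebuffer
+ * and forward to i915_gem_fb_mmap(), roughly:
+ *
+ *	static int example_fb_mmap(struct fb_info *info,
+ *				   struct vm_area_struct *vma)
+ *	{
+ *		// hypothetical helper resolving the fb's backing object
+ *		struct drm_i915_gem_object *obj = example_fb_to_obj(info);
+ *
+ *		return i915_gem_fb_mmap(obj, vma);
+ *	}
+ */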
+
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_gem_mman.c"
#endif
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.h b/drivers/gpu/drm/i915/gem/i915_gem_mman.h
index 1fa91b3033b3..196417fd0f5c 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.h
@@ -29,5 +29,5 @@ void i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj);
void i915_gem_object_runtime_pm_release_mmap_offset(struct drm_i915_gem_object *obj);
void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj);
-
+int i915_gem_fb_mmap(struct drm_i915_gem_object *obj, struct vm_area_struct *vma);
#endif
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index 4666bb82f312..46a19b099ec8 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -45,6 +45,33 @@ static struct kmem_cache *slab_objects;
static const struct drm_gem_object_funcs i915_gem_object_funcs;
+unsigned int i915_gem_get_pat_index(struct drm_i915_private *i915,
+ enum i915_cache_level level)
+{
+ if (drm_WARN_ON(&i915->drm, level >= I915_MAX_CACHE_LEVEL))
+ return 0;
+
+ return INTEL_INFO(i915)->cachelevel_to_pat[level];
+}
+
+bool i915_gem_object_has_cache_level(const struct drm_i915_gem_object *obj,
+ enum i915_cache_level lvl)
+{
+ /*
+ * If the pat_index was set by user space, this kernel mode
+ * driver should leave coherency to be managed by user space;
+ * simply return true here.
+ */
+ if (obj->pat_set_by_user)
+ return true;
+
+ /*
+ * Otherwise the pat_index should have been converted from cache_level
+ * so that the following comparison is valid.
+ */
+ return obj->pat_index == i915_gem_get_pat_index(obj_to_i915(obj), lvl);
+}
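+
+/*
+ * Usage sketch: code that previously compared obj->cache_level directly,
+ * e.g. "obj->cache_level == I915_CACHE_NONE", should now use
+ *
+ *	if (i915_gem_object_has_cache_level(obj, I915_CACHE_NONE))
+ *		...
+ *
+ * which deliberately reports true for objects whose pat_index was set by
+ * userspace, leaving coherency management to them.
+ */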
+
struct drm_i915_gem_object *i915_gem_object_alloc(void)
{
struct drm_i915_gem_object *obj;
@@ -124,7 +151,7 @@ void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
- obj->cache_level = cache_level;
+ obj->pat_index = i915_gem_get_pat_index(i915, cache_level);
if (cache_level != I915_CACHE_NONE)
obj->cache_coherent = (I915_BO_CACHE_COHERENT_FOR_READ |
@@ -139,6 +166,37 @@ void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
!IS_DGFX(i915);
}
+/**
+ * i915_gem_object_set_pat_index - set PAT index to be used in PTE encode
+ * @obj: #drm_i915_gem_object
+ * @pat_index: PAT index
+ *
+ * This is a clone of i915_gem_object_set_cache_coherency taking pat index
+ * instead of cache_level as its second argument.
+ */
+void i915_gem_object_set_pat_index(struct drm_i915_gem_object *obj,
+ unsigned int pat_index)
+{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+
+ if (obj->pat_index == pat_index)
+ return;
+
+ obj->pat_index = pat_index;
+
+ if (pat_index != i915_gem_get_pat_index(i915, I915_CACHE_NONE))
+ obj->cache_coherent = (I915_BO_CACHE_COHERENT_FOR_READ |
+ I915_BO_CACHE_COHERENT_FOR_WRITE);
+ else if (HAS_LLC(i915))
+ obj->cache_coherent = I915_BO_CACHE_COHERENT_FOR_READ;
+ else
+ obj->cache_coherent = 0;
+
+ obj->cache_dirty =
+ !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE) &&
+ !IS_DGFX(i915);
+}
+
bool i915_gem_object_can_bypass_llc(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index 885ccde9dc3c..884a17275b3a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -20,6 +20,8 @@
enum intel_region_id;
+#define obj_to_i915(obj__) to_i915((obj__)->base.dev)
+
static inline bool i915_gem_object_size_2big(u64 size)
{
struct drm_i915_gem_object *obj;
@@ -30,6 +32,10 @@ static inline bool i915_gem_object_size_2big(u64 size)
return false;
}
+unsigned int i915_gem_get_pat_index(struct drm_i915_private *i915,
+ enum i915_cache_level level);
+bool i915_gem_object_has_cache_level(const struct drm_i915_gem_object *obj,
+ enum i915_cache_level lvl);
void i915_gem_init__objects(struct drm_i915_private *i915);
void i915_objects_module_exit(void);
@@ -80,7 +86,7 @@ __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj);
/**
* i915_gem_object_lookup_rcu - look up a temporary GEM object from its handle
- * @filp: DRM file private date
+ * @file: DRM file private data
* @handle: userspace handle
*
* Returns:
@@ -760,6 +766,8 @@ bool i915_gem_object_has_unknown_state(struct drm_i915_gem_object *obj);
void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
unsigned int cache_level);
+void i915_gem_object_set_pat_index(struct drm_i915_gem_object *obj,
+ unsigned int pat_index);
bool i915_gem_object_can_bypass_llc(struct drm_i915_gem_object *obj);
void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj);
void i915_gem_object_flush_if_display_locked(struct drm_i915_gem_object *obj);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index 5dcbbef31d44..e72c57716bee 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -194,6 +194,13 @@ enum i915_cache_level {
* engine.
*/
I915_CACHE_WT,
+ /**
+ * @I915_MAX_CACHE_LEVEL:
+ *
+ * Marks the last entry in the enum. Used to define the
+ * cachelevel_to_pat array, i.e. the cache_level to PAT index
+ * translation table.
+ */
+ I915_MAX_CACHE_LEVEL,
};
enum i915_map_type {
@@ -328,6 +335,12 @@ struct drm_i915_gem_object {
*/
#define I915_BO_ALLOC_GPU_ONLY BIT(6)
#define I915_BO_ALLOC_CCS_AUX BIT(7)
+/*
+ * Object is allowed to retain its initial data and will not be cleared on first
+ * access if used along with I915_BO_ALLOC_USER. This is mainly to keep
+ * preallocated framebuffer data intact while transitioning it to i915drmfb.
+ */
+#define I915_BO_PREALLOC BIT(8)
#define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS | \
I915_BO_ALLOC_VOLATILE | \
I915_BO_ALLOC_CPU_CLEAR | \
@@ -335,10 +348,11 @@ struct drm_i915_gem_object {
I915_BO_ALLOC_PM_VOLATILE | \
I915_BO_ALLOC_PM_EARLY | \
I915_BO_ALLOC_GPU_ONLY | \
- I915_BO_ALLOC_CCS_AUX)
-#define I915_BO_READONLY BIT(8)
-#define I915_TILING_QUIRK_BIT 9 /* unknown swizzling; do not release! */
-#define I915_BO_PROTECTED BIT(10)
+ I915_BO_ALLOC_CCS_AUX | \
+ I915_BO_PREALLOC)
+#define I915_BO_READONLY BIT(9)
+#define I915_TILING_QUIRK_BIT 10 /* unknown swizzling; do not release! */
+#define I915_BO_PROTECTED BIT(11)
/**
* @mem_flags - Mutable placement-related flags
*
@@ -350,15 +364,43 @@ struct drm_i915_gem_object {
#define I915_BO_FLAG_STRUCT_PAGE BIT(0) /* Object backed by struct pages */
#define I915_BO_FLAG_IOMEM BIT(1) /* Object backed by IO memory */
/**
- * @cache_level: The desired GTT caching level.
+ * @pat_index: The desired PAT index.
+ *
+ * See the hardware specification for valid PAT indices on each platform.
+ * This field replaces @cache_level, which held a value of enum
+ * i915_cache_level, since PAT indices are used by both userspace
+ * and the kernel mode driver for caching policy control after GEN12.
+ * Platform-specific tables are created to translate i915_cache_level
+ * into a pat index; for more details check the macros defined in
+ * i915/i915_pci.c, e.g. PVC_CACHELEVEL.
+ * For backward compatibility, this field contains values that exactly
+ * match the entries of enum i915_cache_level for pre-GEN12 platforms
+ * (see LEGACY_CACHELEVEL), so that the PTE encode functions for these
+ * legacy platforms can stay the same.
+ */
+ unsigned int pat_index:6;
+ /**
+ * @pat_set_by_user: Indicates whether pat_index was set by user space
*
- * See enum i915_cache_level for possible values, along with what
- * each does.
+ * This field defaults to false and is set to true only when the
+ * pat_index is set by user space. By design, user space is capable of
+ * managing caching behavior by setting pat_index, in which case this
+ * kernel mode driver must never touch the pat_index.
*/
- unsigned int cache_level:3;
+ unsigned int pat_set_by_user:1;
/**
* @cache_coherent:
*
+ * Note: with the change above which replaced @cache_level with pat_index,
+ * the use of @cache_coherent is limited to objects created by the kernel
+ * or by userspace without a pat index specified.
+ * Check @pat_set_by_user to find out if an object has its pat index set
+ * by userspace. The ioctls to change cache settings have also been
+ * disabled for objects with a pat index set by userspace. Please don't
+ * assume @cache_coherent has the flags set as described here. The helper
+ * function i915_gem_object_has_cache_level() provides one way to bypass
+ * the use of this field.
+ *
* Track whether the pages are coherent with the GPU if reading or
* writing through the CPU caches. This largely depends on the
* @cache_level setting.
@@ -432,6 +474,16 @@ struct drm_i915_gem_object {
/**
* @cache_dirty:
*
+ * Note: with the change above which replaced cache_level with pat_index,
+ * the use of @cache_dirty is limited to objects created by the kernel
+ * or by userspace without a pat index specified.
+ * Check @pat_set_by_user to find out if an object has its pat index set
+ * by userspace. The ioctls to change cache settings have also been
+ * disabled for objects with a pat_index set by userspace. Please don't
+ * assume @cache_dirty is set as described here. Also see the helper
+ * function i915_gem_object_has_cache_level() for possible ways to bypass
+ * the use of this field.
+ *
* Track if we are dirty with writes through the CPU cache for this
* object. As a result reading directly from main memory might yield
* stale data.
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index ecd86130b74f..89fc8ea6bcfc 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -469,7 +469,10 @@ enum i915_map_type i915_coherent_map_type(struct drm_i915_private *i915,
struct drm_i915_gem_object *obj,
bool always_coherent)
{
- if (i915_gem_object_is_lmem(obj))
+ /*
+ * Wa_22016122933: always return I915_MAP_WC for MTL
+ */
+ if (i915_gem_object_is_lmem(obj) || IS_METEORLAKE(i915))
return I915_MAP_WC;
if (HAS_LLC(i915) || always_coherent)
return I915_MAP_WB;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.h b/drivers/gpu/drm/i915/gem/i915_gem_region.h
index 2dfcc41c0170..8a7650b27cc2 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_region.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_region.h
@@ -22,9 +22,7 @@ struct i915_gem_apply_to_region;
*/
struct i915_gem_apply_to_region_ops {
/**
- * process_obj - Process the current object
- * @apply: Embed this for private data.
- * @obj: The current object.
+ * @process_obj: Process the current object
*
* Note that if this function is part of a ww transaction, and
* if it returns -EDEADLK for one of the objects, it may be
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index 37d1efcd3ca6..cad4a6017f4b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -601,7 +601,14 @@ static int shmem_object_init(struct intel_memory_region *mem,
obj->write_domain = I915_GEM_DOMAIN_CPU;
obj->read_domains = I915_GEM_DOMAIN_CPU;
- if (HAS_LLC(i915))
+ /*
+ * MTL doesn't snoop CPU cache by default for GPU access (namely
+ * 1-way coherency). However some UMDs are currently depending on
+ * that. Make 1-way coherency the default setting for MTL. A follow-up
+ * patch will extend the GEM_CREATE uAPI to allow UMDs to specify the
+ * caching mode at BO creation time.
+ */
+ if (HAS_LLC(i915) || (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70)))
/* On some devices, we can have the GPU use the LLC (the CPU
* cache) for about a 10% performance improvement
* compared to uncached. Graphics requests other than
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
index b1672e054b21..214763942aa2 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
@@ -460,8 +460,6 @@ void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915,
fs_reclaim_release(GFP_KERNEL);
}
-#define obj_to_i915(obj__) to_i915((obj__)->base.dev)
-
/**
* i915_gem_object_make_unshrinkable - Hide the object from the shrinker. By
* default all object types that support shrinking(see IS_SHRINKABLE), will also
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index 8ac376c24aa2..3b094d36a0b0 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -535,6 +535,14 @@ static int i915_gem_init_stolen(struct intel_memory_region *mem)
/* Basic memrange allocator for stolen space. */
drm_mm_init(&i915->mm.stolen, 0, i915->dsm.usable_size);
+ /*
+ * Access to stolen lmem beyond a certain size on the MTL A0 stepping
+ * would crash the machine. Disable stolen lmem for userspace access
+ * by setting usable_size to zero.
+ */
+ if (IS_METEORLAKE(i915) && INTEL_REVID(i915) == 0x0)
+ i915->dsm.usable_size = 0;
+
return 0;
}
@@ -557,7 +565,9 @@ static void dbg_poison(struct i915_ggtt *ggtt,
ggtt->vm.insert_page(&ggtt->vm, addr,
ggtt->error_capture.start,
- I915_CACHE_NONE, 0);
+ i915_gem_get_pat_index(ggtt->vm.i915,
+ I915_CACHE_NONE),
+ 0);
mb();
s = io_mapping_map_wc(&ggtt->iomap,
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.h b/drivers/gpu/drm/i915/gem/i915_gem_ttm.h
index f8f6bed1b297..67347e62e29b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.h
@@ -42,8 +42,9 @@ static inline bool i915_ttm_is_ghost_object(struct ttm_buffer_object *bo)
/**
* i915_ttm_to_gem - Convert a struct ttm_buffer_object to an embedding
* struct drm_i915_gem_object.
+ * @bo: Pointer to the ttm buffer object
*
- * Return: Pointer to the embedding struct ttm_buffer_object.
+ * Return: Pointer to the embedding struct drm_i915_gem_object.
*/
static inline struct drm_i915_gem_object *
i915_ttm_to_gem(struct ttm_buffer_object *bo)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
index dd188dfcc423..7078af2f8f79 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
@@ -214,7 +214,8 @@ static struct dma_fence *i915_ttm_accel_move(struct ttm_buffer_object *bo,
intel_engine_pm_get(to_gt(i915)->migrate.context->engine);
ret = intel_context_migrate_clear(to_gt(i915)->migrate.context, deps,
- dst_st->sgl, dst_level,
+ dst_st->sgl,
+ i915_gem_get_pat_index(i915, dst_level),
i915_ttm_gtt_binds_lmem(dst_mem),
0, &rq);
} else {
@@ -228,9 +229,10 @@ static struct dma_fence *i915_ttm_accel_move(struct ttm_buffer_object *bo,
intel_engine_pm_get(to_gt(i915)->migrate.context->engine);
ret = intel_context_migrate_copy(to_gt(i915)->migrate.context,
deps, src_rsgt->table.sgl,
- src_level,
+ i915_gem_get_pat_index(i915, src_level),
i915_ttm_gtt_binds_lmem(bo->resource),
- dst_st->sgl, dst_level,
+ dst_st->sgl,
+ i915_gem_get_pat_index(i915, dst_level),
i915_ttm_gtt_binds_lmem(dst_mem),
&rq);
@@ -576,7 +578,7 @@ int i915_ttm_move(struct ttm_buffer_object *bo, bool evict,
struct dma_fence *migration_fence = NULL;
struct ttm_tt *ttm = bo->ttm;
struct i915_refct_sgt *dst_rsgt;
- bool clear;
+ bool clear, prealloc_bo;
int ret;
if (GEM_WARN_ON(i915_ttm_is_ghost_object(bo))) {
@@ -632,7 +634,8 @@ int i915_ttm_move(struct ttm_buffer_object *bo, bool evict,
return PTR_ERR(dst_rsgt);
clear = !i915_ttm_cpu_maps_iomem(bo->resource) && (!ttm || !ttm_tt_is_populated(ttm));
- if (!(clear && ttm && !(ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC))) {
+ prealloc_bo = obj->flags & I915_BO_PREALLOC;
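+ /*
+ * For I915_BO_PREALLOC objects the accelerated clear of freshly
+ * allocated (TTM_TT_FLAG_ZERO_ALLOC) pages is skipped below, so
+ * the object's initial contents, e.g. a framebuffer handed over
+ * from boot firmware, are preserved.
+ */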
+ if (!(clear && ttm && !((ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC) && !prealloc_bo))) {
struct i915_deps deps;
i915_deps_init(&deps, GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
index 99f39a5feca1..df6c9a84252c 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
@@ -354,7 +354,7 @@ fake_huge_pages_object(struct drm_i915_private *i915, u64 size, bool single)
obj->write_domain = I915_GEM_DOMAIN_CPU;
obj->read_domains = I915_GEM_DOMAIN_CPU;
- obj->cache_level = I915_CACHE_NONE;
+ obj->pat_index = i915_gem_get_pat_index(i915, I915_CACHE_NONE);
return obj;
}
@@ -695,8 +695,7 @@ out_put:
return err;
}
-static void close_object_list(struct list_head *objects,
- struct i915_ppgtt *ppgtt)
+static void close_object_list(struct list_head *objects)
{
struct drm_i915_gem_object *obj, *on;
@@ -710,17 +709,36 @@ static void close_object_list(struct list_head *objects,
}
}
-static int igt_mock_ppgtt_huge_fill(void *arg)
+static int igt_ppgtt_huge_fill(void *arg)
{
- struct i915_ppgtt *ppgtt = arg;
- struct drm_i915_private *i915 = ppgtt->vm.i915;
- unsigned long max_pages = ppgtt->vm.total >> PAGE_SHIFT;
+ struct drm_i915_private *i915 = arg;
+ unsigned int supported = RUNTIME_INFO(i915)->page_sizes;
+ bool has_pte64 = GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50);
+ struct i915_address_space *vm;
+ struct i915_gem_context *ctx;
+ unsigned long max_pages;
unsigned long page_num;
+ struct file *file;
bool single = false;
LIST_HEAD(objects);
IGT_TIMEOUT(end_time);
int err = -ENODEV;
+ if (supported == I915_GTT_PAGE_SIZE_4K)
+ return 0;
+
+ file = mock_file(i915);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
+ ctx = hugepage_ctx(i915, file);
+ if (IS_ERR(ctx)) {
+ err = PTR_ERR(ctx);
+ goto out;
+ }
+ vm = i915_gem_context_get_eb_vm(ctx);
+ max_pages = vm->total >> PAGE_SHIFT;
+
for_each_prime_number_from(page_num, 1, max_pages) {
struct drm_i915_gem_object *obj;
u64 size = page_num << PAGE_SHIFT;
@@ -750,13 +768,14 @@ static int igt_mock_ppgtt_huge_fill(void *arg)
list_add(&obj->st_link, &objects);
- vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
+ vma = i915_vma_instance(obj, vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
break;
}
- err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ /* vma start must be aligned to BIT(21) to allow 2M PTEs */
+ err = i915_vma_pin(vma, 0, BIT(21), PIN_USER);
if (err)
break;
@@ -784,12 +803,13 @@ static int igt_mock_ppgtt_huge_fill(void *arg)
GEM_BUG_ON(!expected_gtt);
GEM_BUG_ON(size);
- if (expected_gtt & I915_GTT_PAGE_SIZE_4K)
+ if (!has_pte64 && (obj->base.size < I915_GTT_PAGE_SIZE_2M ||
+ expected_gtt & I915_GTT_PAGE_SIZE_2M))
expected_gtt &= ~I915_GTT_PAGE_SIZE_64K;
i915_vma_unpin(vma);
- if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K) {
+ if (!has_pte64 && vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K) {
if (!IS_ALIGNED(vma->node.start,
I915_GTT_PAGE_SIZE_2M)) {
pr_err("node.start(%llx) not aligned to 2M\n",
@@ -808,7 +828,7 @@ static int igt_mock_ppgtt_huge_fill(void *arg)
}
if (vma->resource->page_sizes_gtt != expected_gtt) {
- pr_err("gtt=%u, expected=%u, size=%zd, single=%s\n",
+ pr_err("gtt=%#x, expected=%#x, size=0x%zx, single=%s\n",
vma->resource->page_sizes_gtt, expected_gtt,
obj->base.size, str_yes_no(!!single));
err = -EINVAL;
@@ -823,19 +843,25 @@ static int igt_mock_ppgtt_huge_fill(void *arg)
single = !single;
}
- close_object_list(&objects, ppgtt);
+ close_object_list(&objects);
if (err == -ENOMEM || err == -ENOSPC)
err = 0;
+ i915_vm_put(vm);
+out:
+ fput(file);
return err;
}
-static int igt_mock_ppgtt_64K(void *arg)
+static int igt_ppgtt_64K(void *arg)
{
- struct i915_ppgtt *ppgtt = arg;
- struct drm_i915_private *i915 = ppgtt->vm.i915;
+ struct drm_i915_private *i915 = arg;
+ bool has_pte64 = GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50);
struct drm_i915_gem_object *obj;
+ struct i915_address_space *vm;
+ struct i915_gem_context *ctx;
+ struct file *file;
const struct object_info {
unsigned int size;
unsigned int gtt;
@@ -907,16 +933,41 @@ static int igt_mock_ppgtt_64K(void *arg)
if (!HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_64K))
return 0;
+ file = mock_file(i915);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
+ ctx = hugepage_ctx(i915, file);
+ if (IS_ERR(ctx)) {
+ err = PTR_ERR(ctx);
+ goto out;
+ }
+ vm = i915_gem_context_get_eb_vm(ctx);
+
for (i = 0; i < ARRAY_SIZE(objects); ++i) {
unsigned int size = objects[i].size;
unsigned int expected_gtt = objects[i].gtt;
unsigned int offset = objects[i].offset;
unsigned int flags = PIN_USER;
+ /*
+ * For modern GTT models, the requirements for marking a page-table
+ * as 64K have been relaxed. Account for this.
+ */
+ if (has_pte64) {
+ expected_gtt = 0;
+ if (size >= SZ_64K)
+ expected_gtt |= I915_GTT_PAGE_SIZE_64K;
+ if (size & (SZ_64K - 1))
+ expected_gtt |= I915_GTT_PAGE_SIZE_4K;
+ }
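+ /*
+ * e.g. a 68K (SZ_64K + SZ_4K) object is then expected to use
+ * both the 64K and the 4K GTT page size.
+ */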
+
for (single = 0; single <= 1; single++) {
obj = fake_huge_pages_object(i915, size, !!single);
- if (IS_ERR(obj))
- return PTR_ERR(obj);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto out_vm;
+ }
err = i915_gem_object_pin_pages_unlocked(obj);
if (err)
@@ -928,7 +979,7 @@ static int igt_mock_ppgtt_64K(void *arg)
*/
obj->mm.page_sizes.sg &= ~I915_GTT_PAGE_SIZE_2M;
- vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
+ vma = i915_vma_instance(obj, vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto out_object_unpin;
@@ -945,7 +996,8 @@ static int igt_mock_ppgtt_64K(void *arg)
if (err)
goto out_vma_unpin;
- if (!offset && vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K) {
+ if (!has_pte64 && !offset &&
+ vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K) {
if (!IS_ALIGNED(vma->node.start,
I915_GTT_PAGE_SIZE_2M)) {
pr_err("node.start(%llx) not aligned to 2M\n",
@@ -964,9 +1016,10 @@ static int igt_mock_ppgtt_64K(void *arg)
}
if (vma->resource->page_sizes_gtt != expected_gtt) {
- pr_err("gtt=%u, expected=%u, i=%d, single=%s\n",
+ pr_err("gtt=%#x, expected=%#x, i=%d, single=%s offset=%#x size=%#x\n",
vma->resource->page_sizes_gtt,
- expected_gtt, i, str_yes_no(!!single));
+ expected_gtt, i, str_yes_no(!!single),
+ offset, size);
err = -EINVAL;
goto out_vma_unpin;
}
@@ -982,7 +1035,7 @@ static int igt_mock_ppgtt_64K(void *arg)
}
}
- return 0;
+ goto out_vm;
out_vma_unpin:
i915_vma_unpin(vma);
@@ -992,7 +1045,10 @@ out_object_unpin:
i915_gem_object_unlock(obj);
out_object_put:
i915_gem_object_put(obj);
-
+out_vm:
+ i915_vm_put(vm);
+out:
+ fput(file);
return err;
}
@@ -1910,8 +1966,6 @@ int i915_gem_huge_page_mock_selftests(void)
SUBTEST(igt_mock_exhaust_device_supported_pages),
SUBTEST(igt_mock_memory_region_huge_pages),
SUBTEST(igt_mock_ppgtt_misaligned_dma),
- SUBTEST(igt_mock_ppgtt_huge_fill),
- SUBTEST(igt_mock_ppgtt_64K),
};
struct drm_i915_private *dev_priv;
struct i915_ppgtt *ppgtt;
@@ -1962,6 +2016,8 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_ppgtt_sanity_check),
SUBTEST(igt_ppgtt_compact),
SUBTEST(igt_ppgtt_mixed),
+ SUBTEST(igt_ppgtt_huge_fill),
+ SUBTEST(igt_ppgtt_64K),
};
if (!HAS_PPGTT(i915)) {
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
index a81fa6a20f5a..ad6a3b2fb387 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
@@ -66,7 +66,7 @@ static int live_nop_switch(void *arg)
ctx[n] = live_context(i915, file);
if (IS_ERR(ctx[n])) {
err = PTR_ERR(ctx[n]);
- goto out_file;
+ goto out_ctx;
}
}
@@ -82,7 +82,7 @@ static int live_nop_switch(void *arg)
this = igt_request_alloc(ctx[n], engine);
if (IS_ERR(this)) {
err = PTR_ERR(this);
- goto out_file;
+ goto out_ctx;
}
if (rq) {
i915_request_await_dma_fence(this, &rq->fence);
@@ -93,10 +93,10 @@ static int live_nop_switch(void *arg)
}
if (i915_request_wait(rq, 0, 10 * HZ) < 0) {
pr_err("Failed to populated %d contexts\n", nctx);
- intel_gt_set_wedged(to_gt(i915));
+ intel_gt_set_wedged(engine->gt);
i915_request_put(rq);
err = -EIO;
- goto out_file;
+ goto out_ctx;
}
i915_request_put(rq);
@@ -107,7 +107,7 @@ static int live_nop_switch(void *arg)
err = igt_live_test_begin(&t, i915, __func__, engine->name);
if (err)
- goto out_file;
+ goto out_ctx;
end_time = jiffies + i915_selftest.timeout_jiffies;
for_each_prime_number_from(prime, 2, 8192) {
@@ -120,7 +120,7 @@ static int live_nop_switch(void *arg)
this = igt_request_alloc(ctx[n % nctx], engine);
if (IS_ERR(this)) {
err = PTR_ERR(this);
- goto out_file;
+ goto out_ctx;
}
if (rq) { /* Force submission order */
@@ -149,7 +149,7 @@ static int live_nop_switch(void *arg)
if (i915_request_wait(rq, 0, HZ / 5) < 0) {
pr_err("Switching between %ld contexts timed out\n",
prime);
- intel_gt_set_wedged(to_gt(i915));
+ intel_gt_set_wedged(engine->gt);
i915_request_put(rq);
break;
}
@@ -165,7 +165,7 @@ static int live_nop_switch(void *arg)
err = igt_live_test_end(&t);
if (err)
- goto out_file;
+ goto out_ctx;
pr_info("Switch latencies on %s: 1 = %lluns, %lu = %lluns\n",
engine->name,
@@ -173,6 +173,8 @@ static int live_nop_switch(void *arg)
prime - 1, div64_u64(ktime_to_ns(times[1]), prime - 1));
}
+out_ctx:
+ kfree(ctx);
out_file:
fput(file);
return err;
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c
index fe6c37fd7859..a93a90b15907 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c
@@ -219,7 +219,7 @@ static int __igt_lmem_pages_migrate(struct intel_gt *gt,
continue;
err = intel_migrate_clear(&gt->migrate, &ww, deps,
- obj->mm.pages->sgl, obj->cache_level,
+ obj->mm.pages->sgl, obj->pat_index,
i915_gem_object_is_lmem(obj),
0xdeadbeaf, &rq);
if (rq) {
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index 56279908ed30..a93d8f9f8bc1 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -1222,7 +1222,7 @@ static int __igt_mmap_migrate(struct intel_memory_region **placements,
}
err = intel_context_migrate_clear(to_gt(i915)->migrate.context, NULL,
- obj->mm.pages->sgl, obj->cache_level,
+ obj->mm.pages->sgl, obj->pat_index,
i915_gem_object_is_lmem(obj),
expand32(POISON_INUSE), &rq);
i915_gem_object_unpin_pages(obj);
diff --git a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
index 5aaacc53fa4c..c2bdc133c89a 100644
--- a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
@@ -109,7 +109,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
struct i915_vma_resource *vma_res,
- enum i915_cache_level cache_level,
+ unsigned int pat_index,
u32 flags)
{
struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
@@ -117,7 +117,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
unsigned int first_entry = vma_res->start / I915_GTT_PAGE_SIZE;
unsigned int act_pt = first_entry / GEN6_PTES;
unsigned int act_pte = first_entry % GEN6_PTES;
- const u32 pte_encode = vm->pte_encode(0, cache_level, flags);
+ const u32 pte_encode = vm->pte_encode(0, pat_index, flags);
struct sgt_dma iter = sgt_dma(vma_res);
gen6_pte_t *vaddr;
@@ -227,7 +227,9 @@ static int gen6_ppgtt_init_scratch(struct gen6_ppgtt *ppgtt)
vm->scratch[0]->encode =
vm->pte_encode(px_dma(vm->scratch[0]),
- I915_CACHE_NONE, PTE_READ_ONLY);
+ i915_gem_get_pat_index(vm->i915,
+ I915_CACHE_NONE),
+ PTE_READ_ONLY);
vm->scratch[1] = vm->alloc_pt_dma(vm, I915_GTT_PAGE_SIZE_4K);
if (IS_ERR(vm->scratch[1])) {
@@ -278,7 +280,7 @@ static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
static void pd_vma_bind(struct i915_address_space *vm,
struct i915_vm_pt_stash *stash,
struct i915_vma_resource *vma_res,
- enum i915_cache_level cache_level,
+ unsigned int pat_index,
u32 unused)
{
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
index 4daaa6f55668..f948d33e5ec5 100644
--- a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
@@ -29,7 +29,7 @@ static u64 gen8_pde_encode(const dma_addr_t addr,
}
static u64 gen8_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
+ unsigned int pat_index,
u32 flags)
{
gen8_pte_t pte = addr | GEN8_PAGE_PRESENT | GEN8_PAGE_RW;
@@ -40,7 +40,12 @@ static u64 gen8_pte_encode(dma_addr_t addr,
if (flags & PTE_LM)
pte |= GEN12_PPGTT_PTE_LM;
- switch (level) {
+ /*
+ * For pre-gen12 platforms pat_index is the same as enum
+ * i915_cache_level, so the switch-case here is still valid.
+ * See translation table defined by LEGACY_CACHELEVEL.
+ */
+ switch (pat_index) {
case I915_CACHE_NONE:
pte |= PPAT_UNCACHED;
break;
@@ -55,6 +60,33 @@ static u64 gen8_pte_encode(dma_addr_t addr,
return pte;
}
+static u64 gen12_pte_encode(dma_addr_t addr,
+ unsigned int pat_index,
+ u32 flags)
+{
+ gen8_pte_t pte = addr | GEN8_PAGE_PRESENT | GEN8_PAGE_RW;
+
+ if (unlikely(flags & PTE_READ_ONLY))
+ pte &= ~GEN8_PAGE_RW;
+
+ if (flags & PTE_LM)
+ pte |= GEN12_PPGTT_PTE_LM;
+
+ if (pat_index & BIT(0))
+ pte |= GEN12_PPGTT_PTE_PAT0;
+
+ if (pat_index & BIT(1))
+ pte |= GEN12_PPGTT_PTE_PAT1;
+
+ if (pat_index & BIT(2))
+ pte |= GEN12_PPGTT_PTE_PAT2;
+
+ if (pat_index & BIT(3))
+ pte |= MTL_PPGTT_PTE_PAT3;
+
+ return pte;
+}
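+
+/*
+ * Worked example: pat_index 3 (0b0011) sets GEN12_PPGTT_PTE_PAT1 |
+ * GEN12_PPGTT_PTE_PAT0 in the PTE, while pat_index 4 (0b0100) sets only
+ * GEN12_PPGTT_PTE_PAT2; each PAT bit mirrors one bit of the index.
+ */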
+
static void gen8_ppgtt_notify_vgt(struct i915_ppgtt *ppgtt, bool create)
{
struct drm_i915_private *i915 = ppgtt->vm.i915;
@@ -423,11 +455,11 @@ gen8_ppgtt_insert_pte(struct i915_ppgtt *ppgtt,
struct i915_page_directory *pdp,
struct sgt_dma *iter,
u64 idx,
- enum i915_cache_level cache_level,
+ unsigned int pat_index,
u32 flags)
{
struct i915_page_directory *pd;
- const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
+ const gen8_pte_t pte_encode = ppgtt->vm.pte_encode(0, pat_index, flags);
gen8_pte_t *vaddr;
pd = i915_pd_entry(pdp, gen8_pd_index(idx, 2));
@@ -470,10 +502,10 @@ static void
xehpsdv_ppgtt_insert_huge(struct i915_address_space *vm,
struct i915_vma_resource *vma_res,
struct sgt_dma *iter,
- enum i915_cache_level cache_level,
+ unsigned int pat_index,
u32 flags)
{
- const gen8_pte_t pte_encode = vm->pte_encode(0, cache_level, flags);
+ const gen8_pte_t pte_encode = vm->pte_encode(0, pat_index, flags);
unsigned int rem = sg_dma_len(iter->sg);
u64 start = vma_res->start;
u64 end = start + vma_res->vma_size;
@@ -570,6 +602,7 @@ xehpsdv_ppgtt_insert_huge(struct i915_address_space *vm,
}
} while (rem >= page_size && index < max);
+ drm_clflush_virt_range(vaddr, PAGE_SIZE);
vma_res->page_sizes_gtt |= page_size;
} while (iter->sg && sg_dma_len(iter->sg));
}
@@ -577,10 +610,10 @@ xehpsdv_ppgtt_insert_huge(struct i915_address_space *vm,
static void gen8_ppgtt_insert_huge(struct i915_address_space *vm,
struct i915_vma_resource *vma_res,
struct sgt_dma *iter,
- enum i915_cache_level cache_level,
+ unsigned int pat_index,
u32 flags)
{
- const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
+ const gen8_pte_t pte_encode = vm->pte_encode(0, pat_index, flags);
unsigned int rem = sg_dma_len(iter->sg);
u64 start = vma_res->start;
@@ -700,17 +733,17 @@ static void gen8_ppgtt_insert_huge(struct i915_address_space *vm,
static void gen8_ppgtt_insert(struct i915_address_space *vm,
struct i915_vma_resource *vma_res,
- enum i915_cache_level cache_level,
+ unsigned int pat_index,
u32 flags)
{
struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm);
struct sgt_dma iter = sgt_dma(vma_res);
if (vma_res->bi.page_sizes.sg > I915_GTT_PAGE_SIZE) {
- if (HAS_64K_PAGES(vm->i915))
- xehpsdv_ppgtt_insert_huge(vm, vma_res, &iter, cache_level, flags);
+ if (GRAPHICS_VER_FULL(vm->i915) >= IP_VER(12, 50))
+ xehpsdv_ppgtt_insert_huge(vm, vma_res, &iter, pat_index, flags);
else
- gen8_ppgtt_insert_huge(vm, vma_res, &iter, cache_level, flags);
+ gen8_ppgtt_insert_huge(vm, vma_res, &iter, pat_index, flags);
} else {
u64 idx = vma_res->start >> GEN8_PTE_SHIFT;
@@ -719,7 +752,7 @@ static void gen8_ppgtt_insert(struct i915_address_space *vm,
gen8_pdp_for_page_index(vm, idx);
idx = gen8_ppgtt_insert_pte(ppgtt, pdp, &iter, idx,
- cache_level, flags);
+ pat_index, flags);
} while (idx);
vma_res->page_sizes_gtt = I915_GTT_PAGE_SIZE;
@@ -729,7 +762,7 @@ static void gen8_ppgtt_insert(struct i915_address_space *vm,
static void gen8_ppgtt_insert_entry(struct i915_address_space *vm,
dma_addr_t addr,
u64 offset,
- enum i915_cache_level level,
+ unsigned int pat_index,
u32 flags)
{
u64 idx = offset >> GEN8_PTE_SHIFT;
@@ -743,14 +776,14 @@ static void gen8_ppgtt_insert_entry(struct i915_address_space *vm,
GEM_BUG_ON(pt->is_compact);
vaddr = px_vaddr(pt);
- vaddr[gen8_pd_index(idx, 0)] = gen8_pte_encode(addr, level, flags);
+ vaddr[gen8_pd_index(idx, 0)] = vm->pte_encode(addr, pat_index, flags);
drm_clflush_virt_range(&vaddr[gen8_pd_index(idx, 0)], sizeof(*vaddr));
}
static void __xehpsdv_ppgtt_insert_entry_lm(struct i915_address_space *vm,
dma_addr_t addr,
u64 offset,
- enum i915_cache_level level,
+ unsigned int pat_index,
u32 flags)
{
u64 idx = offset >> GEN8_PTE_SHIFT;
@@ -773,20 +806,20 @@ static void __xehpsdv_ppgtt_insert_entry_lm(struct i915_address_space *vm,
}
vaddr = px_vaddr(pt);
- vaddr[gen8_pd_index(idx, 0) / 16] = gen8_pte_encode(addr, level, flags);
+ vaddr[gen8_pd_index(idx, 0) / 16] = vm->pte_encode(addr, pat_index, flags);
}
static void xehpsdv_ppgtt_insert_entry(struct i915_address_space *vm,
dma_addr_t addr,
u64 offset,
- enum i915_cache_level level,
+ unsigned int pat_index,
u32 flags)
{
if (flags & PTE_LM)
return __xehpsdv_ppgtt_insert_entry_lm(vm, addr, offset,
- level, flags);
+ pat_index, flags);
- return gen8_ppgtt_insert_entry(vm, addr, offset, level, flags);
+ return gen8_ppgtt_insert_entry(vm, addr, offset, pat_index, flags);
}
static int gen8_init_scratch(struct i915_address_space *vm)
@@ -820,8 +853,10 @@ static int gen8_init_scratch(struct i915_address_space *vm)
pte_flags |= PTE_LM;
vm->scratch[0]->encode =
- gen8_pte_encode(px_dma(vm->scratch[0]),
- I915_CACHE_NONE, pte_flags);
+ vm->pte_encode(px_dma(vm->scratch[0]),
+ i915_gem_get_pat_index(vm->i915,
+ I915_CACHE_NONE),
+ pte_flags);
for (i = 1; i <= vm->top; i++) {
struct drm_i915_gem_object *obj;
@@ -963,7 +998,10 @@ struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt,
*/
ppgtt->vm.alloc_scratch_dma = alloc_pt_dma;
- ppgtt->vm.pte_encode = gen8_pte_encode;
+ if (GRAPHICS_VER(gt->i915) >= 12)
+ ppgtt->vm.pte_encode = gen12_pte_encode;
+ else
+ ppgtt->vm.pte_encode = gen8_pte_encode;
ppgtt->vm.bind_async_flags = I915_VMA_LOCAL_BIND;
ppgtt->vm.insert_entries = gen8_ppgtt_insert;
diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.h b/drivers/gpu/drm/i915/gt/gen8_ppgtt.h
index f541d19264b4..19c635441642 100644
--- a/drivers/gpu/drm/i915/gt/gen8_ppgtt.h
+++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.h
@@ -10,13 +10,12 @@
struct i915_address_space;
struct intel_gt;
-enum i915_cache_level;
struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt,
unsigned long lmem_pt_obj_flags);
u64 gen8_ggtt_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
+ unsigned int pat_index,
u32 flags);
#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index 2aa63ec521b8..a53b26178f0a 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -578,10 +578,13 @@ void intel_context_bind_parent_child(struct intel_context *parent,
child->parallel.parent = parent;
}
-u64 intel_context_get_total_runtime_ns(const struct intel_context *ce)
+u64 intel_context_get_total_runtime_ns(struct intel_context *ce)
{
u64 total, active;
+ if (ce->ops->update_stats)
+ ce->ops->update_stats(ce);
+
total = ce->stats.runtime.total;
if (ce->ops->flags & COPS_RUNTIME_CYCLES)
total *= ce->engine->gt->clock_period_ns;
diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h
index 48f888c3da08..a80e3b7c24ff 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.h
+++ b/drivers/gpu/drm/i915/gt/intel_context.h
@@ -97,7 +97,7 @@ void intel_context_bind_parent_child(struct intel_context *parent,
/**
* intel_context_lock_pinned - Stabilises the 'pinned' status of the HW context
- * @ce - the context
+ * @ce: the context
*
* Acquire a lock on the pinned status of the HW context, such that the context
* can neither be bound to the GPU nor unbound whilst the lock is held, i.e.
@@ -111,7 +111,7 @@ static inline int intel_context_lock_pinned(struct intel_context *ce)
/**
* intel_context_is_pinned - Reports the 'pinned' status
- * @ce - the context
+ * @ce: the context
*
* While in use by the GPU, the context, along with its ring and page
* tables is pinned into memory and the GTT.
@@ -133,7 +133,7 @@ static inline void intel_context_cancel_request(struct intel_context *ce,
/**
* intel_context_unlock_pinned - Releases the earlier locking of 'pinned' status
- * @ce - the context
+ * @ce: the context
*
* Releases the lock earlier acquired by intel_context_lock_pinned().
*/
@@ -375,7 +375,7 @@ intel_context_clear_nopreempt(struct intel_context *ce)
clear_bit(CONTEXT_NOPREEMPT, &ce->flags);
}
-u64 intel_context_get_total_runtime_ns(const struct intel_context *ce);
+u64 intel_context_get_total_runtime_ns(struct intel_context *ce);
u64 intel_context_get_avg_runtime_ns(struct intel_context *ce);
static inline u64 intel_context_clock(void)
diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h
index e36670f2e626..aceaac28a33e 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
@@ -58,6 +58,8 @@ struct intel_context_ops {
void (*sched_disable)(struct intel_context *ce);
+ void (*update_stats)(struct intel_context *ce);
+
void (*reset)(struct intel_context *ce);
void (*destroy)(struct kref *kref);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 5c6c9a6d469c..0aff5bb13c53 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -1515,7 +1515,7 @@ int intel_engines_init(struct intel_gt *gt)
}
/**
- * intel_engines_cleanup_common - cleans up the engine state created by
+ * intel_engine_cleanup_common - cleans up the engine state created by
* the common initializers.
* @engine: Engine to cleanup.
*
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index 960291f88fd6..e99a6fa03d45 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -289,6 +289,7 @@ struct intel_engine_execlists {
*/
u8 csb_head;
+ /* private: selftest */
I915_SELFTEST_DECLARE(struct st_preempt_hang preempt_hang;)
};
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_user.c b/drivers/gpu/drm/i915/gt/intel_engine_user.c
index cd4f1b126f75..dcedff41a825 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_user.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_user.c
@@ -117,7 +117,7 @@ static void set_scheduler_caps(struct drm_i915_private *i915)
disabled |= (I915_SCHEDULER_CAP_ENABLED |
I915_SCHEDULER_CAP_PRIORITY);
- if (intel_uc_uses_guc_submission(&to_gt(i915)->uc))
+ if (intel_uc_uses_guc_submission(&engine->gt->uc))
enabled |= I915_SCHEDULER_CAP_STATIC_PRIORITY_MAP;
for (i = 0; i < ARRAY_SIZE(map); i++) {
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
index 3c7f1ed92f5b..2a7942fac798 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
@@ -220,8 +220,28 @@ static void guc_ggtt_invalidate(struct i915_ggtt *ggtt)
}
}
+static u64 mtl_ggtt_pte_encode(dma_addr_t addr,
+ unsigned int pat_index,
+ u32 flags)
+{
+ gen8_pte_t pte = addr | GEN8_PAGE_PRESENT;
+
+ WARN_ON_ONCE(addr & ~GEN12_GGTT_PTE_ADDR_MASK);
+
+ if (flags & PTE_LM)
+ pte |= GEN12_GGTT_PTE_LM;
+
+ if (pat_index & BIT(0))
+ pte |= MTL_GGTT_PTE_PAT0;
+
+ if (pat_index & BIT(1))
+ pte |= MTL_GGTT_PTE_PAT1;
+
+ return pte;
+}
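+
+/*
+ * Note: the GGTT PTE only carries a 2-bit PAT field (MTL_GGTT_PTE_PAT0/1),
+ * so only pat_index 0-3 can be expressed here, unlike the 4-bit encoding
+ * available to the PPGTT.
+ */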
+
u64 gen8_ggtt_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
+ unsigned int pat_index,
u32 flags)
{
gen8_pte_t pte = addr | GEN8_PAGE_PRESENT;
@@ -240,25 +260,25 @@ static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
static void gen8_ggtt_insert_page(struct i915_address_space *vm,
dma_addr_t addr,
u64 offset,
- enum i915_cache_level level,
+ unsigned int pat_index,
u32 flags)
{
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
gen8_pte_t __iomem *pte =
(gen8_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
- gen8_set_pte(pte, gen8_ggtt_pte_encode(addr, level, flags));
+ gen8_set_pte(pte, ggtt->vm.pte_encode(addr, pat_index, flags));
ggtt->invalidate(ggtt);
}
static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
struct i915_vma_resource *vma_res,
- enum i915_cache_level level,
+ unsigned int pat_index,
u32 flags)
{
- const gen8_pte_t pte_encode = gen8_ggtt_pte_encode(0, level, flags);
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+ const gen8_pte_t pte_encode = ggtt->vm.pte_encode(0, pat_index, flags);
gen8_pte_t __iomem *gte;
gen8_pte_t __iomem *end;
struct sgt_iter iter;
@@ -315,14 +335,14 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
static void gen6_ggtt_insert_page(struct i915_address_space *vm,
dma_addr_t addr,
u64 offset,
- enum i915_cache_level level,
+ unsigned int pat_index,
u32 flags)
{
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
gen6_pte_t __iomem *pte =
(gen6_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
- iowrite32(vm->pte_encode(addr, level, flags), pte);
+ iowrite32(vm->pte_encode(addr, pat_index, flags), pte);
ggtt->invalidate(ggtt);
}
@@ -335,7 +355,7 @@ static void gen6_ggtt_insert_page(struct i915_address_space *vm,
*/
static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
struct i915_vma_resource *vma_res,
- enum i915_cache_level level,
+ unsigned int pat_index,
u32 flags)
{
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
@@ -352,7 +372,7 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
iowrite32(vm->scratch[0]->encode, gte++);
end += (vma_res->node_size + vma_res->guard) / I915_GTT_PAGE_SIZE;
for_each_sgt_daddr(addr, iter, vma_res->bi.pages)
- iowrite32(vm->pte_encode(addr, level, flags), gte++);
+ iowrite32(vm->pte_encode(addr, pat_index, flags), gte++);
GEM_BUG_ON(gte > end);
/* Fill the allocated but "unused" space beyond the end of the buffer */
@@ -387,14 +407,15 @@ struct insert_page {
struct i915_address_space *vm;
dma_addr_t addr;
u64 offset;
- enum i915_cache_level level;
+ unsigned int pat_index;
};
static int bxt_vtd_ggtt_insert_page__cb(void *_arg)
{
struct insert_page *arg = _arg;
- gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, arg->level, 0);
+ gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset,
+ arg->pat_index, 0);
bxt_vtd_ggtt_wa(arg->vm);
return 0;
@@ -403,10 +424,10 @@ static int bxt_vtd_ggtt_insert_page__cb(void *_arg)
static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm,
dma_addr_t addr,
u64 offset,
- enum i915_cache_level level,
+ unsigned int pat_index,
u32 unused)
{
- struct insert_page arg = { vm, addr, offset, level };
+ struct insert_page arg = { vm, addr, offset, pat_index };
stop_machine(bxt_vtd_ggtt_insert_page__cb, &arg, NULL);
}
@@ -414,7 +435,7 @@ static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm,
struct insert_entries {
struct i915_address_space *vm;
struct i915_vma_resource *vma_res;
- enum i915_cache_level level;
+ unsigned int pat_index;
u32 flags;
};
@@ -422,7 +443,8 @@ static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
{
struct insert_entries *arg = _arg;
- gen8_ggtt_insert_entries(arg->vm, arg->vma_res, arg->level, arg->flags);
+ gen8_ggtt_insert_entries(arg->vm, arg->vma_res,
+ arg->pat_index, arg->flags);
bxt_vtd_ggtt_wa(arg->vm);
return 0;
@@ -430,10 +452,10 @@ static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
struct i915_vma_resource *vma_res,
- enum i915_cache_level level,
+ unsigned int pat_index,
u32 flags)
{
- struct insert_entries arg = { vm, vma_res, level, flags };
+ struct insert_entries arg = { vm, vma_res, pat_index, flags };
stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL);
}
@@ -462,7 +484,7 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
void intel_ggtt_bind_vma(struct i915_address_space *vm,
struct i915_vm_pt_stash *stash,
struct i915_vma_resource *vma_res,
- enum i915_cache_level cache_level,
+ unsigned int pat_index,
u32 flags)
{
u32 pte_flags;
@@ -479,7 +501,7 @@ void intel_ggtt_bind_vma(struct i915_address_space *vm,
if (vma_res->bi.lmem)
pte_flags |= PTE_LM;
- vm->insert_entries(vm, vma_res, cache_level, pte_flags);
+ vm->insert_entries(vm, vma_res, pat_index, pte_flags);
vma_res->page_sizes_gtt = I915_GTT_PAGE_SIZE;
}
@@ -628,7 +650,7 @@ err:
static void aliasing_gtt_bind_vma(struct i915_address_space *vm,
struct i915_vm_pt_stash *stash,
struct i915_vma_resource *vma_res,
- enum i915_cache_level cache_level,
+ unsigned int pat_index,
u32 flags)
{
u32 pte_flags;
@@ -640,10 +662,10 @@ static void aliasing_gtt_bind_vma(struct i915_address_space *vm,
if (flags & I915_VMA_LOCAL_BIND)
ppgtt_bind_vma(&i915_vm_to_ggtt(vm)->alias->vm,
- stash, vma_res, cache_level, flags);
+ stash, vma_res, pat_index, flags);
if (flags & I915_VMA_GLOBAL_BIND)
- vm->insert_entries(vm, vma_res, cache_level, pte_flags);
+ vm->insert_entries(vm, vma_res, pat_index, pte_flags);
vma_res->bound_flags |= flags;
}
@@ -900,7 +922,9 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
ggtt->vm.scratch[0]->encode =
ggtt->vm.pte_encode(px_dma(ggtt->vm.scratch[0]),
- I915_CACHE_NONE, pte_flags);
+ i915_gem_get_pat_index(i915,
+ I915_CACHE_NONE),
+ pte_flags);
return 0;
}
@@ -981,11 +1005,19 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
ggtt->vm.vma_ops.bind_vma = intel_ggtt_bind_vma;
ggtt->vm.vma_ops.unbind_vma = intel_ggtt_unbind_vma;
- ggtt->vm.pte_encode = gen8_ggtt_pte_encode;
+ if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70))
+ ggtt->vm.pte_encode = mtl_ggtt_pte_encode;
+ else
+ ggtt->vm.pte_encode = gen8_ggtt_pte_encode;
return ggtt_probe_common(ggtt, size);
}
+/*
+ * For pre-gen8 platforms pat_index is the same as enum i915_cache_level,
+ * so these PTE encode functions are left using cache_level.
+ * See the translation table LEGACY_CACHELEVEL.
+ */
static u64 snb_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
u32 flags)
@@ -1266,7 +1298,9 @@ bool i915_ggtt_resume_vm(struct i915_address_space *vm)
*/
vma->resource->bound_flags = 0;
vma->ops->bind_vma(vm, NULL, vma->resource,
- obj ? obj->cache_level : 0,
+ obj ? obj->pat_index :
+ i915_gem_get_pat_index(vm->i915,
+ I915_CACHE_NONE),
was_bound);
if (obj) { /* only used during resume => exclusive access */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
index 1b25a6039152..acec6566b914 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_irq.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
@@ -15,6 +15,7 @@
#include "intel_uncore.h"
#include "intel_rps.h"
#include "pxp/intel_pxp_irq.h"
+#include "uc/intel_gsc_proxy.h"
static void guc_irq_handler(struct intel_guc *guc, u16 iir)
{
@@ -81,6 +82,9 @@ gen11_other_irq_handler(struct intel_gt *gt, const u8 instance,
if (instance == OTHER_GSC_INSTANCE)
return intel_gsc_irq_handler(gt, iir);
+ if (instance == OTHER_GSC_HECI_2_INSTANCE)
+ return intel_gsc_proxy_irq_handler(&gt->uc.gsc, iir);
+
WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n",
instance, iir);
}
@@ -100,7 +104,10 @@ static struct intel_gt *pick_gt(struct intel_gt *gt, u8 class, u8 instance)
case VIDEO_ENHANCEMENT_CLASS:
return media_gt;
case OTHER_CLASS:
- if (instance == OTHER_GSC_INSTANCE && HAS_ENGINE(media_gt, GSC0))
+ if (instance == OTHER_GSC_HECI_2_INSTANCE)
+ return media_gt;
+ if ((instance == OTHER_GSC_INSTANCE || instance == OTHER_KCR_INSTANCE) &&
+ HAS_ENGINE(media_gt, GSC0))
return media_gt;
fallthrough;
default:
@@ -256,6 +263,7 @@ void gen11_gt_irq_postinstall(struct intel_gt *gt)
u32 irqs = GT_RENDER_USER_INTERRUPT;
u32 guc_mask = intel_uc_wants_guc(&gt->uc) ? GUC_INTR_GUC2HOST : 0;
u32 gsc_mask = 0;
+ u32 heci_mask = 0;
u32 dmask;
u32 smask;
@@ -267,10 +275,16 @@ void gen11_gt_irq_postinstall(struct intel_gt *gt)
dmask = irqs << 16 | irqs;
smask = irqs << 16;
- if (HAS_ENGINE(gt, GSC0))
+ if (HAS_ENGINE(gt, GSC0)) {
+ /*
+ * The HECI2 interrupt is enabled via the same register as the
+ * GSC interrupt, but it has its own mask register.
+ */
gsc_mask = irqs;
- else if (HAS_HECI_GSC(gt->i915))
+ heci_mask = GSC_IRQ_INTF(1); /* HECI2 IRQ for SW Proxy */
+ } else if (HAS_HECI_GSC(gt->i915)) {
gsc_mask = GSC_IRQ_INTF(0) | GSC_IRQ_INTF(1);
+ }
BUILD_BUG_ON(irqs & 0xffff0000);
@@ -280,7 +294,7 @@ void gen11_gt_irq_postinstall(struct intel_gt *gt)
if (CCS_MASK(gt))
intel_uncore_write(uncore, GEN12_CCS_RSVD_INTR_ENABLE, smask);
if (gsc_mask)
- intel_uncore_write(uncore, GEN11_GUNIT_CSME_INTR_ENABLE, gsc_mask);
+ intel_uncore_write(uncore, GEN11_GUNIT_CSME_INTR_ENABLE, gsc_mask | heci_mask);
/* Unmask irqs on RCS, BCS, VCS and VECS engines. */
intel_uncore_write(uncore, GEN11_RCS0_RSVD_INTR_MASK, ~smask);
@@ -308,6 +322,9 @@ void gen11_gt_irq_postinstall(struct intel_gt *gt)
intel_uncore_write(uncore, GEN12_CCS2_CCS3_INTR_MASK, ~dmask);
if (gsc_mask)
intel_uncore_write(uncore, GEN11_GUNIT_CSME_INTR_MASK, ~gsc_mask);
+ if (heci_mask)
+ intel_uncore_write(uncore, GEN12_HECI2_RSVD_INTR_MASK,
+ ~REG_FIELD_PREP(ENGINE1_MASK, heci_mask));
if (guc_mask) {
/* the enable bit is common for both GTs but the masks are separate */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
index e02cb90723ae..c2e69bafd02b 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
@@ -87,7 +87,7 @@ static int __gt_unpark(struct intel_wakeref *wf)
intel_rc6_unpark(&gt->rc6);
intel_rps_unpark(&gt->rps);
- i915_pmu_gt_unparked(i915);
+ i915_pmu_gt_unparked(gt);
intel_guc_busyness_unpark(gt);
intel_gt_unpark_requests(gt);
@@ -109,7 +109,7 @@ static int __gt_park(struct intel_wakeref *wf)
intel_guc_busyness_park(gt);
i915_vma_parked(gt);
- i915_pmu_gt_parked(i915);
+ i915_pmu_gt_parked(gt);
intel_rps_park(&gt->rps);
intel_rc6_park(&gt->rc6);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
index 80dbbef86b1d..357e2f865727 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
@@ -539,7 +539,10 @@ static bool rps_eval(void *data)
{
struct intel_gt *gt = data;
- return HAS_RPS(gt->i915);
+ if (intel_guc_slpc_is_used(&gt->uc.guc))
+ return false;
+ else
+ return HAS_RPS(gt->i915);
}
DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(rps_boost);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_regs.h b/drivers/gpu/drm/i915/gt/intel_gt_regs.h
index fd1f9cd35e9d..b8a39c219b60 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_regs.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_regs.h
@@ -356,7 +356,11 @@
#define GEN7_TLB_RD_ADDR _MMIO(0x4700)
#define GEN12_PAT_INDEX(index) _MMIO(0x4800 + (index) * 4)
-#define XEHP_PAT_INDEX(index) MCR_REG(0x4800 + (index) * 4)
+#define _PAT_INDEX(index) _PICK_EVEN_2RANGES(index, 8, \
+ 0x4800, 0x4804, \
+ 0x4848, 0x484c)
+#define XEHP_PAT_INDEX(index) MCR_REG(_PAT_INDEX(index))
+#define XELPMP_PAT_INDEX(index) _MMIO(_PAT_INDEX(index))
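+/*
+ * Assuming the usual _PICK_EVEN_2RANGES semantics, the first 8 PAT index
+ * registers live at 0x4800..0x481c and higher indices continue at 0x4848,
+ * reflecting the split register layout on these platforms.
+ */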
#define XEHP_TILE0_ADDR_RANGE MCR_REG(0x4900)
#define XEHP_TILE_LMEM_RANGE_SHIFT 8
@@ -525,6 +529,11 @@
#define GEN8_RC6_CTX_INFO _MMIO(0x8504)
+#define GEN12_SQCNT1 _MMIO(0x8718)
+#define GEN12_SQCNT1_PMON_ENABLE REG_BIT(30)
+#define GEN12_SQCNT1_OABPC REG_BIT(29)
+#define GEN12_STRICT_RAR_ENABLE REG_BIT(23)
+
#define XEHP_SQCM MCR_REG(0x8724)
#define EN_32B_ACCESS REG_BIT(30)
@@ -1587,6 +1596,7 @@
#define GEN11_GT_INTR_DW(x) _MMIO(0x190018 + ((x) * 4))
#define GEN11_CSME (31)
+#define GEN12_HECI_2 (30)
#define GEN11_GUNIT (28)
#define GEN11_GUC (25)
#define MTL_MGUC (24)
@@ -1628,6 +1638,7 @@
/* irq instances for OTHER_CLASS */
#define OTHER_GUC_INSTANCE 0
#define OTHER_GTPM_INSTANCE 1
+#define OTHER_GSC_HECI_2_INSTANCE 3
#define OTHER_KCR_INSTANCE 4
#define OTHER_GSC_INSTANCE 6
#define OTHER_MEDIA_GUC_INSTANCE 16
@@ -1643,6 +1654,7 @@
#define GEN12_VCS6_VCS7_INTR_MASK _MMIO(0x1900b4)
#define GEN11_VECS0_VECS1_INTR_MASK _MMIO(0x1900d0)
#define GEN12_VECS2_VECS3_INTR_MASK _MMIO(0x1900d4)
+#define GEN12_HECI2_RSVD_INTR_MASK _MMIO(0x1900e4)
#define GEN11_GUC_SG_INTR_MASK _MMIO(0x1900e8)
#define MTL_GUC_MGUC_INTR_MASK _MMIO(0x1900e8) /* MTL+ */
#define GEN11_GPM_WGBOXPERF_INTR_MASK _MMIO(0x1900ec)
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c
index 28f27091cd3b..ee2b44f896a2 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c
@@ -451,6 +451,33 @@ static ssize_t punit_req_freq_mhz_show(struct kobject *kobj,
return sysfs_emit(buff, "%u\n", preq);
}
+static ssize_t slpc_ignore_eff_freq_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buff)
+{
+ struct intel_gt *gt = intel_gt_sysfs_get_drvdata(kobj, attr->attr.name);
+ struct intel_guc_slpc *slpc = &gt->uc.guc.slpc;
+
+ return sysfs_emit(buff, "%u\n", slpc->ignore_eff_freq);
+}
+
+static ssize_t slpc_ignore_eff_freq_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buff, size_t count)
+{
+ struct intel_gt *gt = intel_gt_sysfs_get_drvdata(kobj, attr->attr.name);
+ struct intel_guc_slpc *slpc = &gt->uc.guc.slpc;
+ int err;
+ u32 val;
+
+ err = kstrtou32(buff, 0, &val);
+ if (err)
+ return err;
+
+ err = intel_guc_slpc_set_ignore_eff_freq(slpc, val);
+ return err ?: count;
+}
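+
+/*
+ * Illustrative usage from userspace (the exact sysfs path varies by
+ * card and gt):
+ *
+ *	echo 1 > /sys/class/drm/card0/gt/gt0/slpc_ignore_eff_freq
+ */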
+
struct intel_gt_bool_throttle_attr {
struct attribute attr;
ssize_t (*show)(struct kobject *kobj, struct kobj_attribute *attr,
@@ -663,6 +690,8 @@ static struct kobj_attribute attr_media_freq_factor_scale =
INTEL_GT_ATTR_RO(media_RP0_freq_mhz);
INTEL_GT_ATTR_RO(media_RPn_freq_mhz);
+INTEL_GT_ATTR_RW(slpc_ignore_eff_freq);
+
static const struct attribute *media_perf_power_attrs[] = {
&attr_media_freq_factor.attr,
&attr_media_freq_factor_scale.attr,
@@ -744,6 +773,12 @@ void intel_gt_sysfs_pm_init(struct intel_gt *gt, struct kobject *kobj)
if (ret)
gt_warn(gt, "failed to create punit_req_freq_mhz sysfs (%pe)", ERR_PTR(ret));
+ if (intel_uc_uses_guc_slpc(&gt->uc)) {
+ ret = sysfs_create_file(kobj, &attr_slpc_ignore_eff_freq.attr);
+ if (ret)
+ gt_warn(gt, "failed to create ignore_eff_freq sysfs (%pe)", ERR_PTR(ret));
+ }
+
if (i915_mmio_reg_valid(intel_gt_perf_limit_reasons_reg(gt))) {
ret = sysfs_create_files(kobj, throttle_reason_attrs);
if (ret)
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c
index 4f436ba7a3c8..2f6a9be0ffe6 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.c
@@ -468,6 +468,44 @@ void gtt_write_workarounds(struct intel_gt *gt)
}
}
+static void xelpmp_setup_private_ppat(struct intel_uncore *uncore)
+{
+ intel_uncore_write(uncore, XELPMP_PAT_INDEX(0),
+ MTL_PPAT_L4_0_WB);
+ intel_uncore_write(uncore, XELPMP_PAT_INDEX(1),
+ MTL_PPAT_L4_1_WT);
+ intel_uncore_write(uncore, XELPMP_PAT_INDEX(2),
+ MTL_PPAT_L4_3_UC);
+ intel_uncore_write(uncore, XELPMP_PAT_INDEX(3),
+ MTL_PPAT_L4_0_WB | MTL_2_COH_1W);
+ intel_uncore_write(uncore, XELPMP_PAT_INDEX(4),
+ MTL_PPAT_L4_0_WB | MTL_3_COH_2W);
+
+ /*
+ * Remaining PAT entries are left at the hardware-default
+ * fully-cached setting
+ */
+}
+
+static void xelpg_setup_private_ppat(struct intel_gt *gt)
+{
+ intel_gt_mcr_multicast_write(gt, XEHP_PAT_INDEX(0),
+ MTL_PPAT_L4_0_WB);
+ intel_gt_mcr_multicast_write(gt, XEHP_PAT_INDEX(1),
+ MTL_PPAT_L4_1_WT);
+ intel_gt_mcr_multicast_write(gt, XEHP_PAT_INDEX(2),
+ MTL_PPAT_L4_3_UC);
+ intel_gt_mcr_multicast_write(gt, XEHP_PAT_INDEX(3),
+ MTL_PPAT_L4_0_WB | MTL_2_COH_1W);
+ intel_gt_mcr_multicast_write(gt, XEHP_PAT_INDEX(4),
+ MTL_PPAT_L4_0_WB | MTL_3_COH_2W);
+
+ /*
+ * Remaining PAT entries are left at the hardware-default
+ * fully-cached setting
+ */
+}
+
static void tgl_setup_private_ppat(struct intel_uncore *uncore)
{
/* TGL doesn't support LLC or AGE settings */
@@ -603,7 +641,14 @@ void setup_private_pat(struct intel_gt *gt)
GEM_BUG_ON(GRAPHICS_VER(i915) < 8);
- if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50))
+ if (gt->type == GT_MEDIA) {
+ xelpmp_setup_private_ppat(gt->uncore);
+ return;
+ }
+
+ if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70))
+ xelpg_setup_private_ppat(gt);
+ else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50))
xehp_setup_private_ppat(gt);
else if (GRAPHICS_VER(i915) >= 12)
tgl_setup_private_ppat(uncore);
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h
index 69ce55f517f5..4d6296cdbcfd 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.h
@@ -88,9 +88,17 @@ typedef u64 gen8_pte_t;
#define BYT_PTE_SNOOPED_BY_CPU_CACHES REG_BIT(2)
#define BYT_PTE_WRITEABLE REG_BIT(1)
+#define MTL_PPGTT_PTE_PAT3 BIT_ULL(62)
#define GEN12_PPGTT_PTE_LM BIT_ULL(11)
+#define GEN12_PPGTT_PTE_PAT2 BIT_ULL(7)
+#define GEN12_PPGTT_PTE_PAT1 BIT_ULL(4)
+#define GEN12_PPGTT_PTE_PAT0 BIT_ULL(3)
-#define GEN12_GGTT_PTE_LM BIT_ULL(1)
+#define GEN12_GGTT_PTE_LM BIT_ULL(1)
+#define MTL_GGTT_PTE_PAT0 BIT_ULL(52)
+#define MTL_GGTT_PTE_PAT1 BIT_ULL(53)
+#define GEN12_GGTT_PTE_ADDR_MASK GENMASK_ULL(45, 12)
+#define MTL_GGTT_PTE_PAT_MASK GENMASK_ULL(53, 52)
#define GEN12_PDE_64K BIT(6)
#define GEN12_PTE_PS64 BIT(8)
@@ -147,7 +155,13 @@ typedef u64 gen8_pte_t;
#define GEN8_PDE_IPS_64K BIT(11)
#define GEN8_PDE_PS_2M BIT(7)
-enum i915_cache_level;
+#define MTL_PPAT_L4_CACHE_POLICY_MASK REG_GENMASK(3, 2)
+#define MTL_PAT_INDEX_COH_MODE_MASK REG_GENMASK(1, 0)
+#define MTL_PPAT_L4_3_UC REG_FIELD_PREP(MTL_PPAT_L4_CACHE_POLICY_MASK, 3)
+#define MTL_PPAT_L4_1_WT REG_FIELD_PREP(MTL_PPAT_L4_CACHE_POLICY_MASK, 1)
+#define MTL_PPAT_L4_0_WB REG_FIELD_PREP(MTL_PPAT_L4_CACHE_POLICY_MASK, 0)
+#define MTL_3_COH_2W REG_FIELD_PREP(MTL_PAT_INDEX_COH_MODE_MASK, 3)
+#define MTL_2_COH_1W REG_FIELD_PREP(MTL_PAT_INDEX_COH_MODE_MASK, 2)
struct drm_i915_gem_object;
struct i915_fence_reg;
@@ -216,7 +230,7 @@ struct i915_vma_ops {
void (*bind_vma)(struct i915_address_space *vm,
struct i915_vm_pt_stash *stash,
struct i915_vma_resource *vma_res,
- enum i915_cache_level cache_level,
+ unsigned int pat_index,
u32 flags);
/*
* Unmap an object from an address space. This usually consists of
@@ -288,7 +302,7 @@ struct i915_address_space {
(*alloc_scratch_dma)(struct i915_address_space *vm, int sz);
u64 (*pte_encode)(dma_addr_t addr,
- enum i915_cache_level level,
+ unsigned int pat_index,
u32 flags); /* Create a valid PTE */
#define PTE_READ_ONLY BIT(0)
#define PTE_LM BIT(1)
@@ -303,20 +317,20 @@ struct i915_address_space {
void (*insert_page)(struct i915_address_space *vm,
dma_addr_t addr,
u64 offset,
- enum i915_cache_level cache_level,
+ unsigned int pat_index,
u32 flags);
void (*insert_entries)(struct i915_address_space *vm,
struct i915_vma_resource *vma_res,
- enum i915_cache_level cache_level,
+ unsigned int pat_index,
u32 flags);
void (*raw_insert_page)(struct i915_address_space *vm,
dma_addr_t addr,
u64 offset,
- enum i915_cache_level cache_level,
+ unsigned int pat_index,
u32 flags);
void (*raw_insert_entries)(struct i915_address_space *vm,
struct i915_vma_resource *vma_res,
- enum i915_cache_level cache_level,
+ unsigned int pat_index,
u32 flags);
void (*cleanup)(struct i915_address_space *vm);
@@ -493,7 +507,7 @@ static inline void i915_vm_put(struct i915_address_space *vm)
/**
* i915_vm_resv_put - Release a reference on the vm's reservation lock
- * @resv: Pointer to a reservation lock obtained from i915_vm_resv_get()
+ * @vm: The vm whose reservation lock reference we want to release
*/
static inline void i915_vm_resv_put(struct i915_address_space *vm)
{
@@ -563,7 +577,7 @@ void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt,
void intel_ggtt_bind_vma(struct i915_address_space *vm,
struct i915_vm_pt_stash *stash,
struct i915_vma_resource *vma_res,
- enum i915_cache_level cache_level,
+ unsigned int pat_index,
u32 flags);
void intel_ggtt_unbind_vma(struct i915_address_space *vm,
struct i915_vma_resource *vma_res);
@@ -641,7 +655,7 @@ void gen6_ggtt_invalidate(struct i915_ggtt *ggtt);
void ppgtt_bind_vma(struct i915_address_space *vm,
struct i915_vm_pt_stash *stash,
struct i915_vma_resource *vma_res,
- enum i915_cache_level cache_level,
+ unsigned int pat_index,
u32 flags);
void ppgtt_unbind_vma(struct i915_address_space *vm,
struct i915_vma_resource *vma_res);
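The new MTL PTE fields above carve a 2-bit PAT index out of GGTT PTE bits 53:52, alongside the existing address bits. A minimal sketch of how a pat_index could be folded into a PTE with such masks; example_mtl_ggtt_pte and the EXAMPLE_* macros are hypothetical stand-ins, not the driver's actual encoder:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define EXAMPLE_MTL_GGTT_PTE_PAT_MASK		GENMASK_ULL(53, 52)
#define EXAMPLE_GEN12_GGTT_PTE_ADDR_MASK	GENMASK_ULL(45, 12)

/* Hypothetical encoder: only 2 PAT bits exist in the GGTT PTE, so clamp. */
static u64 example_mtl_ggtt_pte(u64 addr, unsigned int pat_index)
{
	u64 pte = addr & EXAMPLE_GEN12_GGTT_PTE_ADDR_MASK;

	pte |= FIELD_PREP(EXAMPLE_MTL_GGTT_PTE_PAT_MASK, pat_index & 0x3);
	return pte;
}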
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 81a96c52a92b..a4ec20aaafe2 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -1370,7 +1370,9 @@ gen12_emit_indirect_ctx_rcs(const struct intel_context *ce, u32 *cs)
cs, GEN12_GFX_CCS_AUX_NV);
/* Wa_16014892111 */
- if (IS_DG2(ce->engine->i915))
+ if (IS_MTL_GRAPHICS_STEP(ce->engine->i915, M, STEP_A0, STEP_B0) ||
+ IS_MTL_GRAPHICS_STEP(ce->engine->i915, P, STEP_A0, STEP_B0) ||
+ IS_DG2(ce->engine->i915))
cs = dg2_emit_draw_watermark_setting(cs);
return cs;
diff --git a/drivers/gpu/drm/i915/gt/intel_migrate.c b/drivers/gpu/drm/i915/gt/intel_migrate.c
index 3f638f198796..6023288b0e2d 100644
--- a/drivers/gpu/drm/i915/gt/intel_migrate.c
+++ b/drivers/gpu/drm/i915/gt/intel_migrate.c
@@ -45,7 +45,9 @@ static void xehpsdv_toggle_pdes(struct i915_address_space *vm,
* Insert a dummy PTE into every PT that will map to LMEM to ensure
* we have a correctly setup PDE structure for later use.
*/
- vm->insert_page(vm, 0, d->offset, I915_CACHE_NONE, PTE_LM);
+ vm->insert_page(vm, 0, d->offset,
+ i915_gem_get_pat_index(vm->i915, I915_CACHE_NONE),
+ PTE_LM);
GEM_BUG_ON(!pt->is_compact);
d->offset += SZ_2M;
}
@@ -63,7 +65,9 @@ static void xehpsdv_insert_pte(struct i915_address_space *vm,
* alignment is 64K underneath for the pt, and we are careful
* not to access the space in the void.
*/
- vm->insert_page(vm, px_dma(pt), d->offset, I915_CACHE_NONE, PTE_LM);
+ vm->insert_page(vm, px_dma(pt), d->offset,
+ i915_gem_get_pat_index(vm->i915, I915_CACHE_NONE),
+ PTE_LM);
d->offset += SZ_64K;
}
@@ -73,7 +77,8 @@ static void insert_pte(struct i915_address_space *vm,
{
struct insert_pte_data *d = data;
- vm->insert_page(vm, px_dma(pt), d->offset, I915_CACHE_NONE,
+ vm->insert_page(vm, px_dma(pt), d->offset,
+ i915_gem_get_pat_index(vm->i915, I915_CACHE_NONE),
i915_gem_object_is_lmem(pt->base) ? PTE_LM : 0);
d->offset += PAGE_SIZE;
}
@@ -356,13 +361,13 @@ static int max_pte_pkt_size(struct i915_request *rq, int pkt)
static int emit_pte(struct i915_request *rq,
struct sgt_dma *it,
- enum i915_cache_level cache_level,
+ unsigned int pat_index,
bool is_lmem,
u64 offset,
int length)
{
bool has_64K_pages = HAS_64K_PAGES(rq->engine->i915);
- const u64 encode = rq->context->vm->pte_encode(0, cache_level,
+ const u64 encode = rq->context->vm->pte_encode(0, pat_index,
is_lmem ? PTE_LM : 0);
struct intel_ring *ring = rq->ring;
int pkt, dword_length;
@@ -673,17 +678,17 @@ int
intel_context_migrate_copy(struct intel_context *ce,
const struct i915_deps *deps,
struct scatterlist *src,
- enum i915_cache_level src_cache_level,
+ unsigned int src_pat_index,
bool src_is_lmem,
struct scatterlist *dst,
- enum i915_cache_level dst_cache_level,
+ unsigned int dst_pat_index,
bool dst_is_lmem,
struct i915_request **out)
{
struct sgt_dma it_src = sg_sgt(src), it_dst = sg_sgt(dst), it_ccs;
struct drm_i915_private *i915 = ce->engine->i915;
u64 ccs_bytes_to_cpy = 0, bytes_to_cpy;
- enum i915_cache_level ccs_cache_level;
+ unsigned int ccs_pat_index;
u32 src_offset, dst_offset;
u8 src_access, dst_access;
struct i915_request *rq;
@@ -707,12 +712,12 @@ intel_context_migrate_copy(struct intel_context *ce,
dst_sz = scatter_list_length(dst);
if (src_is_lmem) {
it_ccs = it_dst;
- ccs_cache_level = dst_cache_level;
+ ccs_pat_index = dst_pat_index;
ccs_is_src = false;
} else if (dst_is_lmem) {
bytes_to_cpy = dst_sz;
it_ccs = it_src;
- ccs_cache_level = src_cache_level;
+ ccs_pat_index = src_pat_index;
ccs_is_src = true;
}
@@ -773,7 +778,7 @@ intel_context_migrate_copy(struct intel_context *ce,
src_sz = calculate_chunk_sz(i915, src_is_lmem,
bytes_to_cpy, ccs_bytes_to_cpy);
- len = emit_pte(rq, &it_src, src_cache_level, src_is_lmem,
+ len = emit_pte(rq, &it_src, src_pat_index, src_is_lmem,
src_offset, src_sz);
if (!len) {
err = -EINVAL;
@@ -784,7 +789,7 @@ intel_context_migrate_copy(struct intel_context *ce,
goto out_rq;
}
- err = emit_pte(rq, &it_dst, dst_cache_level, dst_is_lmem,
+ err = emit_pte(rq, &it_dst, dst_pat_index, dst_is_lmem,
dst_offset, len);
if (err < 0)
goto out_rq;
@@ -811,7 +816,7 @@ intel_context_migrate_copy(struct intel_context *ce,
goto out_rq;
ccs_sz = GET_CCS_BYTES(i915, len);
- err = emit_pte(rq, &it_ccs, ccs_cache_level, false,
+ err = emit_pte(rq, &it_ccs, ccs_pat_index, false,
ccs_is_src ? src_offset : dst_offset,
ccs_sz);
if (err < 0)
@@ -920,7 +925,7 @@ static int emit_clear(struct i915_request *rq, u32 offset, int size,
GEM_BUG_ON(size >> PAGE_SHIFT > S16_MAX);
- if (HAS_FLAT_CCS(i915) && ver >= 12)
+ if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50))
ring_sz = XY_FAST_COLOR_BLT_DW;
else if (ver >= 8)
ring_sz = 8;
@@ -931,7 +936,7 @@ static int emit_clear(struct i915_request *rq, u32 offset, int size,
if (IS_ERR(cs))
return PTR_ERR(cs);
- if (HAS_FLAT_CCS(i915) && ver >= 12) {
+ if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) {
*cs++ = XY_FAST_COLOR_BLT_CMD | XY_FAST_COLOR_BLT_DEPTH_32 |
(XY_FAST_COLOR_BLT_DW - 2);
*cs++ = FIELD_PREP(XY_FAST_COLOR_BLT_MOCS_MASK, mocs) |
@@ -979,7 +984,7 @@ int
intel_context_migrate_clear(struct intel_context *ce,
const struct i915_deps *deps,
struct scatterlist *sg,
- enum i915_cache_level cache_level,
+ unsigned int pat_index,
bool is_lmem,
u32 value,
struct i915_request **out)
@@ -1027,7 +1032,7 @@ intel_context_migrate_clear(struct intel_context *ce,
if (err)
goto out_rq;
- len = emit_pte(rq, &it, cache_level, is_lmem, offset, CHUNK_SZ);
+ len = emit_pte(rq, &it, pat_index, is_lmem, offset, CHUNK_SZ);
if (len <= 0) {
err = len;
goto out_rq;
@@ -1074,10 +1079,10 @@ int intel_migrate_copy(struct intel_migrate *m,
struct i915_gem_ww_ctx *ww,
const struct i915_deps *deps,
struct scatterlist *src,
- enum i915_cache_level src_cache_level,
+ unsigned int src_pat_index,
bool src_is_lmem,
struct scatterlist *dst,
- enum i915_cache_level dst_cache_level,
+ unsigned int dst_pat_index,
bool dst_is_lmem,
struct i915_request **out)
{
@@ -1098,8 +1103,8 @@ int intel_migrate_copy(struct intel_migrate *m,
goto out;
err = intel_context_migrate_copy(ce, deps,
- src, src_cache_level, src_is_lmem,
- dst, dst_cache_level, dst_is_lmem,
+ src, src_pat_index, src_is_lmem,
+ dst, dst_pat_index, dst_is_lmem,
out);
intel_context_unpin(ce);
@@ -1113,7 +1118,7 @@ intel_migrate_clear(struct intel_migrate *m,
struct i915_gem_ww_ctx *ww,
const struct i915_deps *deps,
struct scatterlist *sg,
- enum i915_cache_level cache_level,
+ unsigned int pat_index,
bool is_lmem,
u32 value,
struct i915_request **out)
@@ -1134,7 +1139,7 @@ intel_migrate_clear(struct intel_migrate *m,
if (err)
goto out;
- err = intel_context_migrate_clear(ce, deps, sg, cache_level,
+ err = intel_context_migrate_clear(ce, deps, sg, pat_index,
is_lmem, value, out);
intel_context_unpin(ce);
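The conversions above replace enum i915_cache_level with a raw PAT index at the vm/migrate boundary, with callers translating via i915_gem_get_pat_index(). A hedged sketch of that kind of translation, assuming a per-platform lookup table; all names and values here are illustrative, not the driver's:

/* All names and values below are illustrative. */
enum example_cache_level { EX_CACHE_NONE, EX_CACHE_LLC, EX_CACHE_WT };

static const unsigned int example_cachelevel_to_pat[] = {
	[EX_CACHE_NONE]	= 2,	/* an uncached PAT entry */
	[EX_CACHE_LLC]	= 0,	/* a write-back PAT entry */
	[EX_CACHE_WT]	= 1,	/* a write-through PAT entry */
};

static unsigned int example_get_pat_index(enum example_cache_level level)
{
	return example_cachelevel_to_pat[level];
}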
diff --git a/drivers/gpu/drm/i915/gt/intel_migrate.h b/drivers/gpu/drm/i915/gt/intel_migrate.h
index ccc677ec4aa3..11fc09a00c4b 100644
--- a/drivers/gpu/drm/i915/gt/intel_migrate.h
+++ b/drivers/gpu/drm/i915/gt/intel_migrate.h
@@ -16,7 +16,6 @@ struct i915_request;
struct i915_gem_ww_ctx;
struct intel_gt;
struct scatterlist;
-enum i915_cache_level;
int intel_migrate_init(struct intel_migrate *m, struct intel_gt *gt);
@@ -26,20 +25,20 @@ int intel_migrate_copy(struct intel_migrate *m,
struct i915_gem_ww_ctx *ww,
const struct i915_deps *deps,
struct scatterlist *src,
- enum i915_cache_level src_cache_level,
+ unsigned int src_pat_index,
bool src_is_lmem,
struct scatterlist *dst,
- enum i915_cache_level dst_cache_level,
+ unsigned int dst_pat_index,
bool dst_is_lmem,
struct i915_request **out);
int intel_context_migrate_copy(struct intel_context *ce,
const struct i915_deps *deps,
struct scatterlist *src,
- enum i915_cache_level src_cache_level,
+ unsigned int src_pat_index,
bool src_is_lmem,
struct scatterlist *dst,
- enum i915_cache_level dst_cache_level,
+ unsigned int dst_pat_index,
bool dst_is_lmem,
struct i915_request **out);
@@ -48,7 +47,7 @@ intel_migrate_clear(struct intel_migrate *m,
struct i915_gem_ww_ctx *ww,
const struct i915_deps *deps,
struct scatterlist *sg,
- enum i915_cache_level cache_level,
+ unsigned int pat_index,
bool is_lmem,
u32 value,
struct i915_request **out);
@@ -56,7 +55,7 @@ int
intel_context_migrate_clear(struct intel_context *ce,
const struct i915_deps *deps,
struct scatterlist *sg,
- enum i915_cache_level cache_level,
+ unsigned int pat_index,
bool is_lmem,
u32 value,
struct i915_request **out);
diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c
index 69b489e8dfed..2c014407225c 100644
--- a/drivers/gpu/drm/i915/gt/intel_mocs.c
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.c
@@ -40,6 +40,10 @@ struct drm_i915_mocs_table {
#define LE_COS(value) ((value) << 15)
#define LE_SSE(value) ((value) << 17)
+/* Defines for the tables (GLOB_MOCS_0 - GLOB_MOCS_16) */
+#define _L4_CACHEABILITY(value) ((value) << 2)
+#define IG_PAT(value) ((value) << 8)
+
/* Defines for the tables (LNCFMOCS0 - LNCFMOCS31) - two entries per word */
#define L3_ESC(value) ((value) << 0)
#define L3_SCC(value) ((value) << 1)
@@ -50,6 +54,7 @@ struct drm_i915_mocs_table {
/* Helper defines */
#define GEN9_NUM_MOCS_ENTRIES 64 /* 63-64 are reserved, but configured. */
#define PVC_NUM_MOCS_ENTRIES 3
+#define MTL_NUM_MOCS_ENTRIES 16
/* (e)LLC caching options */
/*
@@ -73,6 +78,12 @@ struct drm_i915_mocs_table {
#define L3_2_RESERVED _L3_CACHEABILITY(2)
#define L3_3_WB _L3_CACHEABILITY(3)
+/* L4 caching options */
+#define L4_0_WB _L4_CACHEABILITY(0)
+#define L4_1_WT _L4_CACHEABILITY(1)
+#define L4_2_RESERVED _L4_CACHEABILITY(2)
+#define L4_3_UC _L4_CACHEABILITY(3)
+
#define MOCS_ENTRY(__idx, __control_value, __l3cc_value) \
[__idx] = { \
.control_value = __control_value, \
@@ -416,6 +427,57 @@ static const struct drm_i915_mocs_entry pvc_mocs_table[] = {
MOCS_ENTRY(2, 0, L3_3_WB),
};
+static const struct drm_i915_mocs_entry mtl_mocs_table[] = {
+ /* Error - Reserved for Non-Use */
+ MOCS_ENTRY(0,
+ IG_PAT(0),
+ L3_LKUP(1) | L3_3_WB),
+ /* Cached - L3 + L4 */
+ MOCS_ENTRY(1,
+ IG_PAT(1),
+ L3_LKUP(1) | L3_3_WB),
+ /* L4 - GO:L3 */
+ MOCS_ENTRY(2,
+ IG_PAT(1),
+ L3_LKUP(1) | L3_1_UC),
+ /* Uncached - GO:L3 */
+ MOCS_ENTRY(3,
+ IG_PAT(1) | L4_3_UC,
+ L3_LKUP(1) | L3_1_UC),
+ /* L4 - GO:Mem */
+ MOCS_ENTRY(4,
+ IG_PAT(1),
+ L3_LKUP(1) | L3_GLBGO(1) | L3_1_UC),
+ /* Uncached - GO:Mem */
+ MOCS_ENTRY(5,
+ IG_PAT(1) | L4_3_UC,
+ L3_LKUP(1) | L3_GLBGO(1) | L3_1_UC),
+ /* L4 - L3:NoLKUP; GO:L3 */
+ MOCS_ENTRY(6,
+ IG_PAT(1),
+ L3_1_UC),
+ /* Uncached - L3:NoLKUP; GO:L3 */
+ MOCS_ENTRY(7,
+ IG_PAT(1) | L4_3_UC,
+ L3_1_UC),
+ /* L4 - L3:NoLKUP; GO:Mem */
+ MOCS_ENTRY(8,
+ IG_PAT(1),
+ L3_GLBGO(1) | L3_1_UC),
+ /* Uncached - L3:NoLKUP; GO:Mem */
+ MOCS_ENTRY(9,
+ IG_PAT(1) | L4_3_UC,
+ L3_GLBGO(1) | L3_1_UC),
+ /* Display - L3; L4:WT */
+ MOCS_ENTRY(14,
+ IG_PAT(1) | L4_1_WT,
+ L3_LKUP(1) | L3_3_WB),
+ /* CCS - Non-Displayable */
+ MOCS_ENTRY(15,
+ IG_PAT(1),
+ L3_GLBGO(1) | L3_1_UC),
+};
+
enum {
HAS_GLOBAL_MOCS = BIT(0),
HAS_ENGINE_MOCS = BIT(1),
@@ -445,7 +507,13 @@ static unsigned int get_mocs_settings(const struct drm_i915_private *i915,
memset(table, 0, sizeof(struct drm_i915_mocs_table));
table->unused_entries_index = I915_MOCS_PTE;
- if (IS_PONTEVECCHIO(i915)) {
+ if (IS_METEORLAKE(i915)) {
+ table->size = ARRAY_SIZE(mtl_mocs_table);
+ table->table = mtl_mocs_table;
+ table->n_entries = MTL_NUM_MOCS_ENTRIES;
+ table->uc_index = 9;
+ table->unused_entries_index = 1;
+ } else if (IS_PONTEVECCHIO(i915)) {
table->size = ARRAY_SIZE(pvc_mocs_table);
table->table = pvc_mocs_table;
table->n_entries = PVC_NUM_MOCS_ENTRIES;
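The MTL control values above compose from plain shifts: IG_PAT() is bit 8 and the L4 cacheability field occupies bits 3:2. A standalone recomputation of entry 14's control word, just to make the arithmetic concrete (the EX_* macros mirror the definitions above for illustration):

#include <stdio.h>

#define EX_L4_CACHEABILITY(value)	((value) << 2)
#define EX_IG_PAT(value)		((value) << 8)
#define EX_L4_1_WT			EX_L4_CACHEABILITY(1)

int main(void)
{
	unsigned int control = EX_IG_PAT(1) | EX_L4_1_WT;

	printf("entry 14 control value: 0x%x\n", control);	/* 0x104 */
	return 0;
}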
diff --git a/drivers/gpu/drm/i915/gt/intel_ppgtt.c b/drivers/gpu/drm/i915/gt/intel_ppgtt.c
index 7ecfa672f738..436756bfbb1a 100644
--- a/drivers/gpu/drm/i915/gt/intel_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ppgtt.c
@@ -181,7 +181,7 @@ struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt,
void ppgtt_bind_vma(struct i915_address_space *vm,
struct i915_vm_pt_stash *stash,
struct i915_vma_resource *vma_res,
- enum i915_cache_level cache_level,
+ unsigned int pat_index,
u32 flags)
{
u32 pte_flags;
@@ -199,7 +199,7 @@ void ppgtt_bind_vma(struct i915_address_space *vm,
if (vma_res->bi.lmem)
pte_flags |= PTE_LM;
- vm->insert_entries(vm, vma_res, cache_level, pte_flags);
+ vm->insert_entries(vm, vma_res, pat_index, pte_flags);
wmb();
}
diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c
index 8f3cd68d14f8..58bb1c55294c 100644
--- a/drivers/gpu/drm/i915/gt/intel_rc6.c
+++ b/drivers/gpu/drm/i915/gt/intel_rc6.c
@@ -53,11 +53,6 @@ static struct drm_i915_private *rc6_to_i915(struct intel_rc6 *rc)
return rc6_to_gt(rc)->i915;
}
-static void set(struct intel_uncore *uncore, i915_reg_t reg, u32 val)
-{
- intel_uncore_write_fw(uncore, reg, val);
-}
-
static void gen11_rc6_enable(struct intel_rc6 *rc6)
{
struct intel_gt *gt = rc6_to_gt(rc6);
@@ -72,19 +67,19 @@ static void gen11_rc6_enable(struct intel_rc6 *rc6)
*/
if (!intel_uc_uses_guc_rc(&gt->uc)) {
/* 2b: Program RC6 thresholds.*/
- set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16 | 85);
- set(uncore, GEN10_MEDIA_WAKE_RATE_LIMIT, 150);
+ intel_uncore_write_fw(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16 | 85);
+ intel_uncore_write_fw(uncore, GEN10_MEDIA_WAKE_RATE_LIMIT, 150);
- set(uncore, GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
- set(uncore, GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
+ intel_uncore_write_fw(uncore, GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
+ intel_uncore_write_fw(uncore, GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
for_each_engine(engine, rc6_to_gt(rc6), id)
- set(uncore, RING_MAX_IDLE(engine->mmio_base), 10);
+ intel_uncore_write_fw(uncore, RING_MAX_IDLE(engine->mmio_base), 10);
- set(uncore, GUC_MAX_IDLE_COUNT, 0xA);
+ intel_uncore_write_fw(uncore, GUC_MAX_IDLE_COUNT, 0xA);
- set(uncore, GEN6_RC_SLEEP, 0);
+ intel_uncore_write_fw(uncore, GEN6_RC_SLEEP, 0);
- set(uncore, GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
+ intel_uncore_write_fw(uncore, GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
}
/*
@@ -105,8 +100,8 @@ static void gen11_rc6_enable(struct intel_rc6 *rc6)
* Broadwell+, To be conservative, we want to factor in a context
* switch on top (due to ksoftirqd).
*/
- set(uncore, GEN9_MEDIA_PG_IDLE_HYSTERESIS, 60);
- set(uncore, GEN9_RENDER_PG_IDLE_HYSTERESIS, 60);
+ intel_uncore_write_fw(uncore, GEN9_MEDIA_PG_IDLE_HYSTERESIS, 60);
+ intel_uncore_write_fw(uncore, GEN9_RENDER_PG_IDLE_HYSTERESIS, 60);
/* 3a: Enable RC6
*
@@ -122,8 +117,14 @@ static void gen11_rc6_enable(struct intel_rc6 *rc6)
GEN6_RC_CTL_RC6_ENABLE |
GEN6_RC_CTL_EI_MODE(1);
- /* Wa_16011777198 - Render powergating must remain disabled */
- if (IS_DG2_GRAPHICS_STEP(gt->i915, G10, STEP_A0, STEP_C0) ||
+ /*
+ * Wa_16011777198 and BSpec 52698 - Render powergating must be off.
+	 * FIXME: BSpec is outdated; disabling powergating for MTL is just a
+	 * temporary workaround and should be removed once the real cause of
+	 * the forcewake timeouts is fixed.
+ */
+ if (IS_METEORLAKE(gt->i915) ||
+ IS_DG2_GRAPHICS_STEP(gt->i915, G10, STEP_A0, STEP_C0) ||
IS_DG2_GRAPHICS_STEP(gt->i915, G11, STEP_A0, STEP_B0))
pg_enable =
GEN9_MEDIA_PG_ENABLE |
@@ -141,7 +142,7 @@ static void gen11_rc6_enable(struct intel_rc6 *rc6)
VDN_MFX_POWERGATE_ENABLE(i));
}
- set(uncore, GEN9_PG_ENABLE, pg_enable);
+ intel_uncore_write_fw(uncore, GEN9_PG_ENABLE, pg_enable);
}
static void gen9_rc6_enable(struct intel_rc6 *rc6)
@@ -152,26 +153,26 @@ static void gen9_rc6_enable(struct intel_rc6 *rc6)
/* 2b: Program RC6 thresholds.*/
if (GRAPHICS_VER(rc6_to_i915(rc6)) >= 11) {
- set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16 | 85);
- set(uncore, GEN10_MEDIA_WAKE_RATE_LIMIT, 150);
+ intel_uncore_write_fw(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16 | 85);
+ intel_uncore_write_fw(uncore, GEN10_MEDIA_WAKE_RATE_LIMIT, 150);
} else if (IS_SKYLAKE(rc6_to_i915(rc6))) {
/*
* WaRsDoubleRc6WrlWithCoarsePowerGating:skl Doubling WRL only
* when CPG is enabled
*/
- set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 108 << 16);
+ intel_uncore_write_fw(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 108 << 16);
} else {
- set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
+ intel_uncore_write_fw(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
}
- set(uncore, GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
- set(uncore, GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
+ intel_uncore_write_fw(uncore, GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
+ intel_uncore_write_fw(uncore, GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
for_each_engine(engine, rc6_to_gt(rc6), id)
- set(uncore, RING_MAX_IDLE(engine->mmio_base), 10);
+ intel_uncore_write_fw(uncore, RING_MAX_IDLE(engine->mmio_base), 10);
- set(uncore, GUC_MAX_IDLE_COUNT, 0xA);
+ intel_uncore_write_fw(uncore, GUC_MAX_IDLE_COUNT, 0xA);
- set(uncore, GEN6_RC_SLEEP, 0);
+ intel_uncore_write_fw(uncore, GEN6_RC_SLEEP, 0);
/*
* 2c: Program Coarse Power Gating Policies.
@@ -194,11 +195,11 @@ static void gen9_rc6_enable(struct intel_rc6 *rc6)
* conservative, we have to factor in a context switch on top (due
* to ksoftirqd).
*/
- set(uncore, GEN9_MEDIA_PG_IDLE_HYSTERESIS, 250);
- set(uncore, GEN9_RENDER_PG_IDLE_HYSTERESIS, 250);
+ intel_uncore_write_fw(uncore, GEN9_MEDIA_PG_IDLE_HYSTERESIS, 250);
+ intel_uncore_write_fw(uncore, GEN9_RENDER_PG_IDLE_HYSTERESIS, 250);
/* 3a: Enable RC6 */
- set(uncore, GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */
+ intel_uncore_write_fw(uncore, GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */
rc6->ctl_enable =
GEN6_RC_CTL_HW_ENABLE |
@@ -210,8 +211,8 @@ static void gen9_rc6_enable(struct intel_rc6 *rc6)
* - Render/Media PG need to be disabled with RC6.
*/
if (!NEEDS_WaRsDisableCoarsePowerGating(rc6_to_i915(rc6)))
- set(uncore, GEN9_PG_ENABLE,
- GEN9_RENDER_PG_ENABLE | GEN9_MEDIA_PG_ENABLE);
+ intel_uncore_write_fw(uncore, GEN9_PG_ENABLE,
+ GEN9_RENDER_PG_ENABLE | GEN9_MEDIA_PG_ENABLE);
}
static void gen8_rc6_enable(struct intel_rc6 *rc6)
@@ -221,13 +222,13 @@ static void gen8_rc6_enable(struct intel_rc6 *rc6)
enum intel_engine_id id;
/* 2b: Program RC6 thresholds.*/
- set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
- set(uncore, GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
- set(uncore, GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
+ intel_uncore_write_fw(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
+ intel_uncore_write_fw(uncore, GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
+ intel_uncore_write_fw(uncore, GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
for_each_engine(engine, rc6_to_gt(rc6), id)
- set(uncore, RING_MAX_IDLE(engine->mmio_base), 10);
- set(uncore, GEN6_RC_SLEEP, 0);
- set(uncore, GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
+ intel_uncore_write_fw(uncore, RING_MAX_IDLE(engine->mmio_base), 10);
+ intel_uncore_write_fw(uncore, GEN6_RC_SLEEP, 0);
+ intel_uncore_write_fw(uncore, GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
/* 3: Enable RC6 */
rc6->ctl_enable =
@@ -245,20 +246,20 @@ static void gen6_rc6_enable(struct intel_rc6 *rc6)
u32 rc6vids, rc6_mask;
int ret;
- set(uncore, GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
- set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
- set(uncore, GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
- set(uncore, GEN6_RC_EVALUATION_INTERVAL, 125000);
- set(uncore, GEN6_RC_IDLE_HYSTERSIS, 25);
+ intel_uncore_write_fw(uncore, GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
+ intel_uncore_write_fw(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
+ intel_uncore_write_fw(uncore, GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
+ intel_uncore_write_fw(uncore, GEN6_RC_EVALUATION_INTERVAL, 125000);
+ intel_uncore_write_fw(uncore, GEN6_RC_IDLE_HYSTERSIS, 25);
for_each_engine(engine, rc6_to_gt(rc6), id)
- set(uncore, RING_MAX_IDLE(engine->mmio_base), 10);
+ intel_uncore_write_fw(uncore, RING_MAX_IDLE(engine->mmio_base), 10);
- set(uncore, GEN6_RC_SLEEP, 0);
- set(uncore, GEN6_RC1e_THRESHOLD, 1000);
- set(uncore, GEN6_RC6_THRESHOLD, 50000);
- set(uncore, GEN6_RC6p_THRESHOLD, 150000);
- set(uncore, GEN6_RC6pp_THRESHOLD, 64000); /* unused */
+ intel_uncore_write_fw(uncore, GEN6_RC_SLEEP, 0);
+ intel_uncore_write_fw(uncore, GEN6_RC1e_THRESHOLD, 1000);
+ intel_uncore_write_fw(uncore, GEN6_RC6_THRESHOLD, 50000);
+ intel_uncore_write_fw(uncore, GEN6_RC6p_THRESHOLD, 150000);
+ intel_uncore_write_fw(uncore, GEN6_RC6pp_THRESHOLD, 64000); /* unused */
/* We don't use those on Haswell */
rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
@@ -372,22 +373,22 @@ static void chv_rc6_enable(struct intel_rc6 *rc6)
enum intel_engine_id id;
/* 2a: Program RC6 thresholds.*/
- set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
- set(uncore, GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
- set(uncore, GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
+ intel_uncore_write_fw(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
+ intel_uncore_write_fw(uncore, GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
+ intel_uncore_write_fw(uncore, GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
for_each_engine(engine, rc6_to_gt(rc6), id)
- set(uncore, RING_MAX_IDLE(engine->mmio_base), 10);
- set(uncore, GEN6_RC_SLEEP, 0);
+ intel_uncore_write_fw(uncore, RING_MAX_IDLE(engine->mmio_base), 10);
+ intel_uncore_write_fw(uncore, GEN6_RC_SLEEP, 0);
/* TO threshold set to 500 us (0x186 * 1.28 us) */
- set(uncore, GEN6_RC6_THRESHOLD, 0x186);
+ intel_uncore_write_fw(uncore, GEN6_RC6_THRESHOLD, 0x186);
/* Allows RC6 residency counter to work */
- set(uncore, VLV_COUNTER_CONTROL,
- _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
- VLV_MEDIA_RC6_COUNT_EN |
- VLV_RENDER_RC6_COUNT_EN));
+ intel_uncore_write_fw(uncore, VLV_COUNTER_CONTROL,
+ _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
+ VLV_MEDIA_RC6_COUNT_EN |
+ VLV_RENDER_RC6_COUNT_EN));
/* 3: Enable RC6 */
rc6->ctl_enable = GEN7_RC_CTL_TO_MODE;
@@ -399,22 +400,22 @@ static void vlv_rc6_enable(struct intel_rc6 *rc6)
struct intel_engine_cs *engine;
enum intel_engine_id id;
- set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000);
- set(uncore, GEN6_RC_EVALUATION_INTERVAL, 125000);
- set(uncore, GEN6_RC_IDLE_HYSTERSIS, 25);
+ intel_uncore_write_fw(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000);
+ intel_uncore_write_fw(uncore, GEN6_RC_EVALUATION_INTERVAL, 125000);
+ intel_uncore_write_fw(uncore, GEN6_RC_IDLE_HYSTERSIS, 25);
for_each_engine(engine, rc6_to_gt(rc6), id)
- set(uncore, RING_MAX_IDLE(engine->mmio_base), 10);
+ intel_uncore_write_fw(uncore, RING_MAX_IDLE(engine->mmio_base), 10);
- set(uncore, GEN6_RC6_THRESHOLD, 0x557);
+ intel_uncore_write_fw(uncore, GEN6_RC6_THRESHOLD, 0x557);
/* Allows RC6 residency counter to work */
- set(uncore, VLV_COUNTER_CONTROL,
- _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
- VLV_MEDIA_RC0_COUNT_EN |
- VLV_RENDER_RC0_COUNT_EN |
- VLV_MEDIA_RC6_COUNT_EN |
- VLV_RENDER_RC6_COUNT_EN));
+ intel_uncore_write_fw(uncore, VLV_COUNTER_CONTROL,
+ _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
+ VLV_MEDIA_RC0_COUNT_EN |
+ VLV_RENDER_RC0_COUNT_EN |
+ VLV_MEDIA_RC6_COUNT_EN |
+ VLV_RENDER_RC6_COUNT_EN));
rc6->ctl_enable =
GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;
@@ -575,9 +576,9 @@ static void __intel_rc6_disable(struct intel_rc6 *rc6)
intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
if (GRAPHICS_VER(i915) >= 9)
- set(uncore, GEN9_PG_ENABLE, 0);
- set(uncore, GEN6_RC_CONTROL, 0);
- set(uncore, GEN6_RC_STATE, 0);
+ intel_uncore_write_fw(uncore, GEN9_PG_ENABLE, 0);
+ intel_uncore_write_fw(uncore, GEN6_RC_CONTROL, 0);
+ intel_uncore_write_fw(uncore, GEN6_RC_STATE, 0);
intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
}
@@ -684,7 +685,7 @@ void intel_rc6_unpark(struct intel_rc6 *rc6)
return;
/* Restore HW timers for automatic RC6 entry while busy */
- set(uncore, GEN6_RC_CONTROL, rc6->ctl_enable);
+ intel_uncore_write_fw(uncore, GEN6_RC_CONTROL, rc6->ctl_enable);
}
void intel_rc6_park(struct intel_rc6 *rc6)
@@ -704,7 +705,7 @@ void intel_rc6_park(struct intel_rc6 *rc6)
return;
/* Turn off the HW timers and go directly to rc6 */
- set(uncore, GEN6_RC_CONTROL, GEN6_RC_CTL_RC6_ENABLE);
+ intel_uncore_write_fw(uncore, GEN6_RC_CONTROL, GEN6_RC_CTL_RC6_ENABLE);
if (HAS_RC6pp(rc6_to_i915(rc6)))
target = 0x6; /* deepest rc6 */
@@ -712,7 +713,7 @@ void intel_rc6_park(struct intel_rc6 *rc6)
target = 0x5; /* deep rc6 */
else
target = 0x4; /* normal rc6 */
- set(uncore, GEN6_RC_STATE, target << RC_SW_TARGET_STATE_SHIFT);
+ intel_uncore_write_fw(uncore, GEN6_RC_STATE, target << RC_SW_TARGET_STATE_SHIFT);
}
void intel_rc6_disable(struct intel_rc6 *rc6)
@@ -735,7 +736,7 @@ void intel_rc6_fini(struct intel_rc6 *rc6)
/* We want the BIOS C6 state preserved across loads for MTL */
if (IS_METEORLAKE(rc6_to_i915(rc6)) && rc6->bios_state_captured)
- set(uncore, GEN6_RC_STATE, rc6->bios_rc_state);
+ intel_uncore_write_fw(uncore, GEN6_RC_STATE, rc6->bios_rc_state);
pctx = fetch_and_zero(&rc6->pctx);
if (pctx)
@@ -766,18 +767,18 @@ static u64 vlv_residency_raw(struct intel_uncore *uncore, const i915_reg_t reg)
* before we have set the default VLV_COUNTER_CONTROL value. So always
* set the high bit to be safe.
*/
- set(uncore, VLV_COUNTER_CONTROL,
- _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH));
+ intel_uncore_write_fw(uncore, VLV_COUNTER_CONTROL,
+ _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH));
upper = intel_uncore_read_fw(uncore, reg);
do {
tmp = upper;
- set(uncore, VLV_COUNTER_CONTROL,
- _MASKED_BIT_DISABLE(VLV_COUNT_RANGE_HIGH));
+ intel_uncore_write_fw(uncore, VLV_COUNTER_CONTROL,
+ _MASKED_BIT_DISABLE(VLV_COUNT_RANGE_HIGH));
lower = intel_uncore_read_fw(uncore, reg);
- set(uncore, VLV_COUNTER_CONTROL,
- _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH));
+ intel_uncore_write_fw(uncore, VLV_COUNTER_CONTROL,
+ _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH));
upper = intel_uncore_read_fw(uncore, reg);
} while (upper != tmp && --loop);
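vlv_residency_raw() above reads a 64-bit residency counter exposed as two 32-bit halves, re-reading the high half until it is stable across the low-half read. The same pattern in isolation; reg_read_hi()/reg_read_lo() are hypothetical stand-ins for the toggled register accesses:

#include <stdint.h>

uint32_t reg_read_hi(void);	/* hypothetical MMIO accessors */
uint32_t reg_read_lo(void);

static uint64_t read_split_counter(void)
{
	uint32_t hi, tmp, lo;
	int loop = 2;

	hi = reg_read_hi();
	do {
		/* retry if the high half rolled over mid-read */
		tmp = hi;
		lo = reg_read_lo();
		hi = reg_read_hi();
	} while (hi != tmp && --loop);

	return ((uint64_t)hi << 32) | lo;
}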
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index b925ef47304b..4d2dece96011 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -812,11 +812,25 @@ static void dg2_ctx_workarounds_init(struct intel_engine_cs *engine,
wa_masked_en(wal, CACHE_MODE_1, MSAA_OPTIMIZATION_REDUC_DISABLE);
}
+static void mtl_ctx_gt_tuning_init(struct intel_engine_cs *engine,
+ struct i915_wa_list *wal)
+{
+ struct drm_i915_private *i915 = engine->i915;
+
+ dg2_ctx_gt_tuning_init(engine, wal);
+
+ if (IS_MTL_GRAPHICS_STEP(i915, M, STEP_B0, STEP_FOREVER) ||
+ IS_MTL_GRAPHICS_STEP(i915, P, STEP_B0, STEP_FOREVER))
+ wa_add(wal, DRAW_WATERMARK, VERT_WM_VAL, 0x3FF, 0, false);
+}
+
static void mtl_ctx_workarounds_init(struct intel_engine_cs *engine,
struct i915_wa_list *wal)
{
struct drm_i915_private *i915 = engine->i915;
+ mtl_ctx_gt_tuning_init(engine, wal);
+
if (IS_MTL_GRAPHICS_STEP(i915, M, STEP_A0, STEP_B0) ||
IS_MTL_GRAPHICS_STEP(i915, P, STEP_A0, STEP_B0)) {
/* Wa_14014947963 */
@@ -1695,14 +1709,20 @@ pvc_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
static void
xelpg_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
+ /* Wa_14018778641 / Wa_18018781329 */
+ wa_mcr_write_or(wal, RENDER_MOD_CTRL, FORCE_MISS_FTLB);
+ wa_mcr_write_or(wal, COMP_MOD_CTRL, FORCE_MISS_FTLB);
+
+ /* Wa_22016670082 */
+ wa_write_or(wal, GEN12_SQCNT1, GEN12_STRICT_RAR_ENABLE);
+
if (IS_MTL_GRAPHICS_STEP(gt->i915, M, STEP_A0, STEP_B0) ||
IS_MTL_GRAPHICS_STEP(gt->i915, P, STEP_A0, STEP_B0)) {
/* Wa_14014830051 */
wa_mcr_write_clr(wal, SARB_CHICKEN1, COMP_CKN_IN);
- /* Wa_18018781329 */
- wa_mcr_write_or(wal, RENDER_MOD_CTRL, FORCE_MISS_FTLB);
- wa_mcr_write_or(wal, COMP_MOD_CTRL, FORCE_MISS_FTLB);
+ /* Wa_14015795083 */
+ wa_write_clr(wal, GEN7_MISCCPCTL, GEN12_DOP_CLOCK_GATE_RENDER_ENABLE);
}
/*
@@ -1715,17 +1735,16 @@ xelpg_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
static void
xelpmp_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
- if (IS_MTL_MEDIA_STEP(gt->i915, STEP_A0, STEP_B0)) {
- /*
- * Wa_18018781329
- *
- * Note that although these registers are MCR on the primary
- * GT, the media GT's versions are regular singleton registers.
- */
- wa_write_or(wal, XELPMP_GSC_MOD_CTRL, FORCE_MISS_FTLB);
- wa_write_or(wal, XELPMP_VDBX_MOD_CTRL, FORCE_MISS_FTLB);
- wa_write_or(wal, XELPMP_VEBX_MOD_CTRL, FORCE_MISS_FTLB);
- }
+ /*
+ * Wa_14018778641
+ * Wa_18018781329
+ *
+ * Note that although these registers are MCR on the primary
+ * GT, the media GT's versions are regular singleton registers.
+ */
+ wa_write_or(wal, XELPMP_GSC_MOD_CTRL, FORCE_MISS_FTLB);
+ wa_write_or(wal, XELPMP_VDBX_MOD_CTRL, FORCE_MISS_FTLB);
+ wa_write_or(wal, XELPMP_VEBX_MOD_CTRL, FORCE_MISS_FTLB);
debug_dump_steering(gt);
}
@@ -1743,6 +1762,13 @@ xelpmp_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
*/
static void gt_tuning_settings(struct intel_gt *gt, struct i915_wa_list *wal)
{
+ if (IS_METEORLAKE(gt->i915)) {
+ if (gt->type != GT_MEDIA)
+ wa_mcr_write_or(wal, XEHP_L3SCQREG7, BLEND_FILL_CACHING_OPT_DIS);
+
+ wa_mcr_write_or(wal, XEHP_SQCM, EN_32B_ACCESS);
+ }
+
if (IS_PONTEVECCHIO(gt->i915)) {
wa_mcr_write(wal, XEHPC_L3SCRUB,
SCRUB_CL_DWNGRADE_SHARED | SCRUB_RATE_4B_PER_CLK);
@@ -2939,7 +2965,7 @@ static void
add_render_compute_tuning_settings(struct drm_i915_private *i915,
struct i915_wa_list *wal)
{
- if (IS_DG2(i915))
+ if (IS_METEORLAKE(i915) || IS_DG2(i915))
wa_mcr_write_clr_set(wal, RT_CTRL, STACKID_CTRL, STACKID_CTRL_512);
/*
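The wa_write_or()/wa_mcr_write_or() calls above record OR-style updates in a workaround list so they can be re-applied after resets. Stripped of the list machinery, each boils down to a read-modify-write; a minimal sketch, with mmio_read()/mmio_write() as hypothetical accessors:

#include <stdint.h>

uint32_t mmio_read(uint32_t reg);		/* hypothetical accessors */
void mmio_write(uint32_t reg, uint32_t val);

static void write_or(uint32_t reg, uint32_t set)
{
	mmio_write(reg, mmio_read(reg) | set);
}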
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_pm.c b/drivers/gpu/drm/i915/gt/selftest_engine_pm.c
index 87c94314cf67..10e556a7eac4 100644
--- a/drivers/gpu/drm/i915/gt/selftest_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_pm.c
@@ -5,6 +5,7 @@
#include <linux/sort.h>
+#include "gt/intel_gt_print.h"
#include "i915_selftest.h"
#include "intel_engine_regs.h"
#include "intel_gpu_commands.h"
@@ -402,7 +403,7 @@ static int live_engine_pm(void *arg)
/* gt wakeref is async (deferred to workqueue) */
if (intel_gt_pm_wait_for_idle(gt)) {
- pr_err("GT failed to idle\n");
+ gt_err(gt, "GT failed to idle\n");
return -EINVAL;
}
}
diff --git a/drivers/gpu/drm/i915/gt/selftest_migrate.c b/drivers/gpu/drm/i915/gt/selftest_migrate.c
index e677f2da093d..3def5ca72dec 100644
--- a/drivers/gpu/drm/i915/gt/selftest_migrate.c
+++ b/drivers/gpu/drm/i915/gt/selftest_migrate.c
@@ -137,7 +137,7 @@ err_free_src:
static int intel_context_copy_ccs(struct intel_context *ce,
const struct i915_deps *deps,
struct scatterlist *sg,
- enum i915_cache_level cache_level,
+ unsigned int pat_index,
bool write_to_ccs,
struct i915_request **out)
{
@@ -185,7 +185,7 @@ static int intel_context_copy_ccs(struct intel_context *ce,
if (err)
goto out_rq;
- len = emit_pte(rq, &it, cache_level, true, offset, CHUNK_SZ);
+ len = emit_pte(rq, &it, pat_index, true, offset, CHUNK_SZ);
if (len <= 0) {
err = len;
goto out_rq;
@@ -223,7 +223,7 @@ intel_migrate_ccs_copy(struct intel_migrate *m,
struct i915_gem_ww_ctx *ww,
const struct i915_deps *deps,
struct scatterlist *sg,
- enum i915_cache_level cache_level,
+ unsigned int pat_index,
bool write_to_ccs,
struct i915_request **out)
{
@@ -243,7 +243,7 @@ intel_migrate_ccs_copy(struct intel_migrate *m,
if (err)
goto out;
- err = intel_context_copy_ccs(ce, deps, sg, cache_level,
+ err = intel_context_copy_ccs(ce, deps, sg, pat_index,
write_to_ccs, out);
intel_context_unpin(ce);
@@ -300,7 +300,7 @@ static int clear(struct intel_migrate *migrate,
/* Write the obj data into ccs surface */
err = intel_migrate_ccs_copy(migrate, &ww, NULL,
obj->mm.pages->sgl,
- obj->cache_level,
+ obj->pat_index,
true, &rq);
if (rq && !err) {
if (i915_request_wait(rq, 0, HZ) < 0) {
@@ -351,7 +351,7 @@ static int clear(struct intel_migrate *migrate,
err = intel_migrate_ccs_copy(migrate, &ww, NULL,
obj->mm.pages->sgl,
- obj->cache_level,
+ obj->pat_index,
false, &rq);
if (rq && !err) {
if (i915_request_wait(rq, 0, HZ) < 0) {
@@ -414,9 +414,9 @@ static int __migrate_copy(struct intel_migrate *migrate,
struct i915_request **out)
{
return intel_migrate_copy(migrate, ww, NULL,
- src->mm.pages->sgl, src->cache_level,
+ src->mm.pages->sgl, src->pat_index,
i915_gem_object_is_lmem(src),
- dst->mm.pages->sgl, dst->cache_level,
+ dst->mm.pages->sgl, dst->pat_index,
i915_gem_object_is_lmem(dst),
out);
}
@@ -428,9 +428,9 @@ static int __global_copy(struct intel_migrate *migrate,
struct i915_request **out)
{
return intel_context_migrate_copy(migrate->context, NULL,
- src->mm.pages->sgl, src->cache_level,
+ src->mm.pages->sgl, src->pat_index,
i915_gem_object_is_lmem(src),
- dst->mm.pages->sgl, dst->cache_level,
+ dst->mm.pages->sgl, dst->pat_index,
i915_gem_object_is_lmem(dst),
out);
}
@@ -455,7 +455,7 @@ static int __migrate_clear(struct intel_migrate *migrate,
{
return intel_migrate_clear(migrate, ww, NULL,
obj->mm.pages->sgl,
- obj->cache_level,
+ obj->pat_index,
i915_gem_object_is_lmem(obj),
value, out);
}
@@ -468,7 +468,7 @@ static int __global_clear(struct intel_migrate *migrate,
{
return intel_context_migrate_clear(migrate->context, NULL,
obj->mm.pages->sgl,
- obj->cache_level,
+ obj->pat_index,
i915_gem_object_is_lmem(obj),
value, out);
}
@@ -648,7 +648,7 @@ static int live_emit_pte_full_ring(void *arg)
*/
pr_info("%s emite_pte ring space=%u\n", __func__, rq->ring->space);
it = sg_sgt(obj->mm.pages->sgl);
- len = emit_pte(rq, &it, obj->cache_level, false, 0, CHUNK_SZ);
+ len = emit_pte(rq, &it, obj->pat_index, false, 0, CHUNK_SZ);
if (!len) {
err = -EINVAL;
goto out_rq;
@@ -844,7 +844,7 @@ static int wrap_ktime_compare(const void *A, const void *B)
static int __perf_clear_blt(struct intel_context *ce,
struct scatterlist *sg,
- enum i915_cache_level cache_level,
+ unsigned int pat_index,
bool is_lmem,
size_t sz)
{
@@ -858,7 +858,7 @@ static int __perf_clear_blt(struct intel_context *ce,
t0 = ktime_get();
- err = intel_context_migrate_clear(ce, NULL, sg, cache_level,
+ err = intel_context_migrate_clear(ce, NULL, sg, pat_index,
is_lmem, 0, &rq);
if (rq) {
if (i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT) < 0)
@@ -904,7 +904,8 @@ static int perf_clear_blt(void *arg)
err = __perf_clear_blt(gt->migrate.context,
dst->mm.pages->sgl,
- I915_CACHE_NONE,
+ i915_gem_get_pat_index(gt->i915,
+ I915_CACHE_NONE),
i915_gem_object_is_lmem(dst),
sizes[i]);
@@ -919,10 +920,10 @@ static int perf_clear_blt(void *arg)
static int __perf_copy_blt(struct intel_context *ce,
struct scatterlist *src,
- enum i915_cache_level src_cache_level,
+ unsigned int src_pat_index,
bool src_is_lmem,
struct scatterlist *dst,
- enum i915_cache_level dst_cache_level,
+ unsigned int dst_pat_index,
bool dst_is_lmem,
size_t sz)
{
@@ -937,9 +938,9 @@ static int __perf_copy_blt(struct intel_context *ce,
t0 = ktime_get();
err = intel_context_migrate_copy(ce, NULL,
- src, src_cache_level,
+ src, src_pat_index,
src_is_lmem,
- dst, dst_cache_level,
+ dst, dst_pat_index,
dst_is_lmem,
&rq);
if (rq) {
@@ -994,10 +995,12 @@ static int perf_copy_blt(void *arg)
err = __perf_copy_blt(gt->migrate.context,
src->mm.pages->sgl,
- I915_CACHE_NONE,
+ i915_gem_get_pat_index(gt->i915,
+ I915_CACHE_NONE),
i915_gem_object_is_lmem(src),
dst->mm.pages->sgl,
- I915_CACHE_NONE,
+ i915_gem_get_pat_index(gt->i915,
+ I915_CACHE_NONE),
i915_gem_object_is_lmem(dst),
sz);
diff --git a/drivers/gpu/drm/i915/gt/selftest_mocs.c b/drivers/gpu/drm/i915/gt/selftest_mocs.c
index ca009a6a13bd..a8446ab82501 100644
--- a/drivers/gpu/drm/i915/gt/selftest_mocs.c
+++ b/drivers/gpu/drm/i915/gt/selftest_mocs.c
@@ -131,13 +131,14 @@ static int read_mocs_table(struct i915_request *rq,
const struct drm_i915_mocs_table *table,
u32 *offset)
{
+ struct intel_gt *gt = rq->engine->gt;
u32 addr;
if (!table)
return 0;
if (HAS_GLOBAL_MOCS_REGISTERS(rq->engine->i915))
- addr = global_mocs_offset();
+ addr = global_mocs_offset() + gt->uncore->gsi_offset;
else
addr = mocs_offset(rq->engine);
diff --git a/drivers/gpu/drm/i915/gt/selftest_reset.c b/drivers/gpu/drm/i915/gt/selftest_reset.c
index a9e0a91bc0e0..79aa6ac66ad2 100644
--- a/drivers/gpu/drm/i915/gt/selftest_reset.c
+++ b/drivers/gpu/drm/i915/gt/selftest_reset.c
@@ -86,7 +86,9 @@ __igt_reset_stolen(struct intel_gt *gt,
ggtt->vm.insert_page(&ggtt->vm, dma,
ggtt->error_capture.start,
- I915_CACHE_NONE, 0);
+ i915_gem_get_pat_index(gt->i915,
+ I915_CACHE_NONE),
+ 0);
mb();
s = io_mapping_map_wc(&ggtt->iomap,
@@ -127,7 +129,9 @@ __igt_reset_stolen(struct intel_gt *gt,
ggtt->vm.insert_page(&ggtt->vm, dma,
ggtt->error_capture.start,
- I915_CACHE_NONE, 0);
+ i915_gem_get_pat_index(gt->i915,
+ I915_CACHE_NONE),
+ 0);
mb();
s = io_mapping_map_wc(&ggtt->iomap,
diff --git a/drivers/gpu/drm/i915/gt/selftest_slpc.c b/drivers/gpu/drm/i915/gt/selftest_slpc.c
index bd44ce73a504..952c8d52d68a 100644
--- a/drivers/gpu/drm/i915/gt/selftest_slpc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_slpc.c
@@ -70,6 +70,31 @@ static int slpc_set_freq(struct intel_gt *gt, u32 freq)
return err;
}
+static int slpc_restore_freq(struct intel_guc_slpc *slpc, u32 min, u32 max)
+{
+ int err;
+
+ err = slpc_set_max_freq(slpc, max);
+ if (err) {
+		pr_err("Unable to restore max freq\n");
+ return err;
+ }
+
+ err = slpc_set_min_freq(slpc, min);
+ if (err) {
+		pr_err("Unable to restore min freq\n");
+ return err;
+ }
+
+ err = intel_guc_slpc_set_ignore_eff_freq(slpc, false);
+ if (err) {
+		pr_err("Unable to restore efficient freq\n");
+ return err;
+ }
+
+ return 0;
+}
+
static u64 measure_power_at_freq(struct intel_gt *gt, int *freq, u64 *power)
{
int err = 0;
@@ -268,8 +293,7 @@ static int run_test(struct intel_gt *gt, int test_type)
/*
* Set min frequency to RPn so that we can test the whole
- * range of RPn-RP0. This also turns off efficient freq
- * usage and makes results more predictable.
+ * range of RPn-RP0.
*/
err = slpc_set_min_freq(slpc, slpc->min_freq);
if (err) {
@@ -277,6 +301,15 @@ static int run_test(struct intel_gt *gt, int test_type)
return err;
}
+ /*
+ * Turn off efficient frequency so RPn/RP0 ranges are obeyed.
+ */
+ err = intel_guc_slpc_set_ignore_eff_freq(slpc, true);
+ if (err) {
+		pr_err("Unable to turn off efficient freq!\n");
+ return err;
+ }
+
intel_gt_pm_wait_for_idle(gt);
intel_gt_pm_get(gt);
for_each_engine(engine, gt, id) {
@@ -358,9 +391,8 @@ static int run_test(struct intel_gt *gt, int test_type)
break;
}
- /* Restore min/max frequencies */
- slpc_set_max_freq(slpc, slpc_max_freq);
- slpc_set_min_freq(slpc, slpc_min_freq);
+ /* Restore min/max/efficient frequencies */
+ err = slpc_restore_freq(slpc, slpc_min_freq, slpc_max_freq);
if (igt_flush_test(gt->i915))
err = -EIO;
diff --git a/drivers/gpu/drm/i915/gt/selftest_timeline.c b/drivers/gpu/drm/i915/gt/selftest_timeline.c
index 9f536c251179..39c3ec12df1a 100644
--- a/drivers/gpu/drm/i915/gt/selftest_timeline.c
+++ b/drivers/gpu/drm/i915/gt/selftest_timeline.c
@@ -836,7 +836,7 @@ static int setup_watcher(struct hwsp_watcher *w, struct intel_gt *gt,
return PTR_ERR(obj);
/* keep the same cache settings as timeline */
- i915_gem_object_set_cache_coherency(obj, tl->hwsp_ggtt->obj->cache_level);
+ i915_gem_object_set_pat_index(obj, tl->hwsp_ggtt->obj->pat_index);
w->map = i915_gem_object_pin_map_unlocked(obj,
page_unmask_bits(tl->hwsp_ggtt->obj->mm.mapping));
if (IS_ERR(w->map)) {
diff --git a/drivers/gpu/drm/i915/gt/selftest_tlb.c b/drivers/gpu/drm/i915/gt/selftest_tlb.c
index e6cac1f15d6e..4493c8518e91 100644
--- a/drivers/gpu/drm/i915/gt/selftest_tlb.c
+++ b/drivers/gpu/drm/i915/gt/selftest_tlb.c
@@ -36,6 +36,8 @@ pte_tlbinv(struct intel_context *ce,
u64 length,
struct rnd_state *prng)
{
+ const unsigned int pat_index =
+ i915_gem_get_pat_index(ce->vm->i915, I915_CACHE_NONE);
struct drm_i915_gem_object *batch;
struct drm_mm_node vb_node;
struct i915_request *rq;
@@ -155,7 +157,7 @@ pte_tlbinv(struct intel_context *ce,
/* Flip the PTE between A and B */
if (i915_gem_object_is_lmem(vb->obj))
pte_flags |= PTE_LM;
- ce->vm->insert_entries(ce->vm, &vb_res, 0, pte_flags);
+ ce->vm->insert_entries(ce->vm, &vb_res, pat_index, pte_flags);
/* Flush the PTE update to concurrent HW */
tlbinv(ce->vm, addr & -length, length);
diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_errors_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_errors_abi.h
index bcb1129b3610..dabeaf4f245f 100644
--- a/drivers/gpu/drm/i915/gt/uc/abi/guc_errors_abi.h
+++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_errors_abi.h
@@ -44,6 +44,7 @@ enum intel_guc_load_status {
enum intel_bootrom_load_status {
INTEL_BOOTROM_STATUS_NO_KEY_FOUND = 0x13,
INTEL_BOOTROM_STATUS_AES_PROD_KEY_FOUND = 0x1A,
+ INTEL_BOOTROM_STATUS_PROD_KEY_CHECK_FAILURE = 0x2B,
INTEL_BOOTROM_STATUS_RSA_FAILED = 0x50,
INTEL_BOOTROM_STATUS_PAVPC_FAILED = 0x73,
INTEL_BOOTROM_STATUS_WOPCM_FAILED = 0x74,
diff --git a/drivers/gpu/drm/i915/gt/uc/guc_capture_fwif.h b/drivers/gpu/drm/i915/gt/uc/guc_capture_fwif.h
index 9d589c28f40f..1fc0c17b1230 100644
--- a/drivers/gpu/drm/i915/gt/uc/guc_capture_fwif.h
+++ b/drivers/gpu/drm/i915/gt/uc/guc_capture_fwif.h
@@ -12,7 +12,7 @@
struct intel_guc;
struct file;
-/**
+/*
* struct __guc_capture_bufstate
*
* Book-keeping structure used to track read and write pointers
@@ -26,7 +26,7 @@ struct __guc_capture_bufstate {
u32 wr;
};
-/**
+/*
* struct __guc_capture_parsed_output - extracted error capture node
*
* A single unit of extracted error-capture output data grouped together
@@ -58,7 +58,7 @@ struct __guc_capture_parsed_output {
#define GCAP_PARSED_REGLIST_INDEX_ENGINST BIT(GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE)
};
-/**
+/*
* struct guc_debug_capture_list_header / struct guc_debug_capture_list
*
* As part of ADS registration, these header structures (followed by
@@ -76,7 +76,7 @@ struct guc_debug_capture_list {
struct guc_mmio_reg regs[];
} __packed;
-/**
+/*
* struct __guc_mmio_reg_descr / struct __guc_mmio_reg_descr_group
*
* intel_guc_capture module uses these structures to maintain static
@@ -101,7 +101,7 @@ struct __guc_mmio_reg_descr_group {
struct __guc_mmio_reg_descr *extlist; /* only used for steered registers */
};
-/**
+/*
* struct guc_state_capture_header_t / struct guc_state_capture_t /
* guc_state_capture_group_header_t / guc_state_capture_group_t
*
@@ -148,7 +148,7 @@ struct guc_state_capture_group_t {
struct guc_state_capture_t capture_entries[];
} __packed;
-/**
+/*
* struct __guc_capture_ads_cache
*
* A structure to cache register lists that were populated and registered
@@ -187,6 +187,10 @@ struct intel_guc_state_capture {
struct __guc_capture_ads_cache ads_cache[GUC_CAPTURE_LIST_INDEX_MAX]
[GUC_CAPTURE_LIST_TYPE_MAX]
[GUC_MAX_ENGINE_CLASSES];
+
+ /**
+ * @ads_null_cache: ADS null cache.
+ */
void *ads_null_cache;
/**
@@ -202,6 +206,10 @@ struct intel_guc_state_capture {
struct list_head cachelist;
#define PREALLOC_NODES_MAX_COUNT (3 * GUC_MAX_ENGINE_CLASSES * GUC_MAX_INSTANCES_PER_CLASS)
#define PREALLOC_NODES_DEFAULT_NUMREGS 64
+
+ /**
+ * @max_mmio_per_node: Max MMIO per node.
+ */
int max_mmio_per_node;
/**
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c
index 1d9fdfb11268..f46eb17a7a98 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c
@@ -13,6 +13,7 @@
#define GSC_FW_STATUS_REG _MMIO(0x116C40)
#define GSC_FW_CURRENT_STATE REG_GENMASK(3, 0)
#define GSC_FW_CURRENT_STATE_RESET 0
+#define GSC_FW_PROXY_STATE_NORMAL 5
#define GSC_FW_INIT_COMPLETE_BIT REG_BIT(9)
static bool gsc_is_in_reset(struct intel_uncore *uncore)
@@ -23,6 +24,15 @@ static bool gsc_is_in_reset(struct intel_uncore *uncore)
GSC_FW_CURRENT_STATE_RESET;
}
+bool intel_gsc_uc_fw_proxy_init_done(struct intel_gsc_uc *gsc)
+{
+ struct intel_uncore *uncore = gsc_uc_to_gt(gsc)->uncore;
+ u32 fw_status = intel_uncore_read(uncore, GSC_FW_STATUS_REG);
+
+ return REG_FIELD_GET(GSC_FW_CURRENT_STATE, fw_status) ==
+ GSC_FW_PROXY_STATE_NORMAL;
+}
+
bool intel_gsc_uc_fw_init_done(struct intel_gsc_uc *gsc)
{
struct intel_uncore *uncore = gsc_uc_to_gt(gsc)->uncore;
@@ -110,6 +120,13 @@ static int gsc_fw_load_prepare(struct intel_gsc_uc *gsc)
if (obj->base.size < gsc->fw.size)
return -ENOSPC;
+ /*
+ * Wa_22016122933: For MTL the shared memory needs to be mapped
+ * as WC on CPU side and UC (PAT index 2) on GPU side
+ */
+ if (IS_METEORLAKE(i915))
+ i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
+
dst = i915_gem_object_pin_map_unlocked(obj,
i915_coherent_map_type(i915, obj, true));
if (IS_ERR(dst))
@@ -125,6 +142,12 @@ static int gsc_fw_load_prepare(struct intel_gsc_uc *gsc)
memset(dst, 0, obj->base.size);
memcpy(dst, src, gsc->fw.size);
+ /*
+	 * Wa_22016122933: Make sure the data in dst is
+	 * visible to the GSC right away
+ */
+ intel_guc_write_barrier(&gt->uc.guc);
+
i915_gem_object_unpin_map(gsc->fw.obj);
i915_gem_object_unpin_map(obj);
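Wa_22016122933 above pairs a write-combined CPU mapping with an explicit barrier so the GSC never observes a partially written image. The underlying copy-then-barrier pattern in isolation; publish_fw_image() is a hypothetical helper, and wmb() stands in for intel_guc_write_barrier():

#include <linux/string.h>
#include <linux/types.h>
#include <asm/barrier.h>

/* Hypothetical helper, not driver code; buffer setup is assumed. */
static void publish_fw_image(void *wc_dst, const void *src, size_t len)
{
	memcpy(wc_dst, src, len);
	wmb();	/* flush the WC writes before the device is told to read */
}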
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.h b/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.h
index f4c1106bb2a9..fff8928218df 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.h
@@ -13,5 +13,6 @@ struct intel_uncore;
int intel_gsc_uc_fw_upload(struct intel_gsc_uc *gsc);
bool intel_gsc_uc_fw_init_done(struct intel_gsc_uc *gsc);
+bool intel_gsc_uc_fw_proxy_init_done(struct intel_gsc_uc *gsc);
#endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_proxy.c b/drivers/gpu/drm/i915/gt/uc/intel_gsc_proxy.c
new file mode 100644
index 000000000000..ebee0b5a2c1d
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_proxy.c
@@ -0,0 +1,424 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include <linux/component.h>
+
+#include "drm/i915_component.h"
+#include "drm/i915_gsc_proxy_mei_interface.h"
+
+#include "gt/intel_gt.h"
+#include "gt/intel_gt_print.h"
+#include "intel_gsc_proxy.h"
+#include "intel_gsc_uc.h"
+#include "intel_gsc_uc_heci_cmd_submit.h"
+#include "i915_drv.h"
+#include "i915_reg.h"
+
+/*
+ * GSC proxy:
+ * The GSC uC needs to communicate with the CSME to perform certain operations.
+ * Since the GSC can't perform this communication directly on platforms where it
+ * is integrated in GT, i915 needs to transfer the messages from GSC to CSME
+ * and back. i915 must manually start the proxy flow after the GSC is loaded to
+ * signal to GSC that we're ready to handle its messages and allow it to query
+ * its init data from CSME; GSC will then trigger an HECI2 interrupt if it needs
+ * to send messages to CSME again.
+ * The proxy flow is as follows:
+ * 1 - i915 submits a request to GSC asking for the message to CSME
+ * 2 - GSC replies with the proxy header + payload for CSME
+ * 3 - i915 sends the reply from GSC as-is to CSME via the mei proxy component
+ * 4 - CSME replies with the proxy header + payload for GSC
+ * 5 - i915 submits a request to GSC with the reply from CSME
+ * 6 - GSC replies either with a new header + payload (same as step 2, so we
+ * restart from there) or with an end message.
+ */
+
+/*
+ * The component should load quite quickly in most cases, but it could take
+ * a while. Use a very large timeout just to cover the worst-case scenario.
+ */
+#define GSC_PROXY_INIT_TIMEOUT_MS 20000
+
+/* the protocol supports up to 32K in each direction */
+#define GSC_PROXY_BUFFER_SIZE SZ_32K
+#define GSC_PROXY_CHANNEL_SIZE (GSC_PROXY_BUFFER_SIZE * 2)
+#define GSC_PROXY_MAX_MSG_SIZE (GSC_PROXY_BUFFER_SIZE - sizeof(struct intel_gsc_mtl_header))
+
+/* FW-defined proxy header */
+struct intel_gsc_proxy_header {
+ /*
+ * hdr:
+ * Bits 0-7: type of the proxy message (see enum intel_gsc_proxy_type)
+ * Bits 8-15: rsvd
+ * Bits 16-31: length in bytes of the payload following the proxy header
+ */
+ u32 hdr;
+#define GSC_PROXY_TYPE GENMASK(7, 0)
+#define GSC_PROXY_PAYLOAD_LENGTH GENMASK(31, 16)
+
+ u32 source; /* Source of the Proxy message */
+ u32 destination; /* Destination of the Proxy message */
+#define GSC_PROXY_ADDRESSING_KMD 0x10000
+#define GSC_PROXY_ADDRESSING_GSC 0x20000
+#define GSC_PROXY_ADDRESSING_CSME 0x30000
+
+ u32 status; /* Command status */
+} __packed;
+
+/* FW-defined proxy types */
+enum intel_gsc_proxy_type {
+ GSC_PROXY_MSG_TYPE_PROXY_INVALID = 0,
+ GSC_PROXY_MSG_TYPE_PROXY_QUERY = 1,
+ GSC_PROXY_MSG_TYPE_PROXY_PAYLOAD = 2,
+ GSC_PROXY_MSG_TYPE_PROXY_END = 3,
+ GSC_PROXY_MSG_TYPE_PROXY_NOTIFICATION = 4,
+};
+
+struct gsc_proxy_msg {
+ struct intel_gsc_mtl_header header;
+ struct intel_gsc_proxy_header proxy_header;
+} __packed;
+
+static int proxy_send_to_csme(struct intel_gsc_uc *gsc)
+{
+ struct intel_gt *gt = gsc_uc_to_gt(gsc);
+ struct i915_gsc_proxy_component *comp = gsc->proxy.component;
+ struct intel_gsc_mtl_header *hdr;
+ void *in = gsc->proxy.to_csme;
+ void *out = gsc->proxy.to_gsc;
+ u32 in_size;
+ int ret;
+
+ /* CSME msg only includes the proxy */
+ hdr = in;
+ in += sizeof(struct intel_gsc_mtl_header);
+ out += sizeof(struct intel_gsc_mtl_header);
+
+ in_size = hdr->message_size - sizeof(struct intel_gsc_mtl_header);
+
+ /* the message must contain at least the proxy header */
+ if (in_size < sizeof(struct intel_gsc_proxy_header) ||
+ in_size > GSC_PROXY_MAX_MSG_SIZE) {
+ gt_err(gt, "Invalid CSME message size: %u\n", in_size);
+ return -EINVAL;
+ }
+
+ ret = comp->ops->send(comp->mei_dev, in, in_size);
+ if (ret < 0) {
+ gt_err(gt, "Failed to send CSME message\n");
+ return ret;
+ }
+
+ ret = comp->ops->recv(comp->mei_dev, out, GSC_PROXY_MAX_MSG_SIZE);
+ if (ret < 0) {
+ gt_err(gt, "Failed to receive CSME message\n");
+ return ret;
+ }
+
+ return ret;
+}
+
+static int proxy_send_to_gsc(struct intel_gsc_uc *gsc)
+{
+ struct intel_gt *gt = gsc_uc_to_gt(gsc);
+ u32 *marker = gsc->proxy.to_csme; /* first dw of the reply header */
+ u64 addr_in = i915_ggtt_offset(gsc->proxy.vma);
+ u64 addr_out = addr_in + GSC_PROXY_BUFFER_SIZE;
+ u32 size = ((struct gsc_proxy_msg *)gsc->proxy.to_gsc)->header.message_size;
+ int err;
+
+ /* the message must contain at least the gsc and proxy headers */
+ if (size < sizeof(struct gsc_proxy_msg) || size > GSC_PROXY_BUFFER_SIZE) {
+ gt_err(gt, "Invalid GSC proxy message size: %u\n", size);
+ return -EINVAL;
+ }
+
+ /* clear the message marker */
+ *marker = 0;
+
+ /* make sure the marker write is flushed */
+ wmb();
+
+ /* send the request */
+ err = intel_gsc_uc_heci_cmd_submit_packet(gsc, addr_in, size,
+ addr_out, GSC_PROXY_BUFFER_SIZE);
+
+ if (!err) {
+ /* wait for the reply to show up */
+ err = wait_for(*marker != 0, 300);
+ if (err)
+ gt_err(gt, "Failed to get a proxy reply from gsc\n");
+ }
+
+ return err;
+}
+
+static int validate_proxy_header(struct intel_gsc_proxy_header *header,
+ u32 source, u32 dest)
+{
+ u32 type = FIELD_GET(GSC_PROXY_TYPE, header->hdr);
+ u32 length = FIELD_GET(GSC_PROXY_PAYLOAD_LENGTH, header->hdr);
+ int ret = 0;
+
+ if (header->destination != dest || header->source != source) {
+ ret = -ENOEXEC;
+ goto fail;
+ }
+
+ switch (type) {
+ case GSC_PROXY_MSG_TYPE_PROXY_PAYLOAD:
+ if (length > 0)
+ break;
+ fallthrough;
+ case GSC_PROXY_MSG_TYPE_PROXY_INVALID:
+ ret = -EIO;
+ goto fail;
+ default:
+ break;
+ }
+
+fail:
+ return ret;
+}
+
+static int proxy_query(struct intel_gsc_uc *gsc)
+{
+ struct intel_gt *gt = gsc_uc_to_gt(gsc);
+ struct gsc_proxy_msg *to_gsc = gsc->proxy.to_gsc;
+ struct gsc_proxy_msg *to_csme = gsc->proxy.to_csme;
+ int ret;
+
+ intel_gsc_uc_heci_cmd_emit_mtl_header(&to_gsc->header,
+ HECI_MEADDRESS_PROXY,
+ sizeof(struct gsc_proxy_msg),
+ 0);
+
+ to_gsc->proxy_header.hdr =
+ FIELD_PREP(GSC_PROXY_TYPE, GSC_PROXY_MSG_TYPE_PROXY_QUERY) |
+ FIELD_PREP(GSC_PROXY_PAYLOAD_LENGTH, 0);
+
+ to_gsc->proxy_header.source = GSC_PROXY_ADDRESSING_KMD;
+ to_gsc->proxy_header.destination = GSC_PROXY_ADDRESSING_GSC;
+ to_gsc->proxy_header.status = 0;
+
+ while (1) {
+ /* clear the GSC response header space */
+ memset(gsc->proxy.to_csme, 0, sizeof(struct gsc_proxy_msg));
+
+ /* send proxy message to GSC */
+ ret = proxy_send_to_gsc(gsc);
+ if (ret) {
+ gt_err(gt, "failed to send proxy message to GSC! %d\n", ret);
+ goto proxy_error;
+ }
+
+ /* stop if this was the last message */
+ if (FIELD_GET(GSC_PROXY_TYPE, to_csme->proxy_header.hdr) ==
+ GSC_PROXY_MSG_TYPE_PROXY_END)
+ break;
+
+ /* make sure the GSC-to-CSME proxy header is sane */
+ ret = validate_proxy_header(&to_csme->proxy_header,
+ GSC_PROXY_ADDRESSING_GSC,
+ GSC_PROXY_ADDRESSING_CSME);
+ if (ret) {
+ gt_err(gt, "invalid GSC to CSME proxy header! %d\n", ret);
+ goto proxy_error;
+ }
+
+ /* send the GSC message to the CSME */
+ ret = proxy_send_to_csme(gsc);
+ if (ret < 0) {
+ gt_err(gt, "failed to send proxy message to CSME! %d\n", ret);
+ goto proxy_error;
+ }
+
+ /* update the GSC message size with the returned value from CSME */
+ to_gsc->header.message_size = ret + sizeof(struct intel_gsc_mtl_header);
+
+ /* make sure the CSME-to-GSC proxy header is sane */
+ ret = validate_proxy_header(&to_gsc->proxy_header,
+ GSC_PROXY_ADDRESSING_CSME,
+ GSC_PROXY_ADDRESSING_GSC);
+ if (ret) {
+ gt_err(gt, "invalid CSME to GSC proxy header! %d\n", ret);
+ goto proxy_error;
+ }
+ }
+
+proxy_error:
+ return ret < 0 ? ret : 0;
+}
+
+int intel_gsc_proxy_request_handler(struct intel_gsc_uc *gsc)
+{
+ struct intel_gt *gt = gsc_uc_to_gt(gsc);
+ int err;
+
+ if (!gsc->proxy.component_added)
+ return -ENODEV;
+
+ assert_rpm_wakelock_held(gt->uncore->rpm);
+
+	/* the GSC load path can queue this request before the component is bound */
+ err = wait_for(gsc->proxy.component, GSC_PROXY_INIT_TIMEOUT_MS);
+ if (err) {
+ gt_err(gt, "GSC proxy component didn't bind within the expected timeout\n");
+ return -EIO;
+ }
+
+ mutex_lock(&gsc->proxy.mutex);
+ if (!gsc->proxy.component) {
+ gt_err(gt, "GSC proxy worker called without the component being bound!\n");
+ err = -EIO;
+ } else {
+ /*
+ * write the status bit to clear it and allow new proxy
+ * interrupts to be generated while we handle the current
+ * request, but be sure not to write the reset bit
+ */
+ intel_uncore_rmw(gt->uncore, HECI_H_CSR(MTL_GSC_HECI2_BASE),
+ HECI_H_CSR_RST, HECI_H_CSR_IS);
+ err = proxy_query(gsc);
+ }
+ mutex_unlock(&gsc->proxy.mutex);
+ return err;
+}
+
+void intel_gsc_proxy_irq_handler(struct intel_gsc_uc *gsc, u32 iir)
+{
+ struct intel_gt *gt = gsc_uc_to_gt(gsc);
+
+ if (unlikely(!iir))
+ return;
+
+ lockdep_assert_held(gt->irq_lock);
+
+ if (!gsc->proxy.component) {
+ gt_err(gt, "GSC proxy irq received without the component being bound!\n");
+ return;
+ }
+
+ gsc->gsc_work_actions |= GSC_ACTION_SW_PROXY;
+ queue_work(gsc->wq, &gsc->work);
+}
+
+static int i915_gsc_proxy_component_bind(struct device *i915_kdev,
+ struct device *mei_kdev, void *data)
+{
+ struct drm_i915_private *i915 = kdev_to_i915(i915_kdev);
+ struct intel_gt *gt = i915->media_gt;
+ struct intel_gsc_uc *gsc = &gt->uc.gsc;
+ intel_wakeref_t wakeref;
+
+ /* enable HECI2 IRQs */
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
+ intel_uncore_rmw(gt->uncore, HECI_H_CSR(MTL_GSC_HECI2_BASE),
+ HECI_H_CSR_RST, HECI_H_CSR_IE);
+
+ mutex_lock(&gsc->proxy.mutex);
+ gsc->proxy.component = data;
+ gsc->proxy.component->mei_dev = mei_kdev;
+ mutex_unlock(&gsc->proxy.mutex);
+
+ return 0;
+}
+
+static void i915_gsc_proxy_component_unbind(struct device *i915_kdev,
+ struct device *mei_kdev, void *data)
+{
+ struct drm_i915_private *i915 = kdev_to_i915(i915_kdev);
+ struct intel_gt *gt = i915->media_gt;
+ struct intel_gsc_uc *gsc = &gt->uc.gsc;
+ intel_wakeref_t wakeref;
+
+ mutex_lock(&gsc->proxy.mutex);
+ gsc->proxy.component = NULL;
+ mutex_unlock(&gsc->proxy.mutex);
+
+ /* disable HECI2 IRQs */
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
+ intel_uncore_rmw(gt->uncore, HECI_H_CSR(MTL_GSC_HECI2_BASE),
+ HECI_H_CSR_IE | HECI_H_CSR_RST, 0);
+}
+
+static const struct component_ops i915_gsc_proxy_component_ops = {
+ .bind = i915_gsc_proxy_component_bind,
+ .unbind = i915_gsc_proxy_component_unbind,
+};
+
+static int proxy_channel_alloc(struct intel_gsc_uc *gsc)
+{
+ struct intel_gt *gt = gsc_uc_to_gt(gsc);
+ struct i915_vma *vma;
+ void *vaddr;
+ int err;
+
+ err = intel_guc_allocate_and_map_vma(&gt->uc.guc, GSC_PROXY_CHANNEL_SIZE,
+ &vma, &vaddr);
+ if (err)
+ return err;
+
+ gsc->proxy.vma = vma;
+ gsc->proxy.to_gsc = vaddr;
+ gsc->proxy.to_csme = vaddr + GSC_PROXY_BUFFER_SIZE;
+
+ return 0;
+}
+
+static void proxy_channel_free(struct intel_gsc_uc *gsc)
+{
+ if (!gsc->proxy.vma)
+ return;
+
+ gsc->proxy.to_gsc = NULL;
+ gsc->proxy.to_csme = NULL;
+ i915_vma_unpin_and_release(&gsc->proxy.vma, I915_VMA_RELEASE_MAP);
+}
+
+void intel_gsc_proxy_fini(struct intel_gsc_uc *gsc)
+{
+ struct intel_gt *gt = gsc_uc_to_gt(gsc);
+ struct drm_i915_private *i915 = gt->i915;
+
+ if (fetch_and_zero(&gsc->proxy.component_added))
+ component_del(i915->drm.dev, &i915_gsc_proxy_component_ops);
+
+ proxy_channel_free(gsc);
+}
+
+int intel_gsc_proxy_init(struct intel_gsc_uc *gsc)
+{
+ int err;
+ struct intel_gt *gt = gsc_uc_to_gt(gsc);
+ struct drm_i915_private *i915 = gt->i915;
+
+ mutex_init(&gsc->proxy.mutex);
+
+ if (!IS_ENABLED(CONFIG_INTEL_MEI_GSC_PROXY)) {
+ gt_info(gt, "can't init GSC proxy due to missing mei component\n");
+ return -ENODEV;
+ }
+
+ err = proxy_channel_alloc(gsc);
+ if (err)
+ return err;
+
+ err = component_add_typed(i915->drm.dev, &i915_gsc_proxy_component_ops,
+ I915_COMPONENT_GSC_PROXY);
+ if (err < 0) {
+ gt_err(gt, "Failed to add GSC_PROXY component (%d)\n", err);
+ goto out_free;
+ }
+
+ gsc->proxy.component_added = true;
+
+ return 0;
+
+out_free:
+ proxy_channel_free(gsc);
+ return err;
+}
+
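
The reply detection in proxy_send_to_gsc() above is a bare memory handshake: clear the first dword of the reply buffer, flush the write with wmb(), submit the request, then poll the dword until the GSC overwrites it. A loose userspace analogue of that pattern, with C11 atomics standing in for wmb()/wait_for() (the marker value, timeout and fake device thread are all illustrative, not the driver's):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <threads.h>
#include <time.h>

static _Atomic uint32_t reply_marker;

/* stands in for the GSC writing the first dword of its reply header */
static int fake_device(void *arg)
{
	(void)arg;
	atomic_store_explicit(&reply_marker, 0xdead, memory_order_release);
	return 0;
}

static bool wait_for_marker(int timeout_ms)
{
	while (timeout_ms--) {
		if (atomic_load_explicit(&reply_marker, memory_order_acquire))
			return true;
		thrd_sleep(&(struct timespec){ .tv_nsec = 1000000 }, NULL); /* 1 ms */
	}
	return false;
}

int main(void)
{
	thrd_t dev;

	/* clear the marker; the release store plays the role of wmb() */
	atomic_store_explicit(&reply_marker, 0, memory_order_release);

	thrd_create(&dev, fake_device, NULL);	/* "submit the request" */

	puts(wait_for_marker(300) ? "got a reply" : "timed out");
	thrd_join(dev, NULL);
	return 0;
}
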
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_proxy.h b/drivers/gpu/drm/i915/gt/uc/intel_gsc_proxy.h
new file mode 100644
index 000000000000..fc5aef10bfb4
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_proxy.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _INTEL_GSC_PROXY_H_
+#define _INTEL_GSC_PROXY_H_
+
+#include <linux/types.h>
+
+struct intel_gsc_uc;
+
+int intel_gsc_proxy_init(struct intel_gsc_uc *gsc);
+void intel_gsc_proxy_fini(struct intel_gsc_uc *gsc);
+int intel_gsc_proxy_request_handler(struct intel_gsc_uc *gsc);
+void intel_gsc_proxy_irq_handler(struct intel_gsc_uc *gsc, u32 iir);
+
+#endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c
index 2d5b70b3384c..fb0984f875f9 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c
@@ -10,15 +10,60 @@
#include "intel_gsc_uc.h"
#include "intel_gsc_fw.h"
#include "i915_drv.h"
+#include "intel_gsc_proxy.h"
static void gsc_work(struct work_struct *work)
{
struct intel_gsc_uc *gsc = container_of(work, typeof(*gsc), work);
struct intel_gt *gt = gsc_uc_to_gt(gsc);
intel_wakeref_t wakeref;
+ u32 actions;
+ int ret;
+
+ wakeref = intel_runtime_pm_get(gt->uncore->rpm);
+
+ spin_lock_irq(gt->irq_lock);
+ actions = gsc->gsc_work_actions;
+ gsc->gsc_work_actions = 0;
+ spin_unlock_irq(gt->irq_lock);
+
+ if (actions & GSC_ACTION_FW_LOAD) {
+ ret = intel_gsc_uc_fw_upload(gsc);
+ if (ret == -EEXIST) /* skip proxy if not a new load */
+ actions &= ~GSC_ACTION_FW_LOAD;
+ else if (ret)
+ goto out_put;
+ }
+
+ if (actions & (GSC_ACTION_FW_LOAD | GSC_ACTION_SW_PROXY)) {
+ if (!intel_gsc_uc_fw_init_done(gsc)) {
+ gt_err(gt, "Proxy request received with GSC not loaded!\n");
+ goto out_put;
+ }
+
+ ret = intel_gsc_proxy_request_handler(gsc);
+ if (ret)
+ goto out_put;
+
+ /* mark the GSC FW init as done the first time we run this */
+ if (actions & GSC_ACTION_FW_LOAD) {
+ /*
+ * If there is a proxy establishment error, the GSC might still
+ * complete the request handling cleanly, so we need to check the
+			 * status register to see if the proxy init actually succeeded
+ */
+ if (intel_gsc_uc_fw_proxy_init_done(gsc)) {
+ drm_dbg(&gt->i915->drm, "GSC Proxy initialized\n");
+ intel_uc_fw_change_status(&gsc->fw, INTEL_UC_FIRMWARE_RUNNING);
+ } else {
+ drm_err(&gt->i915->drm,
+ "GSC status reports proxy init not complete\n");
+ }
+ }
+ }
- with_intel_runtime_pm(gt->uncore->rpm, wakeref)
- intel_gsc_uc_fw_upload(gsc);
+out_put:
+ intel_runtime_pm_put(gt->uncore->rpm, wakeref);
}
static bool gsc_engine_supported(struct intel_gt *gt)
@@ -43,6 +88,8 @@ static bool gsc_engine_supported(struct intel_gt *gt)
void intel_gsc_uc_init_early(struct intel_gsc_uc *gsc)
{
+ struct intel_gt *gt = gsc_uc_to_gt(gsc);
+
intel_uc_fw_init_early(&gsc->fw, INTEL_UC_FW_TYPE_GSC);
INIT_WORK(&gsc->work, gsc_work);
@@ -50,10 +97,16 @@ void intel_gsc_uc_init_early(struct intel_gsc_uc *gsc)
* GT with it being not fully setup hence check device info's
* engine mask
*/
- if (!gsc_engine_supported(gsc_uc_to_gt(gsc))) {
+ if (!gsc_engine_supported(gt)) {
intel_uc_fw_change_status(&gsc->fw, INTEL_UC_FIRMWARE_NOT_SUPPORTED);
return;
}
+
+ gsc->wq = alloc_ordered_workqueue("i915_gsc", 0);
+ if (!gsc->wq) {
+ gt_err(gt, "failed to allocate WQ for GSC, disabling FW\n");
+ intel_uc_fw_change_status(&gsc->fw, INTEL_UC_FIRMWARE_NOT_SUPPORTED);
+ }
}
int intel_gsc_uc_init(struct intel_gsc_uc *gsc)
@@ -88,6 +141,9 @@ int intel_gsc_uc_init(struct intel_gsc_uc *gsc)
gsc->ce = ce;
+ /* if we fail to init proxy we still want to load GSC for PM */
+ intel_gsc_proxy_init(gsc);
+
intel_uc_fw_change_status(&gsc->fw, INTEL_UC_FIRMWARE_LOADABLE);
return 0;
@@ -107,6 +163,12 @@ void intel_gsc_uc_fini(struct intel_gsc_uc *gsc)
return;
flush_work(&gsc->work);
+ if (gsc->wq) {
+ destroy_workqueue(gsc->wq);
+ gsc->wq = NULL;
+ }
+
+ intel_gsc_proxy_fini(gsc);
if (gsc->ce)
intel_engine_destroy_pinned_context(fetch_and_zero(&gsc->ce));
@@ -145,11 +207,17 @@ void intel_gsc_uc_resume(struct intel_gsc_uc *gsc)
void intel_gsc_uc_load_start(struct intel_gsc_uc *gsc)
{
+ struct intel_gt *gt = gsc_uc_to_gt(gsc);
+
if (!intel_uc_fw_is_loadable(&gsc->fw))
return;
if (intel_gsc_uc_fw_init_done(gsc))
return;
- queue_work(system_unbound_wq, &gsc->work);
+ spin_lock_irq(gt->irq_lock);
+ gsc->gsc_work_actions |= GSC_ACTION_FW_LOAD;
+ spin_unlock_irq(gt->irq_lock);
+
+ queue_work(gsc->wq, &gsc->work);
}
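
gsc_work() and intel_gsc_uc_load_start() above follow a common producer/worker pattern: producers OR bits into gsc_work_actions under gt->irq_lock and queue the work, and the worker snapshots and clears the mask under the same lock before acting on it, so actions posted while the worker runs are not lost. A minimal pthread sketch of that idea (names are illustrative):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define ACTION_FW_LOAD  (1u << 0)
#define ACTION_SW_PROXY (1u << 1)

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static uint32_t pending_actions;

static void post_action(uint32_t action)
{
	pthread_mutex_lock(&lock);
	pending_actions |= action;	/* accumulate; never overwrite */
	pthread_mutex_unlock(&lock);
	/* a real driver would queue_work() here */
}

static void worker(void)
{
	uint32_t actions;

	/* snapshot and clear under the lock so no posting is lost */
	pthread_mutex_lock(&lock);
	actions = pending_actions;
	pending_actions = 0;
	pthread_mutex_unlock(&lock);

	if (actions & ACTION_FW_LOAD)
		printf("load firmware\n");
	if (actions & (ACTION_FW_LOAD | ACTION_SW_PROXY))
		printf("run proxy handshake\n");
}

int main(void)
{
	post_action(ACTION_FW_LOAD);
	post_action(ACTION_SW_PROXY);
	worker();
	return 0;
}
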
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.h b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.h
index 5f50fa1ff8b9..a2a0813b8a76 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.h
@@ -10,6 +10,7 @@
struct i915_vma;
struct intel_context;
+struct i915_gsc_proxy_component;
struct intel_gsc_uc {
/* Generic uC firmware management */
@@ -19,7 +20,21 @@ struct intel_gsc_uc {
struct i915_vma *local; /* private memory for GSC usage */
struct intel_context *ce; /* for submission to GSC FW via GSC engine */
- struct work_struct work; /* for delayed load */
+ /* for delayed load and proxy handling */
+ struct workqueue_struct *wq;
+ struct work_struct work;
+ u32 gsc_work_actions; /* protected by gt->irq_lock */
+#define GSC_ACTION_FW_LOAD BIT(0)
+#define GSC_ACTION_SW_PROXY BIT(1)
+
+ struct {
+ struct i915_gsc_proxy_component *component;
+ bool component_added;
+ struct i915_vma *vma;
+ void *to_gsc;
+ void *to_csme;
+ struct mutex mutex; /* protects the tee channel binding */
+ } proxy;
};
void intel_gsc_uc_init_early(struct intel_gsc_uc *gsc);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c
index ea0da06e2f39..579c0f5a1438 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c
@@ -3,6 +3,7 @@
* Copyright © 2023 Intel Corporation
*/
+#include "gt/intel_context.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"
@@ -107,3 +108,104 @@ void intel_gsc_uc_heci_cmd_emit_mtl_header(struct intel_gsc_mtl_header *header,
header->header_version = MTL_GSC_HEADER_VERSION;
header->message_size = message_size;
}
+
+static void
+emit_gsc_heci_pkt_nonpriv(u32 *cmd, struct intel_gsc_heci_non_priv_pkt *pkt)
+{
+ *cmd++ = GSC_HECI_CMD_PKT;
+ *cmd++ = lower_32_bits(pkt->addr_in);
+ *cmd++ = upper_32_bits(pkt->addr_in);
+ *cmd++ = pkt->size_in;
+ *cmd++ = lower_32_bits(pkt->addr_out);
+ *cmd++ = upper_32_bits(pkt->addr_out);
+ *cmd++ = pkt->size_out;
+ *cmd++ = 0;
+ *cmd++ = MI_BATCH_BUFFER_END;
+}
+
+int
+intel_gsc_uc_heci_cmd_submit_nonpriv(struct intel_gsc_uc *gsc,
+ struct intel_context *ce,
+ struct intel_gsc_heci_non_priv_pkt *pkt,
+ u32 *cmd, int timeout_ms)
+{
+ struct intel_engine_cs *engine;
+ struct i915_gem_ww_ctx ww;
+ struct i915_request *rq;
+ int err, trials = 0;
+
+ i915_gem_ww_ctx_init(&ww, false);
+retry:
+ err = i915_gem_object_lock(pkt->bb_vma->obj, &ww);
+ if (err)
+ goto out_ww;
+ err = i915_gem_object_lock(pkt->heci_pkt_vma->obj, &ww);
+ if (err)
+ goto out_ww;
+ err = intel_context_pin_ww(ce, &ww);
+ if (err)
+ goto out_ww;
+
+ rq = i915_request_create(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_unpin_ce;
+ }
+
+ emit_gsc_heci_pkt_nonpriv(cmd, pkt);
+
+ err = i915_vma_move_to_active(pkt->bb_vma, rq, 0);
+ if (err)
+ goto out_rq;
+ err = i915_vma_move_to_active(pkt->heci_pkt_vma, rq, EXEC_OBJECT_WRITE);
+ if (err)
+ goto out_rq;
+
+ engine = rq->context->engine;
+ if (engine->emit_init_breadcrumb) {
+ err = engine->emit_init_breadcrumb(rq);
+ if (err)
+ goto out_rq;
+ }
+
+ err = engine->emit_bb_start(rq, i915_vma_offset(pkt->bb_vma), PAGE_SIZE, 0);
+ if (err)
+ goto out_rq;
+
+ err = ce->engine->emit_flush(rq, 0);
+ if (err)
+ drm_err(&gsc_uc_to_gt(gsc)->i915->drm,
+			"Failed emit-flush for gsc-heci-non-priv-pkt, err=%d\n", err);
+
+out_rq:
+ i915_request_get(rq);
+
+ if (unlikely(err))
+ i915_request_set_error_once(rq, err);
+
+ i915_request_add(rq);
+
+ if (!err) {
+ if (i915_request_wait(rq, I915_WAIT_INTERRUPTIBLE,
+ msecs_to_jiffies(timeout_ms)) < 0)
+ err = -ETIME;
+ }
+
+ i915_request_put(rq);
+
+out_unpin_ce:
+ intel_context_unpin(ce);
+out_ww:
+ if (err == -EDEADLK) {
+ err = i915_gem_ww_ctx_backoff(&ww);
+ if (!err) {
+ if (++trials < 10)
+ goto retry;
+ else
+				err = -EAGAIN;
+ }
+ }
+ i915_gem_ww_ctx_fini(&ww);
+
+ return err;
+}
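
The -EDEADLK handling at the end of intel_gsc_uc_heci_cmd_submit_nonpriv() is the usual i915_gem_ww_ctx transaction shape: any lock acquisition in the sequence may return -EDEADLK, i915_gem_ww_ctx_backoff() releases what is already held, and the whole sequence restarts from the retry label, capped here at ten attempts. A loosely analogous two-lock sketch with pthreads (the backoff is simplified to drop-and-retry; the real ww machinery also waits on the contended lock):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER;

static int do_transaction(void)
{
	int trials = 0;

retry:
	pthread_mutex_lock(&lock_a);
	if (pthread_mutex_trylock(&lock_b) == EBUSY) {
		/* "backoff": drop what we hold and start over */
		pthread_mutex_unlock(&lock_a);
		if (++trials < 10)
			goto retry;
		return -EAGAIN;
	}

	printf("both objects locked, submit the request\n");

	pthread_mutex_unlock(&lock_b);
	pthread_mutex_unlock(&lock_a);
	return 0;
}

int main(void)
{
	return do_transaction() ? 1 : 0;
}
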
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.h b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.h
index 3d56ae501991..ef70e304904a 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.h
@@ -8,12 +8,16 @@
#include <linux/types.h>
+struct i915_vma;
+struct intel_context;
struct intel_gsc_uc;
+
struct intel_gsc_mtl_header {
u32 validity_marker;
#define GSC_HECI_VALIDITY_MARKER 0xA578875A
u8 heci_client_id;
+#define HECI_MEADDRESS_PROXY 10
#define HECI_MEADDRESS_PXP 17
#define HECI_MEADDRESS_HDCP 18
@@ -47,7 +51,8 @@ struct intel_gsc_mtl_header {
* we distinguish the flags using OUTFLAG or INFLAG
*/
u32 flags;
-#define GSC_OUTFLAG_MSG_PENDING 1
+#define GSC_OUTFLAG_MSG_PENDING BIT(0)
+#define GSC_INFLAG_MSG_CLEANUP BIT(1)
u32 status;
} __packed;
@@ -58,4 +63,19 @@ int intel_gsc_uc_heci_cmd_submit_packet(struct intel_gsc_uc *gsc,
void intel_gsc_uc_heci_cmd_emit_mtl_header(struct intel_gsc_mtl_header *header,
u8 heci_client_id, u32 message_size,
u64 host_session_id);
+
+struct intel_gsc_heci_non_priv_pkt {
+ u64 addr_in;
+ u32 size_in;
+ u64 addr_out;
+ u32 size_out;
+ struct i915_vma *heci_pkt_vma;
+ struct i915_vma *bb_vma;
+};
+
+int
+intel_gsc_uc_heci_cmd_submit_nonpriv(struct intel_gsc_uc *gsc,
+ struct intel_context *ce,
+ struct intel_gsc_heci_non_priv_pkt *pkt,
+ u32 *cs, int timeout_ms);
#endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
index d76508fa3af7..f9bddaa876d9 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
@@ -743,6 +743,13 @@ struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
if (IS_ERR(obj))
return ERR_CAST(obj);
+ /*
+ * Wa_22016122933: For MTL the shared memory needs to be mapped
+ * as WC on CPU side and UC (PAT index 2) on GPU side
+ */
+ if (IS_METEORLAKE(gt->i915))
+ i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
+
vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
if (IS_ERR(vma))
goto err;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
index e46aac1a41e6..8dc291ff0093 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
@@ -42,6 +42,7 @@ struct intel_guc {
/** @capture: the error-state-capture module's data and objects */
struct intel_guc_state_capture *capture;
+ /** @dbgfs_node: debugfs node */
struct dentry *dbgfs_node;
/** @sched_engine: Global engine used to submit requests to GuC */
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
index 69ce06faf8cd..63724e17829a 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
@@ -643,6 +643,39 @@ static void guc_init_golden_context(struct intel_guc *guc)
GEM_BUG_ON(guc->ads_golden_ctxt_size != total_size);
}
+static u32 guc_get_capture_engine_mask(struct iosys_map *info_map, u32 capture_class)
+{
+ u32 mask;
+
+ switch (capture_class) {
+ case GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE:
+ mask = info_map_read(info_map, engine_enabled_masks[GUC_RENDER_CLASS]);
+ mask |= info_map_read(info_map, engine_enabled_masks[GUC_COMPUTE_CLASS]);
+ break;
+
+ case GUC_CAPTURE_LIST_CLASS_VIDEO:
+ mask = info_map_read(info_map, engine_enabled_masks[GUC_VIDEO_CLASS]);
+ break;
+
+ case GUC_CAPTURE_LIST_CLASS_VIDEOENHANCE:
+ mask = info_map_read(info_map, engine_enabled_masks[GUC_VIDEOENHANCE_CLASS]);
+ break;
+
+ case GUC_CAPTURE_LIST_CLASS_BLITTER:
+ mask = info_map_read(info_map, engine_enabled_masks[GUC_BLITTER_CLASS]);
+ break;
+
+ case GUC_CAPTURE_LIST_CLASS_GSC_OTHER:
+ mask = info_map_read(info_map, engine_enabled_masks[GUC_GSC_OTHER_CLASS]);
+ break;
+
+ default:
+ mask = 0;
+ }
+
+ return mask;
+}
+
static int
guc_capture_prep_lists(struct intel_guc *guc)
{
@@ -678,9 +711,10 @@ guc_capture_prep_lists(struct intel_guc *guc)
for (i = 0; i < GUC_CAPTURE_LIST_INDEX_MAX; i++) {
for (j = 0; j < GUC_MAX_ENGINE_CLASSES; j++) {
+ u32 engine_mask = guc_get_capture_engine_mask(&info_map, j);
			/* null list if we don't have said engine or list */
- if (!info_map_read(&info_map, engine_enabled_masks[j])) {
+ if (!engine_mask) {
if (ads_is_mapped) {
ads_blob_write(guc, ads.capture_class[i][j], null_ggtt);
ads_blob_write(guc, ads.capture_instance[i][j], null_ggtt);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
index cf49188db6a6..0ff864da92df 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
@@ -30,13 +30,15 @@
#define COMMON_BASE_GLOBAL \
{ FORCEWAKE_MT, 0, 0, "FORCEWAKE" }
-#define COMMON_GEN9BASE_GLOBAL \
- { GEN8_FAULT_TLB_DATA0, 0, 0, "GEN8_FAULT_TLB_DATA0" }, \
- { GEN8_FAULT_TLB_DATA1, 0, 0, "GEN8_FAULT_TLB_DATA1" }, \
+#define COMMON_GEN8BASE_GLOBAL \
{ ERROR_GEN6, 0, 0, "ERROR_GEN6" }, \
{ DONE_REG, 0, 0, "DONE_REG" }, \
{ HSW_GTT_CACHE_EN, 0, 0, "HSW_GTT_CACHE_EN" }
+#define GEN8_GLOBAL \
+ { GEN8_FAULT_TLB_DATA0, 0, 0, "GEN8_FAULT_TLB_DATA0" }, \
+ { GEN8_FAULT_TLB_DATA1, 0, 0, "GEN8_FAULT_TLB_DATA1" }
+
#define COMMON_GEN12BASE_GLOBAL \
{ GEN12_FAULT_TLB_DATA0, 0, 0, "GEN12_FAULT_TLB_DATA0" }, \
{ GEN12_FAULT_TLB_DATA1, 0, 0, "GEN12_FAULT_TLB_DATA1" }, \
@@ -94,66 +96,65 @@
{ GEN12_SFC_DONE(2), 0, 0, "SFC_DONE[2]" }, \
{ GEN12_SFC_DONE(3), 0, 0, "SFC_DONE[3]" }
-/* XE_LPD - Global */
-static const struct __guc_mmio_reg_descr xe_lpd_global_regs[] = {
+/* XE_LP Global */
+static const struct __guc_mmio_reg_descr xe_lp_global_regs[] = {
COMMON_BASE_GLOBAL,
- COMMON_GEN9BASE_GLOBAL,
+ COMMON_GEN8BASE_GLOBAL,
COMMON_GEN12BASE_GLOBAL,
};
-/* XE_LPD - Render / Compute Per-Class */
-static const struct __guc_mmio_reg_descr xe_lpd_rc_class_regs[] = {
+/* XE_LP Render / Compute Per-Class */
+static const struct __guc_mmio_reg_descr xe_lp_rc_class_regs[] = {
COMMON_BASE_HAS_EU,
COMMON_BASE_RENDER,
COMMON_GEN12BASE_RENDER,
};
-/* GEN9/XE_LPD - Render / Compute Per-Engine-Instance */
-static const struct __guc_mmio_reg_descr xe_lpd_rc_inst_regs[] = {
+/* GEN8+ Render / Compute Per-Engine-Instance */
+static const struct __guc_mmio_reg_descr gen8_rc_inst_regs[] = {
COMMON_BASE_ENGINE_INSTANCE,
};
-/* GEN9/XE_LPD - Media Decode/Encode Per-Engine-Instance */
-static const struct __guc_mmio_reg_descr xe_lpd_vd_inst_regs[] = {
+/* GEN8+ Media Decode/Encode Per-Engine-Instance */
+static const struct __guc_mmio_reg_descr gen8_vd_inst_regs[] = {
COMMON_BASE_ENGINE_INSTANCE,
};
-/* XE_LPD - Video Enhancement Per-Class */
-static const struct __guc_mmio_reg_descr xe_lpd_vec_class_regs[] = {
+/* XE_LP Video Enhancement Per-Class */
+static const struct __guc_mmio_reg_descr xe_lp_vec_class_regs[] = {
COMMON_GEN12BASE_VEC,
};
-/* GEN9/XE_LPD - Video Enhancement Per-Engine-Instance */
-static const struct __guc_mmio_reg_descr xe_lpd_vec_inst_regs[] = {
+/* GEN8+ Video Enhancement Per-Engine-Instance */
+static const struct __guc_mmio_reg_descr gen8_vec_inst_regs[] = {
COMMON_BASE_ENGINE_INSTANCE,
};
-/* GEN9/XE_LPD - Blitter Per-Engine-Instance */
-static const struct __guc_mmio_reg_descr xe_lpd_blt_inst_regs[] = {
+/* GEN8+ Blitter Per-Engine-Instance */
+static const struct __guc_mmio_reg_descr gen8_blt_inst_regs[] = {
COMMON_BASE_ENGINE_INSTANCE,
};
-/* XE_LPD - GSC Per-Engine-Instance */
-static const struct __guc_mmio_reg_descr xe_lpd_gsc_inst_regs[] = {
+/* XE_LP - GSC Per-Engine-Instance */
+static const struct __guc_mmio_reg_descr xe_lp_gsc_inst_regs[] = {
COMMON_BASE_ENGINE_INSTANCE,
};
-/* GEN9 - Global */
-static const struct __guc_mmio_reg_descr default_global_regs[] = {
+/* GEN8 - Global */
+static const struct __guc_mmio_reg_descr gen8_global_regs[] = {
COMMON_BASE_GLOBAL,
- COMMON_GEN9BASE_GLOBAL,
+ COMMON_GEN8BASE_GLOBAL,
+ GEN8_GLOBAL,
};
-static const struct __guc_mmio_reg_descr default_rc_class_regs[] = {
+static const struct __guc_mmio_reg_descr gen8_rc_class_regs[] = {
COMMON_BASE_HAS_EU,
COMMON_BASE_RENDER,
};
/*
- * Empty lists:
- * GEN9/XE_LPD - Blitter Per-Class
- * GEN9/XE_LPD - Media Decode/Encode Per-Class
- * GEN9 - VEC Class
+ * Empty list to prevent warnings about unknown class/instance types
+ * as not all class/instance types have entries on all platforms.
*/
static const struct __guc_mmio_reg_descr empty_regs_list[] = {
};
@@ -171,37 +172,33 @@ static const struct __guc_mmio_reg_descr empty_regs_list[] = {
}
/* List of lists */
-static const struct __guc_mmio_reg_descr_group default_lists[] = {
- MAKE_REGLIST(default_global_regs, PF, GLOBAL, 0),
- MAKE_REGLIST(default_rc_class_regs, PF, ENGINE_CLASS, GUC_RENDER_CLASS),
- MAKE_REGLIST(xe_lpd_rc_inst_regs, PF, ENGINE_INSTANCE, GUC_RENDER_CLASS),
- MAKE_REGLIST(default_rc_class_regs, PF, ENGINE_CLASS, GUC_COMPUTE_CLASS),
- MAKE_REGLIST(xe_lpd_rc_inst_regs, PF, ENGINE_INSTANCE, GUC_COMPUTE_CLASS),
- MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_VIDEO_CLASS),
- MAKE_REGLIST(xe_lpd_vd_inst_regs, PF, ENGINE_INSTANCE, GUC_VIDEO_CLASS),
- MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_VIDEOENHANCE_CLASS),
- MAKE_REGLIST(xe_lpd_vec_inst_regs, PF, ENGINE_INSTANCE, GUC_VIDEOENHANCE_CLASS),
- MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_BLITTER_CLASS),
- MAKE_REGLIST(xe_lpd_blt_inst_regs, PF, ENGINE_INSTANCE, GUC_BLITTER_CLASS),
- MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_GSC_OTHER_CLASS),
- MAKE_REGLIST(xe_lpd_gsc_inst_regs, PF, ENGINE_INSTANCE, GUC_GSC_OTHER_CLASS),
+static const struct __guc_mmio_reg_descr_group gen8_lists[] = {
+ MAKE_REGLIST(gen8_global_regs, PF, GLOBAL, 0),
+ MAKE_REGLIST(gen8_rc_class_regs, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE),
+ MAKE_REGLIST(gen8_rc_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE),
+ MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_VIDEO),
+ MAKE_REGLIST(gen8_vd_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_VIDEO),
+ MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_VIDEOENHANCE),
+ MAKE_REGLIST(gen8_vec_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_VIDEOENHANCE),
+ MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_BLITTER),
+ MAKE_REGLIST(gen8_blt_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_BLITTER),
+ MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_GSC_OTHER),
+ MAKE_REGLIST(empty_regs_list, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_GSC_OTHER),
{}
};
-static const struct __guc_mmio_reg_descr_group xe_lpd_lists[] = {
- MAKE_REGLIST(xe_lpd_global_regs, PF, GLOBAL, 0),
- MAKE_REGLIST(xe_lpd_rc_class_regs, PF, ENGINE_CLASS, GUC_RENDER_CLASS),
- MAKE_REGLIST(xe_lpd_rc_inst_regs, PF, ENGINE_INSTANCE, GUC_RENDER_CLASS),
- MAKE_REGLIST(xe_lpd_rc_class_regs, PF, ENGINE_CLASS, GUC_COMPUTE_CLASS),
- MAKE_REGLIST(xe_lpd_rc_inst_regs, PF, ENGINE_INSTANCE, GUC_COMPUTE_CLASS),
- MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_VIDEO_CLASS),
- MAKE_REGLIST(xe_lpd_vd_inst_regs, PF, ENGINE_INSTANCE, GUC_VIDEO_CLASS),
- MAKE_REGLIST(xe_lpd_vec_class_regs, PF, ENGINE_CLASS, GUC_VIDEOENHANCE_CLASS),
- MAKE_REGLIST(xe_lpd_vec_inst_regs, PF, ENGINE_INSTANCE, GUC_VIDEOENHANCE_CLASS),
- MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_BLITTER_CLASS),
- MAKE_REGLIST(xe_lpd_blt_inst_regs, PF, ENGINE_INSTANCE, GUC_BLITTER_CLASS),
- MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_GSC_OTHER_CLASS),
- MAKE_REGLIST(xe_lpd_gsc_inst_regs, PF, ENGINE_INSTANCE, GUC_GSC_OTHER_CLASS),
+static const struct __guc_mmio_reg_descr_group xe_lp_lists[] = {
+ MAKE_REGLIST(xe_lp_global_regs, PF, GLOBAL, 0),
+ MAKE_REGLIST(xe_lp_rc_class_regs, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE),
+ MAKE_REGLIST(gen8_rc_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE),
+ MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_VIDEO),
+ MAKE_REGLIST(gen8_vd_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_VIDEO),
+ MAKE_REGLIST(xe_lp_vec_class_regs, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_VIDEOENHANCE),
+ MAKE_REGLIST(gen8_vec_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_VIDEOENHANCE),
+ MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_BLITTER),
+ MAKE_REGLIST(gen8_blt_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_BLITTER),
+ MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_GSC_OTHER),
+ MAKE_REGLIST(xe_lp_gsc_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_GSC_OTHER),
{}
};
@@ -257,11 +254,15 @@ struct __ext_steer_reg {
i915_mcr_reg_t reg;
};
-static const struct __ext_steer_reg xe_extregs[] = {
+static const struct __ext_steer_reg gen8_extregs[] = {
{"GEN8_SAMPLER_INSTDONE", GEN8_SAMPLER_INSTDONE},
{"GEN8_ROW_INSTDONE", GEN8_ROW_INSTDONE}
};
+static const struct __ext_steer_reg xehpg_extregs[] = {
+ {"XEHPG_INSTDONE_GEOM_SVG", XEHPG_INSTDONE_GEOM_SVG}
+};
+
static void __fill_ext_reg(struct __guc_mmio_reg_descr *ext,
const struct __ext_steer_reg *extlist,
int slice_id, int subslice_id)
@@ -292,8 +293,8 @@ __alloc_ext_regs(struct __guc_mmio_reg_descr_group *newlist,
}
static void
-guc_capture_alloc_steered_lists_xe_lpd(struct intel_guc *guc,
- const struct __guc_mmio_reg_descr_group *lists)
+guc_capture_alloc_steered_lists(struct intel_guc *guc,
+ const struct __guc_mmio_reg_descr_group *lists)
{
struct intel_gt *gt = guc_to_gt(guc);
int slice, subslice, iter, i, num_steer_regs, num_tot_regs = 0;
@@ -301,74 +302,20 @@ guc_capture_alloc_steered_lists_xe_lpd(struct intel_guc *guc,
struct __guc_mmio_reg_descr_group *extlists;
struct __guc_mmio_reg_descr *extarray;
struct sseu_dev_info *sseu;
+ bool has_xehpg_extregs;
- /* In XE_LPD we only have steered registers for the render-class */
+ /* steered registers currently only exist for the render-class */
list = guc_capture_get_one_list(lists, GUC_CAPTURE_LIST_INDEX_PF,
- GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS, GUC_RENDER_CLASS);
+ GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS,
+ GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE);
/* skip if extlists was previously allocated */
if (!list || guc->capture->extlists)
return;
- num_steer_regs = ARRAY_SIZE(xe_extregs);
-
- sseu = &gt->info.sseu;
- for_each_ss_steering(iter, gt, slice, subslice)
- num_tot_regs += num_steer_regs;
-
- if (!num_tot_regs)
- return;
-
- /* allocate an extra for an end marker */
- extlists = kcalloc(2, sizeof(struct __guc_mmio_reg_descr_group), GFP_KERNEL);
- if (!extlists)
- return;
+ has_xehpg_extregs = GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 55);
- if (__alloc_ext_regs(&extlists[0], list, num_tot_regs)) {
- kfree(extlists);
- return;
- }
-
- extarray = extlists[0].extlist;
- for_each_ss_steering(iter, gt, slice, subslice) {
- for (i = 0; i < num_steer_regs; ++i) {
- __fill_ext_reg(extarray, &xe_extregs[i], slice, subslice);
- ++extarray;
- }
- }
-
- guc->capture->extlists = extlists;
-}
-
-static const struct __ext_steer_reg xehpg_extregs[] = {
- {"XEHPG_INSTDONE_GEOM_SVG", XEHPG_INSTDONE_GEOM_SVG}
-};
-
-static bool __has_xehpg_extregs(u32 ipver)
-{
- return (ipver >= IP_VER(12, 55));
-}
-
-static void
-guc_capture_alloc_steered_lists_xe_hpg(struct intel_guc *guc,
- const struct __guc_mmio_reg_descr_group *lists,
- u32 ipver)
-{
- struct intel_gt *gt = guc_to_gt(guc);
- struct sseu_dev_info *sseu;
- int slice, subslice, i, iter, num_steer_regs, num_tot_regs = 0;
- const struct __guc_mmio_reg_descr_group *list;
- struct __guc_mmio_reg_descr_group *extlists;
- struct __guc_mmio_reg_descr *extarray;
-
- /* In XE_LP / HPG we only have render-class steering registers during error-capture */
- list = guc_capture_get_one_list(lists, GUC_CAPTURE_LIST_INDEX_PF,
- GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS, GUC_RENDER_CLASS);
- /* skip if extlists was previously allocated */
- if (!list || guc->capture->extlists)
- return;
-
- num_steer_regs = ARRAY_SIZE(xe_extregs);
- if (__has_xehpg_extregs(ipver))
+ num_steer_regs = ARRAY_SIZE(gen8_extregs);
+ if (has_xehpg_extregs)
num_steer_regs += ARRAY_SIZE(xehpg_extregs);
sseu = &gt->info.sseu;
@@ -390,11 +337,12 @@ guc_capture_alloc_steered_lists_xe_hpg(struct intel_guc *guc,
extarray = extlists[0].extlist;
for_each_ss_steering(iter, gt, slice, subslice) {
- for (i = 0; i < ARRAY_SIZE(xe_extregs); ++i) {
- __fill_ext_reg(extarray, &xe_extregs[i], slice, subslice);
+ for (i = 0; i < ARRAY_SIZE(gen8_extregs); ++i) {
+ __fill_ext_reg(extarray, &gen8_extregs[i], slice, subslice);
++extarray;
}
- if (__has_xehpg_extregs(ipver)) {
+
+ if (has_xehpg_extregs) {
for (i = 0; i < ARRAY_SIZE(xehpg_extregs); ++i) {
__fill_ext_reg(extarray, &xehpg_extregs[i], slice, subslice);
++extarray;
@@ -410,26 +358,22 @@ static const struct __guc_mmio_reg_descr_group *
guc_capture_get_device_reglist(struct intel_guc *guc)
{
struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+ const struct __guc_mmio_reg_descr_group *lists;
- if (GRAPHICS_VER(i915) > 11) {
- /*
- * For certain engine classes, there are slice and subslice
- * level registers requiring steering. We allocate and populate
- * these at init time based on hw config add it as an extension
- * list at the end of the pre-populated render list.
- */
- if (IS_DG2(i915))
- guc_capture_alloc_steered_lists_xe_hpg(guc, xe_lpd_lists, IP_VER(12, 55));
- else if (IS_XEHPSDV(i915))
- guc_capture_alloc_steered_lists_xe_hpg(guc, xe_lpd_lists, IP_VER(12, 50));
- else
- guc_capture_alloc_steered_lists_xe_lpd(guc, xe_lpd_lists);
+ if (GRAPHICS_VER(i915) >= 12)
+ lists = xe_lp_lists;
+ else
+ lists = gen8_lists;
- return xe_lpd_lists;
- }
+ /*
+ * For certain engine classes, there are slice and subslice
+ * level registers requiring steering. We allocate and populate
+	 * these at init time based on the hw config and add them as an extension
+ * list at the end of the pre-populated render list.
+ */
+ guc_capture_alloc_steered_lists(guc, lists);
- /* if GuC submission is enabled on a non-POR platform, just use a common baseline */
- return default_lists;
+ return lists;
}
static const char *
@@ -453,17 +397,15 @@ static const char *
__stringify_engclass(u32 class)
{
switch (class) {
- case GUC_RENDER_CLASS:
- return "Render";
- case GUC_VIDEO_CLASS:
+ case GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE:
+ return "Render/Compute";
+ case GUC_CAPTURE_LIST_CLASS_VIDEO:
return "Video";
- case GUC_VIDEOENHANCE_CLASS:
+ case GUC_CAPTURE_LIST_CLASS_VIDEOENHANCE:
return "VideoEnhance";
- case GUC_BLITTER_CLASS:
+ case GUC_CAPTURE_LIST_CLASS_BLITTER:
return "Blitter";
- case GUC_COMPUTE_CLASS:
- return "Compute";
- case GUC_GSC_OTHER_CLASS:
+ case GUC_CAPTURE_LIST_CLASS_GSC_OTHER:
return "GSC-Other";
default:
break;
@@ -1593,6 +1535,36 @@ void intel_guc_capture_free_node(struct intel_engine_coredump *ee)
ee->guc_capture_node = NULL;
}
+bool intel_guc_capture_is_matching_engine(struct intel_gt *gt,
+ struct intel_context *ce,
+ struct intel_engine_cs *engine)
+{
+ struct __guc_capture_parsed_output *n;
+ struct intel_guc *guc;
+
+ if (!gt || !ce || !engine)
+ return false;
+
+ guc = &gt->uc.guc;
+ if (!guc->capture)
+ return false;
+
+ /*
+ * Look for a matching GuC reported error capture node from
+ * the internal output link-list based on lrca, guc-id and engine
+ * identification.
+ */
+ list_for_each_entry(n, &guc->capture->outlist, link) {
+ if (n->eng_inst == GUC_ID_TO_ENGINE_INSTANCE(engine->guc_id) &&
+ n->eng_class == GUC_ID_TO_ENGINE_CLASS(engine->guc_id) &&
+ n->guc_id == ce->guc_id.id &&
+ (n->lrca & CTX_GTT_ADDRESS_MASK) == (ce->lrc.lrca & CTX_GTT_ADDRESS_MASK))
+ return true;
+ }
+
+ return false;
+}
+
void intel_guc_capture_get_matching_node(struct intel_gt *gt,
struct intel_engine_coredump *ee,
struct intel_context *ce)
@@ -1608,6 +1580,7 @@ void intel_guc_capture_get_matching_node(struct intel_gt *gt,
return;
GEM_BUG_ON(ee->guc_capture_node);
+
/*
* Look for a matching GuC reported error capture node from
* the internal output link-list based on lrca, guc-id and engine
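
In the consolidated guc_capture_alloc_steered_lists() above, the extension list is sized as one register descriptor per (slice, subslice) steering combination per steered register, with the xehpg register only counted from graphics IP 12.55 onwards. The sizing arithmetic in isolation (the combo count is made up for the example):

#include <stdio.h>

int main(void)
{
	int gen8_regs = 2;		/* GEN8_SAMPLER_INSTDONE, GEN8_ROW_INSTDONE */
	int xehpg_regs = 1;		/* XEHPG_INSTDONE_GEOM_SVG */
	int steering_combos = 8;	/* hypothetical (slice, subslice) count */
	int has_xehpg = 1;		/* GRAPHICS_VER_FULL >= IP_VER(12, 55) */

	int per_combo = gen8_regs + (has_xehpg ? xehpg_regs : 0);

	printf("%d steered capture registers\n", per_combo * steering_combos);
	return 0;
}
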
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.h
index fbd3713c7832..302256d45431 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.h
@@ -11,6 +11,7 @@
struct drm_i915_error_state_buf;
struct guc_gt_system_info;
struct intel_engine_coredump;
+struct intel_engine_cs;
struct intel_context;
struct intel_gt;
struct intel_guc;
@@ -20,6 +21,8 @@ int intel_guc_capture_print_engine_node(struct drm_i915_error_state_buf *m,
const struct intel_engine_coredump *ee);
void intel_guc_capture_get_matching_node(struct intel_gt *gt, struct intel_engine_coredump *ee,
struct intel_context *ce);
+bool intel_guc_capture_is_matching_engine(struct intel_gt *gt, struct intel_context *ce,
+ struct intel_engine_cs *engine);
void intel_guc_capture_process(struct intel_guc *guc);
int intel_guc_capture_getlist(struct intel_guc *guc, u32 owner, u32 type, u32 classid,
void **outptr);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
index 1803a633ed64..a22e33f37cae 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
@@ -13,6 +13,30 @@
#include "intel_guc_ct.h"
#include "intel_guc_print.h"
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GUC)
+enum {
+ CT_DEAD_ALIVE = 0,
+ CT_DEAD_SETUP,
+ CT_DEAD_WRITE,
+ CT_DEAD_DEADLOCK,
+ CT_DEAD_H2G_HAS_ROOM,
+ CT_DEAD_READ,
+ CT_DEAD_PROCESS_FAILED,
+};
+
+static void ct_dead_ct_worker_func(struct work_struct *w);
+
+#define CT_DEAD(ct, reason) \
+ do { \
+ if (!(ct)->dead_ct_reported) { \
+ (ct)->dead_ct_reason |= 1 << CT_DEAD_##reason; \
+ queue_work(system_unbound_wq, &(ct)->dead_ct_worker); \
+ } \
+ } while (0)
+#else
+#define CT_DEAD(ct, reason) do { } while (0)
+#endif
+
static inline struct intel_guc *ct_to_guc(struct intel_guc_ct *ct)
{
return container_of(ct, struct intel_guc, ct);
@@ -93,6 +117,9 @@ void intel_guc_ct_init_early(struct intel_guc_ct *ct)
spin_lock_init(&ct->requests.lock);
INIT_LIST_HEAD(&ct->requests.pending);
INIT_LIST_HEAD(&ct->requests.incoming);
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GUC)
+ INIT_WORK(&ct->dead_ct_worker, ct_dead_ct_worker_func);
+#endif
INIT_WORK(&ct->requests.worker, ct_incoming_request_worker_func);
tasklet_setup(&ct->receive_tasklet, ct_receive_tasklet_func);
init_waitqueue_head(&ct->wq);
@@ -319,11 +346,16 @@ int intel_guc_ct_enable(struct intel_guc_ct *ct)
ct->enabled = true;
ct->stall_time = KTIME_MAX;
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GUC)
+ ct->dead_ct_reported = false;
+ ct->dead_ct_reason = CT_DEAD_ALIVE;
+#endif
return 0;
err_out:
CT_PROBE_ERROR(ct, "Failed to enable CTB (%pe)\n", ERR_PTR(err));
+ CT_DEAD(ct, SETUP);
return err;
}
@@ -434,6 +466,7 @@ static int ct_write(struct intel_guc_ct *ct,
corrupted:
CT_ERROR(ct, "Corrupted descriptor head=%u tail=%u status=%#x\n",
desc->head, desc->tail, desc->status);
+ CT_DEAD(ct, WRITE);
ctb->broken = true;
return -EPIPE;
}
@@ -504,6 +537,7 @@ static inline bool ct_deadlocked(struct intel_guc_ct *ct)
CT_ERROR(ct, "Head: %u\n (Dwords)", ct->ctbs.recv.desc->head);
CT_ERROR(ct, "Tail: %u\n (Dwords)", ct->ctbs.recv.desc->tail);
+ CT_DEAD(ct, DEADLOCK);
ct->ctbs.send.broken = true;
}
@@ -552,6 +586,7 @@ static inline bool h2g_has_room(struct intel_guc_ct *ct, u32 len_dw)
head, ctb->size);
desc->status |= GUC_CTB_STATUS_OVERFLOW;
ctb->broken = true;
+ CT_DEAD(ct, H2G_HAS_ROOM);
return false;
}
@@ -902,12 +937,19 @@ static int ct_read(struct intel_guc_ct *ct, struct ct_incoming_msg **msg)
/* now update descriptor */
WRITE_ONCE(desc->head, head);
+ /*
+ * Wa_22016122933: Making sure the head update is
+ * visible to GuC right away
+ */
+ intel_guc_write_barrier(ct_to_guc(ct));
+
return available - len;
corrupted:
CT_ERROR(ct, "Corrupted descriptor head=%u tail=%u status=%#x\n",
desc->head, desc->tail, desc->status);
ctb->broken = true;
+ CT_DEAD(ct, READ);
return -EPIPE;
}
@@ -1057,6 +1099,7 @@ static bool ct_process_incoming_requests(struct intel_guc_ct *ct)
if (unlikely(err)) {
CT_ERROR(ct, "Failed to process CT message (%pe) %*ph\n",
ERR_PTR(err), 4 * request->size, request->msg);
+ CT_DEAD(ct, PROCESS_FAILED);
ct_free_msg(request);
}
@@ -1233,3 +1276,19 @@ void intel_guc_ct_print_info(struct intel_guc_ct *ct,
drm_printf(p, "Tail: %u\n",
ct->ctbs.recv.desc->tail);
}
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GUC)
+static void ct_dead_ct_worker_func(struct work_struct *w)
+{
+ struct intel_guc_ct *ct = container_of(w, struct intel_guc_ct, dead_ct_worker);
+ struct intel_guc *guc = ct_to_guc(ct);
+
+ if (ct->dead_ct_reported)
+ return;
+
+ ct->dead_ct_reported = true;
+
+ guc_info(guc, "CTB is dead - reason=0x%X\n", ct->dead_ct_reason);
+ intel_klog_error_capture(guc_to_gt(guc), (intel_engine_mask_t)~0U);
+}
+#endif
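
Because CT_DEAD() ORs 1 << CT_DEAD_<reason> into dead_ct_reason, the hex value logged by the worker above is a bitmask that can carry several reasons at once. A small decoder for such a value (the name table mirrors the enum at the top of the file; the helper itself is illustrative, not part of the patch):

#include <stdint.h>
#include <stdio.h>

static const char * const ct_dead_names[] = {
	"ALIVE", "SETUP", "WRITE", "DEADLOCK",
	"H2G_HAS_ROOM", "READ", "PROCESS_FAILED",
};

static void decode_dead_ct_reason(uint32_t reason)
{
	for (unsigned int i = 0; i < sizeof(ct_dead_names) / sizeof(ct_dead_names[0]); i++)
		if (reason & (1u << i))
			printf("CT dead: %s\n", ct_dead_names[i]);
}

int main(void)
{
	decode_dead_ct_reason(0x12);	/* SETUP | H2G_HAS_ROOM */
	return 0;
}
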
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h
index f709a19c7e21..818415b64f4d 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h
@@ -85,6 +85,12 @@ struct intel_guc_ct {
/** @stall_time: time of first time a CTB submission is stalled */
ktime_t stall_time;
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GUC)
+ int dead_ct_reason;
+ bool dead_ct_reported;
+ struct work_struct dead_ct_worker;
+#endif
};
void intel_guc_ct_init_early(struct intel_guc_ct *ct);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
index 6fda3aec5c66..364d0d546ec8 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
@@ -129,6 +129,7 @@ static inline bool guc_load_done(struct intel_uncore *uncore, u32 *status, bool
case INTEL_BOOTROM_STATUS_RC6CTXCONFIG_FAILED:
case INTEL_BOOTROM_STATUS_MPUMAP_INCORRECT:
case INTEL_BOOTROM_STATUS_EXCEPTION:
+ case INTEL_BOOTROM_STATUS_PROD_KEY_CHECK_FAILURE:
*success = false;
return true;
}
@@ -190,8 +191,10 @@ static int guc_wait_ucode(struct intel_guc *guc)
if (!ret || !success)
break;
- guc_dbg(guc, "load still in progress, count = %d, freq = %dMHz\n",
- count, intel_rps_read_actual_frequency(&uncore->gt->rps));
+ guc_dbg(guc, "load still in progress, count = %d, freq = %dMHz, status = 0x%08X [0x%02X/%02X]\n",
+ count, intel_rps_read_actual_frequency(&uncore->gt->rps), status,
+ REG_FIELD_GET(GS_BOOTROM_MASK, status),
+ REG_FIELD_GET(GS_UKERNEL_MASK, status));
}
after = ktime_get();
delta = ktime_sub(after, before);
@@ -219,6 +222,11 @@ static int guc_wait_ucode(struct intel_guc *guc)
guc_info(guc, "firmware signature verification failed\n");
ret = -ENOEXEC;
break;
+
+ case INTEL_BOOTROM_STATUS_PROD_KEY_CHECK_FAILURE:
+ guc_info(guc, "firmware production part check failure\n");
+ ret = -ENOEXEC;
+ break;
}
switch (ukernel) {
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
index 4ae5fc2f6002..4e57bd09d50d 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
@@ -411,6 +411,15 @@ enum guc_capture_type {
GUC_CAPTURE_LIST_TYPE_MAX,
};
+/* Class indices for capture_class and capture_instance arrays */
+enum {
+ GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE = 0,
+ GUC_CAPTURE_LIST_CLASS_VIDEO = 1,
+ GUC_CAPTURE_LIST_CLASS_VIDEOENHANCE = 2,
+ GUC_CAPTURE_LIST_CLASS_BLITTER = 3,
+ GUC_CAPTURE_LIST_CLASS_GSC_OTHER = 4,
+};
+
/* GuC Additional Data Struct */
struct guc_ads {
struct guc_mmio_reg_set reg_state_list[GUC_MAX_ENGINE_CLASSES][GUC_MAX_INSTANCES_PER_CLASS];
@@ -451,7 +460,7 @@ enum guc_log_buffer_type {
GUC_MAX_LOG_BUFFER
};
-/**
+/*
* struct guc_log_buffer_state - GuC log buffer state
*
* Below state structure is used for coordination of retrieval of GuC firmware
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
index 026d73855f36..01b75529311c 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
@@ -277,6 +277,7 @@ int intel_guc_slpc_init(struct intel_guc_slpc *slpc)
slpc->max_freq_softlimit = 0;
slpc->min_freq_softlimit = 0;
+ slpc->ignore_eff_freq = false;
slpc->min_is_rpmax = false;
slpc->boost_freq = 0;
@@ -457,6 +458,29 @@ int intel_guc_slpc_get_max_freq(struct intel_guc_slpc *slpc, u32 *val)
return ret;
}
+int intel_guc_slpc_set_ignore_eff_freq(struct intel_guc_slpc *slpc, bool val)
+{
+ struct drm_i915_private *i915 = slpc_to_i915(slpc);
+ intel_wakeref_t wakeref;
+ int ret;
+
+ mutex_lock(&slpc->lock);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+
+ ret = slpc_set_param(slpc,
+ SLPC_PARAM_IGNORE_EFFICIENT_FREQUENCY,
+ val);
+ if (ret)
+ guc_probe_error(slpc_to_guc(slpc), "Failed to set efficient freq(%d): %pe\n",
+ val, ERR_PTR(ret));
+ else
+ slpc->ignore_eff_freq = val;
+
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ mutex_unlock(&slpc->lock);
+ return ret;
+}
+
/**
* intel_guc_slpc_set_min_freq() - Set min frequency limit for SLPC.
* @slpc: pointer to intel_guc_slpc.
@@ -482,16 +506,6 @@ int intel_guc_slpc_set_min_freq(struct intel_guc_slpc *slpc, u32 val)
mutex_lock(&slpc->lock);
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
- /* Ignore efficient freq if lower min freq is requested */
- ret = slpc_set_param(slpc,
- SLPC_PARAM_IGNORE_EFFICIENT_FREQUENCY,
- val < slpc->rp1_freq);
- if (ret) {
- guc_probe_error(slpc_to_guc(slpc), "Failed to toggle efficient freq: %pe\n",
- ERR_PTR(ret));
- goto out;
- }
-
ret = slpc_set_param(slpc,
SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
val);
@@ -499,7 +513,6 @@ int intel_guc_slpc_set_min_freq(struct intel_guc_slpc *slpc, u32 val)
if (!ret)
slpc->min_freq_softlimit = val;
-out:
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&slpc->lock);
@@ -752,6 +765,9 @@ int intel_guc_slpc_enable(struct intel_guc_slpc *slpc)
/* Set cached media freq ratio mode */
intel_guc_slpc_set_media_ratio_mode(slpc, slpc->media_ratio_mode);
+ /* Set cached value of ignore efficient freq */
+ intel_guc_slpc_set_ignore_eff_freq(slpc, slpc->ignore_eff_freq);
+
return 0;
}
@@ -821,6 +837,8 @@ int intel_guc_slpc_print_info(struct intel_guc_slpc *slpc, struct drm_printer *p
slpc_decode_min_freq(slpc));
drm_printf(p, "\twaitboosts: %u\n",
slpc->num_boosts);
+ drm_printf(p, "\tBoosts outstanding: %u\n",
+ atomic_read(&slpc->num_waiters));
}
}
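
intel_guc_slpc_set_ignore_eff_freq() caches the requested value in slpc->ignore_eff_freq so that intel_guc_slpc_enable() can replay it after a reset, when SLPC comes back up with firmware defaults. The cache-and-replay shape reduced to a sketch (set_param() is a stand-in for the real host-to-GuC call):

#include <stdbool.h>
#include <stdio.h>

struct slpc_state {
	bool ignore_eff_freq;	/* cached user request */
};

static int set_param(const char *name, bool val)
{
	printf("h2g: %s = %d\n", name, val);
	return 0;		/* pretend the firmware accepted it */
}

static int slpc_set_ignore_eff_freq(struct slpc_state *s, bool val)
{
	int ret = set_param("ignore_efficient_freq", val);

	if (!ret)
		s->ignore_eff_freq = val;	/* cache only on success */
	return ret;
}

static void slpc_enable(struct slpc_state *s)
{
	/* firmware rebooted with defaults: replay the cached request */
	set_param("ignore_efficient_freq", s->ignore_eff_freq);
}

int main(void)
{
	struct slpc_state s = { 0 };

	slpc_set_ignore_eff_freq(&s, true);
	slpc_enable(&s);	/* e.g. after a GuC reset */
	return 0;
}
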
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h
index 17ed515f6a85..597eb5413ddf 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h
@@ -46,5 +46,6 @@ void intel_guc_slpc_boost(struct intel_guc_slpc *slpc);
void intel_guc_slpc_dec_waiters(struct intel_guc_slpc *slpc);
int intel_guc_slpc_unset_gucrc_mode(struct intel_guc_slpc *slpc);
int intel_guc_slpc_override_gucrc_mode(struct intel_guc_slpc *slpc, u32 mode);
+int intel_guc_slpc_set_ignore_eff_freq(struct intel_guc_slpc *slpc, bool val);
#endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc_types.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc_types.h
index a6ef53b04e04..a88651331497 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc_types.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc_types.h
@@ -31,6 +31,7 @@ struct intel_guc_slpc {
/* frequency softlimits */
u32 min_freq_softlimit;
u32 max_freq_softlimit;
+ bool ignore_eff_freq;
/* cached media ratio mode */
u32 media_ratio_mode;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 88e881b100cf..a0e3ef1c65d2 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -1402,13 +1402,34 @@ static void __update_guc_busyness_stats(struct intel_guc *guc)
spin_unlock_irqrestore(&guc->timestamp.lock, flags);
}
+static void __guc_context_update_stats(struct intel_context *ce)
+{
+ struct intel_guc *guc = ce_to_guc(ce);
+ unsigned long flags;
+
+ spin_lock_irqsave(&guc->timestamp.lock, flags);
+ lrc_update_runtime(ce);
+ spin_unlock_irqrestore(&guc->timestamp.lock, flags);
+}
+
+static void guc_context_update_stats(struct intel_context *ce)
+{
+ if (!intel_context_pin_if_active(ce))
+ return;
+
+ __guc_context_update_stats(ce);
+ intel_context_unpin(ce);
+}
+
static void guc_timestamp_ping(struct work_struct *wrk)
{
struct intel_guc *guc = container_of(wrk, typeof(*guc),
timestamp.work.work);
struct intel_uc *uc = container_of(guc, typeof(*uc), guc);
struct intel_gt *gt = guc_to_gt(guc);
+ struct intel_context *ce;
intel_wakeref_t wakeref;
+ unsigned long index;
int srcu, ret;
/*
@@ -1424,6 +1445,10 @@ static void guc_timestamp_ping(struct work_struct *wrk)
with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref)
__update_guc_busyness_stats(guc);
+ /* adjust context stats for overflow */
+ xa_for_each(&guc->context_lookup, index, ce)
+ guc_context_update_stats(ce);
+
intel_gt_reset_unlock(gt, srcu);
guc_enable_busyness_worker(guc);
@@ -1629,16 +1654,16 @@ static void guc_reset_state(struct intel_context *ce, u32 head, bool scrub)
static void guc_engine_reset_prepare(struct intel_engine_cs *engine)
{
- if (!IS_GRAPHICS_VER(engine->i915, 11, 12))
- return;
-
- intel_engine_stop_cs(engine);
-
/*
* Wa_22011802037: In addition to stopping the cs, we need
* to wait for any pending mi force wakeups
*/
- intel_engine_wait_for_pending_mi_fw(engine);
+ if (IS_MTL_GRAPHICS_STEP(engine->i915, M, STEP_A0, STEP_B0) ||
+ (GRAPHICS_VER(engine->i915) >= 11 &&
+ GRAPHICS_VER_FULL(engine->i915) < IP_VER(12, 70))) {
+ intel_engine_stop_cs(engine);
+ intel_engine_wait_for_pending_mi_fw(engine);
+ }
}
static void guc_reset_nop(struct intel_engine_cs *engine)
@@ -2774,6 +2799,7 @@ static void guc_context_unpin(struct intel_context *ce)
{
struct intel_guc *guc = ce_to_guc(ce);
+ __guc_context_update_stats(ce);
unpin_guc_id(guc, ce);
lrc_unpin(ce);
@@ -3455,6 +3481,7 @@ static void remove_from_context(struct i915_request *rq)
}
static const struct intel_context_ops guc_context_ops = {
+ .flags = COPS_RUNTIME_CYCLES,
.alloc = guc_context_alloc,
.close = guc_context_close,
@@ -3473,6 +3500,8 @@ static const struct intel_context_ops guc_context_ops = {
.sched_disable = guc_context_sched_disable,
+ .update_stats = guc_context_update_stats,
+
.reset = lrc_reset,
.destroy = guc_context_destroy,
@@ -3728,6 +3757,7 @@ static int guc_virtual_context_alloc(struct intel_context *ce)
}
static const struct intel_context_ops virtual_guc_context_ops = {
+ .flags = COPS_RUNTIME_CYCLES,
.alloc = guc_virtual_context_alloc,
.close = guc_context_close,
@@ -3745,6 +3775,7 @@ static const struct intel_context_ops virtual_guc_context_ops = {
.exit = guc_virtual_context_exit,
.sched_disable = guc_context_sched_disable,
+ .update_stats = guc_context_update_stats,
.destroy = guc_context_destroy,
@@ -4697,13 +4728,37 @@ static void capture_error_state(struct intel_guc *guc,
{
struct intel_gt *gt = guc_to_gt(guc);
struct drm_i915_private *i915 = gt->i915;
- struct intel_engine_cs *engine = __context_to_physical_engine(ce);
intel_wakeref_t wakeref;
+ intel_engine_mask_t engine_mask;
+
+ if (intel_engine_is_virtual(ce->engine)) {
+ struct intel_engine_cs *e;
+ intel_engine_mask_t tmp, virtual_mask = ce->engine->mask;
+
+ engine_mask = 0;
+ for_each_engine_masked(e, ce->engine->gt, virtual_mask, tmp) {
+ bool match = intel_guc_capture_is_matching_engine(gt, ce, e);
+
+ if (match) {
+ intel_engine_set_hung_context(e, ce);
+ engine_mask |= e->mask;
+ atomic_inc(&i915->gpu_error.reset_engine_count[e->uabi_class]);
+ }
+ }
+
+ if (!engine_mask) {
+ guc_warn(guc, "No matching physical engine capture for virtual engine context 0x%04X / %s",
+ ce->guc_id.id, ce->engine->name);
+ engine_mask = ~0U;
+ }
+ } else {
+ intel_engine_set_hung_context(ce->engine, ce);
+ engine_mask = ce->engine->mask;
+ atomic_inc(&i915->gpu_error.reset_engine_count[ce->engine->uabi_class]);
+ }
- intel_engine_set_hung_context(engine, ce);
with_intel_runtime_pm(&i915->runtime_pm, wakeref)
- i915_capture_error_state(gt, engine->mask, CORE_DUMP_FLAG_IS_GUC_CAPTURE);
- atomic_inc(&i915->gpu_error.reset_engine_count[engine->uabi_class]);
+ i915_capture_error_state(gt, engine_mask, CORE_DUMP_FLAG_IS_GUC_CAPTURE);
}
static void guc_context_replay(struct intel_context *ce)
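
capture_error_state() now handles virtual engines by visiting every physical engine behind the virtual mask, accumulating the masks of those whose GuC capture node matches the context, and falling back to all engines if nothing matched. A standalone sketch of that accumulation, with a plain bit-walk in place of for_each_engine_masked() and a faked match test:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool engine_matches(unsigned int engine)	/* stand-in for the capture lookup */
{
	return engine == 2;
}

static uint32_t engines_to_capture(uint32_t virtual_mask)
{
	uint32_t engine_mask = 0;

	/* visit each set bit, i.e. each physical engine behind the virtual one */
	for (uint32_t tmp = virtual_mask; tmp; tmp &= tmp - 1) {
		unsigned int engine = __builtin_ctz(tmp);

		if (engine_matches(engine))
			engine_mask |= 1u << engine;
	}

	return engine_mask ? engine_mask : ~0u;	/* fall back to everything */
}

int main(void)
{
	printf("capture mask: 0x%x\n", engines_to_capture(0x6));	/* engines 1 and 2 */
	return 0;
}
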
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
index 4ccb4be4c9cb..c8b9cbb7ba3a 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
@@ -18,6 +18,7 @@
#include "intel_uc.h"
#include "i915_drv.h"
+#include "i915_hwmon.h"
static const struct intel_uc_ops uc_ops_off;
static const struct intel_uc_ops uc_ops_on;
@@ -431,6 +432,9 @@ static bool uc_is_wopcm_locked(struct intel_uc *uc)
static int __uc_check_hw(struct intel_uc *uc)
{
+ if (uc->fw_table_invalid)
+ return -EIO;
+
if (!intel_uc_supports_guc(uc))
return 0;
@@ -461,6 +465,7 @@ static int __uc_init_hw(struct intel_uc *uc)
struct intel_guc *guc = &uc->guc;
struct intel_huc *huc = &uc->huc;
int ret, attempts;
+ bool pl1en = false;
GEM_BUG_ON(!intel_uc_supports_guc(uc));
GEM_BUG_ON(!intel_uc_wants_guc(uc));
@@ -491,6 +496,9 @@ static int __uc_init_hw(struct intel_uc *uc)
else
attempts = 1;
+ /* Disable a potentially low PL1 power limit to allow freq to be raised */
+ i915_hwmon_power_max_disable(gt->i915, &pl1en);
+
intel_rps_raise_unslice(&uc_to_gt(uc)->rps);
while (attempts--) {
@@ -500,7 +508,7 @@ static int __uc_init_hw(struct intel_uc *uc)
*/
ret = __uc_sanitize(uc);
if (ret)
- goto err_out;
+ goto err_rps;
intel_huc_fw_upload(huc);
intel_guc_ads_reset(guc);
@@ -547,6 +555,8 @@ static int __uc_init_hw(struct intel_uc *uc)
intel_rps_lower_unslice(&uc_to_gt(uc)->rps);
}
+ i915_hwmon_power_max_restore(gt->i915, pl1en);
+
guc_info(guc, "submission %s\n", str_enabled_disabled(intel_uc_uses_guc_submission(uc)));
guc_info(guc, "SLPC %s\n", str_enabled_disabled(intel_uc_uses_guc_slpc(uc)));
@@ -559,10 +569,12 @@ err_submission:
intel_guc_submission_disable(guc);
err_log_capture:
__uc_capture_load_err_log(uc);
-err_out:
+err_rps:
/* Return GT back to RPn */
intel_rps_lower_unslice(&uc_to_gt(uc)->rps);
+ i915_hwmon_power_max_restore(gt->i915, pl1en);
+err_out:
__uc_sanitize(uc);
if (!ret) {
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.h b/drivers/gpu/drm/i915/gt/uc/intel_uc.h
index 5d0f1bcc381e..d585524d94de 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.h
@@ -36,6 +36,7 @@ struct intel_uc {
struct drm_i915_gem_object *load_err_log;
bool reset_in_progress;
+ bool fw_table_invalid;
};
void intel_uc_init_early(struct intel_uc *uc);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
index c36e68e23a14..dc5c96c503a9 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
@@ -17,6 +17,12 @@
#include "i915_drv.h"
#include "i915_reg.h"
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
+#define UNEXPECTED gt_probe_error
+#else
+#define UNEXPECTED gt_notice
+#endif
+
static inline struct intel_gt *
____uc_fw_to_gt(struct intel_uc_fw *uc_fw, enum intel_uc_fw_type type)
{
@@ -79,14 +85,15 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
* security fixes, etc. to be enabled.
*/
#define INTEL_GUC_FIRMWARE_DEFS(fw_def, guc_maj, guc_mmp) \
- fw_def(DG2, 0, guc_maj(dg2, 70, 5)) \
- fw_def(ALDERLAKE_P, 0, guc_maj(adlp, 70, 5)) \
+ fw_def(METEORLAKE, 0, guc_maj(mtl, 70, 6, 6)) \
+ fw_def(DG2, 0, guc_maj(dg2, 70, 5, 1)) \
+ fw_def(ALDERLAKE_P, 0, guc_maj(adlp, 70, 5, 1)) \
fw_def(ALDERLAKE_P, 0, guc_mmp(adlp, 70, 1, 1)) \
fw_def(ALDERLAKE_P, 0, guc_mmp(adlp, 69, 0, 3)) \
- fw_def(ALDERLAKE_S, 0, guc_maj(tgl, 70, 5)) \
+ fw_def(ALDERLAKE_S, 0, guc_maj(tgl, 70, 5, 1)) \
fw_def(ALDERLAKE_S, 0, guc_mmp(tgl, 70, 1, 1)) \
fw_def(ALDERLAKE_S, 0, guc_mmp(tgl, 69, 0, 3)) \
- fw_def(DG1, 0, guc_maj(dg1, 70, 5)) \
+ fw_def(DG1, 0, guc_maj(dg1, 70, 5, 1)) \
fw_def(ROCKETLAKE, 0, guc_mmp(tgl, 70, 1, 1)) \
fw_def(TIGERLAKE, 0, guc_mmp(tgl, 70, 1, 1)) \
fw_def(JASPERLAKE, 0, guc_mmp(ehl, 70, 1, 1)) \
@@ -140,7 +147,7 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
__stringify(patch_) ".bin"
/* Minor for internal driver use, not part of file name */
-#define MAKE_GUC_FW_PATH_MAJOR(prefix_, major_, minor_) \
+#define MAKE_GUC_FW_PATH_MAJOR(prefix_, major_, minor_, patch_) \
__MAKE_UC_FW_PATH_MAJOR(prefix_, "guc", major_)
#define MAKE_GUC_FW_PATH_MMP(prefix_, major_, minor_, patch_) \
@@ -196,9 +203,9 @@ struct __packed uc_fw_blob {
{ UC_FW_BLOB_BASE(major_, minor_, patch_, path_) \
.legacy = true }
-#define GUC_FW_BLOB(prefix_, major_, minor_) \
- UC_FW_BLOB_NEW(major_, minor_, 0, false, \
- MAKE_GUC_FW_PATH_MAJOR(prefix_, major_, minor_))
+#define GUC_FW_BLOB(prefix_, major_, minor_, patch_) \
+ UC_FW_BLOB_NEW(major_, minor_, patch_, false, \
+ MAKE_GUC_FW_PATH_MAJOR(prefix_, major_, minor_, patch_))
#define GUC_FW_BLOB_MMP(prefix_, major_, minor_, patch_) \
UC_FW_BLOB_OLD(major_, minor_, patch_, \
@@ -232,20 +239,22 @@ struct fw_blobs_by_type {
u32 count;
};
+static const struct uc_fw_platform_requirement blobs_guc[] = {
+ INTEL_GUC_FIRMWARE_DEFS(MAKE_FW_LIST, GUC_FW_BLOB, GUC_FW_BLOB_MMP)
+};
+
+static const struct uc_fw_platform_requirement blobs_huc[] = {
+ INTEL_HUC_FIRMWARE_DEFS(MAKE_FW_LIST, HUC_FW_BLOB, HUC_FW_BLOB_MMP, HUC_FW_BLOB_GSC)
+};
+
+static const struct fw_blobs_by_type blobs_all[INTEL_UC_FW_NUM_TYPES] = {
+ [INTEL_UC_FW_TYPE_GUC] = { blobs_guc, ARRAY_SIZE(blobs_guc) },
+ [INTEL_UC_FW_TYPE_HUC] = { blobs_huc, ARRAY_SIZE(blobs_huc) },
+};
+
static void
__uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
{
- static const struct uc_fw_platform_requirement blobs_guc[] = {
- INTEL_GUC_FIRMWARE_DEFS(MAKE_FW_LIST, GUC_FW_BLOB, GUC_FW_BLOB_MMP)
- };
- static const struct uc_fw_platform_requirement blobs_huc[] = {
- INTEL_HUC_FIRMWARE_DEFS(MAKE_FW_LIST, HUC_FW_BLOB, HUC_FW_BLOB_MMP, HUC_FW_BLOB_GSC)
- };
- static const struct fw_blobs_by_type blobs_all[INTEL_UC_FW_NUM_TYPES] = {
- [INTEL_UC_FW_TYPE_GUC] = { blobs_guc, ARRAY_SIZE(blobs_guc) },
- [INTEL_UC_FW_TYPE_HUC] = { blobs_huc, ARRAY_SIZE(blobs_huc) },
- };
- static bool verified[INTEL_UC_FW_NUM_TYPES];
const struct uc_fw_platform_requirement *fw_blobs;
enum intel_platform p = INTEL_INFO(i915)->platform;
u32 fw_count;
@@ -285,6 +294,11 @@ __uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
continue;
if (uc_fw->file_selected.path) {
+ /*
+ * Continuing an earlier search after a found blob failed to load.
+ * Once the previously chosen path has been found, clear it out
+ * and let the search continue from there.
+ */
if (uc_fw->file_selected.path == blob->path)
uc_fw->file_selected.path = NULL;
@@ -295,6 +309,7 @@ __uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
uc_fw->file_wanted.path = blob->path;
uc_fw->file_wanted.ver.major = blob->major;
uc_fw->file_wanted.ver.minor = blob->minor;
+ uc_fw->file_wanted.ver.patch = blob->patch;
uc_fw->loaded_via_gsc = blob->loaded_via_gsc;
found = true;
break;
@@ -304,76 +319,111 @@ __uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
/* Failed to find a match for the last attempt?! */
uc_fw->file_selected.path = NULL;
}
+}
- /* make sure the list is ordered as expected */
- if (IS_ENABLED(CONFIG_DRM_I915_SELFTEST) && !verified[uc_fw->type]) {
- verified[uc_fw->type] = true;
+static bool validate_fw_table_type(struct drm_i915_private *i915, enum intel_uc_fw_type type)
+{
+ const struct uc_fw_platform_requirement *fw_blobs;
+ u32 fw_count;
+ int i, j;
+
+ if (type >= ARRAY_SIZE(blobs_all)) {
+ drm_err(&i915->drm, "No blob array for %s\n", intel_uc_fw_type_repr(type));
+ return false;
+ }
+
+ fw_blobs = blobs_all[type].blobs;
+ fw_count = blobs_all[type].count;
+
+ if (!fw_count)
+ return true;
- for (i = 1; i < fw_count; i++) {
- /* Next platform is good: */
- if (fw_blobs[i].p < fw_blobs[i - 1].p)
+ /* make sure the list is ordered as expected */
+ for (i = 1; i < fw_count; i++) {
+ /* Versionless file names must be unique per platform: */
+ for (j = i + 1; j < fw_count; j++) {
+ /* Same platform? */
+ if (fw_blobs[i].p != fw_blobs[j].p)
continue;
- /* Next platform revision is good: */
- if (fw_blobs[i].p == fw_blobs[i - 1].p &&
- fw_blobs[i].rev < fw_blobs[i - 1].rev)
+ if (fw_blobs[i].blob.path != fw_blobs[j].blob.path)
continue;
- /* Platform/revision must be in order: */
- if (fw_blobs[i].p != fw_blobs[i - 1].p ||
- fw_blobs[i].rev != fw_blobs[i - 1].rev)
- goto bad;
+ drm_err(&i915->drm, "Duplicate %s blobs: %s r%u %s%d.%d.%d [%s] matches %s%d.%d.%d [%s]\n",
+ intel_uc_fw_type_repr(type),
+ intel_platform_name(fw_blobs[j].p), fw_blobs[j].rev,
+ fw_blobs[j].blob.legacy ? "L" : "v",
+ fw_blobs[j].blob.major, fw_blobs[j].blob.minor,
+ fw_blobs[j].blob.patch, fw_blobs[j].blob.path,
+ fw_blobs[i].blob.legacy ? "L" : "v",
+ fw_blobs[i].blob.major, fw_blobs[i].blob.minor,
+ fw_blobs[i].blob.patch, fw_blobs[i].blob.path);
+ }
- /* Next major version is good: */
- if (fw_blobs[i].blob.major < fw_blobs[i - 1].blob.major)
- continue;
+ /* Next platform is good: */
+ if (fw_blobs[i].p < fw_blobs[i - 1].p)
+ continue;
- /* New must be before legacy: */
- if (!fw_blobs[i].blob.legacy && fw_blobs[i - 1].blob.legacy)
- goto bad;
+ /* Next platform revision is good: */
+ if (fw_blobs[i].p == fw_blobs[i - 1].p &&
+ fw_blobs[i].rev < fw_blobs[i - 1].rev)
+ continue;
- /* New to legacy also means 0.0 to X.Y (HuC), or X.0 to X.Y (GuC) */
- if (fw_blobs[i].blob.legacy && !fw_blobs[i - 1].blob.legacy) {
- if (!fw_blobs[i - 1].blob.major)
- continue;
+ /* Platform/revision must be in order: */
+ if (fw_blobs[i].p != fw_blobs[i - 1].p ||
+ fw_blobs[i].rev != fw_blobs[i - 1].rev)
+ goto bad;
- if (fw_blobs[i].blob.major == fw_blobs[i - 1].blob.major)
- continue;
- }
+ /* Next major version is good: */
+ if (fw_blobs[i].blob.major < fw_blobs[i - 1].blob.major)
+ continue;
- /* Major versions must be in order: */
- if (fw_blobs[i].blob.major != fw_blobs[i - 1].blob.major)
- goto bad;
+ /* New must be before legacy: */
+ if (!fw_blobs[i].blob.legacy && fw_blobs[i - 1].blob.legacy)
+ goto bad;
- /* Next minor version is good: */
- if (fw_blobs[i].blob.minor < fw_blobs[i - 1].blob.minor)
+ /* New to legacy also means 0.0 to X.Y (HuC), or X.0 to X.Y (GuC) */
+ if (fw_blobs[i].blob.legacy && !fw_blobs[i - 1].blob.legacy) {
+ if (!fw_blobs[i - 1].blob.major)
continue;
- /* Minor versions must be in order: */
- if (fw_blobs[i].blob.minor != fw_blobs[i - 1].blob.minor)
- goto bad;
-
- /* Patch versions must be in order: */
- if (fw_blobs[i].blob.patch <= fw_blobs[i - 1].blob.patch)
+ if (fw_blobs[i].blob.major == fw_blobs[i - 1].blob.major)
continue;
+ }
-bad:
- drm_err(&i915->drm, "Invalid %s blob order: %s r%u %s%d.%d.%d comes before %s r%u %s%d.%d.%d\n",
- intel_uc_fw_type_repr(uc_fw->type),
- intel_platform_name(fw_blobs[i - 1].p), fw_blobs[i - 1].rev,
- fw_blobs[i - 1].blob.legacy ? "L" : "v",
- fw_blobs[i - 1].blob.major,
- fw_blobs[i - 1].blob.minor,
- fw_blobs[i - 1].blob.patch,
- intel_platform_name(fw_blobs[i].p), fw_blobs[i].rev,
- fw_blobs[i].blob.legacy ? "L" : "v",
- fw_blobs[i].blob.major,
- fw_blobs[i].blob.minor,
- fw_blobs[i].blob.patch);
+ /* Major versions must be in order: */
+ if (fw_blobs[i].blob.major != fw_blobs[i - 1].blob.major)
+ goto bad;
- uc_fw->file_selected.path = NULL;
- }
+ /* Next minor version is good: */
+ if (fw_blobs[i].blob.minor < fw_blobs[i - 1].blob.minor)
+ continue;
+
+ /* Minor versions must be in order: */
+ if (fw_blobs[i].blob.minor != fw_blobs[i - 1].blob.minor)
+ goto bad;
+
+ /* Patch versions must be in order and unique: */
+ if (fw_blobs[i].blob.patch < fw_blobs[i - 1].blob.patch)
+ continue;
+
+bad:
+ drm_err(&i915->drm, "Invalid %s blob order: %s r%u %s%d.%d.%d comes before %s r%u %s%d.%d.%d\n",
+ intel_uc_fw_type_repr(type),
+ intel_platform_name(fw_blobs[i - 1].p), fw_blobs[i - 1].rev,
+ fw_blobs[i - 1].blob.legacy ? "L" : "v",
+ fw_blobs[i - 1].blob.major,
+ fw_blobs[i - 1].blob.minor,
+ fw_blobs[i - 1].blob.patch,
+ intel_platform_name(fw_blobs[i].p), fw_blobs[i].rev,
+ fw_blobs[i].blob.legacy ? "L" : "v",
+ fw_blobs[i].blob.major,
+ fw_blobs[i].blob.minor,
+ fw_blobs[i].blob.patch);
+ return false;
}
+
+ return true;
}
static const char *__override_guc_firmware_path(struct drm_i915_private *i915)
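
The ordering contract validate_fw_table_type() enforces is "newest first": entries must descend by platform, then revision, then major/minor/patch version. A minimal standalone model of that check (simplified: the new-before-legacy exceptions and the duplicate-path scan are omitted, and struct blob is a stand-in for uc_fw_platform_requirement):

#include <stdbool.h>
#include <stdio.h>

struct blob { int p, rev, major, minor, patch; };

/* True if the table is strictly descending, newest first. */
static bool table_ordered(const struct blob *b, int n)
{
	for (int i = 1; i < n; i++) {
		const struct blob *prev = &b[i - 1], *cur = &b[i];

		if (cur->p != prev->p) {
			if (cur->p > prev->p)
				return false;	/* platforms out of order */
			continue;
		}
		if (cur->rev != prev->rev) {
			if (cur->rev > prev->rev)
				return false;
			continue;
		}
		if (cur->major != prev->major) {
			if (cur->major > prev->major)
				return false;
			continue;
		}
		if (cur->minor != prev->minor) {
			if (cur->minor > prev->minor)
				return false;
			continue;
		}
		if (cur->patch >= prev->patch)
			return false;	/* patch must descend and be unique */
	}
	return true;
}

int main(void)
{
	const struct blob ok[] = { {9, 0, 70, 5, 1}, {9, 0, 70, 5, 0}, {8, 0, 70, 1, 1} };
	const struct blob bad[] = { {9, 0, 70, 5, 0}, {9, 0, 70, 5, 1} };

	printf("%d %d\n", table_ordered(ok, 3), table_ordered(bad, 2)); /* 1 0 */
	return 0;
}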
@@ -428,7 +478,8 @@ static void __uc_fw_user_override(struct drm_i915_private *i915, struct intel_uc
void intel_uc_fw_init_early(struct intel_uc_fw *uc_fw,
enum intel_uc_fw_type type)
{
- struct drm_i915_private *i915 = ____uc_fw_to_gt(uc_fw, type)->i915;
+ struct intel_gt *gt = ____uc_fw_to_gt(uc_fw, type);
+ struct drm_i915_private *i915 = gt->i915;
/*
* we use FIRMWARE_UNINITIALIZED to detect checks against uc_fw->status
@@ -441,6 +492,12 @@ void intel_uc_fw_init_early(struct intel_uc_fw *uc_fw,
uc_fw->type = type;
if (HAS_GT_UC(i915)) {
+ if (!validate_fw_table_type(i915, type)) {
+ gt->uc.fw_table_invalid = true;
+ intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_NOT_SUPPORTED);
+ return;
+ }
+
__uc_fw_auto_select(i915, uc_fw);
__uc_fw_user_override(i915, uc_fw);
}
@@ -782,10 +839,10 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
if (uc_fw->file_wanted.ver.major && uc_fw->file_selected.ver.major) {
/* Check the file's major version was as it claimed */
if (uc_fw->file_selected.ver.major != uc_fw->file_wanted.ver.major) {
- gt_notice(gt, "%s firmware %s: unexpected version: %u.%u != %u.%u\n",
- intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
- uc_fw->file_selected.ver.major, uc_fw->file_selected.ver.minor,
- uc_fw->file_wanted.ver.major, uc_fw->file_wanted.ver.minor);
+ UNEXPECTED(gt, "%s firmware %s: unexpected version: %u.%u != %u.%u\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
+ uc_fw->file_selected.ver.major, uc_fw->file_selected.ver.minor,
+ uc_fw->file_wanted.ver.major, uc_fw->file_wanted.ver.minor);
if (!intel_uc_fw_is_overridden(uc_fw)) {
err = -ENOEXEC;
goto fail;
@@ -793,6 +850,9 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
} else {
if (uc_fw->file_selected.ver.minor < uc_fw->file_wanted.ver.minor)
old_ver = true;
+ else if ((uc_fw->file_selected.ver.minor == uc_fw->file_wanted.ver.minor) &&
+ (uc_fw->file_selected.ver.patch < uc_fw->file_wanted.ver.patch))
+ old_ver = true;
}
}
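
The old_ver test added above is effectively a lexicographic comparison on (minor, patch) once the major versions already match. A runnable sketch of just that predicate (struct ver is a stand-in for the driver's version struct):

#include <stdbool.h>
#include <stdio.h>

struct ver { unsigned int major, minor, patch; };

/* True when the blob found on disk is older than the recommended one;
 * differing majors are handled separately in intel_uc_fw_fetch(). */
static bool is_old_ver(struct ver sel, struct ver want)
{
	if (sel.minor < want.minor)
		return true;
	return sel.minor == want.minor && sel.patch < want.patch;
}

int main(void)
{
	struct ver want = { 70, 5, 1 };
	struct ver sel = { 70, 5, 0 };

	printf("%d\n", is_old_ver(sel, want));	/* 1: same minor, older patch */
	return 0;
}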
@@ -800,12 +860,16 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
/* Preserve the version that was really wanted */
memcpy(&uc_fw->file_wanted, &file_ideal, sizeof(uc_fw->file_wanted));
- gt_notice(gt, "%s firmware %s (%d.%d) is recommended, but only %s (%d.%d) was found\n",
- intel_uc_fw_type_repr(uc_fw->type),
- uc_fw->file_wanted.path,
- uc_fw->file_wanted.ver.major, uc_fw->file_wanted.ver.minor,
- uc_fw->file_selected.path,
- uc_fw->file_selected.ver.major, uc_fw->file_selected.ver.minor);
+ UNEXPECTED(gt, "%s firmware %s (%d.%d.%d) is recommended, but only %s (%d.%d.%d) was found\n",
+ intel_uc_fw_type_repr(uc_fw->type),
+ uc_fw->file_wanted.path,
+ uc_fw->file_wanted.ver.major,
+ uc_fw->file_wanted.ver.minor,
+ uc_fw->file_wanted.ver.patch,
+ uc_fw->file_selected.path,
+ uc_fw->file_selected.ver.major,
+ uc_fw->file_selected.ver.minor,
+ uc_fw->file_selected.ver.patch);
gt_info(gt, "Consider updating your linux-firmware pkg or downloading from %s\n",
INTEL_UC_FIRMWARE_URL);
}
@@ -893,9 +957,15 @@ static void uc_fw_bind_ggtt(struct intel_uc_fw *uc_fw)
pte_flags |= PTE_LM;
if (ggtt->vm.raw_insert_entries)
- ggtt->vm.raw_insert_entries(&ggtt->vm, dummy, I915_CACHE_NONE, pte_flags);
+ ggtt->vm.raw_insert_entries(&ggtt->vm, dummy,
+ i915_gem_get_pat_index(ggtt->vm.i915,
+ I915_CACHE_NONE),
+ pte_flags);
else
- ggtt->vm.insert_entries(&ggtt->vm, dummy, I915_CACHE_NONE, pte_flags);
+ ggtt->vm.insert_entries(&ggtt->vm, dummy,
+ i915_gem_get_pat_index(ggtt->vm.i915,
+ I915_CACHE_NONE),
+ pte_flags);
}
static void uc_fw_unbind_ggtt(struct intel_uc_fw *uc_fw)
diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c
index 076c779f776a..eedd1865bb98 100644
--- a/drivers/gpu/drm/i915/gvt/aperture_gm.c
+++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c
@@ -330,7 +330,7 @@ void intel_vgpu_reset_resource(struct intel_vgpu *vgpu)
/**
* intel_vgpu_alloc_resource() - allocate HW resource for a vGPU
* @vgpu: vGPU
- * @param: vGPU creation params
+ * @conf: vGPU creation params
*
* This function is used to allocate HW resource for a vGPU. User specifies
* the resource configuration through the creation params.
diff --git a/drivers/gpu/drm/i915/i915_active.h b/drivers/gpu/drm/i915/i915_active.h
index 7eb44132183a..77c676ecc263 100644
--- a/drivers/gpu/drm/i915/i915_active.h
+++ b/drivers/gpu/drm/i915/i915_active.h
@@ -49,9 +49,9 @@ void i915_active_noop(struct dma_fence *fence, struct dma_fence_cb *cb);
/**
* __i915_active_fence_init - prepares the activity tracker for use
- * @active - the active tracker
- * @fence - initial fence to track, can be NULL
- * @func - a callback when then the tracker is retired (becomes idle),
+ * @active: the active tracker
+ * @fence: initial fence to track, can be NULL
+ * @fn: a callback invoked when the tracker is retired (becomes idle),
* can be NULL
*
* i915_active_fence_init() prepares the embedded @active struct for use as
@@ -77,8 +77,8 @@ __i915_active_fence_set(struct i915_active_fence *active,
/**
* i915_active_fence_set - updates the tracker to watch the current fence
- * @active - the active tracker
- * @rq - the request to watch
+ * @active: the active tracker
+ * @rq: the request to watch
*
* i915_active_fence_set() watches the given @rq for completion. While
* that @rq is busy, the @active reports busy. When that @rq is signaled
@@ -89,7 +89,7 @@ i915_active_fence_set(struct i915_active_fence *active,
struct i915_request *rq);
/**
* i915_active_fence_get - return a reference to the active fence
- * @active - the active tracker
+ * @active: the active tracker
*
* i915_active_fence_get() returns a reference to the active fence,
* or NULL if the active tracker is idle. The reference is obtained under RCU,
@@ -111,7 +111,7 @@ i915_active_fence_get(struct i915_active_fence *active)
/**
* i915_active_fence_isset - report whether the active tracker is assigned
- * @active - the active tracker
+ * @active: the active tracker
*
* i915_active_fence_isset() returns true if the active tracker is currently
* assigned to a fence. Due to the lazy retiring, that fence may be idle
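
For reference, a kernel-style sketch of how the calls documented above fit together (illustrative only; locking, error handling, and the retire callback are elided, and the function is hypothetical):

static void example_track(struct i915_active_fence *tracker,
			  struct i915_request *rq)
{
	/* Prepare the tracker: idle, no initial fence, no retire callback. */
	__i915_active_fence_init(tracker, NULL, NULL);

	/* Watch @rq; the tracker reports busy until @rq signals. */
	i915_active_fence_set(tracker, rq);

	/* Peek at the tracked fence; the reference is obtained under RCU. */
	struct dma_fence *f = i915_active_fence_get(tracker);
	if (f) {
		/* ... wait on or inspect f ... */
		dma_fence_put(f);
	}
}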
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 80c2bf98e341..e39937c82d73 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -138,21 +138,54 @@ static const char *stringify_vma_type(const struct i915_vma *vma)
return "ppgtt";
}
-static const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
-{
- switch (type) {
- case I915_CACHE_NONE: return " uncached";
- case I915_CACHE_LLC: return HAS_LLC(i915) ? " LLC" : " snooped";
- case I915_CACHE_L3_LLC: return " L3+LLC";
- case I915_CACHE_WT: return " WT";
- default: return "";
+static const char *i915_cache_level_str(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *i915 = obj_to_i915(obj);
+
+ if (IS_METEORLAKE(i915)) {
+ switch (obj->pat_index) {
+ case 0: return " WB";
+ case 1: return " WT";
+ case 2: return " UC";
+ case 3: return " WB (1-Way Coh)";
+ case 4: return " WB (2-Way Coh)";
+ default: return " not defined";
+ }
+ } else if (IS_PONTEVECCHIO(i915)) {
+ switch (obj->pat_index) {
+ case 0: return " UC";
+ case 1: return " WC";
+ case 2: return " WT";
+ case 3: return " WB";
+ case 4: return " WT (CLOS1)";
+ case 5: return " WB (CLOS1)";
+ case 6: return " WT (CLOS2)";
+ case 7: return " WB (CLOS2)";
+ default: return " not defined";
+ }
+ } else if (GRAPHICS_VER(i915) >= 12) {
+ switch (obj->pat_index) {
+ case 0: return " WB";
+ case 1: return " WC";
+ case 2: return " WT";
+ case 3: return " UC";
+ default: return " not defined";
+ }
+ } else {
+ switch (obj->pat_index) {
+ case 0: return " UC";
+ case 1: return HAS_LLC(i915) ?
+ " LLC" : " snooped";
+ case 2: return " L3+LLC";
+ case 3: return " WT";
+ default: return " not defined";
+ }
}
}
void
i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
- struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
struct i915_vma *vma;
int pin_count = 0;
@@ -164,7 +197,7 @@ i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
obj->base.size / 1024,
obj->read_domains,
obj->write_domain,
- i915_cache_level_str(dev_priv, obj->cache_level),
+ i915_cache_level_str(obj),
obj->mm.dirty ? " dirty" : "",
obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
if (obj->base.name)
diff --git a/drivers/gpu/drm/i915/i915_drm_client.c b/drivers/gpu/drm/i915/i915_drm_client.c
index e8fa172ebe5e..d18d0a3ed905 100644
--- a/drivers/gpu/drm/i915/i915_drm_client.c
+++ b/drivers/gpu/drm/i915/i915_drm_client.c
@@ -147,11 +147,7 @@ void i915_drm_client_fdinfo(struct seq_file *m, struct file *f)
PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
seq_printf(m, "drm-client-id:\t%u\n", client->id);
- /*
- * Temporarily skip showing client engine information with GuC submission till
- * fetching engine busyness is implemented in the GuC submission backend
- */
- if (GRAPHICS_VER(i915) < 8 || intel_uc_uses_guc_submission(&i915->gt0.uc))
+ if (GRAPHICS_VER(i915) < 8)
return;
for (i = 0; i < ARRAY_SIZE(uabi_class_names); i++)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index e771fdc3099c..f23b030aaf09 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -381,11 +381,11 @@ static inline struct intel_gt *to_gt(struct drm_i915_private *i915)
}
/* Simple iterator over all initialised engines */
-#define for_each_engine(engine__, dev_priv__, id__) \
+#define for_each_engine(engine__, gt__, id__) \
for ((id__) = 0; \
(id__) < I915_NUM_ENGINES; \
(id__)++) \
- for_each_if ((engine__) = (dev_priv__)->engine[(id__)])
+ for_each_if ((engine__) = (gt__)->engine[(id__)])
/* Iterator over subset of engines selected by mask */
#define for_each_engine_masked(engine__, gt__, mask__, tmp__) \
@@ -407,11 +407,11 @@ static inline struct intel_gt *to_gt(struct drm_i915_private *i915)
(engine__) && (engine__)->uabi_class == (class__); \
(engine__) = rb_to_uabi_engine(rb_next(&(engine__)->uabi_node)))
-#define INTEL_INFO(dev_priv) (&(dev_priv)->__info)
-#define RUNTIME_INFO(dev_priv) (&(dev_priv)->__runtime)
-#define DRIVER_CAPS(dev_priv) (&(dev_priv)->caps)
+#define INTEL_INFO(i915) (&(i915)->__info)
+#define RUNTIME_INFO(i915) (&(i915)->__runtime)
+#define DRIVER_CAPS(i915) (&(i915)->caps)
-#define INTEL_DEVID(dev_priv) (RUNTIME_INFO(dev_priv)->device_id)
+#define INTEL_DEVID(i915) (RUNTIME_INFO(i915)->device_id)
#define IP_VER(ver, rel) ((ver) << 8 | (rel))
@@ -431,7 +431,7 @@ static inline struct intel_gt *to_gt(struct drm_i915_private *i915)
#define IS_DISPLAY_VER(i915, from, until) \
(DISPLAY_VER(i915) >= (from) && DISPLAY_VER(i915) <= (until))
-#define INTEL_REVID(dev_priv) (to_pci_dev((dev_priv)->drm.dev)->revision)
+#define INTEL_REVID(i915) (to_pci_dev((i915)->drm.dev)->revision)
#define INTEL_DISPLAY_STEP(__i915) (RUNTIME_INFO(__i915)->step.display_step)
#define INTEL_GRAPHICS_STEP(__i915) (RUNTIME_INFO(__i915)->step.graphics_step)
@@ -516,135 +516,135 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
return ((mask << (msb - pb)) & (mask << (msb - s))) & BIT(msb);
}
-#define IS_MOBILE(dev_priv) (INTEL_INFO(dev_priv)->is_mobile)
-#define IS_DGFX(dev_priv) (INTEL_INFO(dev_priv)->is_dgfx)
-
-#define IS_I830(dev_priv) IS_PLATFORM(dev_priv, INTEL_I830)
-#define IS_I845G(dev_priv) IS_PLATFORM(dev_priv, INTEL_I845G)
-#define IS_I85X(dev_priv) IS_PLATFORM(dev_priv, INTEL_I85X)
-#define IS_I865G(dev_priv) IS_PLATFORM(dev_priv, INTEL_I865G)
-#define IS_I915G(dev_priv) IS_PLATFORM(dev_priv, INTEL_I915G)
-#define IS_I915GM(dev_priv) IS_PLATFORM(dev_priv, INTEL_I915GM)
-#define IS_I945G(dev_priv) IS_PLATFORM(dev_priv, INTEL_I945G)
-#define IS_I945GM(dev_priv) IS_PLATFORM(dev_priv, INTEL_I945GM)
-#define IS_I965G(dev_priv) IS_PLATFORM(dev_priv, INTEL_I965G)
-#define IS_I965GM(dev_priv) IS_PLATFORM(dev_priv, INTEL_I965GM)
-#define IS_G45(dev_priv) IS_PLATFORM(dev_priv, INTEL_G45)
-#define IS_GM45(dev_priv) IS_PLATFORM(dev_priv, INTEL_GM45)
-#define IS_G4X(dev_priv) (IS_G45(dev_priv) || IS_GM45(dev_priv))
-#define IS_PINEVIEW(dev_priv) IS_PLATFORM(dev_priv, INTEL_PINEVIEW)
-#define IS_G33(dev_priv) IS_PLATFORM(dev_priv, INTEL_G33)
-#define IS_IRONLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_IRONLAKE)
-#define IS_IRONLAKE_M(dev_priv) \
- (IS_PLATFORM(dev_priv, INTEL_IRONLAKE) && IS_MOBILE(dev_priv))
-#define IS_SANDYBRIDGE(dev_priv) IS_PLATFORM(dev_priv, INTEL_SANDYBRIDGE)
-#define IS_IVYBRIDGE(dev_priv) IS_PLATFORM(dev_priv, INTEL_IVYBRIDGE)
-#define IS_IVB_GT1(dev_priv) (IS_IVYBRIDGE(dev_priv) && \
- INTEL_INFO(dev_priv)->gt == 1)
-#define IS_VALLEYVIEW(dev_priv) IS_PLATFORM(dev_priv, INTEL_VALLEYVIEW)
-#define IS_CHERRYVIEW(dev_priv) IS_PLATFORM(dev_priv, INTEL_CHERRYVIEW)
-#define IS_HASWELL(dev_priv) IS_PLATFORM(dev_priv, INTEL_HASWELL)
-#define IS_BROADWELL(dev_priv) IS_PLATFORM(dev_priv, INTEL_BROADWELL)
-#define IS_SKYLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_SKYLAKE)
-#define IS_BROXTON(dev_priv) IS_PLATFORM(dev_priv, INTEL_BROXTON)
-#define IS_KABYLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_KABYLAKE)
-#define IS_GEMINILAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_GEMINILAKE)
-#define IS_COFFEELAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_COFFEELAKE)
-#define IS_COMETLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_COMETLAKE)
-#define IS_ICELAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_ICELAKE)
-#define IS_JSL_EHL(dev_priv) (IS_PLATFORM(dev_priv, INTEL_JASPERLAKE) || \
- IS_PLATFORM(dev_priv, INTEL_ELKHARTLAKE))
-#define IS_TIGERLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_TIGERLAKE)
-#define IS_ROCKETLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_ROCKETLAKE)
-#define IS_DG1(dev_priv) IS_PLATFORM(dev_priv, INTEL_DG1)
-#define IS_ALDERLAKE_S(dev_priv) IS_PLATFORM(dev_priv, INTEL_ALDERLAKE_S)
-#define IS_ALDERLAKE_P(dev_priv) IS_PLATFORM(dev_priv, INTEL_ALDERLAKE_P)
-#define IS_XEHPSDV(dev_priv) IS_PLATFORM(dev_priv, INTEL_XEHPSDV)
-#define IS_DG2(dev_priv) IS_PLATFORM(dev_priv, INTEL_DG2)
-#define IS_PONTEVECCHIO(dev_priv) IS_PLATFORM(dev_priv, INTEL_PONTEVECCHIO)
-#define IS_METEORLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_METEORLAKE)
-
-#define IS_METEORLAKE_M(dev_priv) \
- IS_SUBPLATFORM(dev_priv, INTEL_METEORLAKE, INTEL_SUBPLATFORM_M)
-#define IS_METEORLAKE_P(dev_priv) \
- IS_SUBPLATFORM(dev_priv, INTEL_METEORLAKE, INTEL_SUBPLATFORM_P)
-#define IS_DG2_G10(dev_priv) \
- IS_SUBPLATFORM(dev_priv, INTEL_DG2, INTEL_SUBPLATFORM_G10)
-#define IS_DG2_G11(dev_priv) \
- IS_SUBPLATFORM(dev_priv, INTEL_DG2, INTEL_SUBPLATFORM_G11)
-#define IS_DG2_G12(dev_priv) \
- IS_SUBPLATFORM(dev_priv, INTEL_DG2, INTEL_SUBPLATFORM_G12)
-#define IS_ADLS_RPLS(dev_priv) \
- IS_SUBPLATFORM(dev_priv, INTEL_ALDERLAKE_S, INTEL_SUBPLATFORM_RPL)
-#define IS_ADLP_N(dev_priv) \
- IS_SUBPLATFORM(dev_priv, INTEL_ALDERLAKE_P, INTEL_SUBPLATFORM_N)
-#define IS_ADLP_RPLP(dev_priv) \
- IS_SUBPLATFORM(dev_priv, INTEL_ALDERLAKE_P, INTEL_SUBPLATFORM_RPL)
-#define IS_ADLP_RPLU(dev_priv) \
- IS_SUBPLATFORM(dev_priv, INTEL_ALDERLAKE_P, INTEL_SUBPLATFORM_RPLU)
-#define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \
- (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00)
-#define IS_BDW_ULT(dev_priv) \
- IS_SUBPLATFORM(dev_priv, INTEL_BROADWELL, INTEL_SUBPLATFORM_ULT)
-#define IS_BDW_ULX(dev_priv) \
- IS_SUBPLATFORM(dev_priv, INTEL_BROADWELL, INTEL_SUBPLATFORM_ULX)
-#define IS_BDW_GT3(dev_priv) (IS_BROADWELL(dev_priv) && \
- INTEL_INFO(dev_priv)->gt == 3)
-#define IS_HSW_ULT(dev_priv) \
- IS_SUBPLATFORM(dev_priv, INTEL_HASWELL, INTEL_SUBPLATFORM_ULT)
-#define IS_HSW_GT3(dev_priv) (IS_HASWELL(dev_priv) && \
- INTEL_INFO(dev_priv)->gt == 3)
-#define IS_HSW_GT1(dev_priv) (IS_HASWELL(dev_priv) && \
- INTEL_INFO(dev_priv)->gt == 1)
+#define IS_MOBILE(i915) (INTEL_INFO(i915)->is_mobile)
+#define IS_DGFX(i915) (INTEL_INFO(i915)->is_dgfx)
+
+#define IS_I830(i915) IS_PLATFORM(i915, INTEL_I830)
+#define IS_I845G(i915) IS_PLATFORM(i915, INTEL_I845G)
+#define IS_I85X(i915) IS_PLATFORM(i915, INTEL_I85X)
+#define IS_I865G(i915) IS_PLATFORM(i915, INTEL_I865G)
+#define IS_I915G(i915) IS_PLATFORM(i915, INTEL_I915G)
+#define IS_I915GM(i915) IS_PLATFORM(i915, INTEL_I915GM)
+#define IS_I945G(i915) IS_PLATFORM(i915, INTEL_I945G)
+#define IS_I945GM(i915) IS_PLATFORM(i915, INTEL_I945GM)
+#define IS_I965G(i915) IS_PLATFORM(i915, INTEL_I965G)
+#define IS_I965GM(i915) IS_PLATFORM(i915, INTEL_I965GM)
+#define IS_G45(i915) IS_PLATFORM(i915, INTEL_G45)
+#define IS_GM45(i915) IS_PLATFORM(i915, INTEL_GM45)
+#define IS_G4X(i915) (IS_G45(i915) || IS_GM45(i915))
+#define IS_PINEVIEW(i915) IS_PLATFORM(i915, INTEL_PINEVIEW)
+#define IS_G33(i915) IS_PLATFORM(i915, INTEL_G33)
+#define IS_IRONLAKE(i915) IS_PLATFORM(i915, INTEL_IRONLAKE)
+#define IS_IRONLAKE_M(i915) \
+ (IS_PLATFORM(i915, INTEL_IRONLAKE) && IS_MOBILE(i915))
+#define IS_SANDYBRIDGE(i915) IS_PLATFORM(i915, INTEL_SANDYBRIDGE)
+#define IS_IVYBRIDGE(i915) IS_PLATFORM(i915, INTEL_IVYBRIDGE)
+#define IS_IVB_GT1(i915) (IS_IVYBRIDGE(i915) && \
+ INTEL_INFO(i915)->gt == 1)
+#define IS_VALLEYVIEW(i915) IS_PLATFORM(i915, INTEL_VALLEYVIEW)
+#define IS_CHERRYVIEW(i915) IS_PLATFORM(i915, INTEL_CHERRYVIEW)
+#define IS_HASWELL(i915) IS_PLATFORM(i915, INTEL_HASWELL)
+#define IS_BROADWELL(i915) IS_PLATFORM(i915, INTEL_BROADWELL)
+#define IS_SKYLAKE(i915) IS_PLATFORM(i915, INTEL_SKYLAKE)
+#define IS_BROXTON(i915) IS_PLATFORM(i915, INTEL_BROXTON)
+#define IS_KABYLAKE(i915) IS_PLATFORM(i915, INTEL_KABYLAKE)
+#define IS_GEMINILAKE(i915) IS_PLATFORM(i915, INTEL_GEMINILAKE)
+#define IS_COFFEELAKE(i915) IS_PLATFORM(i915, INTEL_COFFEELAKE)
+#define IS_COMETLAKE(i915) IS_PLATFORM(i915, INTEL_COMETLAKE)
+#define IS_ICELAKE(i915) IS_PLATFORM(i915, INTEL_ICELAKE)
+#define IS_JSL_EHL(i915) (IS_PLATFORM(i915, INTEL_JASPERLAKE) || \
+ IS_PLATFORM(i915, INTEL_ELKHARTLAKE))
+#define IS_TIGERLAKE(i915) IS_PLATFORM(i915, INTEL_TIGERLAKE)
+#define IS_ROCKETLAKE(i915) IS_PLATFORM(i915, INTEL_ROCKETLAKE)
+#define IS_DG1(i915) IS_PLATFORM(i915, INTEL_DG1)
+#define IS_ALDERLAKE_S(i915) IS_PLATFORM(i915, INTEL_ALDERLAKE_S)
+#define IS_ALDERLAKE_P(i915) IS_PLATFORM(i915, INTEL_ALDERLAKE_P)
+#define IS_XEHPSDV(i915) IS_PLATFORM(i915, INTEL_XEHPSDV)
+#define IS_DG2(i915) IS_PLATFORM(i915, INTEL_DG2)
+#define IS_PONTEVECCHIO(i915) IS_PLATFORM(i915, INTEL_PONTEVECCHIO)
+#define IS_METEORLAKE(i915) IS_PLATFORM(i915, INTEL_METEORLAKE)
+
+#define IS_METEORLAKE_M(i915) \
+ IS_SUBPLATFORM(i915, INTEL_METEORLAKE, INTEL_SUBPLATFORM_M)
+#define IS_METEORLAKE_P(i915) \
+ IS_SUBPLATFORM(i915, INTEL_METEORLAKE, INTEL_SUBPLATFORM_P)
+#define IS_DG2_G10(i915) \
+ IS_SUBPLATFORM(i915, INTEL_DG2, INTEL_SUBPLATFORM_G10)
+#define IS_DG2_G11(i915) \
+ IS_SUBPLATFORM(i915, INTEL_DG2, INTEL_SUBPLATFORM_G11)
+#define IS_DG2_G12(i915) \
+ IS_SUBPLATFORM(i915, INTEL_DG2, INTEL_SUBPLATFORM_G12)
+#define IS_ADLS_RPLS(i915) \
+ IS_SUBPLATFORM(i915, INTEL_ALDERLAKE_S, INTEL_SUBPLATFORM_RPL)
+#define IS_ADLP_N(i915) \
+ IS_SUBPLATFORM(i915, INTEL_ALDERLAKE_P, INTEL_SUBPLATFORM_N)
+#define IS_ADLP_RPLP(i915) \
+ IS_SUBPLATFORM(i915, INTEL_ALDERLAKE_P, INTEL_SUBPLATFORM_RPL)
+#define IS_ADLP_RPLU(i915) \
+ IS_SUBPLATFORM(i915, INTEL_ALDERLAKE_P, INTEL_SUBPLATFORM_RPLU)
+#define IS_HSW_EARLY_SDV(i915) (IS_HASWELL(i915) && \
+ (INTEL_DEVID(i915) & 0xFF00) == 0x0C00)
+#define IS_BDW_ULT(i915) \
+ IS_SUBPLATFORM(i915, INTEL_BROADWELL, INTEL_SUBPLATFORM_ULT)
+#define IS_BDW_ULX(i915) \
+ IS_SUBPLATFORM(i915, INTEL_BROADWELL, INTEL_SUBPLATFORM_ULX)
+#define IS_BDW_GT3(i915) (IS_BROADWELL(i915) && \
+ INTEL_INFO(i915)->gt == 3)
+#define IS_HSW_ULT(i915) \
+ IS_SUBPLATFORM(i915, INTEL_HASWELL, INTEL_SUBPLATFORM_ULT)
+#define IS_HSW_GT3(i915) (IS_HASWELL(i915) && \
+ INTEL_INFO(i915)->gt == 3)
+#define IS_HSW_GT1(i915) (IS_HASWELL(i915) && \
+ INTEL_INFO(i915)->gt == 1)
/* ULX machines are also considered ULT. */
-#define IS_HSW_ULX(dev_priv) \
- IS_SUBPLATFORM(dev_priv, INTEL_HASWELL, INTEL_SUBPLATFORM_ULX)
-#define IS_SKL_ULT(dev_priv) \
- IS_SUBPLATFORM(dev_priv, INTEL_SKYLAKE, INTEL_SUBPLATFORM_ULT)
-#define IS_SKL_ULX(dev_priv) \
- IS_SUBPLATFORM(dev_priv, INTEL_SKYLAKE, INTEL_SUBPLATFORM_ULX)
-#define IS_KBL_ULT(dev_priv) \
- IS_SUBPLATFORM(dev_priv, INTEL_KABYLAKE, INTEL_SUBPLATFORM_ULT)
-#define IS_KBL_ULX(dev_priv) \
- IS_SUBPLATFORM(dev_priv, INTEL_KABYLAKE, INTEL_SUBPLATFORM_ULX)
-#define IS_SKL_GT2(dev_priv) (IS_SKYLAKE(dev_priv) && \
- INTEL_INFO(dev_priv)->gt == 2)
-#define IS_SKL_GT3(dev_priv) (IS_SKYLAKE(dev_priv) && \
- INTEL_INFO(dev_priv)->gt == 3)
-#define IS_SKL_GT4(dev_priv) (IS_SKYLAKE(dev_priv) && \
- INTEL_INFO(dev_priv)->gt == 4)
-#define IS_KBL_GT2(dev_priv) (IS_KABYLAKE(dev_priv) && \
- INTEL_INFO(dev_priv)->gt == 2)
-#define IS_KBL_GT3(dev_priv) (IS_KABYLAKE(dev_priv) && \
- INTEL_INFO(dev_priv)->gt == 3)
-#define IS_CFL_ULT(dev_priv) \
- IS_SUBPLATFORM(dev_priv, INTEL_COFFEELAKE, INTEL_SUBPLATFORM_ULT)
-#define IS_CFL_ULX(dev_priv) \
- IS_SUBPLATFORM(dev_priv, INTEL_COFFEELAKE, INTEL_SUBPLATFORM_ULX)
-#define IS_CFL_GT2(dev_priv) (IS_COFFEELAKE(dev_priv) && \
- INTEL_INFO(dev_priv)->gt == 2)
-#define IS_CFL_GT3(dev_priv) (IS_COFFEELAKE(dev_priv) && \
- INTEL_INFO(dev_priv)->gt == 3)
-
-#define IS_CML_ULT(dev_priv) \
- IS_SUBPLATFORM(dev_priv, INTEL_COMETLAKE, INTEL_SUBPLATFORM_ULT)
-#define IS_CML_ULX(dev_priv) \
- IS_SUBPLATFORM(dev_priv, INTEL_COMETLAKE, INTEL_SUBPLATFORM_ULX)
-#define IS_CML_GT2(dev_priv) (IS_COMETLAKE(dev_priv) && \
- INTEL_INFO(dev_priv)->gt == 2)
-
-#define IS_ICL_WITH_PORT_F(dev_priv) \
- IS_SUBPLATFORM(dev_priv, INTEL_ICELAKE, INTEL_SUBPLATFORM_PORTF)
-
-#define IS_TGL_UY(dev_priv) \
- IS_SUBPLATFORM(dev_priv, INTEL_TIGERLAKE, INTEL_SUBPLATFORM_UY)
+#define IS_HSW_ULX(i915) \
+ IS_SUBPLATFORM(i915, INTEL_HASWELL, INTEL_SUBPLATFORM_ULX)
+#define IS_SKL_ULT(i915) \
+ IS_SUBPLATFORM(i915, INTEL_SKYLAKE, INTEL_SUBPLATFORM_ULT)
+#define IS_SKL_ULX(i915) \
+ IS_SUBPLATFORM(i915, INTEL_SKYLAKE, INTEL_SUBPLATFORM_ULX)
+#define IS_KBL_ULT(i915) \
+ IS_SUBPLATFORM(i915, INTEL_KABYLAKE, INTEL_SUBPLATFORM_ULT)
+#define IS_KBL_ULX(i915) \
+ IS_SUBPLATFORM(i915, INTEL_KABYLAKE, INTEL_SUBPLATFORM_ULX)
+#define IS_SKL_GT2(i915) (IS_SKYLAKE(i915) && \
+ INTEL_INFO(i915)->gt == 2)
+#define IS_SKL_GT3(i915) (IS_SKYLAKE(i915) && \
+ INTEL_INFO(i915)->gt == 3)
+#define IS_SKL_GT4(i915) (IS_SKYLAKE(i915) && \
+ INTEL_INFO(i915)->gt == 4)
+#define IS_KBL_GT2(i915) (IS_KABYLAKE(i915) && \
+ INTEL_INFO(i915)->gt == 2)
+#define IS_KBL_GT3(i915) (IS_KABYLAKE(i915) && \
+ INTEL_INFO(i915)->gt == 3)
+#define IS_CFL_ULT(i915) \
+ IS_SUBPLATFORM(i915, INTEL_COFFEELAKE, INTEL_SUBPLATFORM_ULT)
+#define IS_CFL_ULX(i915) \
+ IS_SUBPLATFORM(i915, INTEL_COFFEELAKE, INTEL_SUBPLATFORM_ULX)
+#define IS_CFL_GT2(i915) (IS_COFFEELAKE(i915) && \
+ INTEL_INFO(i915)->gt == 2)
+#define IS_CFL_GT3(i915) (IS_COFFEELAKE(i915) && \
+ INTEL_INFO(i915)->gt == 3)
+
+#define IS_CML_ULT(i915) \
+ IS_SUBPLATFORM(i915, INTEL_COMETLAKE, INTEL_SUBPLATFORM_ULT)
+#define IS_CML_ULX(i915) \
+ IS_SUBPLATFORM(i915, INTEL_COMETLAKE, INTEL_SUBPLATFORM_ULX)
+#define IS_CML_GT2(i915) (IS_COMETLAKE(i915) && \
+ INTEL_INFO(i915)->gt == 2)
+
+#define IS_ICL_WITH_PORT_F(i915) \
+ IS_SUBPLATFORM(i915, INTEL_ICELAKE, INTEL_SUBPLATFORM_PORTF)
+
+#define IS_TGL_UY(i915) \
+ IS_SUBPLATFORM(i915, INTEL_TIGERLAKE, INTEL_SUBPLATFORM_UY)
#define IS_SKL_GRAPHICS_STEP(p, since, until) (IS_SKYLAKE(p) && IS_GRAPHICS_STEP(p, since, until))
-#define IS_KBL_GRAPHICS_STEP(dev_priv, since, until) \
- (IS_KABYLAKE(dev_priv) && IS_GRAPHICS_STEP(dev_priv, since, until))
-#define IS_KBL_DISPLAY_STEP(dev_priv, since, until) \
- (IS_KABYLAKE(dev_priv) && IS_DISPLAY_STEP(dev_priv, since, until))
+#define IS_KBL_GRAPHICS_STEP(i915, since, until) \
+ (IS_KABYLAKE(i915) && IS_GRAPHICS_STEP(i915, since, until))
+#define IS_KBL_DISPLAY_STEP(i915, since, until) \
+ (IS_KABYLAKE(i915) && IS_DISPLAY_STEP(i915, since, until))
#define IS_JSL_EHL_GRAPHICS_STEP(p, since, until) \
(IS_JSL_EHL(p) && IS_GRAPHICS_STEP(p, since, until))
@@ -720,9 +720,9 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
(IS_PONTEVECCHIO(__i915) && \
IS_GRAPHICS_STEP(__i915, since, until))
-#define IS_LP(dev_priv) (INTEL_INFO(dev_priv)->is_lp)
-#define IS_GEN9_LP(dev_priv) (GRAPHICS_VER(dev_priv) == 9 && IS_LP(dev_priv))
-#define IS_GEN9_BC(dev_priv) (GRAPHICS_VER(dev_priv) == 9 && !IS_LP(dev_priv))
+#define IS_LP(i915) (INTEL_INFO(i915)->is_lp)
+#define IS_GEN9_LP(i915) (GRAPHICS_VER(i915) == 9 && IS_LP(i915))
+#define IS_GEN9_BC(i915) (GRAPHICS_VER(i915) == 9 && !IS_LP(i915))
#define __HAS_ENGINE(engine_mask, id) ((engine_mask) & BIT(id))
#define HAS_ENGINE(gt, id) __HAS_ENGINE((gt)->info.engine_mask, id)
@@ -747,180 +747,180 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define CCS_MASK(gt) \
ENGINE_INSTANCES_MASK(gt, CCS0, I915_MAX_CCS)
-#define HAS_MEDIA_RATIO_MODE(dev_priv) (INTEL_INFO(dev_priv)->has_media_ratio_mode)
+#define HAS_MEDIA_RATIO_MODE(i915) (INTEL_INFO(i915)->has_media_ratio_mode)
/*
* The Gen7 cmdparser copies the scanned buffer to the ggtt for execution
* All later gens can run the final buffer from the ppgtt
*/
-#define CMDPARSER_USES_GGTT(dev_priv) (GRAPHICS_VER(dev_priv) == 7)
+#define CMDPARSER_USES_GGTT(i915) (GRAPHICS_VER(i915) == 7)
-#define HAS_LLC(dev_priv) (INTEL_INFO(dev_priv)->has_llc)
-#define HAS_4TILE(dev_priv) (INTEL_INFO(dev_priv)->has_4tile)
-#define HAS_SNOOP(dev_priv) (INTEL_INFO(dev_priv)->has_snoop)
-#define HAS_EDRAM(dev_priv) ((dev_priv)->edram_size_mb)
-#define HAS_SECURE_BATCHES(dev_priv) (GRAPHICS_VER(dev_priv) < 6)
-#define HAS_WT(dev_priv) HAS_EDRAM(dev_priv)
+#define HAS_LLC(i915) (INTEL_INFO(i915)->has_llc)
+#define HAS_4TILE(i915) (INTEL_INFO(i915)->has_4tile)
+#define HAS_SNOOP(i915) (INTEL_INFO(i915)->has_snoop)
+#define HAS_EDRAM(i915) ((i915)->edram_size_mb)
+#define HAS_SECURE_BATCHES(i915) (GRAPHICS_VER(i915) < 6)
+#define HAS_WT(i915) HAS_EDRAM(i915)
-#define HWS_NEEDS_PHYSICAL(dev_priv) (INTEL_INFO(dev_priv)->hws_needs_physical)
+#define HWS_NEEDS_PHYSICAL(i915) (INTEL_INFO(i915)->hws_needs_physical)
-#define HAS_LOGICAL_RING_CONTEXTS(dev_priv) \
- (INTEL_INFO(dev_priv)->has_logical_ring_contexts)
-#define HAS_LOGICAL_RING_ELSQ(dev_priv) \
- (INTEL_INFO(dev_priv)->has_logical_ring_elsq)
+#define HAS_LOGICAL_RING_CONTEXTS(i915) \
+ (INTEL_INFO(i915)->has_logical_ring_contexts)
+#define HAS_LOGICAL_RING_ELSQ(i915) \
+ (INTEL_INFO(i915)->has_logical_ring_elsq)
-#define HAS_EXECLISTS(dev_priv) HAS_LOGICAL_RING_CONTEXTS(dev_priv)
+#define HAS_EXECLISTS(i915) HAS_LOGICAL_RING_CONTEXTS(i915)
-#define INTEL_PPGTT(dev_priv) (RUNTIME_INFO(dev_priv)->ppgtt_type)
-#define HAS_PPGTT(dev_priv) \
- (INTEL_PPGTT(dev_priv) != INTEL_PPGTT_NONE)
-#define HAS_FULL_PPGTT(dev_priv) \
- (INTEL_PPGTT(dev_priv) >= INTEL_PPGTT_FULL)
+#define INTEL_PPGTT(i915) (RUNTIME_INFO(i915)->ppgtt_type)
+#define HAS_PPGTT(i915) \
+ (INTEL_PPGTT(i915) != INTEL_PPGTT_NONE)
+#define HAS_FULL_PPGTT(i915) \
+ (INTEL_PPGTT(i915) >= INTEL_PPGTT_FULL)
-#define HAS_PAGE_SIZES(dev_priv, sizes) ({ \
+#define HAS_PAGE_SIZES(i915, sizes) ({ \
GEM_BUG_ON((sizes) == 0); \
- ((sizes) & ~RUNTIME_INFO(dev_priv)->page_sizes) == 0; \
+ ((sizes) & ~RUNTIME_INFO(i915)->page_sizes) == 0; \
})
-#define HAS_OVERLAY(dev_priv) (INTEL_INFO(dev_priv)->display.has_overlay)
-#define OVERLAY_NEEDS_PHYSICAL(dev_priv) \
- (INTEL_INFO(dev_priv)->display.overlay_needs_physical)
+#define HAS_OVERLAY(i915) (INTEL_INFO(i915)->display.has_overlay)
+#define OVERLAY_NEEDS_PHYSICAL(i915) \
+ (INTEL_INFO(i915)->display.overlay_needs_physical)
/* Early gen2 have a totally busted CS tlb and require pinned batches. */
-#define HAS_BROKEN_CS_TLB(dev_priv) (IS_I830(dev_priv) || IS_I845G(dev_priv))
+#define HAS_BROKEN_CS_TLB(i915) (IS_I830(i915) || IS_I845G(i915))
-#define NEEDS_RC6_CTX_CORRUPTION_WA(dev_priv) \
- (IS_BROADWELL(dev_priv) || GRAPHICS_VER(dev_priv) == 9)
+#define NEEDS_RC6_CTX_CORRUPTION_WA(i915) \
+ (IS_BROADWELL(i915) || GRAPHICS_VER(i915) == 9)
/* WaRsDisableCoarsePowerGating:skl,cnl */
-#define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \
- (IS_SKL_GT3(dev_priv) || IS_SKL_GT4(dev_priv))
+#define NEEDS_WaRsDisableCoarsePowerGating(i915) \
+ (IS_SKL_GT3(i915) || IS_SKL_GT4(i915))
-#define HAS_GMBUS_IRQ(dev_priv) (DISPLAY_VER(dev_priv) >= 4)
-#define HAS_GMBUS_BURST_READ(dev_priv) (DISPLAY_VER(dev_priv) >= 11 || \
- IS_GEMINILAKE(dev_priv) || \
- IS_KABYLAKE(dev_priv))
+#define HAS_GMBUS_IRQ(i915) (DISPLAY_VER(i915) >= 4)
+#define HAS_GMBUS_BURST_READ(i915) (DISPLAY_VER(i915) >= 11 || \
+ IS_GEMINILAKE(i915) || \
+ IS_KABYLAKE(i915))
/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
* rows, which changed the alignment requirements and fence programming.
*/
-#define HAS_128_BYTE_Y_TILING(dev_priv) (GRAPHICS_VER(dev_priv) != 2 && \
- !(IS_I915G(dev_priv) || IS_I915GM(dev_priv)))
-#define SUPPORTS_TV(dev_priv) (INTEL_INFO(dev_priv)->display.supports_tv)
-#define I915_HAS_HOTPLUG(dev_priv) (INTEL_INFO(dev_priv)->display.has_hotplug)
+#define HAS_128_BYTE_Y_TILING(i915) (GRAPHICS_VER(i915) != 2 && \
+ !(IS_I915G(i915) || IS_I915GM(i915)))
+#define SUPPORTS_TV(i915) (INTEL_INFO(i915)->display.supports_tv)
+#define I915_HAS_HOTPLUG(i915) (INTEL_INFO(i915)->display.has_hotplug)
-#define HAS_FW_BLC(dev_priv) (DISPLAY_VER(dev_priv) > 2)
-#define HAS_FBC(dev_priv) (RUNTIME_INFO(dev_priv)->fbc_mask != 0)
-#define HAS_CUR_FBC(dev_priv) (!HAS_GMCH(dev_priv) && DISPLAY_VER(dev_priv) >= 7)
+#define HAS_FW_BLC(i915) (DISPLAY_VER(i915) > 2)
+#define HAS_FBC(i915) (RUNTIME_INFO(i915)->fbc_mask != 0)
+#define HAS_CUR_FBC(i915) (!HAS_GMCH(i915) && DISPLAY_VER(i915) >= 7)
-#define HAS_DPT(dev_priv) (DISPLAY_VER(dev_priv) >= 13)
+#define HAS_DPT(i915) (DISPLAY_VER(i915) >= 13)
-#define HAS_IPS(dev_priv) (IS_HSW_ULT(dev_priv) || IS_BROADWELL(dev_priv))
+#define HAS_IPS(i915) (IS_HSW_ULT(i915) || IS_BROADWELL(i915))
-#define HAS_DP_MST(dev_priv) (INTEL_INFO(dev_priv)->display.has_dp_mst)
-#define HAS_DP20(dev_priv) (IS_DG2(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
+#define HAS_DP_MST(i915) (INTEL_INFO(i915)->display.has_dp_mst)
+#define HAS_DP20(i915) (IS_DG2(i915) || DISPLAY_VER(i915) >= 14)
-#define HAS_DOUBLE_BUFFERED_M_N(dev_priv) (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
+#define HAS_DOUBLE_BUFFERED_M_N(i915) (DISPLAY_VER(i915) >= 9 || IS_BROADWELL(i915))
-#define HAS_CDCLK_CRAWL(dev_priv) (INTEL_INFO(dev_priv)->display.has_cdclk_crawl)
-#define HAS_CDCLK_SQUASH(dev_priv) (INTEL_INFO(dev_priv)->display.has_cdclk_squash)
-#define HAS_DDI(dev_priv) (INTEL_INFO(dev_priv)->display.has_ddi)
-#define HAS_FPGA_DBG_UNCLAIMED(dev_priv) (INTEL_INFO(dev_priv)->display.has_fpga_dbg)
-#define HAS_PSR(dev_priv) (INTEL_INFO(dev_priv)->display.has_psr)
-#define HAS_PSR_HW_TRACKING(dev_priv) \
- (INTEL_INFO(dev_priv)->display.has_psr_hw_tracking)
-#define HAS_PSR2_SEL_FETCH(dev_priv) (DISPLAY_VER(dev_priv) >= 12)
-#define HAS_TRANSCODER(dev_priv, trans) ((RUNTIME_INFO(dev_priv)->cpu_transcoder_mask & BIT(trans)) != 0)
+#define HAS_CDCLK_CRAWL(i915) (INTEL_INFO(i915)->display.has_cdclk_crawl)
+#define HAS_CDCLK_SQUASH(i915) (INTEL_INFO(i915)->display.has_cdclk_squash)
+#define HAS_DDI(i915) (INTEL_INFO(i915)->display.has_ddi)
+#define HAS_FPGA_DBG_UNCLAIMED(i915) (INTEL_INFO(i915)->display.has_fpga_dbg)
+#define HAS_PSR(i915) (INTEL_INFO(i915)->display.has_psr)
+#define HAS_PSR_HW_TRACKING(i915) \
+ (INTEL_INFO(i915)->display.has_psr_hw_tracking)
+#define HAS_PSR2_SEL_FETCH(i915) (DISPLAY_VER(i915) >= 12)
+#define HAS_TRANSCODER(i915, trans) ((RUNTIME_INFO(i915)->cpu_transcoder_mask & BIT(trans)) != 0)
-#define HAS_RC6(dev_priv) (INTEL_INFO(dev_priv)->has_rc6)
-#define HAS_RC6p(dev_priv) (INTEL_INFO(dev_priv)->has_rc6p)
-#define HAS_RC6pp(dev_priv) (false) /* HW was never validated */
+#define HAS_RC6(i915) (INTEL_INFO(i915)->has_rc6)
+#define HAS_RC6p(i915) (INTEL_INFO(i915)->has_rc6p)
+#define HAS_RC6pp(i915) (false) /* HW was never validated */
-#define HAS_RPS(dev_priv) (INTEL_INFO(dev_priv)->has_rps)
+#define HAS_RPS(i915) (INTEL_INFO(i915)->has_rps)
-#define HAS_DMC(dev_priv) (RUNTIME_INFO(dev_priv)->has_dmc)
-#define HAS_DSB(dev_priv) (INTEL_INFO(dev_priv)->display.has_dsb)
+#define HAS_DMC(i915) (RUNTIME_INFO(i915)->has_dmc)
+#define HAS_DSB(i915) (INTEL_INFO(i915)->display.has_dsb)
#define HAS_DSC(__i915) (RUNTIME_INFO(__i915)->has_dsc)
#define HAS_HW_SAGV_WM(i915) (DISPLAY_VER(i915) >= 13 && !IS_DGFX(i915))
-#define HAS_HECI_PXP(dev_priv) \
- (INTEL_INFO(dev_priv)->has_heci_pxp)
+#define HAS_HECI_PXP(i915) \
+ (INTEL_INFO(i915)->has_heci_pxp)
-#define HAS_HECI_GSCFI(dev_priv) \
- (INTEL_INFO(dev_priv)->has_heci_gscfi)
+#define HAS_HECI_GSCFI(i915) \
+ (INTEL_INFO(i915)->has_heci_gscfi)
-#define HAS_HECI_GSC(dev_priv) (HAS_HECI_PXP(dev_priv) || HAS_HECI_GSCFI(dev_priv))
+#define HAS_HECI_GSC(i915) (HAS_HECI_PXP(i915) || HAS_HECI_GSCFI(i915))
#define HAS_MSO(i915) (DISPLAY_VER(i915) >= 12)
-#define HAS_RUNTIME_PM(dev_priv) (INTEL_INFO(dev_priv)->has_runtime_pm)
-#define HAS_64BIT_RELOC(dev_priv) (INTEL_INFO(dev_priv)->has_64bit_reloc)
+#define HAS_RUNTIME_PM(i915) (INTEL_INFO(i915)->has_runtime_pm)
+#define HAS_64BIT_RELOC(i915) (INTEL_INFO(i915)->has_64bit_reloc)
-#define HAS_OA_BPC_REPORTING(dev_priv) \
- (INTEL_INFO(dev_priv)->has_oa_bpc_reporting)
-#define HAS_OA_SLICE_CONTRIB_LIMITS(dev_priv) \
- (INTEL_INFO(dev_priv)->has_oa_slice_contrib_limits)
-#define HAS_OAM(dev_priv) \
- (INTEL_INFO(dev_priv)->has_oam)
+#define HAS_OA_BPC_REPORTING(i915) \
+ (INTEL_INFO(i915)->has_oa_bpc_reporting)
+#define HAS_OA_SLICE_CONTRIB_LIMITS(i915) \
+ (INTEL_INFO(i915)->has_oa_slice_contrib_limits)
+#define HAS_OAM(i915) \
+ (INTEL_INFO(i915)->has_oam)
/*
* Set this flag, when platform requires 64K GTT page sizes or larger for
* device local memory access.
*/
-#define HAS_64K_PAGES(dev_priv) (INTEL_INFO(dev_priv)->has_64k_pages)
+#define HAS_64K_PAGES(i915) (INTEL_INFO(i915)->has_64k_pages)
-#define HAS_IPC(dev_priv) (INTEL_INFO(dev_priv)->display.has_ipc)
-#define HAS_SAGV(dev_priv) (DISPLAY_VER(dev_priv) >= 9 && !IS_LP(dev_priv))
+#define HAS_IPC(i915) (INTEL_INFO(i915)->display.has_ipc)
+#define HAS_SAGV(i915) (DISPLAY_VER(i915) >= 9 && !IS_LP(i915))
#define HAS_REGION(i915, i) (RUNTIME_INFO(i915)->memory_regions & (i))
#define HAS_LMEM(i915) HAS_REGION(i915, REGION_LMEM)
-#define HAS_EXTRA_GT_LIST(dev_priv) (INTEL_INFO(dev_priv)->extra_gt_list)
+#define HAS_EXTRA_GT_LIST(i915) (INTEL_INFO(i915)->extra_gt_list)
/*
* Platform has the dedicated compression control state for each lmem surfaces
* stored in lmem to support the 3D and media compression formats.
*/
-#define HAS_FLAT_CCS(dev_priv) (INTEL_INFO(dev_priv)->has_flat_ccs)
+#define HAS_FLAT_CCS(i915) (INTEL_INFO(i915)->has_flat_ccs)
-#define HAS_GT_UC(dev_priv) (INTEL_INFO(dev_priv)->has_gt_uc)
+#define HAS_GT_UC(i915) (INTEL_INFO(i915)->has_gt_uc)
-#define HAS_POOLED_EU(dev_priv) (RUNTIME_INFO(dev_priv)->has_pooled_eu)
+#define HAS_POOLED_EU(i915) (RUNTIME_INFO(i915)->has_pooled_eu)
-#define HAS_GLOBAL_MOCS_REGISTERS(dev_priv) (INTEL_INFO(dev_priv)->has_global_mocs)
+#define HAS_GLOBAL_MOCS_REGISTERS(i915) (INTEL_INFO(i915)->has_global_mocs)
-#define HAS_GMCH(dev_priv) (INTEL_INFO(dev_priv)->display.has_gmch)
+#define HAS_GMCH(i915) (INTEL_INFO(i915)->display.has_gmch)
#define HAS_GMD_ID(i915) (INTEL_INFO(i915)->has_gmd_id)
-#define HAS_LSPCON(dev_priv) (IS_DISPLAY_VER(dev_priv, 9, 10))
+#define HAS_LSPCON(i915) (IS_DISPLAY_VER(i915, 9, 10))
#define HAS_L3_CCS_READ(i915) (INTEL_INFO(i915)->has_l3_ccs_read)
/* DPF == dynamic parity feature */
-#define HAS_L3_DPF(dev_priv) (INTEL_INFO(dev_priv)->has_l3_dpf)
-#define NUM_L3_SLICES(dev_priv) (IS_HSW_GT3(dev_priv) ? \
- 2 : HAS_L3_DPF(dev_priv))
+#define HAS_L3_DPF(i915) (INTEL_INFO(i915)->has_l3_dpf)
+#define NUM_L3_SLICES(i915) (IS_HSW_GT3(i915) ? \
+ 2 : HAS_L3_DPF(i915))
-#define INTEL_NUM_PIPES(dev_priv) (hweight8(RUNTIME_INFO(dev_priv)->pipe_mask))
+#define INTEL_NUM_PIPES(i915) (hweight8(RUNTIME_INFO(i915)->pipe_mask))
-#define HAS_DISPLAY(dev_priv) (RUNTIME_INFO(dev_priv)->pipe_mask != 0)
+#define HAS_DISPLAY(i915) (RUNTIME_INFO(i915)->pipe_mask != 0)
#define HAS_VRR(i915) (DISPLAY_VER(i915) >= 11)
#define HAS_ASYNC_FLIPS(i915) (DISPLAY_VER(i915) >= 5)
/* Only valid when HAS_DISPLAY() is true */
-#define INTEL_DISPLAY_ENABLED(dev_priv) \
- (drm_WARN_ON(&(dev_priv)->drm, !HAS_DISPLAY(dev_priv)), \
- !(dev_priv)->params.disable_display && \
- !intel_opregion_headless_sku(dev_priv))
+#define INTEL_DISPLAY_ENABLED(i915) \
+ (drm_WARN_ON(&(i915)->drm, !HAS_DISPLAY(i915)), \
+ !(i915)->params.disable_display && \
+ !intel_opregion_headless_sku(i915))
-#define HAS_GUC_DEPRIVILEGE(dev_priv) \
- (INTEL_INFO(dev_priv)->has_guc_deprivilege)
+#define HAS_GUC_DEPRIVILEGE(i915) \
+ (INTEL_INFO(i915)->has_guc_deprivilege)
-#define HAS_D12_PLANE_MINIMIZATION(dev_priv) (IS_ROCKETLAKE(dev_priv) || \
- IS_ALDERLAKE_S(dev_priv))
+#define HAS_D12_PLANE_MINIMIZATION(i915) (IS_ROCKETLAKE(i915) || \
+ IS_ALDERLAKE_S(i915))
#define HAS_MBUS_JOINING(i915) (IS_ALDERLAKE_P(i915) || DISPLAY_VER(i915) >= 14)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 0a78bdbd36b1..e70b762f0b03 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -420,8 +420,11 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
page_length = remain < page_length ? remain : page_length;
if (drm_mm_node_allocated(&node)) {
ggtt->vm.insert_page(&ggtt->vm,
- i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
- node.start, I915_CACHE_NONE, 0);
+ i915_gem_object_get_dma_address(obj,
+ offset >> PAGE_SHIFT),
+ node.start,
+ i915_gem_get_pat_index(i915,
+ I915_CACHE_NONE), 0);
} else {
page_base += offset & PAGE_MASK;
}
@@ -598,8 +601,11 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
/* flush the write before we modify the GGTT */
intel_gt_flush_ggtt_writes(ggtt->vm.gt);
ggtt->vm.insert_page(&ggtt->vm,
- i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
- node.start, I915_CACHE_NONE, 0);
+ i915_gem_object_get_dma_address(obj,
+ offset >> PAGE_SHIFT),
+ node.start,
+ i915_gem_get_pat_index(i915,
+ I915_CACHE_NONE), 0);
wmb(); /* flush modifications to the GGTT (insert_page) */
} else {
page_base += offset & PAGE_MASK;
@@ -1142,6 +1148,19 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
unsigned int i;
int ret;
+ /*
+ * In the process of replacing cache_level with pat_index, a tricky
+ * dependency is created on the definition of enum i915_cache_level:
+ * if the enum changes, the PTE encoding breaks. Assert the expected
+ * values here, and remove this check once the enum is no longer used.
+ */
+ BUILD_BUG_ON(I915_CACHE_NONE != 0 ||
+ I915_CACHE_LLC != 1 ||
+ I915_CACHE_L3_LLC != 2 ||
+ I915_CACHE_WT != 3 ||
+ I915_MAX_CACHE_LEVEL != 4);
+
/* We need to fallback to 4K pages if host doesn't support huge gtt. */
if (intel_vgpu_active(dev_priv) && !intel_vgpu_has_huge_gtt(dev_priv))
RUNTIME_INFO(dev_priv)->page_sizes = I915_GTT_PAGE_SIZE_4K;
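
The BUILD_BUG_ON above pins the enum values because they are used directly as indices into the per-platform cachelevel_to_pat tables added in i915_pci.c later in this diff. A userspace model of that lookup, with the table values taken from the TGL_CACHELEVEL macro below (the names here are stand-ins, not the driver's):

#include <stdio.h>

enum cache_level { CACHE_NONE, CACHE_LLC, CACHE_L3_LLC, CACHE_WT, MAX_CACHE_LEVEL };

/* Values mirror TGL_CACHELEVEL in i915_pci.c below. */
static const unsigned int tgl_cachelevel_to_pat[MAX_CACHE_LEVEL] = {
	[CACHE_NONE]   = 3,
	[CACHE_LLC]    = 0,
	[CACHE_L3_LLC] = 0,
	[CACHE_WT]     = 2,
};

/* Model of i915_gem_get_pat_index(): the enum value is the table
 * index, which is why the enum layout must never change. */
static unsigned int get_pat_index(enum cache_level lvl)
{
	return tgl_cachelevel_to_pat[lvl];
}

int main(void)
{
	printf("PAT for uncached on TGL: %u\n", get_pat_index(CACHE_NONE)); /* 3 */
	return 0;
}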
diff --git a/drivers/gpu/drm/i915/i915_getparam.c b/drivers/gpu/drm/i915/i915_getparam.c
index 2238e096c957..6f11d7eaa91a 100644
--- a/drivers/gpu/drm/i915/i915_getparam.c
+++ b/drivers/gpu/drm/i915/i915_getparam.c
@@ -5,6 +5,8 @@
#include "gem/i915_gem_mman.h"
#include "gt/intel_engine_user.h"
+#include "pxp/intel_pxp.h"
+
#include "i915_cmd_parser.h"
#include "i915_drv.h"
#include "i915_getparam.h"
@@ -102,6 +104,11 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data,
if (value < 0)
return value;
break;
+ case I915_PARAM_PXP_STATUS:
+ value = intel_pxp_get_readiness_status(i915->pxp);
+ if (value < 0)
+ return value;
+ break;
case I915_PARAM_MMAP_GTT_VERSION:
/* Though we've started our numbering from 1, and so class all
* earlier versions as 0, in effect their value is undefined as
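
With the hunk above, userspace can query PXP readiness through the standard GETPARAM ioctl. A hedged sketch of such a query (the device node may differ, the header may live under libdrm/ on some systems, and older kernels will reject the parameter):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

int main(void)
{
	int fd = open("/dev/dri/renderD128", O_RDWR);	/* node may differ */
	int value = 0;
	struct drm_i915_getparam gp = {
		.param = I915_PARAM_PXP_STATUS,
		.value = &value,
	};

	if (fd < 0)
		return 1;
	if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0)
		printf("PXP readiness: %d\n", value);
	else
		perror("I915_PARAM_PXP_STATUS");	/* e.g. EINVAL on older kernels */
	close(fd);
	return 0;
}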
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index f020c0086fbc..ec368e700235 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -808,10 +808,15 @@ static void err_print_gt_engines(struct drm_i915_error_state_buf *m,
for (ee = gt->engine; ee; ee = ee->next) {
const struct i915_vma_coredump *vma;
- if (ee->guc_capture_node)
- intel_guc_capture_print_engine_node(m, ee);
- else
+ if (gt->uc && gt->uc->guc.is_guc_capture) {
+ if (ee->guc_capture_node)
+ intel_guc_capture_print_engine_node(m, ee);
+ else
+ err_printf(m, " Missing GuC capture node for %s\n",
+ ee->engine->name);
+ } else {
error_print_engine(m, ee);
+ }
err_printf(m, " hung: %u\n", ee->hung);
err_printf(m, " engine reset count: %u\n", ee->reset_count);
@@ -1117,10 +1122,14 @@ i915_vma_coredump_create(const struct intel_gt *gt,
mutex_lock(&ggtt->error_mutex);
if (ggtt->vm.raw_insert_page)
ggtt->vm.raw_insert_page(&ggtt->vm, dma, slot,
- I915_CACHE_NONE, 0);
+ i915_gem_get_pat_index(gt->i915,
+ I915_CACHE_NONE),
+ 0);
else
ggtt->vm.insert_page(&ggtt->vm, dma, slot,
- I915_CACHE_NONE, 0);
+ i915_gem_get_pat_index(gt->i915,
+ I915_CACHE_NONE),
+ 0);
mb();
s = io_mapping_map_wc(&ggtt->iomap, slot, PAGE_SIZE);
@@ -2162,7 +2171,7 @@ void i915_error_state_store(struct i915_gpu_coredump *error)
* i915_capture_error_state - capture an error record for later analysis
* @gt: intel_gt which originated the hang
* @engine_mask: hung engines
- *
+ * @dump_flags: dump flags
*
* Should be called when an error is detected (either a hang or an error
* interrupt) to capture error state from the time of the error. Fills
@@ -2219,3 +2228,135 @@ void i915_disable_error_state(struct drm_i915_private *i915, int err)
i915->gpu_error.first_error = ERR_PTR(err);
spin_unlock_irq(&i915->gpu_error.lock);
}
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
+void intel_klog_error_capture(struct intel_gt *gt,
+ intel_engine_mask_t engine_mask)
+{
+ static int g_count;
+ struct drm_i915_private *i915 = gt->i915;
+ struct i915_gpu_coredump *error;
+ intel_wakeref_t wakeref;
+ size_t buf_size = PAGE_SIZE * 128;
+ size_t pos_err;
+ char *buf, *ptr, *next;
+ int l_count = g_count++;
+ int line = 0;
+
+ /* Can't allocate memory during a reset */
+ if (test_bit(I915_RESET_BACKOFF, &gt->reset.flags)) {
+ drm_err(&gt->i915->drm, "[Capture/%d.%d] Inside GT reset, skipping error capture :(\n",
+ l_count, line++);
+ return;
+ }
+
+ error = READ_ONCE(i915->gpu_error.first_error);
+ if (error) {
+ drm_err(&i915->drm, "[Capture/%d.%d] Clearing existing error capture first...\n",
+ l_count, line++);
+ i915_reset_error_state(i915);
+ }
+
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
+ error = i915_gpu_coredump(gt, engine_mask, CORE_DUMP_FLAG_NONE);
+
+ if (IS_ERR(error)) {
+ drm_err(&i915->drm, "[Capture/%d.%d] Failed to capture error capture: %ld!\n",
+ l_count, line++, PTR_ERR(error));
+ return;
+ }
+
+ buf = kvmalloc(buf_size, GFP_KERNEL);
+ if (!buf) {
+ drm_err(&i915->drm, "[Capture/%d.%d] Failed to allocate buffer for error capture!\n",
+ l_count, line++);
+ i915_gpu_coredump_put(error);
+ return;
+ }
+
+ drm_info(&i915->drm, "[Capture/%d.%d] Dumping i915 error capture for %ps...\n",
+ l_count, line++, __builtin_return_address(0));
+
+ /* Largest string length safe to print via dmesg */
+# define MAX_CHUNK 800
+
+ pos_err = 0;
+ while (1) {
+ ssize_t got = i915_gpu_coredump_copy_to_buffer(error, buf, pos_err, buf_size - 1);
+
+ if (got <= 0)
+ break;
+
+ buf[got] = 0;
+ pos_err += got;
+
+ ptr = buf;
+ while (got > 0) {
+ size_t count;
+ char tag[2];
+
+ next = strnchr(ptr, got, '\n');
+ if (next) {
+ count = next - ptr;
+ *next = 0;
+ tag[0] = '>';
+ tag[1] = '<';
+ } else {
+ count = got;
+ tag[0] = '}';
+ tag[1] = '{';
+ }
+
+ if (count > MAX_CHUNK) {
+ size_t pos;
+ char *ptr2 = ptr;
+
+ for (pos = MAX_CHUNK; pos < count; pos += MAX_CHUNK) {
+ char chr = ptr[pos];
+
+ ptr[pos] = 0;
+ drm_info(&i915->drm, "[Capture/%d.%d] }%s{\n",
+ l_count, line++, ptr2);
+ ptr[pos] = chr;
+ ptr2 = ptr + pos;
+
+ /*
+ * If spewing large amounts of data via a serial console,
+ * this can be a very slow process. So be friendly and try
+ * not to cause 'softlockup on CPU' problems.
+ */
+ cond_resched();
+ }
+
+ if (ptr2 < (ptr + count))
+ drm_info(&i915->drm, "[Capture/%d.%d] %c%s%c\n",
+ l_count, line++, tag[0], ptr2, tag[1]);
+ else if (tag[0] == '>')
+ drm_info(&i915->drm, "[Capture/%d.%d] ><\n",
+ l_count, line++);
+ } else {
+ drm_info(&i915->drm, "[Capture/%d.%d] %c%s%c\n",
+ l_count, line++, tag[0], ptr, tag[1]);
+ }
+
+ ptr = next;
+ got -= count;
+ if (next) {
+ ptr++;
+ got--;
+ }
+
+ /* As above. */
+ cond_resched();
+ }
+
+ if (got)
+ drm_info(&i915->drm, "[Capture/%d.%d] Got %zd bytes remaining!\n",
+ l_count, line++, got);
+ }
+
+ kvfree(buf);
+
+ drm_info(&i915->drm, "[Capture/%d.%d] Dumped %zd bytes\n", l_count, line++, pos_err);
+}
+#endif
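
The capture loop above splits arbitrarily long output into dmesg-safe chunks, bracketing continuation pieces with }{ and line-terminated pieces with ><. A compact userspace model of just the splitting step (tag semantics simplified; the kernel code uses MAX_CHUNK of 800 and also resumes across buffer refills):

#include <stdio.h>
#include <string.h>

#define MAX_CHUNK 16	/* small here for demonstration */

/* Print one logical line in MAX_CHUNK pieces: '}...{' marks a piece
 * that continues, '>...<' marks the final piece. */
static void print_chunked(const char *line)
{
	size_t len = strlen(line), pos = 0;

	while (len - pos > MAX_CHUNK) {
		printf("}%.*s{\n", MAX_CHUNK, line + pos);
		pos += MAX_CHUNK;
	}
	printf(">%s<\n", line + pos);
}

int main(void)
{
	print_chunked("a fairly long error-capture line that will not fit");
	return 0;
}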
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index a91932cc6531..a78c061ce26f 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -258,6 +258,16 @@ static inline u32 i915_reset_engine_count(struct i915_gpu_error *error,
#define CORE_DUMP_FLAG_NONE 0x0
#define CORE_DUMP_FLAG_IS_GUC_CAPTURE BIT(0)
+#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) && IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
+void intel_klog_error_capture(struct intel_gt *gt,
+ intel_engine_mask_t engine_mask);
+#else
+static inline void intel_klog_error_capture(struct intel_gt *gt,
+ intel_engine_mask_t engine_mask)
+{
+}
+#endif
+
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
__printf(2, 3)
diff --git a/drivers/gpu/drm/i915/i915_hwmon.c b/drivers/gpu/drm/i915/i915_hwmon.c
index 8e7dccc8d3a0..a3bdd9f68a45 100644
--- a/drivers/gpu/drm/i915/i915_hwmon.c
+++ b/drivers/gpu/drm/i915/i915_hwmon.c
@@ -50,6 +50,8 @@ struct hwm_drvdata {
struct hwm_energy_info ei; /* Energy info for energy1_input */
char name[12];
int gt_n;
+ bool reset_in_progress;
+ wait_queue_head_t waitq;
};
struct i915_hwmon {
@@ -396,31 +398,56 @@ hwm_power_max_write(struct hwm_drvdata *ddat, long val)
{
struct i915_hwmon *hwmon = ddat->hwmon;
intel_wakeref_t wakeref;
+ DEFINE_WAIT(wait);
+ int ret = 0;
u32 nval;
- /* Disable PL1 limit and verify, because the limit cannot be disabled on all platforms */
- if (val == PL1_DISABLE) {
+ /* Block waiting for GuC reset to complete when needed */
+ for (;;) {
mutex_lock(&hwmon->hwmon_lock);
- with_intel_runtime_pm(ddat->uncore->rpm, wakeref) {
- intel_uncore_rmw(ddat->uncore, hwmon->rg.pkg_rapl_limit,
- PKG_PWR_LIM_1_EN, 0);
- nval = intel_uncore_read(ddat->uncore, hwmon->rg.pkg_rapl_limit);
+
+ prepare_to_wait(&ddat->waitq, &wait, TASK_INTERRUPTIBLE);
+
+ if (!hwmon->ddat.reset_in_progress)
+ break;
+
+ if (signal_pending(current)) {
+ ret = -EINTR;
+ break;
}
+
mutex_unlock(&hwmon->hwmon_lock);
+ schedule();
+ }
+ finish_wait(&ddat->waitq, &wait);
+ if (ret)
+ goto unlock;
+
+ wakeref = intel_runtime_pm_get(ddat->uncore->rpm);
+
+ /* Disable PL1 limit and verify, because the limit cannot be disabled on all platforms */
+ if (val == PL1_DISABLE) {
+ intel_uncore_rmw(ddat->uncore, hwmon->rg.pkg_rapl_limit,
+ PKG_PWR_LIM_1_EN, 0);
+ nval = intel_uncore_read(ddat->uncore, hwmon->rg.pkg_rapl_limit);
+
if (nval & PKG_PWR_LIM_1_EN)
- return -ENODEV;
- return 0;
+ ret = -ENODEV;
+ goto exit;
}
/* Computation in 64-bits to avoid overflow. Round to nearest. */
nval = DIV_ROUND_CLOSEST_ULL((u64)val << hwmon->scl_shift_power, SF_POWER);
nval = PKG_PWR_LIM_1_EN | REG_FIELD_PREP(PKG_PWR_LIM_1, nval);
- hwm_locked_with_pm_intel_uncore_rmw(ddat, hwmon->rg.pkg_rapl_limit,
- PKG_PWR_LIM_1_EN | PKG_PWR_LIM_1,
- nval);
- return 0;
+ intel_uncore_rmw(ddat->uncore, hwmon->rg.pkg_rapl_limit,
+ PKG_PWR_LIM_1_EN | PKG_PWR_LIM_1, nval);
+exit:
+ intel_runtime_pm_put(ddat->uncore->rpm, wakeref);
+unlock:
+ mutex_unlock(&hwmon->hwmon_lock);
+ return ret;
}
static int
@@ -470,6 +497,41 @@ hwm_power_write(struct hwm_drvdata *ddat, u32 attr, int chan, long val)
}
}
+void i915_hwmon_power_max_disable(struct drm_i915_private *i915, bool *old)
+{
+ struct i915_hwmon *hwmon = i915->hwmon;
+ u32 r;
+
+ if (!hwmon || !i915_mmio_reg_valid(hwmon->rg.pkg_rapl_limit))
+ return;
+
+ mutex_lock(&hwmon->hwmon_lock);
+
+ hwmon->ddat.reset_in_progress = true;
+ r = intel_uncore_rmw(hwmon->ddat.uncore, hwmon->rg.pkg_rapl_limit,
+ PKG_PWR_LIM_1_EN, 0);
+ *old = !!(r & PKG_PWR_LIM_1_EN);
+
+ mutex_unlock(&hwmon->hwmon_lock);
+}
+
+void i915_hwmon_power_max_restore(struct drm_i915_private *i915, bool old)
+{
+ struct i915_hwmon *hwmon = i915->hwmon;
+
+ if (!hwmon || !i915_mmio_reg_valid(hwmon->rg.pkg_rapl_limit))
+ return;
+
+ mutex_lock(&hwmon->hwmon_lock);
+
+ intel_uncore_rmw(hwmon->ddat.uncore, hwmon->rg.pkg_rapl_limit,
+ PKG_PWR_LIM_1_EN, old ? PKG_PWR_LIM_1_EN : 0);
+ hwmon->ddat.reset_in_progress = false;
+ wake_up_all(&hwmon->ddat.waitq);
+
+ mutex_unlock(&hwmon->hwmon_lock);
+}
+
static umode_t
hwm_energy_is_visible(const struct hwm_drvdata *ddat, u32 attr)
{
@@ -742,6 +804,7 @@ void i915_hwmon_register(struct drm_i915_private *i915)
ddat->uncore = &i915->uncore;
snprintf(ddat->name, sizeof(ddat->name), "i915");
ddat->gt_n = -1;
+ init_waitqueue_head(&ddat->waitq);
for_each_gt(gt, i915, i) {
ddat_gt = hwmon->ddat_gt + i;
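
hwm_power_max_write() above uses the classic prepare_to_wait()/finish_wait() pattern: re-take the lock, re-check the condition, and sleep until the GuC reset clears it or a signal arrives. A kernel-style sketch of that pattern in isolation (the function and its parameters are hypothetical):

/* Sleep until *cond clears, giving up on a signal; the mutex is
 * re-taken on every pass and held on return, as in the code above. */
static int wait_for_flag(struct mutex *lock, bool *cond, wait_queue_head_t *wq)
{
	DEFINE_WAIT(wait);
	int ret = 0;

	for (;;) {
		mutex_lock(lock);
		prepare_to_wait(wq, &wait, TASK_INTERRUPTIBLE);
		if (!*cond)
			break;		/* condition clear: exit, mutex held */
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}
		mutex_unlock(lock);
		schedule();
	}
	finish_wait(wq, &wait);
	return ret;			/* caller unlocks */
}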
diff --git a/drivers/gpu/drm/i915/i915_hwmon.h b/drivers/gpu/drm/i915/i915_hwmon.h
index 7ca9cf2c34c9..0fcb7de84406 100644
--- a/drivers/gpu/drm/i915/i915_hwmon.h
+++ b/drivers/gpu/drm/i915/i915_hwmon.h
@@ -7,14 +7,21 @@
#ifndef __I915_HWMON_H__
#define __I915_HWMON_H__
+#include <linux/types.h>
+
struct drm_i915_private;
+struct intel_gt;
#if IS_REACHABLE(CONFIG_HWMON)
void i915_hwmon_register(struct drm_i915_private *i915);
void i915_hwmon_unregister(struct drm_i915_private *i915);
+void i915_hwmon_power_max_disable(struct drm_i915_private *i915, bool *old);
+void i915_hwmon_power_max_restore(struct drm_i915_private *i915, bool old);
#else
static inline void i915_hwmon_register(struct drm_i915_private *i915) { };
static inline void i915_hwmon_unregister(struct drm_i915_private *i915) { };
+static inline void i915_hwmon_power_max_disable(struct drm_i915_private *i915, bool *old) { };
+static inline void i915_hwmon_power_max_restore(struct drm_i915_private *i915, bool old) { };
#endif
#endif /* __I915_HWMON_H__ */
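
A hedged sketch of how a reset path would be expected to bracket a GuC reset with the new pair (the surrounding function is hypothetical; only the two exported calls come from this patch):

/* Hypothetical caller: park the PL1 power limit across a GuC reset. */
static void guc_reset_with_pl1_parked(struct drm_i915_private *i915)
{
	bool pl1en = false;

	i915_hwmon_power_max_disable(i915, &pl1en);	/* saves old enable bit */

	/* ... perform the actual GuC reset here ... */

	i915_hwmon_power_max_restore(i915, pl1en);	/* also wakes blocked writers */
}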
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index d24bdea65a3d..53c83e257055 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -2762,12 +2762,15 @@ static void gen11_irq_reset(struct drm_i915_private *dev_priv)
static void dg1_irq_reset(struct drm_i915_private *dev_priv)
{
- struct intel_gt *gt = to_gt(dev_priv);
- struct intel_uncore *uncore = gt->uncore;
+ struct intel_uncore *uncore = &dev_priv->uncore;
+ struct intel_gt *gt;
+ unsigned int i;
dg1_master_intr_disable(dev_priv->uncore.regs);
- gen11_gt_irq_reset(gt);
+ for_each_gt(gt, dev_priv, i)
+ gen11_gt_irq_reset(gt);
+
gen11_display_irq_reset(dev_priv);
GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_);
@@ -3425,11 +3428,13 @@ static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
static void dg1_irq_postinstall(struct drm_i915_private *dev_priv)
{
- struct intel_gt *gt = to_gt(dev_priv);
- struct intel_uncore *uncore = gt->uncore;
+ struct intel_uncore *uncore = &dev_priv->uncore;
u32 gu_misc_masked = GEN11_GU_MISC_GSE;
+ struct intel_gt *gt;
+ unsigned int i;
- gen11_gt_irq_postinstall(gt);
+ for_each_gt(gt, dev_priv, i)
+ gen11_gt_irq_postinstall(gt);
GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);
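
The change above generalizes DG1 IRQ handling from the primary GT to every tile via for_each_gt(). The same pattern applies to any per-GT operation on multi-GT parts; a minimal kernel-style sketch (the wrapper function is hypothetical):

/* Illustrative: run a per-GT hook on every tile, as dg1_irq_reset() now does. */
static void for_all_gts(struct drm_i915_private *i915,
			void (*fn)(struct intel_gt *gt))
{
	struct intel_gt *gt;
	unsigned int i;

	for_each_gt(gt, i915, i)
		fn(gt);
}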
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 2a012da8ccfa..75cbccd1a441 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -29,6 +29,7 @@
#include "display/intel_display.h"
#include "gt/intel_gt_regs.h"
#include "gt/intel_sa_media.h"
+#include "gem/i915_gem_object_types.h"
#include "i915_driver.h"
#include "i915_drv.h"
@@ -163,6 +164,38 @@
.gamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING, \
}
+#define LEGACY_CACHELEVEL \
+ .cachelevel_to_pat = { \
+ [I915_CACHE_NONE] = 0, \
+ [I915_CACHE_LLC] = 1, \
+ [I915_CACHE_L3_LLC] = 2, \
+ [I915_CACHE_WT] = 3, \
+ }
+
+#define TGL_CACHELEVEL \
+ .cachelevel_to_pat = { \
+ [I915_CACHE_NONE] = 3, \
+ [I915_CACHE_LLC] = 0, \
+ [I915_CACHE_L3_LLC] = 0, \
+ [I915_CACHE_WT] = 2, \
+ }
+
+#define PVC_CACHELEVEL \
+ .cachelevel_to_pat = { \
+ [I915_CACHE_NONE] = 0, \
+ [I915_CACHE_LLC] = 3, \
+ [I915_CACHE_L3_LLC] = 3, \
+ [I915_CACHE_WT] = 2, \
+ }
+
+#define MTL_CACHELEVEL \
+ .cachelevel_to_pat = { \
+ [I915_CACHE_NONE] = 2, \
+ [I915_CACHE_LLC] = 3, \
+ [I915_CACHE_L3_LLC] = 3, \
+ [I915_CACHE_WT] = 1, \
+ }
+
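+/*
+ * The tables above translate enum i915_cache_level into a platform-specific
+ * PAT index for the PTEs, e.g. I915_CACHE_NONE maps to PAT 0 on legacy
+ * platforms, 3 on TGL-era parts and 2 on MTL; max_pat_index (set per
+ * platform below) bounds the valid PAT index range.
+ */
+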
/* Keep in gen based order, and chronological order within a gen */
#define GEN_DEFAULT_PAGE_SIZES \
@@ -188,11 +221,13 @@
.has_snoop = true, \
.has_coherent_ggtt = false, \
.dma_mask_size = 32, \
+ .max_pat_index = 3, \
I9XX_PIPE_OFFSETS, \
I9XX_CURSOR_OFFSETS, \
I9XX_COLORS, \
GEN_DEFAULT_PAGE_SIZES, \
- GEN_DEFAULT_REGIONS
+ GEN_DEFAULT_REGIONS, \
+ LEGACY_CACHELEVEL
#define I845_FEATURES \
GEN(2), \
@@ -209,11 +244,13 @@
.has_snoop = true, \
.has_coherent_ggtt = false, \
.dma_mask_size = 32, \
+ .max_pat_index = 3, \
I845_PIPE_OFFSETS, \
I845_CURSOR_OFFSETS, \
I845_COLORS, \
GEN_DEFAULT_PAGE_SIZES, \
- GEN_DEFAULT_REGIONS
+ GEN_DEFAULT_REGIONS, \
+ LEGACY_CACHELEVEL
static const struct intel_device_info i830_info = {
I830_FEATURES,
@@ -248,11 +285,13 @@ static const struct intel_device_info i865g_info = {
.has_snoop = true, \
.has_coherent_ggtt = true, \
.dma_mask_size = 32, \
+ .max_pat_index = 3, \
I9XX_PIPE_OFFSETS, \
I9XX_CURSOR_OFFSETS, \
I9XX_COLORS, \
GEN_DEFAULT_PAGE_SIZES, \
- GEN_DEFAULT_REGIONS
+ GEN_DEFAULT_REGIONS, \
+ LEGACY_CACHELEVEL
static const struct intel_device_info i915g_info = {
GEN3_FEATURES,
@@ -340,11 +379,13 @@ static const struct intel_device_info pnv_m_info = {
.has_snoop = true, \
.has_coherent_ggtt = true, \
.dma_mask_size = 36, \
+ .max_pat_index = 3, \
I9XX_PIPE_OFFSETS, \
I9XX_CURSOR_OFFSETS, \
I9XX_COLORS, \
GEN_DEFAULT_PAGE_SIZES, \
- GEN_DEFAULT_REGIONS
+ GEN_DEFAULT_REGIONS, \
+ LEGACY_CACHELEVEL
static const struct intel_device_info i965g_info = {
GEN4_FEATURES,
@@ -394,11 +435,13 @@ static const struct intel_device_info gm45_info = {
/* ilk does support rc6, but we do not implement [power] contexts */ \
.has_rc6 = 0, \
.dma_mask_size = 36, \
+ .max_pat_index = 3, \
I9XX_PIPE_OFFSETS, \
I9XX_CURSOR_OFFSETS, \
ILK_COLORS, \
GEN_DEFAULT_PAGE_SIZES, \
- GEN_DEFAULT_REGIONS
+ GEN_DEFAULT_REGIONS, \
+ LEGACY_CACHELEVEL
static const struct intel_device_info ilk_d_info = {
GEN5_FEATURES,
@@ -428,13 +471,15 @@ static const struct intel_device_info ilk_m_info = {
.has_rc6p = 0, \
.has_rps = true, \
.dma_mask_size = 40, \
+ .max_pat_index = 3, \
.__runtime.ppgtt_type = INTEL_PPGTT_ALIASING, \
.__runtime.ppgtt_size = 31, \
I9XX_PIPE_OFFSETS, \
I9XX_CURSOR_OFFSETS, \
ILK_COLORS, \
GEN_DEFAULT_PAGE_SIZES, \
- GEN_DEFAULT_REGIONS
+ GEN_DEFAULT_REGIONS, \
+ LEGACY_CACHELEVEL
#define SNB_D_PLATFORM \
GEN6_FEATURES, \
@@ -481,13 +526,15 @@ static const struct intel_device_info snb_m_gt2_info = {
.has_reset_engine = true, \
.has_rps = true, \
.dma_mask_size = 40, \
+ .max_pat_index = 3, \
.__runtime.ppgtt_type = INTEL_PPGTT_ALIASING, \
.__runtime.ppgtt_size = 31, \
IVB_PIPE_OFFSETS, \
IVB_CURSOR_OFFSETS, \
IVB_COLORS, \
GEN_DEFAULT_PAGE_SIZES, \
- GEN_DEFAULT_REGIONS
+ GEN_DEFAULT_REGIONS, \
+ LEGACY_CACHELEVEL
#define IVB_D_PLATFORM \
GEN7_FEATURES, \
@@ -541,6 +588,7 @@ static const struct intel_device_info vlv_info = {
.display.has_gmch = 1,
.display.has_hotplug = 1,
.dma_mask_size = 40,
+ .max_pat_index = 3,
.__runtime.ppgtt_type = INTEL_PPGTT_ALIASING,
.__runtime.ppgtt_size = 31,
.has_snoop = true,
@@ -552,6 +600,7 @@ static const struct intel_device_info vlv_info = {
I9XX_COLORS,
GEN_DEFAULT_PAGE_SIZES,
GEN_DEFAULT_REGIONS,
+ LEGACY_CACHELEVEL,
};
#define G75_FEATURES \
@@ -639,6 +688,7 @@ static const struct intel_device_info chv_info = {
.has_logical_ring_contexts = 1,
.display.has_gmch = 1,
.dma_mask_size = 39,
+ .max_pat_index = 3,
.__runtime.ppgtt_type = INTEL_PPGTT_FULL,
.__runtime.ppgtt_size = 32,
.has_reset_engine = 1,
@@ -650,6 +700,7 @@ static const struct intel_device_info chv_info = {
CHV_COLORS,
GEN_DEFAULT_PAGE_SIZES,
GEN_DEFAULT_REGIONS,
+ LEGACY_CACHELEVEL,
};
#define GEN9_DEFAULT_PAGE_SIZES \
@@ -731,11 +782,13 @@ static const struct intel_device_info skl_gt4_info = {
.has_snoop = true, \
.has_coherent_ggtt = false, \
.display.has_ipc = 1, \
+ .max_pat_index = 3, \
HSW_PIPE_OFFSETS, \
IVB_CURSOR_OFFSETS, \
IVB_COLORS, \
GEN9_DEFAULT_PAGE_SIZES, \
- GEN_DEFAULT_REGIONS
+ GEN_DEFAULT_REGIONS, \
+ LEGACY_CACHELEVEL
static const struct intel_device_info bxt_info = {
GEN9_LP_FEATURES,
@@ -889,9 +942,11 @@ static const struct intel_device_info jsl_info = {
[TRANSCODER_DSI_1] = TRANSCODER_DSI1_OFFSET, \
}, \
TGL_CURSOR_OFFSETS, \
+ TGL_CACHELEVEL, \
.has_global_mocs = 1, \
.has_pxp = 1, \
- .display.has_dsb = 1
+ .display.has_dsb = 1, \
+ .max_pat_index = 3
static const struct intel_device_info tgl_info = {
GEN12_FEATURES,
@@ -1013,6 +1068,7 @@ static const struct intel_device_info adl_p_info = {
.__runtime.graphics.ip.ver = 12, \
.__runtime.graphics.ip.rel = 50, \
XE_HP_PAGE_SIZES, \
+ TGL_CACHELEVEL, \
.dma_mask_size = 46, \
.has_3d_pipeline = 1, \
.has_64bit_reloc = 1, \
@@ -1031,6 +1087,7 @@ static const struct intel_device_info adl_p_info = {
.has_reset_engine = 1, \
.has_rps = 1, \
.has_runtime_pm = 1, \
+ .max_pat_index = 3, \
.__runtime.ppgtt_size = 48, \
.__runtime.ppgtt_type = INTEL_PPGTT_FULL
@@ -1107,11 +1164,13 @@ static const struct intel_device_info pvc_info = {
PLATFORM(INTEL_PONTEVECCHIO),
NO_DISPLAY,
.has_flat_ccs = 0,
+ .max_pat_index = 7,
.__runtime.platform_engine_mask =
BIT(BCS0) |
BIT(VCS0) |
BIT(CCS0) | BIT(CCS1) | BIT(CCS2) | BIT(CCS3),
.require_force_probe = 1,
+ PVC_CACHELEVEL,
};
#define XE_LPDP_FEATURES \
@@ -1148,11 +1207,15 @@ static const struct intel_device_info mtl_info = {
.has_flat_ccs = 0,
.has_gmd_id = 1,
.has_guc_deprivilege = 1,
+ .has_llc = 0,
.has_mslice_steering = 0,
.has_snoop = 1,
+ .max_pat_index = 4,
+ .has_pxp = 1,
.__runtime.memory_regions = REGION_SMEM | REGION_STOLEN_LMEM,
.__runtime.platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(CCS0),
.require_force_probe = 1,
+ MTL_CACHELEVEL,
};
#undef PLATFORM
@@ -1344,6 +1407,12 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return -ENODEV;
}
+ if (intel_info->require_force_probe) {
+ dev_info(&pdev->dev, "Force probing unsupported Device ID %04x, tainting kernel\n",
+ pdev->device);
+ add_taint(TAINT_USER, LOCKDEP_STILL_OK);
+ }
+
/* Only bind to function 0 of the device. Early generations
* used function 1 as a placeholder for multi-head. This causes
* us confusion instead, especially on the systems where both
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 050b8ae7b8e7..19d5652300ee 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -5300,6 +5300,7 @@ void i915_perf_fini(struct drm_i915_private *i915)
/**
* i915_perf_ioctl_version - Version of the i915-perf subsystem
+ * @i915: The i915 device
*
* This version number is used by userspace to detect available features.
*/
diff --git a/drivers/gpu/drm/i915/i915_perf_oa_regs.h b/drivers/gpu/drm/i915/i915_perf_oa_regs.h
index ba103875e19f..e5ac7a8b5cb6 100644
--- a/drivers/gpu/drm/i915/i915_perf_oa_regs.h
+++ b/drivers/gpu/drm/i915/i915_perf_oa_regs.h
@@ -134,10 +134,6 @@
#define GDT_CHICKEN_BITS _MMIO(0x9840)
#define GT_NOA_ENABLE 0x00000080
-#define GEN12_SQCNT1 _MMIO(0x8718)
-#define GEN12_SQCNT1_PMON_ENABLE REG_BIT(30)
-#define GEN12_SQCNT1_OABPC REG_BIT(29)
-
/* Gen12 OAM unit */
#define GEN12_OAM_HEAD_POINTER_OFFSET (0x1a0)
#define GEN12_OAM_HEAD_POINTER_MASK 0xffffffc0
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index 7ece883a7d95..a814583e19fd 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -10,6 +10,7 @@
#include "gt/intel_engine_pm.h"
#include "gt/intel_engine_regs.h"
#include "gt/intel_engine_user.h"
+#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_gt_regs.h"
#include "gt/intel_rc6.h"
@@ -50,16 +51,26 @@ static u8 engine_event_instance(struct perf_event *event)
return (event->attr.config >> I915_PMU_SAMPLE_BITS) & 0xff;
}
-static bool is_engine_config(u64 config)
+static bool is_engine_config(const u64 config)
{
return config < __I915_PMU_OTHER(0);
}
+static unsigned int config_gt_id(const u64 config)
+{
+ return config >> __I915_PMU_GT_SHIFT;
+}
+
+static u64 config_counter(const u64 config)
+{
+ return config & ~(~0ULL << __I915_PMU_GT_SHIFT);
+}
+
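+/*
+ * With the multi-GT uapi extension, an event config packs the GT id in the
+ * bits at and above __I915_PMU_GT_SHIFT and the counter itself in the bits
+ * below; config_gt_id() and config_counter() simply split those two halves.
+ */
+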
static unsigned int other_bit(const u64 config)
{
unsigned int val;
- switch (config) {
+ switch (config_counter(config)) {
case I915_PMU_ACTUAL_FREQUENCY:
val = __I915_PMU_ACTUAL_FREQUENCY_ENABLED;
break;
@@ -77,7 +88,9 @@ static unsigned int other_bit(const u64 config)
return -1;
}
- return I915_ENGINE_SAMPLE_COUNT + val;
+ return I915_ENGINE_SAMPLE_COUNT +
+ config_gt_id(config) * __I915_PMU_TRACKED_EVENT_COUNT +
+ val;
}
static unsigned int config_bit(const u64 config)
@@ -88,9 +101,20 @@ static unsigned int config_bit(const u64 config)
return other_bit(config);
}
-static u64 config_mask(u64 config)
+static u32 config_mask(const u64 config)
{
- return BIT_ULL(config_bit(config));
+ unsigned int bit = config_bit(config);
+
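+ /*
+ * Check that the bit fits the u32 enable mask: at build time when the
+ * config is a compile-time constant, otherwise as a one-shot runtime
+ * warning.
+ */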
+ if (__builtin_constant_p(config))
+ BUILD_BUG_ON(bit >
+ BITS_PER_TYPE(typeof_member(struct i915_pmu,
+ enable)) - 1);
+ else
+ WARN_ON_ONCE(bit >
+ BITS_PER_TYPE(typeof_member(struct i915_pmu,
+ enable)) - 1);
+
+ return BIT(config_bit(config));
}
static bool is_engine_event(struct perf_event *event)
@@ -103,6 +127,18 @@ static unsigned int event_bit(struct perf_event *event)
return config_bit(event->attr.config);
}
+static u32 frequency_enabled_mask(void)
+{
+ unsigned int i;
+ u32 mask = 0;
+
+ for (i = 0; i < I915_PMU_MAX_GTS; i++)
+ mask |= config_mask(__I915_PMU_ACTUAL_FREQUENCY(i)) |
+ config_mask(__I915_PMU_REQUESTED_FREQUENCY(i));
+
+ return mask;
+}
+
static bool pmu_needs_timer(struct i915_pmu *pmu, bool gpu_active)
{
struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
@@ -119,9 +155,7 @@ static bool pmu_needs_timer(struct i915_pmu *pmu, bool gpu_active)
* Mask out all the ones which do not need the timer, or in
* other words keep all the ones that could need the timer.
*/
- enable &= config_mask(I915_PMU_ACTUAL_FREQUENCY) |
- config_mask(I915_PMU_REQUESTED_FREQUENCY) |
- ENGINE_SAMPLE_MASK;
+ enable &= frequency_enabled_mask() | ENGINE_SAMPLE_MASK;
/*
* When the GPU is idle per-engine counters do not need to be
@@ -163,9 +197,37 @@ static inline s64 ktime_since_raw(const ktime_t kt)
return ktime_to_ns(ktime_sub(ktime_get_raw(), kt));
}
+static unsigned int
+__sample_idx(struct i915_pmu *pmu, unsigned int gt_id, int sample)
+{
+ unsigned int idx = gt_id * __I915_NUM_PMU_SAMPLERS + sample;
+
+ GEM_BUG_ON(idx >= ARRAY_SIZE(pmu->sample));
+
+ return idx;
+}
+
+static u64 read_sample(struct i915_pmu *pmu, unsigned int gt_id, int sample)
+{
+ return pmu->sample[__sample_idx(pmu, gt_id, sample)].cur;
+}
+
+static void
+store_sample(struct i915_pmu *pmu, unsigned int gt_id, int sample, u64 val)
+{
+ pmu->sample[__sample_idx(pmu, gt_id, sample)].cur = val;
+}
+
+static void
+add_sample_mult(struct i915_pmu *pmu, unsigned int gt_id, int sample, u32 val, u32 mul)
+{
+ pmu->sample[__sample_idx(pmu, gt_id, sample)].cur += mul_u32_u32(val, mul);
+}
+
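+/*
+ * pmu->sample[] is kept as a flat array indexed by
+ * gt_id * __I915_NUM_PMU_SAMPLERS + sample, i.e. a per-GT table of samplers
+ * flattened into one allocation; the helpers above hide the indexing.
+ */
+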
static u64 get_rc6(struct intel_gt *gt)
{
struct drm_i915_private *i915 = gt->i915;
+ const unsigned int gt_id = gt->info.id;
struct i915_pmu *pmu = &i915->pmu;
unsigned long flags;
bool awake = false;
@@ -180,7 +242,7 @@ static u64 get_rc6(struct intel_gt *gt)
spin_lock_irqsave(&pmu->lock, flags);
if (awake) {
- pmu->sample[__I915_SAMPLE_RC6].cur = val;
+ store_sample(pmu, gt_id, __I915_SAMPLE_RC6, val);
} else {
/*
* We think we are runtime suspended.
@@ -189,14 +251,14 @@ static u64 get_rc6(struct intel_gt *gt)
* on top of the last known real value, as the approximated RC6
* counter value.
*/
- val = ktime_since_raw(pmu->sleep_last);
- val += pmu->sample[__I915_SAMPLE_RC6].cur;
+ val = ktime_since_raw(pmu->sleep_last[gt_id]);
+ val += read_sample(pmu, gt_id, __I915_SAMPLE_RC6);
}
- if (val < pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur)
- val = pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur;
+ if (val < read_sample(pmu, gt_id, __I915_SAMPLE_RC6_LAST_REPORTED))
+ val = read_sample(pmu, gt_id, __I915_SAMPLE_RC6_LAST_REPORTED);
else
- pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur = val;
+ store_sample(pmu, gt_id, __I915_SAMPLE_RC6_LAST_REPORTED, val);
spin_unlock_irqrestore(&pmu->lock, flags);
@@ -206,22 +268,29 @@ static u64 get_rc6(struct intel_gt *gt)
static void init_rc6(struct i915_pmu *pmu)
{
struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
- intel_wakeref_t wakeref;
+ struct intel_gt *gt;
+ unsigned int i;
- with_intel_runtime_pm(to_gt(i915)->uncore->rpm, wakeref) {
- pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(to_gt(i915));
- pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur =
- pmu->sample[__I915_SAMPLE_RC6].cur;
- pmu->sleep_last = ktime_get_raw();
+ for_each_gt(gt, i915, i) {
+ intel_wakeref_t wakeref;
+
+ with_intel_runtime_pm(gt->uncore->rpm, wakeref) {
+ u64 val = __get_rc6(gt);
+
+ store_sample(pmu, i, __I915_SAMPLE_RC6, val);
+ store_sample(pmu, i, __I915_SAMPLE_RC6_LAST_REPORTED,
+ val);
+ pmu->sleep_last[i] = ktime_get_raw();
+ }
}
}
-static void park_rc6(struct drm_i915_private *i915)
+static void park_rc6(struct intel_gt *gt)
{
- struct i915_pmu *pmu = &i915->pmu;
+ struct i915_pmu *pmu = &gt->i915->pmu;
- pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(to_gt(i915));
- pmu->sleep_last = ktime_get_raw();
+ store_sample(pmu, gt->info.id, __I915_SAMPLE_RC6, __get_rc6(gt));
+ pmu->sleep_last[gt->info.id] = ktime_get_raw();
}
static void __i915_pmu_maybe_start_timer(struct i915_pmu *pmu)
@@ -235,29 +304,31 @@ static void __i915_pmu_maybe_start_timer(struct i915_pmu *pmu)
}
}
-void i915_pmu_gt_parked(struct drm_i915_private *i915)
+void i915_pmu_gt_parked(struct intel_gt *gt)
{
- struct i915_pmu *pmu = &i915->pmu;
+ struct i915_pmu *pmu = &gt->i915->pmu;
if (!pmu->base.event_init)
return;
spin_lock_irq(&pmu->lock);
- park_rc6(i915);
+ park_rc6(gt);
/*
* Signal sampling timer to stop if only engine events are enabled and
* GPU went idle.
*/
- pmu->timer_enabled = pmu_needs_timer(pmu, false);
+ pmu->unparked &= ~BIT(gt->info.id);
+ if (pmu->unparked == 0)
+ pmu->timer_enabled = pmu_needs_timer(pmu, false);
spin_unlock_irq(&pmu->lock);
}
-void i915_pmu_gt_unparked(struct drm_i915_private *i915)
+void i915_pmu_gt_unparked(struct intel_gt *gt)
{
- struct i915_pmu *pmu = &i915->pmu;
+ struct i915_pmu *pmu = &gt->i915->pmu;
if (!pmu->base.event_init)
return;
@@ -267,7 +338,10 @@ void i915_pmu_gt_unparked(struct drm_i915_private *i915)
/*
* Re-enable sampling timer when GPU goes active.
*/
- __i915_pmu_maybe_start_timer(pmu);
+ if (pmu->unparked == 0)
+ __i915_pmu_maybe_start_timer(pmu);
+
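+ /*
+ * The sampling timer is shared by all GTs: the first GT to unpark
+ * starts it and it is only allowed to stop once every GT has parked
+ * again.
+ */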
+ pmu->unparked |= BIT(gt->info.id);
spin_unlock_irq(&pmu->lock);
}
@@ -338,6 +412,9 @@ engines_sample(struct intel_gt *gt, unsigned int period_ns)
return;
for_each_engine(engine, gt, id) {
+ if (!engine->pmu.enable)
+ continue;
+
if (!intel_engine_pm_get_if_awake(engine))
continue;
@@ -353,34 +430,30 @@ engines_sample(struct intel_gt *gt, unsigned int period_ns)
}
}
-static void
-add_sample_mult(struct i915_pmu_sample *sample, u32 val, u32 mul)
-{
- sample->cur += mul_u32_u32(val, mul);
-}
-
-static bool frequency_sampling_enabled(struct i915_pmu *pmu)
+static bool
+frequency_sampling_enabled(struct i915_pmu *pmu, unsigned int gt)
{
return pmu->enable &
- (config_mask(I915_PMU_ACTUAL_FREQUENCY) |
- config_mask(I915_PMU_REQUESTED_FREQUENCY));
+ (config_mask(__I915_PMU_ACTUAL_FREQUENCY(gt)) |
+ config_mask(__I915_PMU_REQUESTED_FREQUENCY(gt)));
}
static void
frequency_sample(struct intel_gt *gt, unsigned int period_ns)
{
struct drm_i915_private *i915 = gt->i915;
+ const unsigned int gt_id = gt->info.id;
struct i915_pmu *pmu = &i915->pmu;
struct intel_rps *rps = &gt->rps;
- if (!frequency_sampling_enabled(pmu))
+ if (!frequency_sampling_enabled(pmu, gt_id))
return;
/* Report 0/0 (actual/requested) frequency while parked. */
if (!intel_gt_pm_get_if_awake(gt))
return;
- if (pmu->enable & config_mask(I915_PMU_ACTUAL_FREQUENCY)) {
+ if (pmu->enable & config_mask(__I915_PMU_ACTUAL_FREQUENCY(gt_id))) {
u32 val;
/*
@@ -396,12 +469,12 @@ frequency_sample(struct intel_gt *gt, unsigned int period_ns)
if (!val)
val = intel_gpu_freq(rps, rps->cur_freq);
- add_sample_mult(&pmu->sample[__I915_SAMPLE_FREQ_ACT],
+ add_sample_mult(pmu, gt_id, __I915_SAMPLE_FREQ_ACT,
val, period_ns / 1000);
}
- if (pmu->enable & config_mask(I915_PMU_REQUESTED_FREQUENCY)) {
- add_sample_mult(&pmu->sample[__I915_SAMPLE_FREQ_REQ],
+ if (pmu->enable & config_mask(__I915_PMU_REQUESTED_FREQUENCY(gt_id))) {
+ add_sample_mult(pmu, gt_id, __I915_SAMPLE_FREQ_REQ,
intel_rps_get_requested_frequency(rps),
period_ns / 1000);
}
@@ -414,8 +487,9 @@ static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer)
struct drm_i915_private *i915 =
container_of(hrtimer, struct drm_i915_private, pmu.timer);
struct i915_pmu *pmu = &i915->pmu;
- struct intel_gt *gt = to_gt(i915);
unsigned int period_ns;
+ struct intel_gt *gt;
+ unsigned int i;
ktime_t now;
if (!READ_ONCE(pmu->timer_enabled))
@@ -431,8 +505,14 @@ static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer)
* grabbing the forcewake. However the potential error from timer call-
* back delay greatly dominates this so we keep it simple.
*/
- engines_sample(gt, period_ns);
- frequency_sample(gt, period_ns);
+
+ for_each_gt(gt, i915, i) {
+ if (!(pmu->unparked & BIT(i)))
+ continue;
+
+ engines_sample(gt, period_ns);
+ frequency_sample(gt, period_ns);
+ }
hrtimer_forward(hrtimer, now, ns_to_ktime(PERIOD));
@@ -473,7 +553,13 @@ config_status(struct drm_i915_private *i915, u64 config)
{
struct intel_gt *gt = to_gt(i915);
- switch (config) {
+ unsigned int gt_id = config_gt_id(config);
+ unsigned int max_gt_id = HAS_EXTRA_GT_LIST(i915) ? 1 : 0;
+
+ if (gt_id > max_gt_id)
+ return -ENOENT;
+
+ switch (config_counter(config)) {
case I915_PMU_ACTUAL_FREQUENCY:
if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
/* Requires a mutex for sampling! */
@@ -484,6 +570,8 @@ config_status(struct drm_i915_private *i915, u64 config)
return -ENODEV;
break;
case I915_PMU_INTERRUPTS:
+ if (gt_id)
+ return -ENOENT;
break;
case I915_PMU_RC6_RESIDENCY:
if (!gt->rc6.supported)
@@ -581,22 +669,27 @@ static u64 __i915_pmu_event_read(struct perf_event *event)
val = engine->pmu.sample[sample].cur;
}
} else {
- switch (event->attr.config) {
+ const unsigned int gt_id = config_gt_id(event->attr.config);
+ const u64 config = config_counter(event->attr.config);
+
+ switch (config) {
case I915_PMU_ACTUAL_FREQUENCY:
val =
- div_u64(pmu->sample[__I915_SAMPLE_FREQ_ACT].cur,
+ div_u64(read_sample(pmu, gt_id,
+ __I915_SAMPLE_FREQ_ACT),
USEC_PER_SEC /* to MHz */);
break;
case I915_PMU_REQUESTED_FREQUENCY:
val =
- div_u64(pmu->sample[__I915_SAMPLE_FREQ_REQ].cur,
+ div_u64(read_sample(pmu, gt_id,
+ __I915_SAMPLE_FREQ_REQ),
USEC_PER_SEC /* to MHz */);
break;
case I915_PMU_INTERRUPTS:
val = READ_ONCE(pmu->irq_count);
break;
case I915_PMU_RC6_RESIDENCY:
- val = get_rc6(to_gt(i915));
+ val = get_rc6(i915->gt[gt_id]);
break;
case I915_PMU_SOFTWARE_GT_AWAKE_TIME:
val = ktime_to_ns(intel_gt_get_awake_time(to_gt(i915)));
@@ -633,11 +726,10 @@ static void i915_pmu_enable(struct perf_event *event)
{
struct drm_i915_private *i915 =
container_of(event->pmu, typeof(*i915), pmu.base);
+ const unsigned int bit = event_bit(event);
struct i915_pmu *pmu = &i915->pmu;
unsigned long flags;
- unsigned int bit;
- bit = event_bit(event);
if (bit == -1)
goto update;
@@ -651,7 +743,7 @@ static void i915_pmu_enable(struct perf_event *event)
GEM_BUG_ON(bit >= ARRAY_SIZE(pmu->enable_count));
GEM_BUG_ON(pmu->enable_count[bit] == ~0);
- pmu->enable |= BIT_ULL(bit);
+ pmu->enable |= BIT(bit);
pmu->enable_count[bit]++;
/*
@@ -698,7 +790,7 @@ static void i915_pmu_disable(struct perf_event *event)
{
struct drm_i915_private *i915 =
container_of(event->pmu, typeof(*i915), pmu.base);
- unsigned int bit = event_bit(event);
+ const unsigned int bit = event_bit(event);
struct i915_pmu *pmu = &i915->pmu;
unsigned long flags;
@@ -734,7 +826,7 @@ static void i915_pmu_disable(struct perf_event *event)
* bitmask when the last listener on an event goes away.
*/
if (--pmu->enable_count[bit] == 0) {
- pmu->enable &= ~BIT_ULL(bit);
+ pmu->enable &= ~BIT(bit);
pmu->timer_enabled &= pmu_needs_timer(pmu, true);
}
@@ -848,11 +940,20 @@ static const struct attribute_group i915_pmu_cpumask_attr_group = {
.attrs = i915_cpumask_attrs,
};
-#define __event(__config, __name, __unit) \
+#define __event(__counter, __name, __unit) \
{ \
- .config = (__config), \
+ .counter = (__counter), \
.name = (__name), \
.unit = (__unit), \
+ .global = false, \
+}
+
+#define __global_event(__counter, __name, __unit) \
+{ \
+ .counter = (__counter), \
+ .name = (__name), \
+ .unit = (__unit), \
+ .global = true, \
}
#define __engine_event(__sample, __name) \
@@ -891,15 +992,16 @@ create_event_attributes(struct i915_pmu *pmu)
{
struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
static const struct {
- u64 config;
+ unsigned int counter;
const char *name;
const char *unit;
+ bool global;
} events[] = {
- __event(I915_PMU_ACTUAL_FREQUENCY, "actual-frequency", "M"),
- __event(I915_PMU_REQUESTED_FREQUENCY, "requested-frequency", "M"),
- __event(I915_PMU_INTERRUPTS, "interrupts", NULL),
- __event(I915_PMU_RC6_RESIDENCY, "rc6-residency", "ns"),
- __event(I915_PMU_SOFTWARE_GT_AWAKE_TIME, "software-gt-awake-time", "ns"),
+ __event(0, "actual-frequency", "M"),
+ __event(1, "requested-frequency", "M"),
+ __global_event(2, "interrupts", NULL),
+ __event(3, "rc6-residency", "ns"),
+ __event(4, "software-gt-awake-time", "ns"),
};
static const struct {
enum drm_i915_pmu_engine_sample sample;
@@ -914,12 +1016,17 @@ create_event_attributes(struct i915_pmu *pmu)
struct i915_ext_attribute *i915_attr = NULL, *i915_iter;
struct attribute **attr = NULL, **attr_iter;
struct intel_engine_cs *engine;
- unsigned int i;
+ struct intel_gt *gt;
+ unsigned int i, j;
/* Count how many counters we will be exposing. */
- for (i = 0; i < ARRAY_SIZE(events); i++) {
- if (!config_status(i915, events[i].config))
- count++;
+ for_each_gt(gt, i915, j) {
+ for (i = 0; i < ARRAY_SIZE(events); i++) {
+ u64 config = ___I915_PMU_OTHER(j, events[i].counter);
+
+ if (!config_status(i915, config))
+ count++;
+ }
}
for_each_uabi_engine(engine, i915) {
@@ -949,26 +1056,39 @@ create_event_attributes(struct i915_pmu *pmu)
attr_iter = attr;
/* Initialize supported non-engine counters. */
- for (i = 0; i < ARRAY_SIZE(events); i++) {
- char *str;
-
- if (config_status(i915, events[i].config))
- continue;
-
- str = kstrdup(events[i].name, GFP_KERNEL);
- if (!str)
- goto err;
+ for_each_gt(gt, i915, j) {
+ for (i = 0; i < ARRAY_SIZE(events); i++) {
+ u64 config = ___I915_PMU_OTHER(j, events[i].counter);
+ char *str;
- *attr_iter++ = &i915_iter->attr.attr;
- i915_iter = add_i915_attr(i915_iter, str, events[i].config);
+ if (config_status(i915, config))
+ continue;
- if (events[i].unit) {
- str = kasprintf(GFP_KERNEL, "%s.unit", events[i].name);
+ if (events[i].global || !HAS_EXTRA_GT_LIST(i915))
+ str = kstrdup(events[i].name, GFP_KERNEL);
+ else
+ str = kasprintf(GFP_KERNEL, "%s-gt%u",
+ events[i].name, j);
if (!str)
goto err;
- *attr_iter++ = &pmu_iter->attr.attr;
- pmu_iter = add_pmu_attr(pmu_iter, str, events[i].unit);
+ *attr_iter++ = &i915_iter->attr.attr;
+ i915_iter = add_i915_attr(i915_iter, str, config);
+
+ if (events[i].unit) {
+ if (events[i].global || !HAS_EXTRA_GT_LIST(i915))
+ str = kasprintf(GFP_KERNEL, "%s.unit",
+ events[i].name);
+ else
+ str = kasprintf(GFP_KERNEL, "%s-gt%u.unit",
+ events[i].name, j);
+ if (!str)
+ goto err;
+
+ *attr_iter++ = &pmu_iter->attr.attr;
+ pmu_iter = add_pmu_attr(pmu_iter, str,
+ events[i].unit);
+ }
}
}
diff --git a/drivers/gpu/drm/i915/i915_pmu.h b/drivers/gpu/drm/i915/i915_pmu.h
index 449057648f39..33d80fbaab8b 100644
--- a/drivers/gpu/drm/i915/i915_pmu.h
+++ b/drivers/gpu/drm/i915/i915_pmu.h
@@ -13,8 +13,9 @@
#include <uapi/drm/i915_drm.h>
struct drm_i915_private;
+struct intel_gt;
-/**
+/*
* Non-engine events that we need to track enabled-disabled transition and
* current state.
*/
@@ -25,7 +26,7 @@ enum i915_pmu_tracked_events {
__I915_PMU_TRACKED_EVENT_COUNT, /* count marker */
};
-/**
+/*
* Slots used from the sampling timer (non-engine events) with some extras for
* convenience.
*/
@@ -37,13 +38,16 @@ enum {
__I915_NUM_PMU_SAMPLERS
};
-/**
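+/* Currently at most two GTs, e.g. a root GT plus a media GT on MTL. */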
+#define I915_PMU_MAX_GTS 2
+
+/*
* How many different events we track in the global PMU mask.
*
* It is also used to know to needed number of event reference counters.
*/
#define I915_PMU_MASK_BITS \
- (I915_ENGINE_SAMPLE_COUNT + __I915_PMU_TRACKED_EVENT_COUNT)
+ (I915_ENGINE_SAMPLE_COUNT + \
+ I915_PMU_MAX_GTS * __I915_PMU_TRACKED_EVENT_COUNT)
#define I915_ENGINE_SAMPLE_COUNT (I915_SAMPLE_SEMA + 1)
@@ -76,6 +80,10 @@ struct i915_pmu {
*/
spinlock_t lock;
/**
+ * @unparked: GT unparked mask.
+ */
+ unsigned int unparked;
+ /**
* @timer: Timer for internal i915 PMU sampling.
*/
struct hrtimer timer;
@@ -119,11 +127,11 @@ struct i915_pmu {
* Only global counters are held here, while the per-engine ones are in
* struct intel_engine_cs.
*/
- struct i915_pmu_sample sample[__I915_NUM_PMU_SAMPLERS];
+ struct i915_pmu_sample sample[I915_PMU_MAX_GTS * __I915_NUM_PMU_SAMPLERS];
/**
* @sleep_last: Last time GT parked for RC6 estimation.
*/
- ktime_t sleep_last;
+ ktime_t sleep_last[I915_PMU_MAX_GTS];
/**
* @irq_count: Number of interrupts
*
@@ -151,15 +159,15 @@ int i915_pmu_init(void);
void i915_pmu_exit(void);
void i915_pmu_register(struct drm_i915_private *i915);
void i915_pmu_unregister(struct drm_i915_private *i915);
-void i915_pmu_gt_parked(struct drm_i915_private *i915);
-void i915_pmu_gt_unparked(struct drm_i915_private *i915);
+void i915_pmu_gt_parked(struct intel_gt *gt);
+void i915_pmu_gt_unparked(struct intel_gt *gt);
#else
static inline int i915_pmu_init(void) { return 0; }
static inline void i915_pmu_exit(void) {}
static inline void i915_pmu_register(struct drm_i915_private *i915) {}
static inline void i915_pmu_unregister(struct drm_i915_private *i915) {}
-static inline void i915_pmu_gt_parked(struct drm_i915_private *i915) {}
-static inline void i915_pmu_gt_unparked(struct drm_i915_private *i915) {}
+static inline void i915_pmu_gt_parked(struct intel_gt *gt) {}
+static inline void i915_pmu_gt_unparked(struct intel_gt *gt) {}
#endif
#endif
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index f5e1bb5e857a..0ac55b2e4223 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -172,7 +172,7 @@ enum {
I915_FENCE_FLAG_COMPOSITE,
};
-/**
+/*
* Request queue structure.
*
* The request queue allows us to note sequence numbers that have been emitted
@@ -198,7 +198,7 @@ struct i915_request {
struct drm_i915_private *i915;
- /**
+ /*
* Context and ring buffer related to this request
* Contexts are refcounted, so when this request is associated with a
* context, we must increment the context's refcount, to guarantee that
@@ -251,9 +251,9 @@ struct i915_request {
};
struct llist_head execute_cb;
struct i915_sw_fence semaphore;
- /**
- * @submit_work: complete submit fence from an IRQ if needed for
- * locking hierarchy reasons.
+ /*
+ * complete submit fence from an IRQ if needed for locking hierarchy
+ * reasons.
*/
struct irq_work submit_work;
@@ -277,35 +277,35 @@ struct i915_request {
*/
const u32 *hwsp_seqno;
- /** Position in the ring of the start of the request */
+ /* Position in the ring of the start of the request */
u32 head;
- /** Position in the ring of the start of the user packets */
+ /* Position in the ring of the start of the user packets */
u32 infix;
- /**
+ /*
* Position in the ring of the start of the postfix.
* This is required to calculate the maximum available ring space
* without overwriting the postfix.
*/
u32 postfix;
- /** Position in the ring of the end of the whole request */
+ /* Position in the ring of the end of the whole request */
u32 tail;
- /** Position in the ring of the end of any workarounds after the tail */
+ /* Position in the ring of the end of any workarounds after the tail */
u32 wa_tail;
- /** Preallocate space in the ring for the emitting the request */
+ /* Preallocate space in the ring for emitting the request */
u32 reserved_space;
- /** Batch buffer pointer for selftest internal use. */
+ /* Batch buffer pointer for selftest internal use. */
I915_SELFTEST_DECLARE(struct i915_vma *batch);
struct i915_vma_resource *batch_res;
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
- /**
+ /*
* Additional buffers requested by userspace to be captured upon
* a GPU hang. The vma/obj on this list are protected by their
* active reference - all objects on this list must also be
@@ -314,29 +314,29 @@ struct i915_request {
struct i915_capture_list *capture_list;
#endif
- /** Time at which this request was emitted, in jiffies. */
+ /* Time at which this request was emitted, in jiffies. */
unsigned long emitted_jiffies;
- /** timeline->request entry for this request */
+ /* timeline->request entry for this request */
struct list_head link;
- /** Watchdog support fields. */
+ /* Watchdog support fields. */
struct i915_request_watchdog {
struct llist_node link;
struct hrtimer timer;
} watchdog;
- /**
- * @guc_fence_link: Requests may need to be stalled when using GuC
- * submission waiting for certain GuC operations to complete. If that is
- * the case, stalled requests are added to a per context list of stalled
- * requests. The below list_head is the link in that list. Protected by
+ /*
+ * Requests may need to be stalled when using GuC submission waiting for
+ * certain GuC operations to complete. If that is the case, stalled
+ * requests are added to a per context list of stalled requests. The
+ * below list_head is the link in that list. Protected by
* ce->guc_state.lock.
*/
struct list_head guc_fence_link;
- /**
- * @guc_prio: Priority level while the request is in flight. Differs
+ /*
+ * Priority level while the request is in flight. Differs
* from i915 scheduler priority. See comment above
* I915_SCHEDULER_CAP_STATIC_PRIORITY_MAP for details. Protected by
* ce->guc_active.lock. Two special values (GUC_PRIO_INIT and
@@ -348,8 +348,8 @@ struct i915_request {
#define GUC_PRIO_FINI 0xfe
u8 guc_prio;
- /**
- * @hucq: wait queue entry used to wait on the HuC load to complete
+ /*
+ * wait queue entry used to wait on the HuC load to complete
*/
wait_queue_entry_t hucq;
@@ -473,7 +473,7 @@ i915_request_has_initial_breadcrumb(const struct i915_request *rq)
return test_bit(I915_FENCE_FLAG_INITIAL_BREADCRUMB, &rq->fence.flags);
}
-/**
+/*
* Returns true if seq1 is later than seq2.
*/
static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
diff --git a/drivers/gpu/drm/i915/i915_scatterlist.h b/drivers/gpu/drm/i915/i915_scatterlist.h
index b0a1db44f895..5a10c1a31183 100644
--- a/drivers/gpu/drm/i915/i915_scatterlist.h
+++ b/drivers/gpu/drm/i915/i915_scatterlist.h
@@ -157,8 +157,7 @@ bool i915_sg_trim(struct sg_table *orig_st);
*/
struct i915_refct_sgt_ops {
/**
- * release() - Free the memory of the struct i915_refct_sgt
- * @ref: struct kref that is embedded in the struct i915_refct_sgt
+ * @release: Free the memory of the struct i915_refct_sgt
*/
void (*release)(struct kref *ref);
};
@@ -181,7 +180,7 @@ struct i915_refct_sgt {
/**
* i915_refct_sgt_put - Put a refcounted sg-table
- * @rsgt the struct i915_refct_sgt to put.
+ * @rsgt: the struct i915_refct_sgt to put.
*/
static inline void i915_refct_sgt_put(struct i915_refct_sgt *rsgt)
{
@@ -191,7 +190,7 @@ static inline void i915_refct_sgt_put(struct i915_refct_sgt *rsgt)
/**
* i915_refct_sgt_get - Get a refcounted sg-table
- * @rsgt the struct i915_refct_sgt to get.
+ * @rsgt: the struct i915_refct_sgt to get.
*/
static inline struct i915_refct_sgt *
i915_refct_sgt_get(struct i915_refct_sgt *rsgt)
@@ -203,7 +202,7 @@ i915_refct_sgt_get(struct i915_refct_sgt *rsgt)
/**
* __i915_refct_sgt_init - Initialize a refcounted sg-list with a custom
* operations structure
- * @rsgt The struct i915_refct_sgt to initialize.
+ * @rsgt: The struct i915_refct_sgt to initialize.
* @size: Size in bytes of the underlying memory buffer.
* @ops: A customized operations structure in case the refcounted sg-list
* is embedded into another structure.
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index 2c430c0c3bad..c61066498bf2 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -250,7 +250,7 @@ wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
}
}
-/**
+/*
* __wait_for - magic wait macro
*
* Macro to help avoid open coding check/wait/timeout patterns. Note that it's
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 20a44788999e..a814775a363d 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -315,7 +315,7 @@ struct i915_vma_work {
struct i915_vma_resource *vma_res;
struct drm_i915_gem_object *obj;
struct i915_sw_dma_fence_cb cb;
- enum i915_cache_level cache_level;
+ unsigned int pat_index;
unsigned int flags;
};
@@ -334,7 +334,7 @@ static void __vma_bind(struct dma_fence_work *work)
return;
vma_res->ops->bind_vma(vma_res->vm, &vw->stash,
- vma_res, vw->cache_level, vw->flags);
+ vma_res, vw->pat_index, vw->flags);
}
static void __vma_release(struct dma_fence_work *work)
@@ -426,7 +426,7 @@ i915_vma_resource_init_from_vma(struct i915_vma_resource *vma_res,
/**
* i915_vma_bind - Sets up PTEs for an VMA in it's corresponding address space.
* @vma: VMA to map
- * @cache_level: mapping cache level
+ * @pat_index: PAT index to set in PTE
* @flags: flags like global or local mapping
* @work: preallocated worker for allocating and binding the PTE
* @vma_res: pointer to a preallocated vma resource. The resource is either
@@ -437,7 +437,7 @@ i915_vma_resource_init_from_vma(struct i915_vma_resource *vma_res,
* Note that DMA addresses are also the only part of the SG table we care about.
*/
int i915_vma_bind(struct i915_vma *vma,
- enum i915_cache_level cache_level,
+ unsigned int pat_index,
u32 flags,
struct i915_vma_work *work,
struct i915_vma_resource *vma_res)
@@ -507,7 +507,7 @@ int i915_vma_bind(struct i915_vma *vma,
struct dma_fence *prev;
work->vma_res = i915_vma_resource_get(vma->resource);
- work->cache_level = cache_level;
+ work->pat_index = pat_index;
work->flags = bind_flags;
/*
@@ -537,7 +537,7 @@ int i915_vma_bind(struct i915_vma *vma,
return ret;
}
- vma->ops->bind_vma(vma->vm, NULL, vma->resource, cache_level,
+ vma->ops->bind_vma(vma->vm, NULL, vma->resource, pat_index,
bind_flags);
}
@@ -814,7 +814,7 @@ i915_vma_insert(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
color = 0;
if (i915_vm_has_cache_coloring(vma->vm))
- color = vma->obj->cache_level;
+ color = vma->obj->pat_index;
if (flags & PIN_OFFSET_FIXED) {
u64 offset = flags & PIN_OFFSET_MASK;
@@ -1518,7 +1518,7 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
GEM_BUG_ON(!vma->pages);
err = i915_vma_bind(vma,
- vma->obj->cache_level,
+ vma->obj->pat_index,
flags, work, vma_res);
vma_res = NULL;
if (err)
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index ed5c9d682a1b..9a9729205d5b 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -132,7 +132,7 @@ static inline u64 __i915_vma_size(const struct i915_vma *vma)
}
/**
- * i915_vma_offset - Obtain the va range size of the vma
+ * i915_vma_size - Obtain the va range size of the vma
* @vma: The vma
*
* GPU virtual address space may be allocated with padding. This
@@ -250,7 +250,7 @@ i915_vma_compare(struct i915_vma *vma,
struct i915_vma_work *i915_vma_work(void);
int i915_vma_bind(struct i915_vma *vma,
- enum i915_cache_level cache_level,
+ unsigned int pat_index,
u32 flags,
struct i915_vma_work *work,
struct i915_vma_resource *vma_res);
diff --git a/drivers/gpu/drm/i915/i915_vma_resource.h b/drivers/gpu/drm/i915/i915_vma_resource.h
index c1864e3d0b43..ca2b0f7f59bc 100644
--- a/drivers/gpu/drm/i915/i915_vma_resource.h
+++ b/drivers/gpu/drm/i915/i915_vma_resource.h
@@ -34,6 +34,27 @@ struct i915_page_sizes {
};
/**
+ * struct i915_vma_bindinfo - Information needed for async bind
+ * only but that can be dropped after the bind has taken place.
+ * Consider making this a separate argument to the bind_vma
+ * op, coalescing with other arguments like vm, stash, cache_level
+ * and flags.
+ * @pages: The pages sg-table.
+ * @page_sizes: Page sizes of the pages.
+ * @pages_rsgt: Refcounted sg-table when delayed object destruction
+ * is supported. May be NULL.
+ * @readonly: Whether the vma should be bound read-only.
+ * @lmem: Whether the vma points to lmem.
+ */
+struct i915_vma_bindinfo {
+ struct sg_table *pages;
+ struct i915_page_sizes page_sizes;
+ struct i915_refct_sgt *pages_rsgt;
+ bool readonly:1;
+ bool lmem:1;
+};
+
+/**
* struct i915_vma_resource - Snapshotted unbind information.
* @unbind_fence: Fence to mark unbinding complete. Note that this fence
* is not considered published until unbind is scheduled, and as such it
@@ -47,6 +68,7 @@ struct i915_page_sizes {
* @chain: Pointer to struct i915_sw_fence used to await dependencies.
* @rb: Rb node for the vm's pending unbind interval tree.
* @__subtree_last: Interval tree private member.
+ * @wakeref: wakeref.
* @vm: non-refcounted pointer to the vm. This is for internal use only and
* this member is cleared after vm_resource unbind.
* @mr: The memory region of the object pointed to by the vma.
@@ -88,25 +110,13 @@ struct i915_vma_resource {
intel_wakeref_t wakeref;
/**
- * struct i915_vma_bindinfo - Information needed for async bind
- * only but that can be dropped after the bind has taken place.
- * Consider making this a separate argument to the bind_vma
- * op, coalescing with other arguments like vm, stash, cache_level
- * and flags
- * @pages: The pages sg-table.
- * @page_sizes: Page sizes of the pages.
- * @pages_rsgt: Refcounted sg-table when delayed object destruction
- * is supported. May be NULL.
- * @readonly: Whether the vma should be bound read-only.
- * @lmem: Whether the vma points to lmem.
+ * @bi: Information needed for async bind only but that can be dropped
+ * after the bind has taken place.
+ *
+ * Consider making this a separate argument to the bind_vma op,
+ * coalescing with other arguments like vm, stash, cache_level and flags.
*/
- struct i915_vma_bindinfo {
- struct sg_table *pages;
- struct i915_page_sizes page_sizes;
- struct i915_refct_sgt *pages_rsgt;
- bool readonly:1;
- bool lmem:1;
- } bi;
+ struct i915_vma_bindinfo bi;
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
struct intel_memory_region *mr;
diff --git a/drivers/gpu/drm/i915/i915_vma_types.h b/drivers/gpu/drm/i915/i915_vma_types.h
index 77fda2244d16..64472b7f0e77 100644
--- a/drivers/gpu/drm/i915/i915_vma_types.h
+++ b/drivers/gpu/drm/i915/i915_vma_types.h
@@ -32,8 +32,6 @@
#include "gem/i915_gem_object_types.h"
-enum i915_cache_level;
-
/**
* DOC: Global GTT views
*
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index 080a4557899b..d737dd601a6e 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -35,6 +35,8 @@
#include "gt/intel_context_types.h"
#include "gt/intel_sseu.h"
+#include "gem/i915_gem_object_types.h"
+
struct drm_printer;
struct drm_i915_private;
struct intel_gt_definition;
@@ -308,6 +310,9 @@ struct intel_device_info {
* Initial runtime info. Do not access outside of i915_driver_create().
*/
const struct intel_runtime_info __runtime;
+
+ u32 cachelevel_to_pat[I915_MAX_CACHE_LEVEL];
+ u32 max_pat_index;
};
struct intel_driver_caps {
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp.c b/drivers/gpu/drm/i915/pxp/intel_pxp.c
index 9d4c7724e98e..bb2e15329f34 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp.c
@@ -12,7 +12,9 @@
#include "i915_drv.h"
#include "intel_pxp.h"
+#include "intel_pxp_gsccs.h"
#include "intel_pxp_irq.h"
+#include "intel_pxp_regs.h"
#include "intel_pxp_session.h"
#include "intel_pxp_tee.h"
#include "intel_pxp_types.h"
@@ -60,21 +62,22 @@ bool intel_pxp_is_active(const struct intel_pxp *pxp)
return IS_ENABLED(CONFIG_DRM_I915_PXP) && pxp && pxp->arb_is_valid;
}
-/* KCR register definitions */
-#define KCR_INIT _MMIO(0x320f0)
-/* Setting KCR Init bit is required after system boot */
-#define KCR_INIT_ALLOW_DISPLAY_ME_WRITES REG_BIT(14)
+static void kcr_pxp_set_status(const struct intel_pxp *pxp, bool enable)
+{
+ u32 val = enable ? _MASKED_BIT_ENABLE(KCR_INIT_ALLOW_DISPLAY_ME_WRITES) :
+ _MASKED_BIT_DISABLE(KCR_INIT_ALLOW_DISPLAY_ME_WRITES);
+
+ intel_uncore_write(pxp->ctrl_gt->uncore, KCR_INIT(pxp->kcr_base), val);
+}
-static void kcr_pxp_enable(struct intel_gt *gt)
+static void kcr_pxp_enable(const struct intel_pxp *pxp)
{
- intel_uncore_write(gt->uncore, KCR_INIT,
- _MASKED_BIT_ENABLE(KCR_INIT_ALLOW_DISPLAY_ME_WRITES));
+ kcr_pxp_set_status(pxp, true);
}
-static void kcr_pxp_disable(struct intel_gt *gt)
+static void kcr_pxp_disable(const struct intel_pxp *pxp)
{
- intel_uncore_write(gt->uncore, KCR_INIT,
- _MASKED_BIT_DISABLE(KCR_INIT_ALLOW_DISPLAY_ME_WRITES));
+ kcr_pxp_set_status(pxp, false);
}
static int create_vcs_context(struct intel_pxp *pxp)
@@ -126,13 +129,21 @@ static void pxp_init_full(struct intel_pxp *pxp)
init_completion(&pxp->termination);
complete_all(&pxp->termination);
+ if (pxp->ctrl_gt->type == GT_MEDIA)
+ pxp->kcr_base = MTL_KCR_BASE;
+ else
+ pxp->kcr_base = GEN12_KCR_BASE;
+
intel_pxp_session_management_init(pxp);
ret = create_vcs_context(pxp);
if (ret)
return;
- ret = intel_pxp_tee_component_init(pxp);
+ if (HAS_ENGINE(pxp->ctrl_gt, GSC0))
+ ret = intel_pxp_gsccs_init(pxp);
+ else
+ ret = intel_pxp_tee_component_init(pxp);
if (ret)
goto out_context;
@@ -165,9 +176,12 @@ static struct intel_gt *find_gt_for_required_protected_content(struct drm_i915_p
/*
* For MTL onwards, PXP-controller-GT needs to have a valid GSC engine
* on the media GT. NOTE: if we have a media-tile with a GSC-engine,
- * the VDBOX is already present so skip that check
+ * the VDBOX is already present so skip that check. We also have to
+ * ensure the GSC and HuC firmware are coming online.
*/
- if (i915->media_gt && HAS_ENGINE(i915->media_gt, GSC0))
+ if (i915->media_gt && HAS_ENGINE(i915->media_gt, GSC0) &&
+ intel_uc_fw_is_loadable(&i915->media_gt->uc.gsc.fw) &&
+ intel_uc_fw_is_loadable(&i915->media_gt->uc.huc.fw))
return i915->media_gt;
/*
@@ -207,7 +221,9 @@ int intel_pxp_init(struct drm_i915_private *i915)
if (!i915->pxp)
return -ENOMEM;
+ /* init common info used by all feature-mode usages */
i915->pxp->ctrl_gt = gt;
+ mutex_init(&i915->pxp->tee_mutex);
/*
* If full PXP feature is not available but HuC is loaded by GSC on pre-MTL
@@ -229,7 +245,10 @@ void intel_pxp_fini(struct drm_i915_private *i915)
i915->pxp->arb_is_valid = false;
- intel_pxp_tee_component_fini(i915->pxp);
+ if (HAS_ENGINE(i915->pxp->ctrl_gt, GSC0))
+ intel_pxp_gsccs_fini(i915->pxp);
+ else
+ intel_pxp_tee_component_fini(i915->pxp);
destroy_vcs_context(i915->pxp);
@@ -270,8 +289,18 @@ static bool pxp_component_bound(struct intel_pxp *pxp)
return bound;
}
+int intel_pxp_get_backend_timeout_ms(struct intel_pxp *pxp)
+{
+ if (HAS_ENGINE(pxp->ctrl_gt, GSC0))
+ return GSCFW_MAX_ROUND_TRIP_LATENCY_MS;
+ else
+ return 250;
+}
+
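+/*
+ * The termination wait is sized per backend: the GSC-CS path uses its own
+ * firmware round-trip budget instead of the 250 ms TEE component wait.
+ */
+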
static int __pxp_global_teardown_final(struct intel_pxp *pxp)
{
+ int timeout;
+
if (!pxp->arb_is_valid)
return 0;
/*
@@ -281,7 +310,9 @@ static int __pxp_global_teardown_final(struct intel_pxp *pxp)
intel_pxp_mark_termination_in_progress(pxp);
intel_pxp_terminate(pxp, false);
- if (!wait_for_completion_timeout(&pxp->termination, msecs_to_jiffies(250)))
+ timeout = intel_pxp_get_backend_timeout_ms(pxp);
+
+ if (!wait_for_completion_timeout(&pxp->termination, msecs_to_jiffies(timeout)))
return -ETIMEDOUT;
return 0;
@@ -289,6 +320,8 @@ static int __pxp_global_teardown_final(struct intel_pxp *pxp)
static int __pxp_global_teardown_restart(struct intel_pxp *pxp)
{
+ int timeout;
+
if (pxp->arb_is_valid)
return 0;
/*
@@ -297,7 +330,9 @@ static int __pxp_global_teardown_restart(struct intel_pxp *pxp)
*/
pxp_queue_termination(pxp);
- if (!wait_for_completion_timeout(&pxp->termination, msecs_to_jiffies(250)))
+ timeout = intel_pxp_get_backend_timeout_ms(pxp);
+
+ if (!wait_for_completion_timeout(&pxp->termination, msecs_to_jiffies(timeout)))
return -ETIMEDOUT;
return 0;
@@ -325,6 +360,26 @@ void intel_pxp_end(struct intel_pxp *pxp)
}
/*
+ * This helper is used both by intel_pxp_start and by the GET_PARAM
+ * IOCTL that user space calls, so the return values here must match
+ * the UAPI spec: 1 means PXP is ready, 2 means it is not yet ready
+ * and the caller may retry later (see intel_pxp_start below).
+ */
+int intel_pxp_get_readiness_status(struct intel_pxp *pxp)
+{
+ if (!intel_pxp_is_enabled(pxp))
+ return -ENODEV;
+
+ if (HAS_ENGINE(pxp->ctrl_gt, GSC0)) {
+ if (wait_for(intel_pxp_gsccs_is_ready_for_sessions(pxp), 250))
+ return 2;
+ } else {
+ if (wait_for(pxp_component_bound(pxp), 250))
+ return 2;
+ }
+ return 1;
+}
+
+/*
* the arb session is restarted from the irq work when we receive the
* termination completion interrupt
*/
@@ -332,11 +387,11 @@ int intel_pxp_start(struct intel_pxp *pxp)
{
int ret = 0;
- if (!intel_pxp_is_enabled(pxp))
- return -ENODEV;
-
- if (wait_for(pxp_component_bound(pxp), 250))
- return -ENXIO;
+ ret = intel_pxp_get_readiness_status(pxp);
+ if (ret < 0)
+ return ret;
+ else if (ret > 1)
+ return -EIO; /* per UAPI spec, user may retry later */
mutex_lock(&pxp->arb_mutex);
@@ -357,14 +412,13 @@ unlock:
void intel_pxp_init_hw(struct intel_pxp *pxp)
{
- kcr_pxp_enable(pxp->ctrl_gt);
+ kcr_pxp_enable(pxp);
intel_pxp_irq_enable(pxp);
}
void intel_pxp_fini_hw(struct intel_pxp *pxp)
{
- kcr_pxp_disable(pxp->ctrl_gt);
-
+ kcr_pxp_disable(pxp);
intel_pxp_irq_disable(pxp);
}
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp.h b/drivers/gpu/drm/i915/pxp/intel_pxp.h
index 3ded0890cd27..17254c3f1267 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp.h
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp.h
@@ -26,6 +26,8 @@ void intel_pxp_fini_hw(struct intel_pxp *pxp);
void intel_pxp_mark_termination_in_progress(struct intel_pxp *pxp);
void intel_pxp_tee_end_arb_fw_session(struct intel_pxp *pxp, u32 arb_session_id);
+int intel_pxp_get_readiness_status(struct intel_pxp *pxp);
+int intel_pxp_get_backend_timeout_ms(struct intel_pxp *pxp);
int intel_pxp_start(struct intel_pxp *pxp);
void intel_pxp_end(struct intel_pxp *pxp);
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_43.h b/drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_43.h
index ad67e3f49c20..09777719cd84 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_43.h
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_43.h
@@ -11,6 +11,10 @@
/* PXP-Cmd-Op definitions */
#define PXP43_CMDID_START_HUC_AUTH 0x0000003A
+#define PXP43_CMDID_INIT_SESSION 0x00000036
+
+/* PXP-Packet sizes for MTL's GSCCS-HECI instruction */
+#define PXP43_MAX_HECI_INOUT_SIZE (SZ_32K)
/* PXP-Input-Packet: HUC-Authentication */
struct pxp43_start_huc_auth_in {
@@ -23,4 +27,24 @@ struct pxp43_start_huc_auth_out {
struct pxp_cmd_header header;
} __packed;
+/* PXP-Input-Packet: Init PXP session */
+struct pxp43_create_arb_in {
+ struct pxp_cmd_header header;
+ /* header.stream_id fields for version 4.3 of Init PXP session: */
+ #define PXP43_INIT_SESSION_VALID BIT(0)
+ #define PXP43_INIT_SESSION_APPTYPE BIT(1)
+ #define PXP43_INIT_SESSION_APPID GENMASK(17, 2)
+ u32 protection_mode;
+ #define PXP43_INIT_SESSION_PROTECTION_ARB 0x2
+ u32 sub_session_id;
+ u32 init_flags;
+ u32 rsvd[12];
+} __packed;
+
+/* PXP-Output-Packet: Init PXP session */
+struct pxp43_create_arb_out {
+ struct pxp_cmd_header header;
+ u32 rsvd[8];
+} __packed;
+
#endif /* __INTEL_PXP_FW_INTERFACE_43_H__ */
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c b/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c
index 4b8e70caa3ad..e07c5b380789 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c
@@ -14,6 +14,7 @@
#include "intel_pxp.h"
#include "intel_pxp_debugfs.h"
+#include "intel_pxp_gsccs.h"
#include "intel_pxp_irq.h"
#include "intel_pxp_types.h"
@@ -45,6 +46,7 @@ static int pxp_terminate_set(void *data, u64 val)
{
struct intel_pxp *pxp = data;
struct intel_gt *gt = pxp->ctrl_gt;
+ int timeout_ms;
if (!intel_pxp_is_active(pxp))
return -ENODEV;
@@ -54,8 +56,10 @@ static int pxp_terminate_set(void *data, u64 val)
intel_pxp_irq_handler(pxp, GEN12_DISPLAY_PXP_STATE_TERMINATED_INTERRUPT);
spin_unlock_irq(gt->irq_lock);
+ timeout_ms = intel_pxp_get_backend_timeout_ms(pxp);
+
if (!wait_for_completion_timeout(&pxp->termination,
- msecs_to_jiffies(100)))
+ msecs_to_jiffies(timeout_ms)))
return -ETIMEDOUT;
return 0;
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.c b/drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.c
new file mode 100644
index 000000000000..8dc41de3f6f7
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.c
@@ -0,0 +1,444 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright(c) 2023 Intel Corporation.
+ */
+
+#include "gem/i915_gem_internal.h"
+
+#include "gt/intel_context.h"
+#include "gt/uc/intel_gsc_fw.h"
+#include "gt/uc/intel_gsc_uc_heci_cmd_submit.h"
+
+#include "i915_drv.h"
+#include "intel_pxp.h"
+#include "intel_pxp_cmd_interface_42.h"
+#include "intel_pxp_cmd_interface_43.h"
+#include "intel_pxp_gsccs.h"
+#include "intel_pxp_types.h"
+
+static bool
+is_fw_err_platform_config(u32 type)
+{
+ switch (type) {
+ case PXP_STATUS_ERROR_API_VERSION:
+ case PXP_STATUS_PLATFCONFIG_KF1_NOVERIF:
+ case PXP_STATUS_PLATFCONFIG_KF1_BAD:
+ return true;
+ default:
+ break;
+ }
+ return false;
+}
+
+static const char *
+fw_err_to_string(u32 type)
+{
+ switch (type) {
+ case PXP_STATUS_ERROR_API_VERSION:
+ return "ERR_API_VERSION";
+ case PXP_STATUS_NOT_READY:
+ return "ERR_NOT_READY";
+ case PXP_STATUS_PLATFCONFIG_KF1_NOVERIF:
+ case PXP_STATUS_PLATFCONFIG_KF1_BAD:
+ return "ERR_PLATFORM_CONFIG";
+ default:
+ break;
+ }
+ return NULL;
+}
+
+static int
+gsccs_send_message(struct intel_pxp *pxp,
+ void *msg_in, size_t msg_in_size,
+ void *msg_out, size_t msg_out_size_max,
+ size_t *msg_out_len,
+ u64 *gsc_msg_handle_retry)
+{
+ struct intel_gt *gt = pxp->ctrl_gt;
+ struct drm_i915_private *i915 = gt->i915;
+ struct gsccs_session_resources *exec_res = &pxp->gsccs_res;
+ struct intel_gsc_mtl_header *header = exec_res->pkt_vaddr;
+ struct intel_gsc_heci_non_priv_pkt pkt;
+ size_t max_msg_size;
+ u32 reply_size;
+ int ret;
+
+ if (!exec_res->ce)
+ return -ENODEV;
+
+ max_msg_size = PXP43_MAX_HECI_INOUT_SIZE - sizeof(*header);
+
+ if (msg_in_size > max_msg_size || msg_out_size_max > max_msg_size)
+ return -ENOSPC;
+
+ if (!exec_res->pkt_vma || !exec_res->bb_vma)
+ return -ENOENT;
+
+ GEM_BUG_ON(exec_res->pkt_vma->size < (2 * PXP43_MAX_HECI_INOUT_SIZE));
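+ /*
+ * pkt_vma holds both directions in a single buffer: the request packet
+ * at offset 0 and the reply at offset PXP43_MAX_HECI_INOUT_SIZE, hence
+ * the 2x size requirement above.
+ */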
+
+ mutex_lock(&pxp->tee_mutex);
+
+ memset(header, 0, sizeof(*header));
+ intel_gsc_uc_heci_cmd_emit_mtl_header(header, HECI_MEADDRESS_PXP,
+ msg_in_size + sizeof(*header),
+ exec_res->host_session_handle);
+
+ /* check if this is a host-session-handle cleanup call (empty packet) */
+ if (!msg_in && !msg_out)
+ header->flags |= GSC_INFLAG_MSG_CLEANUP;
+
+ /* copy the caller-provided gsc message handle if this is polling for a prior msg completion */
+ header->gsc_message_handle = *gsc_msg_handle_retry;
+
+ /* NOTE: zero size packets are used for session-cleanups */
+ if (msg_in && msg_in_size)
+ memcpy(exec_res->pkt_vaddr + sizeof(*header), msg_in, msg_in_size);
+
+ pkt.addr_in = i915_vma_offset(exec_res->pkt_vma);
+ pkt.size_in = header->message_size;
+ pkt.addr_out = pkt.addr_in + PXP43_MAX_HECI_INOUT_SIZE;
+ pkt.size_out = msg_out_size_max + sizeof(*header);
+ pkt.heci_pkt_vma = exec_res->pkt_vma;
+ pkt.bb_vma = exec_res->bb_vma;
+
+ /*
+ * Before submitting, clear out the validity marker at the reply offset.
+ * The reply lands at offset PXP43_MAX_HECI_INOUT_SIZE, so point header there.
+ */
+ header = exec_res->pkt_vaddr + PXP43_MAX_HECI_INOUT_SIZE;
+ header->validity_marker = 0;
+
+ ret = intel_gsc_uc_heci_cmd_submit_nonpriv(&gt->uc.gsc,
+ exec_res->ce, &pkt, exec_res->bb_vaddr,
+ GSC_REPLY_LATENCY_MS);
+ if (ret) {
+ drm_err(&i915->drm, "failed to send gsc PXP msg (%d)\n", ret);
+ goto unlock;
+ }
+
+ /* Response validity marker, status and busyness */
+ if (header->validity_marker != GSC_HECI_VALIDITY_MARKER) {
+ drm_err(&i915->drm, "gsc PXP reply with invalid validity marker\n");
+ ret = -EINVAL;
+ goto unlock;
+ }
+ if (header->status != 0) {
+ drm_dbg(&i915->drm, "gsc PXP reply status has error = 0x%08x\n",
+ header->status);
+ ret = -EINVAL;
+ goto unlock;
+ }
+ if (header->flags & GSC_OUTFLAG_MSG_PENDING) {
+ drm_dbg(&i915->drm, "gsc PXP reply is busy\n");
+ /*
+ * When the GSC firmware replies with the pending bit set, the requested
+ * operation has begun but not yet completed; the caller needs to
+ * re-request with the gsc_message_handle returned by the firmware
+ * until the pending bit is cleared.
+ */
+ *gsc_msg_handle_retry = header->gsc_message_handle;
+ ret = -EAGAIN;
+ goto unlock;
+ }
+
+ reply_size = header->message_size - sizeof(*header);
+ if (reply_size > msg_out_size_max) {
+ drm_warn(&i915->drm, "caller with insufficient PXP reply size %u (%ld)\n",
+ reply_size, msg_out_size_max);
+ reply_size = msg_out_size_max;
+ }
+
+ if (msg_out)
+ memcpy(msg_out, exec_res->pkt_vaddr + PXP43_MAX_HECI_INOUT_SIZE + sizeof(*header),
+ reply_size);
+ if (msg_out_len)
+ *msg_out_len = reply_size;
+
+unlock:
+ mutex_unlock(&pxp->tee_mutex);
+ return ret;
+}
+
+static int
+gsccs_send_message_retry_complete(struct intel_pxp *pxp,
+ void *msg_in, size_t msg_in_size,
+ void *msg_out, size_t msg_out_size_max,
+ size_t *msg_out_len)
+{
+ u64 gsc_session_retry = 0;
+ int ret, tries = 0;
+
+ /*
+ * Keep re-sending the request while the GSC firmware reports busy. Based
+ * on fw specs + sw overhead (and testing) we expect a worst-case
+ * pending-bit delay of GSC_PENDING_RETRY_MAXCOUNT x GSC_PENDING_RETRY_PAUSE_MS ms.
+ */
+ do {
+ ret = gsccs_send_message(pxp, msg_in, msg_in_size, msg_out, msg_out_size_max,
+ msg_out_len, &gsc_session_retry);
+ /* Only try again if gsc says so */
+ if (ret != -EAGAIN)
+ break;
+
+ msleep(GSC_PENDING_RETRY_PAUSE_MS);
+ } while (++tries < GSC_PENDING_RETRY_MAXCOUNT);
+
+ return ret;
+}
+
+bool intel_pxp_gsccs_is_ready_for_sessions(struct intel_pxp *pxp)
+{
+ /*
+ * GSC-fw loading, HuC-fw loading, HuC-fw authentication and
+ * GSC-proxy init flow (requiring an mei component driver)
+ * must all complete before we can start requesting PXP
+ * sessions. Checking for completion on HuC authentication and
+ * gsc-proxy init flow (the last set of dependencies that
+ * are out of order) will suffice.
+ */
+ if (intel_huc_is_authenticated(&pxp->ctrl_gt->uc.huc) &&
+ intel_gsc_uc_fw_proxy_init_done(&pxp->ctrl_gt->uc.gsc))
+ return true;
+
+ return false;
+}
+
+int intel_pxp_gsccs_create_session(struct intel_pxp *pxp,
+ int arb_session_id)
+{
+ struct drm_i915_private *i915 = pxp->ctrl_gt->i915;
+ struct pxp43_create_arb_in msg_in = {0};
+ struct pxp43_create_arb_out msg_out = {0};
+ int ret;
+
+ msg_in.header.api_version = PXP_APIVER(4, 3);
+ msg_in.header.command_id = PXP43_CMDID_INIT_SESSION;
+ msg_in.header.stream_id = (FIELD_PREP(PXP43_INIT_SESSION_APPID, arb_session_id) |
+ FIELD_PREP(PXP43_INIT_SESSION_VALID, 1) |
+ FIELD_PREP(PXP43_INIT_SESSION_APPTYPE, 0));
+ msg_in.header.buffer_len = sizeof(msg_in) - sizeof(msg_in.header);
+ msg_in.protection_mode = PXP43_INIT_SESSION_PROTECTION_ARB;
+
+ ret = gsccs_send_message_retry_complete(pxp,
+ &msg_in, sizeof(msg_in),
+ &msg_out, sizeof(msg_out), NULL);
+ if (ret) {
+ drm_err(&i915->drm, "Failed to init session %d, ret=[%d]\n", arb_session_id, ret);
+ } else if (msg_out.header.status != 0) {
+ if (is_fw_err_platform_config(msg_out.header.status)) {
+ drm_info_once(&i915->drm,
+ "PXP init-session-%d failed due to BIOS/SOC:0x%08x:%s\n",
+ arb_session_id, msg_out.header.status,
+ fw_err_to_string(msg_out.header.status));
+ } else {
+ drm_dbg(&i915->drm, "PXP init-session-%d failed 0x%08x:%st:\n",
+ arb_session_id, msg_out.header.status,
+ fw_err_to_string(msg_out.header.status));
+ drm_dbg(&i915->drm, " cmd-detail: ID=[0x%08x],API-Ver-[0x%08x]\n",
+ msg_in.header.command_id, msg_in.header.api_version);
+ }
+ }
+
+ return ret;
+}
+
+void intel_pxp_gsccs_end_arb_fw_session(struct intel_pxp *pxp, u32 session_id)
+{
+ struct drm_i915_private *i915 = pxp->ctrl_gt->i915;
+ struct pxp42_inv_stream_key_in msg_in = {0};
+ struct pxp42_inv_stream_key_out msg_out = {0};
+ int ret = 0;
+
+ /*
+ * Stream key invalidation reuses the same version 4.2 input/output
+ * command format, but the firmware requires 4.3 API interaction.
+ */
+ msg_in.header.api_version = PXP_APIVER(4, 3);
+ msg_in.header.command_id = PXP42_CMDID_INVALIDATE_STREAM_KEY;
+ msg_in.header.buffer_len = sizeof(msg_in) - sizeof(msg_in.header);
+
+ msg_in.header.stream_id = FIELD_PREP(PXP_CMDHDR_EXTDATA_SESSION_VALID, 1);
+ msg_in.header.stream_id |= FIELD_PREP(PXP_CMDHDR_EXTDATA_APP_TYPE, 0);
+ msg_in.header.stream_id |= FIELD_PREP(PXP_CMDHDR_EXTDATA_SESSION_ID, session_id);
+
+ ret = gsccs_send_message_retry_complete(pxp,
+ &msg_in, sizeof(msg_in),
+ &msg_out, sizeof(msg_out), NULL);
+ if (ret) {
+ drm_err(&i915->drm, "Failed to inv-stream-key-%u, ret=[%d]\n",
+ session_id, ret);
+ } else if (msg_out.header.status != 0) {
+ if (is_fw_err_platform_config(msg_out.header.status)) {
+ drm_info_once(&i915->drm,
+ "PXP inv-stream-key-%u failed due to BIOS/SOC :0x%08x:%s\n",
+ session_id, msg_out.header.status,
+ fw_err_to_string(msg_out.header.status));
+ } else {
+ drm_dbg(&i915->drm, "PXP inv-stream-key-%u failed 0x%08x:%s:\n",
+ session_id, msg_out.header.status,
+ fw_err_to_string(msg_out.header.status));
+ drm_dbg(&i915->drm, " cmd-detail: ID=[0x%08x],API-Ver-[0x%08x]\n",
+ msg_in.header.command_id, msg_in.header.api_version);
+ }
+ }
+}
+
+static void
+gsccs_cleanup_fw_host_session_handle(struct intel_pxp *pxp)
+{
+ struct drm_i915_private *i915 = pxp->ctrl_gt->i915;
+ int ret;
+
+ ret = gsccs_send_message_retry_complete(pxp, NULL, 0, NULL, 0, NULL);
+ if (ret)
+ drm_dbg(&i915->drm, "Failed to send gsccs msg host-session-cleanup: ret=[%d]\n",
+ ret);
+}
+
+static void
+gsccs_destroy_execution_resource(struct intel_pxp *pxp)
+{
+ struct gsccs_session_resources *exec_res = &pxp->gsccs_res;
+
+ if (exec_res->host_session_handle)
+ gsccs_cleanup_fw_host_session_handle(pxp);
+ if (exec_res->ce)
+ intel_context_put(exec_res->ce);
+ if (exec_res->bb_vma)
+ i915_vma_unpin_and_release(&exec_res->bb_vma, I915_VMA_RELEASE_MAP);
+ if (exec_res->pkt_vma)
+ i915_vma_unpin_and_release(&exec_res->pkt_vma, I915_VMA_RELEASE_MAP);
+
+ memset(exec_res, 0, sizeof(*exec_res));
+}
+
+static int
+gsccs_create_buffer(struct intel_gt *gt,
+ const char *bufname, size_t size,
+ struct i915_vma **vma, void **map)
+{
+ struct drm_i915_private *i915 = gt->i915;
+ struct drm_i915_gem_object *obj;
+ int err = 0;
+
+ obj = i915_gem_object_create_internal(i915, size);
+ if (IS_ERR(obj)) {
+ drm_err(&i915->drm, "Failed to allocate gsccs backend %s.\n", bufname);
+ err = PTR_ERR(obj);
+ goto out_none;
+ }
+
+ *vma = i915_vma_instance(obj, gt->vm, NULL);
+ if (IS_ERR(*vma)) {
+ drm_err(&i915->drm, "Failed to vma-instance gsccs backend %s.\n", bufname);
+ err = PTR_ERR(*vma);
+ goto out_put;
+ }
+
+ /* return a virtual pointer */
+ *map = i915_gem_object_pin_map_unlocked(obj, i915_coherent_map_type(i915, obj, true));
+ if (IS_ERR(*map)) {
+ drm_err(&i915->drm, "Failed to map gsccs backend %s.\n", bufname);
+ err = PTR_ERR(*map);
+ goto out_put;
+ }
+
+ /* all PXP session commands are treated as non-privileged */
+ err = i915_vma_pin(*vma, 0, 0, PIN_USER);
+ if (err) {
+ drm_err(&i915->drm, "Failed to vma-pin gsccs backend %s.\n", bufname);
+ goto out_unmap;
+ }
+
+ return 0;
+
+out_unmap:
+ i915_gem_object_unpin_map(obj);
+out_put:
+ i915_gem_object_put(obj);
+out_none:
+ *vma = NULL;
+ *map = NULL;
+
+ return err;
+}
+
+static int
+gsccs_allocate_execution_resource(struct intel_pxp *pxp)
+{
+ struct intel_gt *gt = pxp->ctrl_gt;
+ struct gsccs_session_resources *exec_res = &pxp->gsccs_res;
+ struct intel_engine_cs *engine = gt->engine[GSC0];
+ struct intel_context *ce;
+ int err = 0;
+
+ /*
+ * First, ensure the GSC engine is present.
+ * NOTE: the backend is only ever called with the correct gt.
+ */
+ if (!engine)
+ return -ENODEV;
+
+ /*
+ * Now, allocate, pin and map two objects, one for the heci message packet
+ * and another for the batch buffer we submit to the GSC engine (that includes the packet).
+ * NOTE: GSC-CS backend is currently only supported on MTL, so we allocate shmem.
+ */
+ err = gsccs_create_buffer(pxp->ctrl_gt, "Heci Packet",
+ 2 * PXP43_MAX_HECI_INOUT_SIZE,
+ &exec_res->pkt_vma, &exec_res->pkt_vaddr);
+ if (err)
+ return err;
+
+ err = gsccs_create_buffer(pxp->ctrl_gt, "Batch Buffer", PAGE_SIZE,
+ &exec_res->bb_vma, &exec_res->bb_vaddr);
+ if (err)
+ goto free_pkt;
+
+ /* Finally, create an intel_context to be used during the submission */
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ drm_err(&gt->i915->drm, "Failed creating gsccs backend ctx\n");
+ err = PTR_ERR(ce);
+ goto free_batch;
+ }
+
+ i915_vm_put(ce->vm);
+ ce->vm = i915_vm_get(pxp->ctrl_gt->vm);
+ exec_res->ce = ce;
+
+ /* initialize host-session-handle (for all i915-to-gsc-firmware PXP cmds) */
+ get_random_bytes(&exec_res->host_session_handle, sizeof(exec_res->host_session_handle));
+
+ return 0;
+
+free_batch:
+ i915_vma_unpin_and_release(&exec_res->bb_vma, I915_VMA_RELEASE_MAP);
+free_pkt:
+ i915_vma_unpin_and_release(&exec_res->pkt_vma, I915_VMA_RELEASE_MAP);
+ memset(exec_res, 0, sizeof(*exec_res));
+
+ return err;
+}
+
+void intel_pxp_gsccs_fini(struct intel_pxp *pxp)
+{
+ intel_wakeref_t wakeref;
+
+ gsccs_destroy_execution_resource(pxp);
+ with_intel_runtime_pm(&pxp->ctrl_gt->i915->runtime_pm, wakeref)
+ intel_pxp_fini_hw(pxp);
+}
+
+int intel_pxp_gsccs_init(struct intel_pxp *pxp)
+{
+ int ret;
+ intel_wakeref_t wakeref;
+
+ ret = gsccs_allocate_execution_resource(pxp);
+ if (!ret) {
+ with_intel_runtime_pm(&pxp->ctrl_gt->i915->runtime_pm, wakeref)
+ intel_pxp_init_hw(pxp);
+ }
+ return ret;
+}
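The session commands above assemble stream_id from (app/session id | valid | app-type) bitfields with FIELD_PREP(). A userspace approximation of that packing; the masks below are purely illustrative, not the real PXP43 field layout:

#include <stdint.h>
#include <stdio.h>

/* illustrative masks only -- not the actual PXP43 field layout */
#define SESSION_ID_MASK	0x000000ffu	/* bits 7:0 */
#define SESSION_VALID	0x00000100u	/* bit 8 */
#define APP_TYPE_MASK	0x00000600u	/* bits 10:9 */

static uint32_t field_prep(uint32_t mask, uint32_t val)
{
	/* shift val to the position of mask's lowest set bit, like FIELD_PREP() */
	return (val * (mask & -mask)) & mask;
}

int main(void)
{
	uint32_t stream_id = field_prep(SESSION_ID_MASK, 0) |
			     field_prep(SESSION_VALID, 1) |
			     field_prep(APP_TYPE_MASK, 0);

	printf("stream_id = 0x%08x\n", stream_id);
	return 0;
}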
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.h b/drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.h
new file mode 100644
index 000000000000..298ad38e6c7d
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright(c) 2022, Intel Corporation. All rights reserved.
+ */
+
+#ifndef __INTEL_PXP_GSCCS_H__
+#define __INTEL_PXP_GSCCS_H__
+
+#include <linux/types.h>
+
+struct intel_pxp;
+
+#define GSC_REPLY_LATENCY_MS 210
+/*
+ * Max FW response time is 200ms, to which we add 10ms to account for overhead
+ * such as request preparation, GuC submission to hw and pipeline completion times.
+ */
+#define GSC_PENDING_RETRY_MAXCOUNT 40
+#define GSC_PENDING_RETRY_PAUSE_MS 50
+#define GSCFW_MAX_ROUND_TRIP_LATENCY_MS (GSC_PENDING_RETRY_MAXCOUNT * GSC_PENDING_RETRY_PAUSE_MS)
+
+#ifdef CONFIG_DRM_I915_PXP
+void intel_pxp_gsccs_fini(struct intel_pxp *pxp);
+int intel_pxp_gsccs_init(struct intel_pxp *pxp);
+
+int intel_pxp_gsccs_create_session(struct intel_pxp *pxp, int arb_session_id);
+void intel_pxp_gsccs_end_arb_fw_session(struct intel_pxp *pxp, u32 arb_session_id);
+
+#else
+static inline void intel_pxp_gsccs_fini(struct intel_pxp *pxp)
+{
+}
+
+static inline int intel_pxp_gsccs_init(struct intel_pxp *pxp)
+{
+ return 0;
+}
+
+#endif
+
+bool intel_pxp_gsccs_is_ready_for_sessions(struct intel_pxp *pxp);
+
+#endif /*__INTEL_PXP_GSCCS_H__ */
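The timing budget encoded in this header is: 200 ms maximum firmware response plus 10 ms software overhead gives GSC_REPLY_LATENCY_MS = 210, while the pending-bit path allows 40 retries x 50 ms = 2000 ms worst case. A compile-time restatement of that arithmetic (a sketch; the values are copied from this header, the asserts are just illustrative):

/* mirrors the values from intel_pxp_gsccs.h */
#define GSC_REPLY_LATENCY_MS		210
#define GSC_PENDING_RETRY_MAXCOUNT	40
#define GSC_PENDING_RETRY_PAUSE_MS	50
#define GSCFW_MAX_ROUND_TRIP_LATENCY_MS \
	(GSC_PENDING_RETRY_MAXCOUNT * GSC_PENDING_RETRY_PAUSE_MS)

_Static_assert(GSCFW_MAX_ROUND_TRIP_LATENCY_MS == 2000,
	       "worst-case pending-bit budget is 2 seconds");
_Static_assert(GSC_REPLY_LATENCY_MS == 200 + 10,
	       "200 ms fw response plus 10 ms sw overhead");

int main(void) { return 0; }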
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_pm.c b/drivers/gpu/drm/i915/pxp/intel_pxp_pm.c
index 4f836b317424..1a04067f61fc 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_pm.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_pm.c
@@ -43,8 +43,9 @@ void intel_pxp_resume_complete(struct intel_pxp *pxp)
* The PXP component gets automatically unbound when we go into S3 and
* re-bound after we come out, so in that scenario we can defer the
* hw init to the bind call.
+ * NOTE: GSC-CS backend doesn't rely on components.
*/
- if (!pxp->pxp_component)
+ if (!HAS_ENGINE(pxp->ctrl_gt, GSC0) && !pxp->pxp_component)
return;
intel_pxp_init_hw(pxp);
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_regs.h b/drivers/gpu/drm/i915/pxp/intel_pxp_regs.h
new file mode 100644
index 000000000000..a9e7e6efa4c7
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_regs.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright(c) 2023, Intel Corporation. All rights reserved.
+ */
+
+#ifndef __INTEL_PXP_REGS_H__
+#define __INTEL_PXP_REGS_H__
+
+#include "i915_reg_defs.h"
+
+/* KCR subsystem register base address */
+#define GEN12_KCR_BASE 0x32000
+#define MTL_KCR_BASE 0x386000
+
+/* KCR enable/disable control */
+#define KCR_INIT(base) _MMIO((base) + 0xf0)
+
+/* Setting KCR Init bit is required after system boot */
+#define KCR_INIT_ALLOW_DISPLAY_ME_WRITES REG_BIT(14)
+
+/* KCR hwdrm session in play status 0-31 */
+#define KCR_SIP(base) _MMIO((base) + 0x260)
+
+/* PXP global terminate register for session termination */
+#define KCR_GLOBAL_TERMINATE(base) _MMIO((base) + 0xf8)
+
+#endif /* __INTEL_PXP_REGS_H__ */
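Note how the base-relative macros recover the absolute offsets that intel_pxp_session.c drops below: KCR_SIP(GEN12_KCR_BASE) = 0x32000 + 0x260 = 0x32260 (the old GEN12_KCR_SIP) and KCR_GLOBAL_TERMINATE(GEN12_KCR_BASE) = 0x320f8 (the old PXP_GLOBAL_TERMINATE), while MTL resolves against 0x386000. A quick standalone check of that arithmetic (illustrative only; the _MMIO() wrapping is omitted):

#include <stdio.h>

#define GEN12_KCR_BASE	0x32000
#define MTL_KCR_BASE	0x386000
#define KCR_SIP(base)			((base) + 0x260)
#define KCR_GLOBAL_TERMINATE(base)	((base) + 0xf8)

int main(void)
{
	/* 0x32260 matches the legacy GEN12_KCR_SIP definition */
	printf("gen12 SIP: 0x%x, terminate: 0x%x\n",
	       KCR_SIP(GEN12_KCR_BASE), KCR_GLOBAL_TERMINATE(GEN12_KCR_BASE));
	printf("mtl   SIP: 0x%x, terminate: 0x%x\n",
	       KCR_SIP(MTL_KCR_BASE), KCR_GLOBAL_TERMINATE(MTL_KCR_BASE));
	return 0;
}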
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_session.c b/drivers/gpu/drm/i915/pxp/intel_pxp_session.c
index 7de849cb6c47..0a3e66b0265e 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_session.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_session.c
@@ -7,17 +7,14 @@
#include "intel_pxp.h"
#include "intel_pxp_cmd.h"
+#include "intel_pxp_gsccs.h"
#include "intel_pxp_session.h"
#include "intel_pxp_tee.h"
#include "intel_pxp_types.h"
+#include "intel_pxp_regs.h"
#define ARB_SESSION I915_PROTECTED_CONTENT_DEFAULT_SESSION /* shorter define */
-#define GEN12_KCR_SIP _MMIO(0x32260) /* KCR hwdrm session in play 0-31 */
-
-/* PXP global terminate register for session termination */
-#define PXP_GLOBAL_TERMINATE _MMIO(0x320f8)
-
static bool intel_pxp_session_is_in_play(struct intel_pxp *pxp, u32 id)
{
struct intel_uncore *uncore = pxp->ctrl_gt->uncore;
@@ -26,7 +23,7 @@ static bool intel_pxp_session_is_in_play(struct intel_pxp *pxp, u32 id)
/* if we're suspended the session is considered off */
with_intel_runtime_pm_if_in_use(uncore->rpm, wakeref)
- sip = intel_uncore_read(uncore, GEN12_KCR_SIP);
+ sip = intel_uncore_read(uncore, KCR_SIP(pxp->kcr_base));
return sip & BIT(id);
}
@@ -44,10 +41,10 @@ static int pxp_wait_for_session_state(struct intel_pxp *pxp, u32 id, bool in_pla
return in_play ? -ENODEV : 0;
ret = intel_wait_for_register(uncore,
- GEN12_KCR_SIP,
+ KCR_SIP(pxp->kcr_base),
mask,
in_play ? mask : 0,
- 100);
+ 250);
intel_runtime_pm_put(uncore->rpm, wakeref);
@@ -66,7 +63,10 @@ static int pxp_create_arb_session(struct intel_pxp *pxp)
return -EEXIST;
}
- ret = intel_pxp_tee_cmd_create_arb_session(pxp, ARB_SESSION);
+ if (HAS_ENGINE(pxp->ctrl_gt, GSC0))
+ ret = intel_pxp_gsccs_create_session(pxp, ARB_SESSION);
+ else
+ ret = intel_pxp_tee_cmd_create_arb_session(pxp, ARB_SESSION);
if (ret) {
drm_err(&gt->i915->drm, "tee cmd for arb session creation failed\n");
return ret;
@@ -108,9 +108,12 @@ static int pxp_terminate_arb_session_and_global(struct intel_pxp *pxp)
return ret;
}
- intel_uncore_write(gt->uncore, PXP_GLOBAL_TERMINATE, 1);
+ intel_uncore_write(gt->uncore, KCR_GLOBAL_TERMINATE(pxp->kcr_base), 1);
- intel_pxp_tee_end_arb_fw_session(pxp, ARB_SESSION);
+ if (HAS_ENGINE(gt, GSC0))
+ intel_pxp_gsccs_end_arb_fw_session(pxp, ARB_SESSION);
+ else
+ intel_pxp_tee_end_arb_fw_session(pxp, ARB_SESSION);
return ret;
}
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c b/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c
index a2846b1dbbee..1ce07d7e8769 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c
@@ -284,8 +284,6 @@ int intel_pxp_tee_component_init(struct intel_pxp *pxp)
struct intel_gt *gt = pxp->ctrl_gt;
struct drm_i915_private *i915 = gt->i915;
- mutex_init(&pxp->tee_mutex);
-
ret = alloc_streaming_command(pxp);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_types.h b/drivers/gpu/drm/i915/pxp/intel_pxp_types.h
index 007de49e1ea4..1a8765866b8b 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_types.h
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_types.h
@@ -27,13 +27,35 @@ struct intel_pxp {
struct intel_gt *ctrl_gt;
/**
+ * @kcr_base: base mmio offset for the KCR engine, which differs between legacy
+ * platforms and newer platforms where the KCR is inside the media-tile.
+ */
+ u32 kcr_base;
+
+ /**
+ * @gsccs_res: resources for request submission for platforms that have a GSC engine.
+ */
+ struct gsccs_session_resources {
+ u64 host_session_handle; /* used by firmware to link commands to sessions */
+ struct intel_context *ce; /* context for gsc command submission */
+
+ struct i915_vma *pkt_vma; /* GSC FW cmd packet vma */
+ void *pkt_vaddr; /* GSC FW cmd packet virt pointer */
+
+ struct i915_vma *bb_vma; /* HECI_PKT batch buffer vma */
+ void *bb_vaddr; /* HECI_PKT batch buffer virt pointer */
+ } gsccs_res;
+
+ /**
* @pxp_component: i915_pxp_component struct of the bound mei_pxp
* module. Only set and cleared inside component bind/unbind functions,
* which are protected by &tee_mutex.
*/
struct i915_pxp_component *pxp_component;
- /* @dev_link: Enforce module relationship for power management ordering. */
+ /**
+ * @dev_link: Enforce module relationship for power management ordering.
+ */
struct device_link *dev_link;
/**
* @pxp_component_added: track if the pxp component has been added.
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c
index d91d0ade8abd..61da4ed9d521 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem.c
@@ -57,7 +57,10 @@ static void trash_stolen(struct drm_i915_private *i915)
u32 __iomem *s;
int x;
- ggtt->vm.insert_page(&ggtt->vm, dma, slot, I915_CACHE_NONE, 0);
+ ggtt->vm.insert_page(&ggtt->vm, dma, slot,
+ i915_gem_get_pat_index(i915,
+ I915_CACHE_NONE),
+ 0);
s = io_mapping_map_atomic_wc(&ggtt->iomap, slot);
for (x = 0; x < PAGE_SIZE / sizeof(u32); x++) {
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
index 37068542aafe..f8fe3681c3dc 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
@@ -27,6 +27,7 @@
#include "gem/selftests/igt_gem_utils.h"
#include "gem/selftests/mock_context.h"
#include "gt/intel_gt.h"
+#include "gt/intel_gt_print.h"
#include "i915_selftest.h"
@@ -245,7 +246,7 @@ static int igt_evict_for_cache_color(void *arg)
struct drm_mm_node target = {
.start = I915_GTT_PAGE_SIZE * 2,
.size = I915_GTT_PAGE_SIZE,
- .color = I915_CACHE_LLC,
+ .color = i915_gem_get_pat_index(gt->i915, I915_CACHE_LLC),
};
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
@@ -308,7 +309,7 @@ static int igt_evict_for_cache_color(void *arg)
/* Attempt to remove the first *pinned* vma, by removing the (empty)
* neighbour -- this should fail.
*/
- target.color = I915_CACHE_L3_LLC;
+ target.color = i915_gem_get_pat_index(gt->i915, I915_CACHE_L3_LLC);
mutex_lock(&ggtt->vm.mutex);
err = i915_gem_evict_for_node(&ggtt->vm, NULL, &target, 0);
@@ -507,7 +508,8 @@ static int igt_evict_contexts(void *arg)
}
err = intel_gt_wait_for_idle(engine->gt, HZ * 3);
if (err) {
- pr_err("Failed to idle GT (on %s)", engine->name);
+ gt_err(engine->gt, "Failed to idle GT (on %s)",
+ engine->name);
break;
}
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 154801f1c468..36940ef10108 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -135,7 +135,7 @@ fake_dma_object(struct drm_i915_private *i915, u64 size)
obj->write_domain = I915_GEM_DOMAIN_CPU;
obj->read_domains = I915_GEM_DOMAIN_CPU;
- obj->cache_level = I915_CACHE_NONE;
+ obj->pat_index = i915_gem_get_pat_index(i915, I915_CACHE_NONE);
/* Preallocate the "backing storage" */
if (i915_gem_object_pin_pages_unlocked(obj))
@@ -359,7 +359,9 @@ alloc_vm_end:
with_intel_runtime_pm(vm->gt->uncore->rpm, wakeref)
vm->insert_entries(vm, mock_vma_res,
- I915_CACHE_NONE, 0);
+ i915_gem_get_pat_index(vm->i915,
+ I915_CACHE_NONE),
+ 0);
}
count = n;
@@ -1377,7 +1379,10 @@ static int igt_ggtt_page(void *arg)
ggtt->vm.insert_page(&ggtt->vm,
i915_gem_object_get_dma_address(obj, 0),
- offset, I915_CACHE_NONE, 0);
+ offset,
+ i915_gem_get_pat_index(i915,
+ I915_CACHE_NONE),
+ 0);
}
order = i915_random_order(count, &prng);
@@ -1510,7 +1515,7 @@ static int reserve_gtt_with_resource(struct i915_vma *vma, u64 offset)
mutex_lock(&vm->mutex);
err = i915_gem_gtt_reserve(vm, NULL, &vma->node, obj->base.size,
offset,
- obj->cache_level,
+ obj->pat_index,
0);
if (!err) {
i915_vma_resource_init_from_vma(vma_res, vma);
@@ -1690,7 +1695,7 @@ static int insert_gtt_with_resource(struct i915_vma *vma)
mutex_lock(&vm->mutex);
err = i915_gem_gtt_insert(vm, NULL, &vma->node, obj->base.size, 0,
- obj->cache_level, 0, vm->total, 0);
+ obj->pat_index, 0, vm->total, 0);
if (!err) {
i915_vma_resource_init_from_vma(vma_res, vma);
vma->resource = vma_res;
diff --git a/drivers/gpu/drm/i915/selftests/igt_live_test.c b/drivers/gpu/drm/i915/selftests/igt_live_test.c
index 72b58b66692a..4ddc6d902752 100644
--- a/drivers/gpu/drm/i915/selftests/igt_live_test.c
+++ b/drivers/gpu/drm/i915/selftests/igt_live_test.c
@@ -6,6 +6,7 @@
#include "i915_drv.h"
#include "gt/intel_gt.h"
+#include "gt/intel_gt_print.h"
#include "../i915_selftest.h"
#include "igt_flush_test.h"
@@ -16,27 +17,31 @@ int igt_live_test_begin(struct igt_live_test *t,
const char *func,
const char *name)
{
- struct intel_gt *gt = to_gt(i915);
struct intel_engine_cs *engine;
enum intel_engine_id id;
+ struct intel_gt *gt;
+ unsigned int i;
int err;
t->i915 = i915;
t->func = func;
t->name = name;
- err = intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
- if (err) {
- pr_err("%s(%s): failed to idle before, with err=%d!",
- func, name, err);
- return err;
- }
+ for_each_gt(gt, i915, i) {
- t->reset_global = i915_reset_count(&i915->gpu_error);
+ err = intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
+ if (err) {
+ gt_err(gt, "%s(%s): GT failed to idle before, with err=%d!",
+ func, name, err);
+ return err;
+ }
- for_each_engine(engine, gt, id)
- t->reset_engine[id] =
+ for_each_engine(engine, gt, id)
+ t->reset_engine[id] =
i915_reset_engine_count(&i915->gpu_error, engine);
+ }
+
+ t->reset_global = i915_reset_count(&i915->gpu_error);
return 0;
}
@@ -46,6 +51,8 @@ int igt_live_test_end(struct igt_live_test *t)
struct drm_i915_private *i915 = t->i915;
struct intel_engine_cs *engine;
enum intel_engine_id id;
+ struct intel_gt *gt;
+ unsigned int i;
if (igt_flush_test(i915))
return -EIO;
@@ -57,16 +64,18 @@ int igt_live_test_end(struct igt_live_test *t)
return -EIO;
}
- for_each_engine(engine, to_gt(i915), id) {
- if (t->reset_engine[id] ==
- i915_reset_engine_count(&i915->gpu_error, engine))
- continue;
+ for_each_gt(gt, i915, i) {
+ for_each_engine(engine, gt, id) {
+ if (t->reset_engine[id] ==
+ i915_reset_engine_count(&i915->gpu_error, engine))
+ continue;
- pr_err("%s(%s): engine '%s' was reset %d times!\n",
- t->func, t->name, engine->name,
- i915_reset_engine_count(&i915->gpu_error, engine) -
- t->reset_engine[id]);
- return -EIO;
+ gt_err(gt, "%s(%s): engine '%s' was reset %d times!\n",
+ t->func, t->name, engine->name,
+ i915_reset_engine_count(&i915->gpu_error, engine) -
+ t->reset_engine[id]);
+ return -EIO;
+ }
}
return 0;
diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
index 3b18e5905c86..d985d9bae2e8 100644
--- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
@@ -1070,7 +1070,9 @@ static int igt_lmem_write_cpu(void *arg)
/* Put the pages into a known state -- from the gpu for added fun */
intel_engine_pm_get(engine);
err = intel_context_migrate_clear(engine->gt->migrate.context, NULL,
- obj->mm.pages->sgl, I915_CACHE_NONE,
+ obj->mm.pages->sgl,
+ i915_gem_get_pat_index(i915,
+ I915_CACHE_NONE),
true, 0xdeadbeaf, &rq);
if (rq) {
dma_resv_add_fence(obj->base.resv, &rq->fence,
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index f6a7c0bd2955..0eda8b4ee17f 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -123,7 +123,9 @@ struct drm_i915_private *mock_gem_device(void)
static struct dev_iommu fake_iommu = { .priv = (void *)-1 };
#endif
struct drm_i915_private *i915;
+ struct intel_device_info *i915_info;
struct pci_dev *pdev;
+ unsigned int i;
int ret;
pdev = kzalloc(sizeof(*pdev), GFP_KERNEL);
@@ -180,6 +182,13 @@ struct drm_i915_private *mock_gem_device(void)
I915_GTT_PAGE_SIZE_2M;
RUNTIME_INFO(i915)->memory_regions = REGION_SMEM;
+
+ /* simply use legacy cache level for mock device */
+ i915_info = (struct intel_device_info *)INTEL_INFO(i915);
+ i915_info->max_pat_index = 3;
+ for (i = 0; i < I915_MAX_CACHE_LEVEL; i++)
+ i915_info->cachelevel_to_pat[i] = i;
+
intel_memory_regions_hw_probe(i915);
spin_lock_init(&i915->gpu_error.lock);
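The mock device sidesteps real PAT programming by making cachelevel_to_pat[] an identity map, so each legacy cache level resolves to a PAT index of the same value. A toy version of that lookup; the enum names here are made up for illustration:

#include <stdio.h>

enum cache_level { CACHE_NONE, CACHE_LLC, CACHE_L3_LLC, CACHE_WT, MAX_CACHE_LEVEL };

int main(void)
{
	unsigned int cachelevel_to_pat[MAX_CACHE_LEVEL];
	unsigned int i;

	/* identity mapping, as the mock device sets up */
	for (i = 0; i < MAX_CACHE_LEVEL; i++)
		cachelevel_to_pat[i] = i;

	printf("CACHE_NONE -> PAT %u\n", cachelevel_to_pat[CACHE_NONE]);
	return 0;
}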
diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.c b/drivers/gpu/drm/i915/selftests/mock_gtt.c
index ece97e4faacb..a516c0aa88fd 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gtt.c
@@ -27,21 +27,21 @@
static void mock_insert_page(struct i915_address_space *vm,
dma_addr_t addr,
u64 offset,
- enum i915_cache_level level,
+ unsigned int pat_index,
u32 flags)
{
}
static void mock_insert_entries(struct i915_address_space *vm,
struct i915_vma_resource *vma_res,
- enum i915_cache_level level, u32 flags)
+ unsigned int pat_index, u32 flags)
{
}
static void mock_bind_ppgtt(struct i915_address_space *vm,
struct i915_vm_pt_stash *stash,
struct i915_vma_resource *vma_res,
- enum i915_cache_level cache_level,
+ unsigned int pat_index,
u32 flags)
{
GEM_BUG_ON(flags & I915_VMA_GLOBAL_BIND);
@@ -94,7 +94,7 @@ struct i915_ppgtt *mock_ppgtt(struct drm_i915_private *i915, const char *name)
static void mock_bind_ggtt(struct i915_address_space *vm,
struct i915_vm_pt_stash *stash,
struct i915_vma_resource *vma_res,
- enum i915_cache_level cache_level,
+ unsigned int pat_index,
u32 flags)
{
}
diff --git a/drivers/gpu/drm/nouveau/include/nvif/if0012.h b/drivers/gpu/drm/nouveau/include/nvif/if0012.h
index eb99d84eb844..16d4ad5023a3 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/if0012.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/if0012.h
@@ -2,6 +2,8 @@
#ifndef __NVIF_IF0012_H__
#define __NVIF_IF0012_H__
+#include <drm/display/drm_dp.h>
+
union nvif_outp_args {
struct nvif_outp_v0 {
__u8 version;
@@ -63,7 +65,7 @@ union nvif_outp_acquire_args {
__u8 hda;
__u8 mst;
__u8 pad04[4];
- __u8 dpcd[16];
+ __u8 dpcd[DP_RECEIVER_CAP_SIZE];
} dp;
};
} v0;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h
index b7631c1ab242..4e7f873f66e2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h
@@ -3,6 +3,7 @@
#define __NVKM_DISP_OUTP_H__
#include "priv.h"
+#include <drm/display/drm_dp.h>
#include <subdev/bios.h>
#include <subdev/bios/dcb.h>
#include <subdev/bios/dp.h>
@@ -42,7 +43,7 @@ struct nvkm_outp {
bool aux_pwr_pu;
u8 lttpr[6];
u8 lttprs;
- u8 dpcd[16];
+ u8 dpcd[DP_RECEIVER_CAP_SIZE];
struct {
int dpcd; /* -1, or index into SUPPORTED_LINK_RATES table */
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/uoutp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/uoutp.c
index 4f0ca709c85a..fc283a4a1522 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/uoutp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/uoutp.c
@@ -146,7 +146,7 @@ nvkm_uoutp_mthd_release(struct nvkm_outp *outp, void *argv, u32 argc)
}
static int
-nvkm_uoutp_mthd_acquire_dp(struct nvkm_outp *outp, u8 dpcd[16],
+nvkm_uoutp_mthd_acquire_dp(struct nvkm_outp *outp, u8 dpcd[DP_RECEIVER_CAP_SIZE],
u8 link_nr, u8 link_bw, bool hda, bool mst)
{
int ret;
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index b352227a6055..394010a60821 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -313,7 +313,7 @@ static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched)
*/
void drm_sched_fault(struct drm_gpu_scheduler *sched)
{
- if (sched->ready)
+ if (sched->timeout_wq)
mod_delayed_work(sched->timeout_wq, &sched->work_tdr, 0);
}
EXPORT_SYMBOL(drm_sched_fault);
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
index ba2f6a4f8c16..7b177b9fbb09 100644
--- a/drivers/hwmon/k10temp.c
+++ b/drivers/hwmon/k10temp.c
@@ -507,6 +507,7 @@ static const struct pci_device_id k10temp_id_table[] = {
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_19H_M50H_DF_F3) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_19H_M60H_DF_F3) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_19H_M70H_DF_F3) },
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_19H_M78H_DF_F3) },
{ PCI_VDEVICE(HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) },
{}
};
diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c
index 75c92e282fa2..19a4a085f73a 100644
--- a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c
+++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c
@@ -1035,7 +1035,6 @@ static int mdp_comp_sub_create(struct mdp_dev *mdp)
{
struct device *dev = &mdp->pdev->dev;
struct device_node *node, *parent;
- const struct mtk_mdp_driver_data *data = mdp->mdp_data;
parent = dev->of_node->parent;
@@ -1045,7 +1044,7 @@ static int mdp_comp_sub_create(struct mdp_dev *mdp)
int id, alias_id;
struct mdp_comp *comp;
- of_id = of_match_node(data->mdp_sub_comp_dt_ids, node);
+ of_id = of_match_node(mdp->mdp_data->mdp_sub_comp_dt_ids, node);
if (!of_id)
continue;
if (!of_device_is_available(node)) {
diff --git a/drivers/media/platform/nxp/imx8-isi/imx8-isi-core.c b/drivers/media/platform/nxp/imx8-isi/imx8-isi-core.c
index 238521622b75..253e77189b69 100644
--- a/drivers/media/platform/nxp/imx8-isi/imx8-isi-core.c
+++ b/drivers/media/platform/nxp/imx8-isi/imx8-isi-core.c
@@ -378,8 +378,8 @@ static int mxc_isi_runtime_resume(struct device *dev)
}
static const struct dev_pm_ops mxc_isi_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(mxc_isi_pm_suspend, mxc_isi_pm_resume)
- SET_RUNTIME_PM_OPS(mxc_isi_runtime_suspend, mxc_isi_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(mxc_isi_pm_suspend, mxc_isi_pm_resume)
+ RUNTIME_PM_OPS(mxc_isi_runtime_suspend, mxc_isi_runtime_resume, NULL)
};
/* -----------------------------------------------------------------------------
@@ -528,7 +528,7 @@ static struct platform_driver mxc_isi_driver = {
.driver = {
.of_match_table = mxc_isi_of_match,
.name = MXC_ISI_DRIVER_NAME,
- .pm = &mxc_isi_pm_ops,
+ .pm = pm_ptr(&mxc_isi_pm_ops),
}
};
module_platform_driver(mxc_isi_driver);
diff --git a/drivers/media/platform/nxp/imx8-isi/imx8-isi-hw.c b/drivers/media/platform/nxp/imx8-isi/imx8-isi-hw.c
index db538f3d88ec..19e80b95ffea 100644
--- a/drivers/media/platform/nxp/imx8-isi/imx8-isi-hw.c
+++ b/drivers/media/platform/nxp/imx8-isi/imx8-isi-hw.c
@@ -29,11 +29,10 @@ static inline void mxc_isi_write(struct mxc_isi_pipe *pipe, u32 reg, u32 val)
void mxc_isi_channel_set_inbuf(struct mxc_isi_pipe *pipe, dma_addr_t dma_addr)
{
- mxc_isi_write(pipe, CHNL_IN_BUF_ADDR, dma_addr);
-#if CONFIG_ARCH_DMA_ADDR_T_64BIT
+ mxc_isi_write(pipe, CHNL_IN_BUF_ADDR, lower_32_bits(dma_addr));
if (pipe->isi->pdata->has_36bit_dma)
- mxc_isi_write(pipe, CHNL_IN_BUF_XTND_ADDR, dma_addr >> 32);
-#endif
+ mxc_isi_write(pipe, CHNL_IN_BUF_XTND_ADDR,
+ upper_32_bits(dma_addr));
}
void mxc_isi_channel_set_outbuf(struct mxc_isi_pipe *pipe,
@@ -45,34 +44,36 @@ void mxc_isi_channel_set_outbuf(struct mxc_isi_pipe *pipe,
val = mxc_isi_read(pipe, CHNL_OUT_BUF_CTRL);
if (buf_id == MXC_ISI_BUF1) {
- mxc_isi_write(pipe, CHNL_OUT_BUF1_ADDR_Y, dma_addrs[0]);
- mxc_isi_write(pipe, CHNL_OUT_BUF1_ADDR_U, dma_addrs[1]);
- mxc_isi_write(pipe, CHNL_OUT_BUF1_ADDR_V, dma_addrs[2]);
-#if CONFIG_ARCH_DMA_ADDR_T_64BIT
+ mxc_isi_write(pipe, CHNL_OUT_BUF1_ADDR_Y,
+ lower_32_bits(dma_addrs[0]));
+ mxc_isi_write(pipe, CHNL_OUT_BUF1_ADDR_U,
+ lower_32_bits(dma_addrs[1]));
+ mxc_isi_write(pipe, CHNL_OUT_BUF1_ADDR_V,
+ lower_32_bits(dma_addrs[2]));
if (pipe->isi->pdata->has_36bit_dma) {
mxc_isi_write(pipe, CHNL_Y_BUF1_XTND_ADDR,
- dma_addrs[0] >> 32);
+ upper_32_bits(dma_addrs[0]));
mxc_isi_write(pipe, CHNL_U_BUF1_XTND_ADDR,
- dma_addrs[1] >> 32);
+ upper_32_bits(dma_addrs[1]));
mxc_isi_write(pipe, CHNL_V_BUF1_XTND_ADDR,
- dma_addrs[2] >> 32);
+ upper_32_bits(dma_addrs[2]));
}
-#endif
val ^= CHNL_OUT_BUF_CTRL_LOAD_BUF1_ADDR;
} else {
- mxc_isi_write(pipe, CHNL_OUT_BUF2_ADDR_Y, dma_addrs[0]);
- mxc_isi_write(pipe, CHNL_OUT_BUF2_ADDR_U, dma_addrs[1]);
- mxc_isi_write(pipe, CHNL_OUT_BUF2_ADDR_V, dma_addrs[2]);
-#if CONFIG_ARCH_DMA_ADDR_T_64BIT
+ mxc_isi_write(pipe, CHNL_OUT_BUF2_ADDR_Y,
+ lower_32_bits(dma_addrs[0]));
+ mxc_isi_write(pipe, CHNL_OUT_BUF2_ADDR_U,
+ lower_32_bits(dma_addrs[1]));
+ mxc_isi_write(pipe, CHNL_OUT_BUF2_ADDR_V,
+ lower_32_bits(dma_addrs[2]));
if (pipe->isi->pdata->has_36bit_dma) {
mxc_isi_write(pipe, CHNL_Y_BUF2_XTND_ADDR,
- dma_addrs[0] >> 32);
+ upper_32_bits(dma_addrs[0]));
mxc_isi_write(pipe, CHNL_U_BUF2_XTND_ADDR,
- dma_addrs[1] >> 32);
+ upper_32_bits(dma_addrs[1]));
mxc_isi_write(pipe, CHNL_V_BUF2_XTND_ADDR,
- dma_addrs[2] >> 32);
+ upper_32_bits(dma_addrs[2]));
}
-#endif
val ^= CHNL_OUT_BUF_CTRL_LOAD_BUF2_ADDR;
}
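The rewritten hunks all follow one pattern: program the low 32 bits of the DMA address, then write the extended-address register with the high bits only when 36-bit DMA is supported. A simplified userspace sketch of the lower_32_bits()/upper_32_bits() split (helper definitions approximated here):

#include <inttypes.h>
#include <stdio.h>

/* simplified userspace equivalents of the kernel helpers */
#define lower_32_bits(n) ((uint32_t)((n) & 0xffffffffu))
#define upper_32_bits(n) ((uint32_t)(((uint64_t)(n)) >> 32))

int main(void)
{
	uint64_t dma_addr = 0x9abcdef12ULL;	/* a 36-bit address */

	printf("low 32 bits: 0x%08" PRIx32 "\n", lower_32_bits(dma_addr));
	printf("high bits:   0x%" PRIx32 "\n", upper_32_bits(dma_addr));
	return 0;
}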
diff --git a/drivers/media/platform/renesas/rcar-vin/rcar-dma.c b/drivers/media/platform/renesas/rcar-vin/rcar-dma.c
index 98bfd445a649..2a77353f10b5 100644
--- a/drivers/media/platform/renesas/rcar-vin/rcar-dma.c
+++ b/drivers/media/platform/renesas/rcar-vin/rcar-dma.c
@@ -728,11 +728,9 @@ static int rvin_setup(struct rvin_dev *vin)
case V4L2_FIELD_SEQ_TB:
case V4L2_FIELD_SEQ_BT:
case V4L2_FIELD_NONE:
- vnmc = VNMC_IM_ODD_EVEN;
- progressive = true;
- break;
case V4L2_FIELD_ALTERNATE:
vnmc = VNMC_IM_ODD_EVEN;
+ progressive = true;
break;
default:
vnmc = VNMC_IM_ODD;
@@ -1312,12 +1310,23 @@ static int rvin_mc_validate_format(struct rvin_dev *vin, struct v4l2_subdev *sd,
}
if (rvin_scaler_needed(vin)) {
+ /* Gen3 can't scale NV12 */
+ if (vin->info->model == RCAR_GEN3 &&
+ vin->format.pixelformat == V4L2_PIX_FMT_NV12)
+ return -EPIPE;
+
if (!vin->scaler)
return -EPIPE;
} else {
- if (fmt.format.width != vin->format.width ||
- fmt.format.height != vin->format.height)
- return -EPIPE;
+ if (vin->format.pixelformat == V4L2_PIX_FMT_NV12) {
+ if (ALIGN(fmt.format.width, 32) != vin->format.width ||
+ ALIGN(fmt.format.height, 32) != vin->format.height)
+ return -EPIPE;
+ } else {
+ if (fmt.format.width != vin->format.width ||
+ fmt.format.height != vin->format.height)
+ return -EPIPE;
+ }
}
if (fmt.format.code != vin->mbus_code)
diff --git a/drivers/misc/mei/Kconfig b/drivers/misc/mei/Kconfig
index d21486d69df2..37db142de413 100644
--- a/drivers/misc/mei/Kconfig
+++ b/drivers/misc/mei/Kconfig
@@ -62,4 +62,4 @@ config INTEL_MEI_GSC
source "drivers/misc/mei/hdcp/Kconfig"
source "drivers/misc/mei/pxp/Kconfig"
-
+source "drivers/misc/mei/gsc_proxy/Kconfig"
diff --git a/drivers/misc/mei/Makefile b/drivers/misc/mei/Makefile
index fb740d754900..14aee253ae48 100644
--- a/drivers/misc/mei/Makefile
+++ b/drivers/misc/mei/Makefile
@@ -30,3 +30,4 @@ CFLAGS_mei-trace.o = -I$(src)
obj-$(CONFIG_INTEL_MEI_HDCP) += hdcp/
obj-$(CONFIG_INTEL_MEI_PXP) += pxp/
+obj-$(CONFIG_INTEL_MEI_GSC_PROXY) += gsc_proxy/
diff --git a/drivers/misc/mei/gsc_proxy/Kconfig b/drivers/misc/mei/gsc_proxy/Kconfig
new file mode 100644
index 000000000000..5f68d9f3d691
--- /dev/null
+++ b/drivers/misc/mei/gsc_proxy/Kconfig
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2022-2023, Intel Corporation. All rights reserved.
+#
+config INTEL_MEI_GSC_PROXY
+ tristate "Intel GSC Proxy services of ME Interface"
+ select INTEL_MEI_ME
+ depends on DRM_I915
+ help
+ MEI Support for GSC Proxy Services on Intel platforms.
+
+ MEI GSC proxy enables messaging between the GSC service on
+ the Intel graphics card and services on the CSE (MEI) firmware
+ residing in the SoC or PCH.
+
diff --git a/drivers/misc/mei/gsc_proxy/Makefile b/drivers/misc/mei/gsc_proxy/Makefile
new file mode 100644
index 000000000000..358847e9aaa9
--- /dev/null
+++ b/drivers/misc/mei/gsc_proxy/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Copyright (c) 2022-2023, Intel Corporation. All rights reserved.
+#
+# Makefile - GSC Proxy client driver for Intel MEI Bus Driver.
+
+obj-$(CONFIG_INTEL_MEI_GSC_PROXY) += mei_gsc_proxy.o
diff --git a/drivers/misc/mei/gsc_proxy/mei_gsc_proxy.c b/drivers/misc/mei/gsc_proxy/mei_gsc_proxy.c
new file mode 100644
index 000000000000..be52b113aea9
--- /dev/null
+++ b/drivers/misc/mei/gsc_proxy/mei_gsc_proxy.c
@@ -0,0 +1,208 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2022-2023 Intel Corporation
+ */
+
+/**
+ * DOC: MEI_GSC_PROXY Client Driver
+ *
+ * The mei_gsc_proxy driver acts as a translation layer between the
+ * proxy user (i915) and the ME firmware by proxying messages to the ME FW.
+ */
+
+#include <linux/component.h>
+#include <linux/mei_cl_bus.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/uuid.h>
+#include <drm/drm_connector.h>
+#include <drm/i915_component.h>
+#include <drm/i915_gsc_proxy_mei_interface.h>
+
+/**
+ * mei_gsc_proxy_send - Sends a proxy message to ME FW.
+ * @dev: device corresponding to the mei_cl_device
+ * @buf: a message buffer to send
+ * @size: size of the message
+ * Return: bytes sent on success, <0 on failure
+ */
+static int mei_gsc_proxy_send(struct device *dev, const void *buf, size_t size)
+{
+ ssize_t ret;
+
+ if (!dev || !buf)
+ return -EINVAL;
+
+ ret = mei_cldev_send(to_mei_cl_device(dev), buf, size);
+ if (ret < 0)
+ dev_dbg(dev, "mei_cldev_send failed. %zd\n", ret);
+
+ return ret;
+}
+
+/**
+ * mei_gsc_proxy_recv - Receives a proxy message from ME FW.
+ * @dev: device corresponding to the mei_cl_device
+ * @buf: a message buffer to contain the received message
+ * @size: size of the buffer
+ * Return: bytes received on success, <0 on failure
+ */
+static int mei_gsc_proxy_recv(struct device *dev, void *buf, size_t size)
+{
+ ssize_t ret;
+
+ if (!dev || !buf)
+ return -EINVAL;
+
+ ret = mei_cldev_recv(to_mei_cl_device(dev), buf, size);
+ if (ret < 0)
+ dev_dbg(dev, "mei_cldev_recv failed. %zd\n", ret);
+
+ return ret;
+}
+
+static const struct i915_gsc_proxy_component_ops mei_gsc_proxy_ops = {
+ .owner = THIS_MODULE,
+ .send = mei_gsc_proxy_send,
+ .recv = mei_gsc_proxy_recv,
+};
+
+static int mei_component_master_bind(struct device *dev)
+{
+ struct mei_cl_device *cldev = to_mei_cl_device(dev);
+ struct i915_gsc_proxy_component *comp_master = mei_cldev_get_drvdata(cldev);
+
+ comp_master->ops = &mei_gsc_proxy_ops;
+ comp_master->mei_dev = dev;
+ return component_bind_all(dev, comp_master);
+}
+
+static void mei_component_master_unbind(struct device *dev)
+{
+ struct mei_cl_device *cldev = to_mei_cl_device(dev);
+ struct i915_gsc_proxy_component *comp_master = mei_cldev_get_drvdata(cldev);
+
+ component_unbind_all(dev, comp_master);
+}
+
+static const struct component_master_ops mei_component_master_ops = {
+ .bind = mei_component_master_bind,
+ .unbind = mei_component_master_unbind,
+};
+
+/**
+ * mei_gsc_proxy_component_match - compare function for matching mei.
+ *
+ * The function checks if the device is a PCI device, that it is an
+ * Intel VGA adapter, that the subcomponent is the GSC proxy, and that
+ * the parents of the MEI PCI device and of the VGA adapter are the
+ * same PCH device.
+ *
+ * @dev: master device
+ * @subcomponent: subcomponent to match (I915_COMPONENT_GSC_PROXY)
+ * @data: compare data (mei pci parent)
+ *
+ * Return:
+ * * 1 - if components match
+ * * 0 - otherwise
+ */
+static int mei_gsc_proxy_component_match(struct device *dev, int subcomponent,
+ void *data)
+{
+ struct pci_dev *pdev;
+
+ if (!dev_is_pci(dev))
+ return 0;
+
+ pdev = to_pci_dev(dev);
+
+ if (pdev->class != (PCI_CLASS_DISPLAY_VGA << 8) ||
+ pdev->vendor != PCI_VENDOR_ID_INTEL)
+ return 0;
+
+ if (subcomponent != I915_COMPONENT_GSC_PROXY)
+ return 0;
+
+ return component_compare_dev(dev->parent, ((struct device *)data)->parent);
+}
+
+static int mei_gsc_proxy_probe(struct mei_cl_device *cldev,
+ const struct mei_cl_device_id *id)
+{
+ struct i915_gsc_proxy_component *comp_master;
+ struct component_match *master_match = NULL;
+ int ret;
+
+ ret = mei_cldev_enable(cldev);
+ if (ret < 0) {
+ dev_err(&cldev->dev, "mei_cldev_enable Failed. %d\n", ret);
+ goto enable_err_exit;
+ }
+
+ comp_master = kzalloc(sizeof(*comp_master), GFP_KERNEL);
+ if (!comp_master) {
+ ret = -ENOMEM;
+ goto err_exit;
+ }
+
+ component_match_add_typed(&cldev->dev, &master_match,
+ mei_gsc_proxy_component_match, cldev->dev.parent);
+ if (IS_ERR_OR_NULL(master_match)) {
+ ret = -ENOMEM;
+ goto err_exit;
+ }
+
+ mei_cldev_set_drvdata(cldev, comp_master);
+ ret = component_master_add_with_match(&cldev->dev,
+ &mei_component_master_ops,
+ master_match);
+ if (ret < 0) {
+ dev_err(&cldev->dev, "Master comp add failed %d\n", ret);
+ goto err_exit;
+ }
+
+ return 0;
+
+err_exit:
+ mei_cldev_set_drvdata(cldev, NULL);
+ kfree(comp_master);
+ mei_cldev_disable(cldev);
+enable_err_exit:
+ return ret;
+}
+
+static void mei_gsc_proxy_remove(struct mei_cl_device *cldev)
+{
+ struct i915_gsc_proxy_component *comp_master = mei_cldev_get_drvdata(cldev);
+ int ret;
+
+ component_master_del(&cldev->dev, &mei_component_master_ops);
+ kfree(comp_master);
+ mei_cldev_set_drvdata(cldev, NULL);
+
+ ret = mei_cldev_disable(cldev);
+ if (ret)
+ dev_warn(&cldev->dev, "mei_cldev_disable() failed %d\n", ret);
+}
+
+#define MEI_UUID_GSC_PROXY UUID_LE(0xf73db04, 0x97ab, 0x4125, \
+ 0xb8, 0x93, 0xe9, 0x4, 0xad, 0xd, 0x54, 0x64)
+
+static struct mei_cl_device_id mei_gsc_proxy_tbl[] = {
+ { .uuid = MEI_UUID_GSC_PROXY, .version = MEI_CL_VERSION_ANY },
+ { }
+};
+MODULE_DEVICE_TABLE(mei, mei_gsc_proxy_tbl);
+
+static struct mei_cl_driver mei_gsc_proxy_driver = {
+ .id_table = mei_gsc_proxy_tbl,
+ .name = KBUILD_MODNAME,
+ .probe = mei_gsc_proxy_probe,
+ .remove = mei_gsc_proxy_remove,
+};
+
+module_mei_cl_driver(mei_gsc_proxy_driver);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("MEI GSC PROXY");
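After binding, the proxy user reaches the ME firmware only through the ops table filled in by mei_component_master_bind(). A hypothetical, self-contained model of that vtable indirection (names and types here are stand-ins, not the i915/mei API):

#include <stddef.h>
#include <stdio.h>

/* modeled loosely on i915_gsc_proxy_component_ops: send/recv via a vtable */
struct proxy_ops {
	int (*send)(void *dev, const void *buf, size_t size);
	int (*recv)(void *dev, void *buf, size_t size);
};

struct proxy_component {
	const struct proxy_ops *ops;
	void *mei_dev;	/* handed to every op, as comp_master->mei_dev is */
};

static int demo_send(void *dev, const void *buf, size_t size)
{
	(void)dev;
	printf("sent %zu bytes: %s\n", size, (const char *)buf);
	return (int)size;
}

static const struct proxy_ops demo_ops = { .send = demo_send };

int main(void)
{
	struct proxy_component comp = { .ops = &demo_ops, .mei_dev = NULL };
	const char msg[] = "ping";

	/* a consumer would call comp.ops->send(comp.mei_dev, ...) */
	return comp.ops->send(comp.mei_dev, msg, sizeof(msg)) < 0;
}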
diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
index c2d080fc4fc4..27cbe148f0db 100644
--- a/drivers/net/bonding/bond_netlink.c
+++ b/drivers/net/bonding/bond_netlink.c
@@ -84,6 +84,11 @@ nla_put_failure:
return -EMSGSIZE;
}
+/* Limit the max delay range to 300s */
+static struct netlink_range_validation delay_range = {
+ .max = 300000,
+};
+
static const struct nla_policy bond_policy[IFLA_BOND_MAX + 1] = {
[IFLA_BOND_MODE] = { .type = NLA_U8 },
[IFLA_BOND_ACTIVE_SLAVE] = { .type = NLA_U32 },
@@ -114,7 +119,7 @@ static const struct nla_policy bond_policy[IFLA_BOND_MAX + 1] = {
[IFLA_BOND_AD_ACTOR_SYSTEM] = { .type = NLA_BINARY,
.len = ETH_ALEN },
[IFLA_BOND_TLB_DYNAMIC_LB] = { .type = NLA_U8 },
- [IFLA_BOND_PEER_NOTIF_DELAY] = { .type = NLA_U32 },
+ [IFLA_BOND_PEER_NOTIF_DELAY] = NLA_POLICY_FULL_RANGE(NLA_U32, &delay_range),
[IFLA_BOND_MISSED_MAX] = { .type = NLA_U8 },
[IFLA_BOND_NS_IP6_TARGET] = { .type = NLA_NESTED },
};
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index 0498fc6731f8..f3f27f0bd2a6 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -169,6 +169,12 @@ static const struct bond_opt_value bond_num_peer_notif_tbl[] = {
{ NULL, -1, 0}
};
+static const struct bond_opt_value bond_peer_notif_delay_tbl[] = {
+ { "off", 0, 0},
+ { "maxval", 300000, BOND_VALFLAG_MAX},
+ { NULL, -1, 0}
+};
+
static const struct bond_opt_value bond_primary_reselect_tbl[] = {
{ "always", BOND_PRI_RESELECT_ALWAYS, BOND_VALFLAG_DEFAULT},
{ "better", BOND_PRI_RESELECT_BETTER, 0},
@@ -488,7 +494,7 @@ static const struct bond_option bond_opts[BOND_OPT_LAST] = {
.id = BOND_OPT_PEER_NOTIF_DELAY,
.name = "peer_notif_delay",
.desc = "Delay between each peer notification on failover event, in milliseconds",
- .values = bond_intmax_tbl,
+ .values = bond_peer_notif_delay_tbl,
.set = bond_option_peer_notif_delay_set
}
};
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index 57ce74315eba..caa00c72aeeb 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -294,19 +294,6 @@ static int gve_napi_poll_dqo(struct napi_struct *napi, int budget)
bool reschedule = false;
int work_done = 0;
- /* Clear PCI MSI-X Pending Bit Array (PBA)
- *
- * This bit is set if an interrupt event occurs while the vector is
- * masked. If this bit is set and we reenable the interrupt, it will
- * fire again. Since we're just about to poll the queue state, we don't
- * need it to fire again.
- *
- * Under high softirq load, it's possible that the interrupt condition
- * is triggered twice before we got the chance to process it.
- */
- gve_write_irq_doorbell_dqo(priv, block,
- GVE_ITR_NO_UPDATE_DQO | GVE_ITR_CLEAR_PBA_BIT_DQO);
-
if (block->tx)
reschedule |= gve_tx_poll_dqo(block, /*do_clean=*/true);
diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c
index 4c205afbd230..985cff910f30 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
@@ -654,7 +654,7 @@ __mtk_wed_detach(struct mtk_wed_device *dev)
BIT(hw->index), BIT(hw->index));
}
- if (!hw_list[!hw->index]->wed_dev &&
+ if ((!hw_list[!hw->index] || !hw_list[!hw->index]->wed_dev) &&
hw->eth->dma_dev != hw->eth->dev)
mtk_eth_set_dma_device(hw->eth, hw->eth->dev);
diff --git a/drivers/net/ethernet/mscc/vsc7514_regs.c b/drivers/net/ethernet/mscc/vsc7514_regs.c
index ef6fd3f6be30..5595bfe84bbb 100644
--- a/drivers/net/ethernet/mscc/vsc7514_regs.c
+++ b/drivers/net/ethernet/mscc/vsc7514_regs.c
@@ -307,15 +307,15 @@ static const u32 vsc7514_sys_regmap[] = {
REG(SYS_COUNT_DROP_YELLOW_PRIO_4, 0x000218),
REG(SYS_COUNT_DROP_YELLOW_PRIO_5, 0x00021c),
REG(SYS_COUNT_DROP_YELLOW_PRIO_6, 0x000220),
- REG(SYS_COUNT_DROP_YELLOW_PRIO_7, 0x000214),
- REG(SYS_COUNT_DROP_GREEN_PRIO_0, 0x000218),
- REG(SYS_COUNT_DROP_GREEN_PRIO_1, 0x00021c),
- REG(SYS_COUNT_DROP_GREEN_PRIO_2, 0x000220),
- REG(SYS_COUNT_DROP_GREEN_PRIO_3, 0x000224),
- REG(SYS_COUNT_DROP_GREEN_PRIO_4, 0x000228),
- REG(SYS_COUNT_DROP_GREEN_PRIO_5, 0x00022c),
- REG(SYS_COUNT_DROP_GREEN_PRIO_6, 0x000230),
- REG(SYS_COUNT_DROP_GREEN_PRIO_7, 0x000234),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_7, 0x000224),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_0, 0x000228),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_1, 0x00022c),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_2, 0x000230),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_3, 0x000234),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_4, 0x000238),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_5, 0x00023c),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_6, 0x000240),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_7, 0x000244),
REG(SYS_RESET_CFG, 0x000508),
REG(SYS_CMID, 0x00050c),
REG(SYS_VLAN_ETYPE_CFG, 0x000510),
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
index 4538f334df57..d3c5306f1c41 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
@@ -181,6 +181,7 @@ enum power_event {
#define GMAC4_LPI_CTRL_STATUS 0xd0
#define GMAC4_LPI_TIMER_CTRL 0xd4
#define GMAC4_LPI_ENTRY_TIMER 0xd8
+#define GMAC4_MAC_ONEUS_TIC_COUNTER 0xdc
/* LPI control and status defines */
#define GMAC4_LPI_CTRL_STATUS_LPITCSE BIT(21) /* LPI Tx Clock Stop Enable */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index afaec3fb9ab6..03b1c5a97826 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -25,6 +25,7 @@ static void dwmac4_core_init(struct mac_device_info *hw,
struct stmmac_priv *priv = netdev_priv(dev);
void __iomem *ioaddr = hw->pcsr;
u32 value = readl(ioaddr + GMAC_CONFIG);
+ u32 clk_rate;
value |= GMAC_CORE_INIT;
@@ -47,6 +48,10 @@ static void dwmac4_core_init(struct mac_device_info *hw,
writel(value, ioaddr + GMAC_CONFIG);
+ /* Configure the LPI 1us counter to the number of CSR clock ticks in 1us - 1 */
+ clk_rate = clk_get_rate(priv->plat->stmmac_clk);
+ writel((clk_rate / 1000000) - 1, ioaddr + GMAC4_MAC_ONEUS_TIC_COUNTER);
+
/* Enable GMAC interrupts */
value = GMAC_INT_DEFAULT_ENABLE;
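The value written is the number of CSR clock ticks in one microsecond minus one, so a 250 MHz stmmac_clk (an example rate, not taken from the patch) programs 249. A trivial model of the computation:

#include <stdio.h>

int main(void)
{
	unsigned long clk_rate = 250000000UL;	/* example: 250 MHz CSR clock */
	unsigned long oneus_tic = clk_rate / 1000000 - 1;

	printf("ONEUS_TIC_COUNTER = %lu\n", oneus_tic);	/* 249 */
	return 0;
}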
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index 460b3d4f2245..ab5133eb1d51 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -436,6 +436,9 @@ static int ipvlan_process_v4_outbound(struct sk_buff *skb)
goto err;
}
skb_dst_set(skb, &rt->dst);
+
+ memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
+
err = ip_local_out(net, skb->sk, skb);
if (unlikely(net_xmit_eval(err)))
dev->stats.tx_errors++;
@@ -474,6 +477,9 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb)
goto err;
}
skb_dst_set(skb, dst);
+
+ memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
+
err = ip6_local_out(net, skb->sk, skb);
if (unlikely(net_xmit_eval(err)))
dev->stats.tx_errors++;
diff --git a/drivers/net/mdio/mdio-mvusb.c b/drivers/net/mdio/mdio-mvusb.c
index 68fc55906e78..554837c21e73 100644
--- a/drivers/net/mdio/mdio-mvusb.c
+++ b/drivers/net/mdio/mdio-mvusb.c
@@ -67,6 +67,7 @@ static int mvusb_mdio_probe(struct usb_interface *interface,
struct device *dev = &interface->dev;
struct mvusb_mdio *mvusb;
struct mii_bus *mdio;
+ int ret;
mdio = devm_mdiobus_alloc_size(dev, sizeof(*mvusb));
if (!mdio)
@@ -87,7 +88,15 @@ static int mvusb_mdio_probe(struct usb_interface *interface,
mdio->write = mvusb_mdio_write;
usb_set_intfdata(interface, mvusb);
- return of_mdiobus_register(mdio, dev->of_node);
+ ret = of_mdiobus_register(mdio, dev->of_node);
+ if (ret)
+ goto put_dev;
+
+ return 0;
+
+put_dev:
+ usb_put_dev(mvusb->udev);
+ return ret;
}
static void mvusb_mdio_disconnect(struct usb_interface *interface)
diff --git a/drivers/net/pcs/pcs-xpcs.c b/drivers/net/pcs/pcs-xpcs.c
index 539cd43eae8d..f19d48c94fe0 100644
--- a/drivers/net/pcs/pcs-xpcs.c
+++ b/drivers/net/pcs/pcs-xpcs.c
@@ -1203,7 +1203,7 @@ static const struct xpcs_compat synopsys_xpcs_compat[DW_XPCS_INTERFACE_MAX] = {
[DW_XPCS_2500BASEX] = {
.supported = xpcs_2500basex_features,
.interface = xpcs_2500basex_interfaces,
- .num_interfaces = ARRAY_SIZE(xpcs_2500basex_features),
+ .num_interfaces = ARRAY_SIZE(xpcs_2500basex_interfaces),
.an_mode = DW_2500BASEX,
},
};
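The one-character fix above is the classic hazard of sizing one array with ARRAY_SIZE() of another. A standalone illustration of how the wrong argument silently miscounts (the arrays here are made up):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

int main(void)
{
	int features[7] = { 0 };	/* e.g. a feature-bit list */
	int interfaces[2] = { 0 };	/* the list actually being walked */

	/* bug: iterating interfaces[] with the size of features[] */
	printf("wrong count: %zu, right count: %zu\n",
	       ARRAY_SIZE(features), ARRAY_SIZE(interfaces));
	return 0;
}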
diff --git a/drivers/net/phy/bcm-phy-lib.h b/drivers/net/phy/bcm-phy-lib.h
index 9902fb182099..729db441797a 100644
--- a/drivers/net/phy/bcm-phy-lib.h
+++ b/drivers/net/phy/bcm-phy-lib.h
@@ -40,6 +40,11 @@ static inline int bcm_phy_write_exp_sel(struct phy_device *phydev,
return bcm_phy_write_exp(phydev, reg | MII_BCM54XX_EXP_SEL_ER, val);
}
+static inline int bcm_phy_read_exp_sel(struct phy_device *phydev, u16 reg)
+{
+ return bcm_phy_read_exp(phydev, reg | MII_BCM54XX_EXP_SEL_ER);
+}
+
int bcm54xx_auxctl_write(struct phy_device *phydev, u16 regnum, u16 val);
int bcm54xx_auxctl_read(struct phy_device *phydev, u16 regnum);
diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
index 06be71ecd2f8..f8c17a253f8b 100644
--- a/drivers/net/phy/bcm7xxx.c
+++ b/drivers/net/phy/bcm7xxx.c
@@ -486,7 +486,7 @@ static int bcm7xxx_16nm_ephy_afe_config(struct phy_device *phydev)
bcm_phy_write_misc(phydev, 0x0038, 0x0002, 0xede0);
/* Read CORE_EXPA9 */
- tmp = bcm_phy_read_exp(phydev, 0x00a9);
+ tmp = bcm_phy_read_exp_sel(phydev, 0x00a9);
/* CORE_EXPA9[6:1] is rcalcode[5:0] */
rcalcode = (tmp & 0x7e) / 2;
/* Correct RCAL code + 1 is -1% rprogr, LP: +16 */
diff --git a/drivers/net/tap.c b/drivers/net/tap.c
index ce993cc75bf3..d30d730ed5a7 100644
--- a/drivers/net/tap.c
+++ b/drivers/net/tap.c
@@ -742,7 +742,7 @@ static ssize_t tap_get_user(struct tap_queue *q, void *msg_control,
/* Move network header to the right position for VLAN tagged packets */
if (eth_type_vlan(skb->protocol) &&
- __vlan_get_protocol(skb, skb->protocol, &depth) != 0)
+ vlan_get_protocol_and_depth(skb, skb->protocol, &depth) != 0)
skb_set_network_header(skb, depth);
/* copy skb_ubuf_info for callback when skb has no error */
@@ -1197,7 +1197,7 @@ static int tap_get_user_xdp(struct tap_queue *q, struct xdp_buff *xdp)
/* Move network header to the right position for VLAN tagged packets */
if (eth_type_vlan(skb->protocol) &&
- __vlan_get_protocol(skb, skb->protocol, &depth) != 0)
+ vlan_get_protocol_and_depth(skb, skb->protocol, &depth) != 0)
skb_set_network_header(skb, depth);
rcu_read_lock();
diff --git a/drivers/platform/mellanox/mlxbf-tmfifo.c b/drivers/platform/mellanox/mlxbf-tmfifo.c
index 91a077c35b8b..a79318e90a13 100644
--- a/drivers/platform/mellanox/mlxbf-tmfifo.c
+++ b/drivers/platform/mellanox/mlxbf-tmfifo.c
@@ -784,7 +784,7 @@ static void mlxbf_tmfifo_rxtx(struct mlxbf_tmfifo_vring *vring, bool is_rx)
fifo = vring->fifo;
/* Return if vdev is not ready. */
- if (!fifo->vdev[devid])
+ if (!fifo || !fifo->vdev[devid])
return;
/* Return if another vring is running. */
@@ -980,9 +980,13 @@ static int mlxbf_tmfifo_virtio_find_vqs(struct virtio_device *vdev,
vq->num_max = vring->num;
+ vq->priv = vring;
+
+ /* Make vq update visible before using it. */
+ virtio_mb(false);
+
vqs[i] = vq;
vring->vq = vq;
- vq->priv = vring;
}
return 0;
@@ -1302,6 +1306,9 @@ static int mlxbf_tmfifo_probe(struct platform_device *pdev)
mod_timer(&fifo->timer, jiffies + MLXBF_TMFIFO_TIMER_INTERVAL);
+ /* Make all updates visible before setting the 'is_ready' flag. */
+ virtio_mb(false);
+
fifo->is_ready = true;
return 0;
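Both barriers follow the same publish idiom: complete every store to the shared structure, issue a full barrier, and only then set the flag or pointer that readers test first. A userspace C11 sketch of the idea with atomics (the driver itself uses virtio_mb(), a full memory barrier):

#include <stdatomic.h>
#include <stdio.h>

struct fifo {
	int config;		/* plain payload fields */
	atomic_bool is_ready;	/* readers check this first */
};

static void publish(struct fifo *f)
{
	f->config = 42;		/* all payload updates first... */
	atomic_thread_fence(memory_order_release);
	atomic_store_explicit(&f->is_ready, 1, memory_order_relaxed);
}

int main(void)
{
	struct fifo f = { 0 };

	publish(&f);
	if (atomic_load_explicit(&f.is_ready, memory_order_acquire))
		printf("config=%d\n", f.config);
	return 0;
}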
diff --git a/drivers/platform/x86/hp/hp-wmi.c b/drivers/platform/x86/hp/hp-wmi.c
index 873f59c3e280..6364ae262705 100644
--- a/drivers/platform/x86/hp/hp-wmi.c
+++ b/drivers/platform/x86/hp/hp-wmi.c
@@ -211,6 +211,7 @@ struct bios_rfkill2_state {
static const struct key_entry hp_wmi_keymap[] = {
{ KE_KEY, 0x02, { KEY_BRIGHTNESSUP } },
{ KE_KEY, 0x03, { KEY_BRIGHTNESSDOWN } },
+ { KE_KEY, 0x270, { KEY_MICMUTE } },
{ KE_KEY, 0x20e6, { KEY_PROG1 } },
{ KE_KEY, 0x20e8, { KEY_MEDIA } },
{ KE_KEY, 0x2142, { KEY_MEDIA } },
diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c
index 1a300e14f350..064f186ae81b 100644
--- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c
+++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c
@@ -44,14 +44,18 @@ static ssize_t store_min_max_freq_khz(struct uncore_data *data,
int min_max)
{
unsigned int input;
+ int ret;
if (kstrtouint(buf, 10, &input))
return -EINVAL;
mutex_lock(&uncore_lock);
- uncore_write(data, input, min_max);
+ ret = uncore_write(data, input, min_max);
mutex_unlock(&uncore_lock);
+ if (ret)
+ return ret;
+
return count;
}
diff --git a/drivers/platform/x86/intel_scu_pcidrv.c b/drivers/platform/x86/intel_scu_pcidrv.c
index 80abc708e4f2..d904fad499aa 100644
--- a/drivers/platform/x86/intel_scu_pcidrv.c
+++ b/drivers/platform/x86/intel_scu_pcidrv.c
@@ -34,6 +34,7 @@ static int intel_scu_pci_probe(struct pci_dev *pdev,
static const struct pci_device_id pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x080e) },
+ { PCI_VDEVICE(INTEL, 0x082a) },
{ PCI_VDEVICE(INTEL, 0x08ea) },
{ PCI_VDEVICE(INTEL, 0x0a94) },
{ PCI_VDEVICE(INTEL, 0x11a0) },
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 6fe82f805ea8..b3808ad77278 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -10318,6 +10318,7 @@ static atomic_t dytc_ignore_event = ATOMIC_INIT(0);
static DEFINE_MUTEX(dytc_mutex);
static int dytc_capabilities;
static bool dytc_mmc_get_available;
+static int profile_force;
static int convert_dytc_to_profile(int funcmode, int dytcmode,
enum platform_profile_option *profile)
@@ -10580,6 +10581,21 @@ static int tpacpi_dytc_profile_init(struct ibm_init_struct *iibm)
if (err)
return err;
+ /* Check if user wants to override the profile selection */
+ if (profile_force) {
+ switch (profile_force) {
+ case -1:
+ dytc_capabilities = 0;
+ break;
+ case 1:
+ dytc_capabilities = BIT(DYTC_FC_MMC);
+ break;
+ case 2:
+ dytc_capabilities = BIT(DYTC_FC_PSC);
+ break;
+ }
+ pr_debug("Profile selection forced: 0x%x\n", dytc_capabilities);
+ }
if (dytc_capabilities & BIT(DYTC_FC_MMC)) { /* MMC MODE */
pr_debug("MMC is supported\n");
/*
@@ -10593,11 +10609,6 @@ static int tpacpi_dytc_profile_init(struct ibm_init_struct *iibm)
dytc_mmc_get_available = true;
}
} else if (dytc_capabilities & BIT(DYTC_FC_PSC)) { /* PSC MODE */
- /* Support for this only works on AMD platforms */
- if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
- dbg_printk(TPACPI_DBG_INIT, "PSC not support on Intel platforms\n");
- return -ENODEV;
- }
pr_debug("PSC is supported\n");
} else {
dbg_printk(TPACPI_DBG_INIT, "No DYTC support available\n");
@@ -11646,6 +11657,9 @@ MODULE_PARM_DESC(uwb_state,
"Initial state of the emulated UWB switch");
#endif
+module_param(profile_force, int, 0444);
+MODULE_PARM_DESC(profile_force, "Force profile mode. -1=off, 1=MMC, 2=PSC");
+
static void thinkpad_acpi_module_exit(void)
{
struct ibm_struct *ibm, *itmp;
diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c
index 13802a3c3591..68e66b60445c 100644
--- a/drivers/platform/x86/touchscreen_dmi.c
+++ b/drivers/platform/x86/touchscreen_dmi.c
@@ -336,6 +336,22 @@ static const struct ts_dmi_data dexp_ursus_7w_data = {
.properties = dexp_ursus_7w_props,
};
+static const struct property_entry dexp_ursus_kx210i_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-min-x", 5),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 2),
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1720),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1137),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-dexp-ursus-kx210i.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
+ { }
+};
+
+static const struct ts_dmi_data dexp_ursus_kx210i_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = dexp_ursus_kx210i_props,
+};
+
static const struct property_entry digma_citi_e200_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1980),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1500),
@@ -378,6 +394,11 @@ static const struct ts_dmi_data gdix1001_01_upside_down_data = {
.properties = gdix1001_upside_down_props,
};
+static const struct ts_dmi_data gdix1002_00_upside_down_data = {
+ .acpi_name = "GDIX1002:00",
+ .properties = gdix1001_upside_down_props,
+};
+
static const struct property_entry gp_electronic_t701_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 960),
PROPERTY_ENTRY_U32("touchscreen-size-y", 640),
@@ -1186,6 +1207,14 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
},
},
{
+ /* DEXP Ursus KX210i */
+ .driver_data = (void *)&dexp_ursus_kx210i_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "INSYDE Corp."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "S107I"),
+ },
+ },
+ {
/* Digma Citi E200 */
.driver_data = (void *)&digma_citi_e200_data,
.matches = {
@@ -1296,6 +1325,18 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
},
},
{
+ /* Juno Tablet */
+ .driver_data = (void *)&gdix1002_00_upside_down_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Default string"),
+ /* Both product- and board-name being "Default string" is somewhat rare */
+ DMI_MATCH(DMI_PRODUCT_NAME, "Default string"),
+ DMI_MATCH(DMI_BOARD_NAME, "Default string"),
+ /* Above matches are too generic, add partial bios-version match */
+ DMI_MATCH(DMI_BIOS_VERSION, "JP2V1."),
+ },
+ },
+ {
/* Mediacom WinPad 7.0 W700 (same hw as Wintron surftab 7") */
.driver_data = (void *)&trekstor_surftab_wintron70_data,
.matches = {
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index 17d7bb875fee..45fd374fe56c 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -9459,8 +9459,16 @@ static int __ufshcd_wl_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
* that performance might be impacted.
*/
ret = ufshcd_urgent_bkops(hba);
- if (ret)
+ if (ret) {
+ /*
+ * If an error is returned in the suspend flow, I/O will hang.
+ * Trigger the error handler and abort the suspend so that
+ * error recovery can run.
+ */
+ ufshcd_force_error_recovery(hba);
+ ret = -EBUSY;
goto enable_scaling;
+ }
} else {
/* make sure that auto bkops is disabled */
ufshcd_disable_auto_bkops(hba);
diff --git a/drivers/video/fbdev/68328fb.c b/drivers/video/fbdev/68328fb.c
index 3ccf46f8ffd0..07d6e8dc686b 100644
--- a/drivers/video/fbdev/68328fb.c
+++ b/drivers/video/fbdev/68328fb.c
@@ -124,7 +124,7 @@ static u_long get_line_length(int xres_virtual, int bpp)
* First part, xxxfb_check_var, must not write anything
* to hardware, it should only verify and adjust var.
* This means it doesn't alter par but it does use hardware
- * data from it to check this var.
+ * data from it to check this var.
*/
static int mc68x328fb_check_var(struct fb_var_screeninfo *var,
@@ -182,7 +182,7 @@ static int mc68x328fb_check_var(struct fb_var_screeninfo *var,
/*
* Now that we checked it we alter var. The reason being is that the video
- * mode passed in might not work but slight changes to it might make it
+ * mode passed in might not work but slight changes to it might make it
* work. This way we let the user know what is acceptable.
*/
switch (var->bits_per_pixel) {
@@ -257,8 +257,8 @@ static int mc68x328fb_check_var(struct fb_var_screeninfo *var,
}
/* This routine actually sets the video mode. It's in here where we
- * the hardware state info->par and fix which can be affected by the
- * change in par. For this driver it doesn't do much.
+ * the hardware state info->par and fix which can be affected by the
+ * change in par. For this driver it doesn't do much.
*/
static int mc68x328fb_set_par(struct fb_info *info)
{
@@ -295,7 +295,7 @@ static int mc68x328fb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
* {hardwarespecific} contains width of RAMDAC
* cmap[X] is programmed to (X << red.offset) | (X << green.offset) | (X << blue.offset)
* RAMDAC[X] is programmed to (red, green, blue)
- *
+ *
* Pseudocolor:
* uses offset = 0 && length = RAMDAC register width.
* var->{color}.offset is 0
@@ -384,7 +384,7 @@ static int mc68x328fb_pan_display(struct fb_var_screeninfo *var,
}
/*
- * Most drivers don't need their own mmap function
+ * Most drivers don't need their own mmap function
*/
static int mc68x328fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
diff --git a/drivers/video/fbdev/arcfb.c b/drivers/video/fbdev/arcfb.c
index ace104850e50..3d926c3ada73 100644
--- a/drivers/video/fbdev/arcfb.c
+++ b/drivers/video/fbdev/arcfb.c
@@ -527,7 +527,7 @@ static int arcfb_probe(struct platform_device *dev)
info = framebuffer_alloc(sizeof(struct arcfb_par), &dev->dev);
if (!info)
- goto err;
+ goto err_fb_alloc;
info->screen_buffer = videomemory;
info->fbops = &arcfb_ops;
@@ -539,7 +539,7 @@ static int arcfb_probe(struct platform_device *dev)
if (!dio_addr || !cio_addr || !c2io_addr) {
printk(KERN_WARNING "no IO addresses supplied\n");
- goto err1;
+ goto err_addr;
}
par->dio_addr = dio_addr;
par->cio_addr = cio_addr;
@@ -555,12 +555,12 @@ static int arcfb_probe(struct platform_device *dev)
printk(KERN_INFO
"arcfb: Failed req IRQ %d\n", par->irq);
retval = -EBUSY;
- goto err1;
+ goto err_addr;
}
}
retval = register_framebuffer(info);
if (retval < 0)
- goto err1;
+ goto err_register_fb;
platform_set_drvdata(dev, info);
fb_info(info, "Arc frame buffer device, using %dK of video memory\n",
videomemorysize >> 10);
@@ -584,9 +584,12 @@ static int arcfb_probe(struct platform_device *dev)
}
return 0;
-err1:
+
+err_register_fb:
+ free_irq(par->irq, info);
+err_addr:
framebuffer_release(info);
-err:
+err_fb_alloc:
vfree(videomemory);
return retval;
}
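The arcfb change is purely about readable error handling: numbered labels (err, err1) are renamed after the cleanup they perform, and the register_framebuffer() failure path now also releases the IRQ it acquired. The underlying pattern, reduced to a self-contained userspace sketch with hypothetical resources and plain malloc/free:

#include <stdlib.h>

/* Acquire resources in order; on failure, unwind in reverse via labels
 * named for the cleanup they perform (the style the patch adopts). */
static int acquire_all(void **a, void **b, void **c)
{
	*a = malloc(16);
	if (!*a)
		return -1;

	*b = malloc(16);
	if (!*b)
		goto err_free_a;

	*c = malloc(16);
	if (!*c)
		goto err_free_b;

	return 0;

err_free_b:
	free(*b);
err_free_a:
	free(*a);
	return -1;
}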
diff --git a/drivers/video/fbdev/atmel_lcdfb.c b/drivers/video/fbdev/atmel_lcdfb.c
index 8187a7c4f910..987c5f5f0241 100644
--- a/drivers/video/fbdev/atmel_lcdfb.c
+++ b/drivers/video/fbdev/atmel_lcdfb.c
@@ -317,7 +317,7 @@ static inline void atmel_lcdfb_free_video_memory(struct atmel_lcdfb_info *sinfo)
/**
* atmel_lcdfb_alloc_video_memory - Allocate framebuffer memory
* @sinfo: the frame buffer to allocate memory for
- *
+ *
* This function is called only from the atmel_lcdfb_probe()
* so no locking by fb_info->mm_lock around smem_len setting is needed.
*/
diff --git a/drivers/video/fbdev/cg14.c b/drivers/video/fbdev/cg14.c
index a028ede39c12..832a82f45c80 100644
--- a/drivers/video/fbdev/cg14.c
+++ b/drivers/video/fbdev/cg14.c
@@ -512,7 +512,7 @@ static int cg14_probe(struct platform_device *op)
is_8mb = (resource_size(&op->resource[1]) == (8 * 1024 * 1024));
BUILD_BUG_ON(sizeof(par->mmap_map) != sizeof(__cg14_mmap_map));
-
+
memcpy(&par->mmap_map, &__cg14_mmap_map, sizeof(par->mmap_map));
for (i = 0; i < CG14_MMAP_ENTRIES; i++) {
diff --git a/drivers/video/fbdev/controlfb.c b/drivers/video/fbdev/controlfb.c
index 77dbf94aae5f..82eeb139c4eb 100644
--- a/drivers/video/fbdev/controlfb.c
+++ b/drivers/video/fbdev/controlfb.c
@@ -113,14 +113,14 @@ struct fb_info_control {
struct fb_info info;
struct fb_par_control par;
u32 pseudo_palette[16];
-
+
struct cmap_regs __iomem *cmap_regs;
unsigned long cmap_regs_phys;
-
+
struct control_regs __iomem *control_regs;
unsigned long control_regs_phys;
unsigned long control_regs_size;
-
+
__u8 __iomem *frame_buffer;
unsigned long frame_buffer_phys;
unsigned long fb_orig_base;
@@ -196,7 +196,7 @@ static void set_control_clock(unsigned char *params)
while (!req.complete)
cuda_poll();
}
-#endif
+#endif
}
/*
@@ -233,19 +233,19 @@ static void control_set_hardware(struct fb_info_control *p, struct fb_par_contro
if (p->par.xoffset != par->xoffset ||
p->par.yoffset != par->yoffset)
set_screen_start(par->xoffset, par->yoffset, p);
-
+
return;
}
-
+
p->par = *par;
cmode = p->par.cmode;
r = &par->regvals;
-
+
/* Turn off display */
out_le32(CNTRL_REG(p,ctrl), 0x400 | par->ctrl);
-
+
set_control_clock(r->clock_params);
-
+
RADACAL_WRITE(0x20, r->radacal_ctrl);
RADACAL_WRITE(0x21, p->control_use_bank2 ? 0 : 1);
RADACAL_WRITE(0x10, 0);
@@ -254,7 +254,7 @@ static void control_set_hardware(struct fb_info_control *p, struct fb_par_contro
rp = &p->control_regs->vswin;
for (i = 0; i < 16; ++i, ++rp)
out_le32(&rp->r, r->regs[i]);
-
+
out_le32(CNTRL_REG(p,pitch), par->pitch);
out_le32(CNTRL_REG(p,mode), r->mode);
out_le32(CNTRL_REG(p,vram_attr), p->vram_attr);
@@ -366,7 +366,7 @@ static int read_control_sense(struct fb_info_control *p)
sense |= (in_le32(CNTRL_REG(p,mon_sense)) & 0x180) >> 7;
out_le32(CNTRL_REG(p,mon_sense), 077); /* turn off drivers */
-
+
return sense;
}
@@ -558,9 +558,9 @@ static int control_var_to_par(struct fb_var_screeninfo *var,
static void control_par_to_var(struct fb_par_control *par, struct fb_var_screeninfo *var)
{
struct control_regints *rv;
-
+
rv = (struct control_regints *) par->regvals.regs;
-
+
memset(var, 0, sizeof(*var));
var->xres = par->xres;
var->yres = par->yres;
@@ -568,7 +568,7 @@ static void control_par_to_var(struct fb_par_control *par, struct fb_var_screeni
var->yres_virtual = par->vyres;
var->xoffset = par->xoffset;
var->yoffset = par->yoffset;
-
+
switch(par->cmode) {
default:
case CMODE_8:
@@ -634,7 +634,7 @@ static int controlfb_check_var (struct fb_var_screeninfo *var, struct fb_info *i
err = control_var_to_par(var, &par, info);
if (err)
- return err;
+ return err;
control_par_to_var(&par, var);
return 0;
@@ -655,7 +655,7 @@ static int controlfb_set_par (struct fb_info *info)
" control_var_to_par: %d.\n", err);
return err;
}
-
+
control_set_hardware(p, &par);
info->fix.visual = (p->par.cmode == CMODE_8) ?
@@ -840,7 +840,7 @@ static int __init init_control(struct fb_info_control *p)
int full, sense, vmode, cmode, vyres;
struct fb_var_screeninfo var;
int rc;
-
+
printk(KERN_INFO "controlfb: ");
full = p->total_vram == 0x400000;
diff --git a/drivers/video/fbdev/core/modedb.c b/drivers/video/fbdev/core/modedb.c
index 23cf8eba785d..f7e019dded0f 100644
--- a/drivers/video/fbdev/core/modedb.c
+++ b/drivers/video/fbdev/core/modedb.c
@@ -257,6 +257,11 @@ static const struct fb_videomode modedb[] = {
{ NULL, 72, 480, 300, 33386, 40, 24, 11, 19, 80, 3, 0,
FB_VMODE_DOUBLE },
+ /* 1920x1080 @ 60 Hz, 67.3 kHz hsync */
+ { NULL, 60, 1920, 1080, 6734, 148, 88, 36, 4, 44, 5, 0,
+ FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ FB_VMODE_NONINTERLACED },
+
/* 1920x1200 @ 60 Hz, 74.5 Khz hsync */
{ NULL, 60, 1920, 1200, 5177, 128, 336, 1, 38, 208, 3,
FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
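A quick sanity check of the new 1080p entry's timings, as a standalone program. It assumes fb_videomode's pixclock is in picoseconds and that the margin/sync fields follow the order of the neighbouring entries; note the computed hsync lands at ~67.5 kHz, slightly above the 67.3 kHz quoted in the comment.

#include <stdio.h>

int main(void)
{
	/* values copied from the hunk above */
	unsigned xtotal = 1920 + 148 + 88 + 44;	/* xres + h-margins + hsync_len = 2200 */
	unsigned ytotal = 1080 + 36 + 4 + 5;	/* yres + v-margins + vsync_len = 1125 */
	double pixclk = 1e12 / 6734;		/* pixclock given in picoseconds */

	printf("pixel clock %.1f MHz\n", pixclk / 1e6);		/* ~148.5 */
	printf("hsync %.1f kHz\n", pixclk / xtotal / 1e3);	/* ~67.5 */
	printf("refresh %.1f Hz\n", pixclk / xtotal / ytotal);	/* ~60.0 */
	return 0;
}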
diff --git a/drivers/video/fbdev/g364fb.c b/drivers/video/fbdev/g364fb.c
index 05837a3b985c..c5b7673ddc6c 100644
--- a/drivers/video/fbdev/g364fb.c
+++ b/drivers/video/fbdev/g364fb.c
@@ -6,7 +6,7 @@
*
* This driver is based on tgafb.c
*
- * Copyright (C) 1997 Geert Uytterhoeven
+ * Copyright (C) 1997 Geert Uytterhoeven
* Copyright (C) 1995 Jay Estabrook
*
* This file is subject to the terms and conditions of the GNU General Public
@@ -28,7 +28,7 @@
#include <asm/io.h>
#include <asm/jazz.h>
-/*
+/*
* Various defines for the G364
*/
#define G364_MEM_BASE 0xe4400000
@@ -125,7 +125,7 @@ static const struct fb_ops g364fb_ops = {
*
* This call looks only at xoffset, yoffset and the FB_VMODE_YWRAP flag
*/
-static int g364fb_pan_display(struct fb_var_screeninfo *var,
+static int g364fb_pan_display(struct fb_var_screeninfo *var,
struct fb_info *info)
{
if (var->xoffset ||
diff --git a/drivers/video/fbdev/hgafb.c b/drivers/video/fbdev/hgafb.c
index 20bdab738ab7..0af58018441d 100644
--- a/drivers/video/fbdev/hgafb.c
+++ b/drivers/video/fbdev/hgafb.c
@@ -1,6 +1,6 @@
/*
* linux/drivers/video/hgafb.c -- Hercules graphics adaptor frame buffer device
- *
+ *
* Created 25 Nov 1999 by Ferenc Bakonyi (fero@drama.obuda.kando.hu)
* Based on skeletonfb.c by Geert Uytterhoeven and
* mdacon.c by Andrew Apted
@@ -8,14 +8,14 @@
* History:
*
* - Revision 0.1.8 (23 Oct 2002): Ported to new framebuffer api.
- *
- * - Revision 0.1.7 (23 Jan 2001): fix crash resulting from MDA only cards
+ *
+ * - Revision 0.1.7 (23 Jan 2001): fix crash resulting from MDA only cards
* being detected as Hercules. (Paul G.)
* - Revision 0.1.6 (17 Aug 2000): new style structs
* documentation
* - Revision 0.1.5 (13 Mar 2000): spinlocks instead of saveflags();cli();etc
* minor fixes
- * - Revision 0.1.4 (24 Jan 2000): fixed a bug in hga_card_detect() for
+ * - Revision 0.1.4 (24 Jan 2000): fixed a bug in hga_card_detect() for
* HGA-only systems
* - Revision 0.1.3 (22 Jan 2000): modified for the new fb_info structure
* screen is cleared after rmmod
@@ -143,7 +143,7 @@ static bool nologo = 0;
static void write_hga_b(unsigned int val, unsigned char reg)
{
- outb_p(reg, HGA_INDEX_PORT);
+ outb_p(reg, HGA_INDEX_PORT);
outb_p(val, HGA_VALUE_PORT);
}
@@ -155,7 +155,7 @@ static void write_hga_w(unsigned int val, unsigned char reg)
static int test_hga_b(unsigned char val, unsigned char reg)
{
- outb_p(reg, HGA_INDEX_PORT);
+ outb_p(reg, HGA_INDEX_PORT);
outb (val, HGA_VALUE_PORT);
udelay(20); val = (inb_p(HGA_VALUE_PORT) == val);
return val;
@@ -244,7 +244,7 @@ static void hga_show_logo(struct fb_info *info)
void __iomem *dest = hga_vram;
char *logo = linux_logo_bw;
int x, y;
-
+
for (y = 134; y < 134 + 80 ; y++)	/* this needs some cleanup */
for (x = 0; x < 10 ; x++)
writeb(~*(logo++),(dest + HGA_ROWADDR(y) + x + 40));
@@ -255,7 +255,7 @@ static void hga_pan(unsigned int xoffset, unsigned int yoffset)
{
unsigned int base;
unsigned long flags;
-
+
base = (yoffset / 8) * 90 + xoffset;
spin_lock_irqsave(&hga_reg_lock, flags);
write_hga_w(base, 0x0c); /* start address */
@@ -310,7 +310,7 @@ static int hga_card_detect(void)
/* Ok, there is definitely a card registering at the correct
* memory location, so now we do an I/O port test.
*/
-
+
if (!test_hga_b(0x66, 0x0f)) /* cursor low register */
goto error;
@@ -321,7 +321,7 @@ static int hga_card_detect(void)
* bit of the status register is changing. This test lasts for
* approximately 1/10th of a second.
*/
-
+
p_save = q_save = inb_p(HGA_STATUS_PORT) & HGA_STATUS_VSYNC;
for (count=0; count < 50000 && p_save == q_save; count++) {
@@ -329,7 +329,7 @@ static int hga_card_detect(void)
udelay(2);
}
- if (p_save == q_save)
+ if (p_save == q_save)
goto error;
switch (inb_p(HGA_STATUS_PORT) & 0x70) {
@@ -415,7 +415,7 @@ static int hgafb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
* @info:pointer to fb_info object containing info for current hga board
*
* This function looks only at xoffset, yoffset and the %FB_VMODE_YWRAP
- * flag in @var. If input parameters are correct it calls hga_pan() to
+ * flag in @var. If input parameters are correct it calls hga_pan() to
* program the hardware. @info->var is updated to the new values.
* A zero is returned on success and %-EINVAL for failure.
*/
@@ -442,9 +442,9 @@ static int hgafb_pan_display(struct fb_var_screeninfo *var,
* hgafb_blank - (un)blank the screen
* @blank_mode:blanking method to use
* @info:unused
- *
- * Blank the screen if blank_mode != 0, else unblank.
- * Implements VESA suspend and powerdown modes on hardware that supports
+ *
+ * Blank the screen if blank_mode != 0, else unblank.
+ * Implements VESA suspend and powerdown modes on hardware that supports
* disabling hsync/vsync:
* @blank_mode == 2 means suspend vsync,
* @blank_mode == 3 means suspend hsync,
@@ -539,15 +539,15 @@ static const struct fb_ops hgafb_ops = {
.fb_copyarea = hgafb_copyarea,
.fb_imageblit = hgafb_imageblit,
};
-
+
/* ------------------------------------------------------------------------- *
*
* Functions in fb_info
- *
+ *
* ------------------------------------------------------------------------- */
/* ------------------------------------------------------------------------- */
-
+
/*
* Initialization
*/
diff --git a/drivers/video/fbdev/hpfb.c b/drivers/video/fbdev/hpfb.c
index cdd44e5deafe..77fbff47b1a8 100644
--- a/drivers/video/fbdev/hpfb.c
+++ b/drivers/video/fbdev/hpfb.c
@@ -92,7 +92,7 @@ static int hpfb_setcolreg(unsigned regno, unsigned red, unsigned green,
if (regno >= info->cmap.len)
return 1;
-
+
while (in_be16(fb_regs + 0x6002) & 0x4) udelay(1);
out_be16(fb_regs + 0x60ba, 0xff);
@@ -143,7 +143,7 @@ static void topcat_blit(int x0, int y0, int x1, int y1, int w, int h, int rr)
out_8(fb_regs + WMOVE, fb_bitmask);
}
-static void hpfb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
+static void hpfb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
{
topcat_blit(area->sx, area->sy, area->dx, area->dy, area->width, area->height, RR_COPY);
}
@@ -315,7 +315,7 @@ unmap_screen_base:
return ret;
}
-/*
+/*
* Check that the secondary ID indicates that we have some hope of working with this
* framebuffer. The catseye boards are pretty much like topcats and we can muddle through.
*/
@@ -323,7 +323,7 @@ unmap_screen_base:
#define topcat_sid_ok(x) (((x) == DIO_ID2_LRCATSEYE) || ((x) == DIO_ID2_HRCCATSEYE) \
|| ((x) == DIO_ID2_HRMCATSEYE) || ((x) == DIO_ID2_TOPCAT))
-/*
+/*
* Initialise the framebuffer
*/
static int hpfb_dio_probe(struct dio_dev *d, const struct dio_device_id *ent)
diff --git a/drivers/video/fbdev/imsttfb.c b/drivers/video/fbdev/imsttfb.c
index bea45647184e..975dd682fae4 100644
--- a/drivers/video/fbdev/imsttfb.c
+++ b/drivers/video/fbdev/imsttfb.c
@@ -1347,7 +1347,7 @@ static const struct fb_ops imsttfb_ops = {
.fb_ioctl = imsttfb_ioctl,
};
-static void init_imstt(struct fb_info *info)
+static int init_imstt(struct fb_info *info)
{
struct imstt_par *par = info->par;
__u32 i, tmp, *ip, *end;
@@ -1420,7 +1420,7 @@ static void init_imstt(struct fb_info *info)
|| !(compute_imstt_regvals(par, info->var.xres, info->var.yres))) {
printk("imsttfb: %ux%ux%u not supported\n", info->var.xres, info->var.yres, info->var.bits_per_pixel);
framebuffer_release(info);
- return;
+ return -ENODEV;
}
sprintf(info->fix.id, "IMS TT (%s)", par->ramdac == IBM ? "IBM" : "TVP");
@@ -1456,12 +1456,13 @@ static void init_imstt(struct fb_info *info)
if (register_framebuffer(info) < 0) {
framebuffer_release(info);
- return;
+ return -ENODEV;
}
tmp = (read_reg_le32(par->dc_regs, SSTATUS) & 0x0f00) >> 8;
fb_info(info, "%s frame buffer; %uMB vram; chip version %u\n",
info->fix.id, info->fix.smem_len >> 20, tmp);
+ return 0;
}
static int imsttfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -1529,10 +1530,10 @@ static int imsttfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!par->cmap_regs)
goto error;
info->pseudo_palette = par->palette;
- init_imstt(info);
-
- pci_set_drvdata(pdev, info);
- return 0;
+ ret = init_imstt(info);
+ if (!ret)
+ pci_set_drvdata(pdev, info);
+ return ret;
error:
if (par->dc_regs)
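The imsttfb hunks fix a probe-path bug: init_imstt() could fail internally (releasing the fb_info) while probe still returned 0 and stored the now-freed pointer via pci_set_drvdata(). Converting the helper to return an errno and propagating it closes that hole. The shape of the fix, as a generic runnable sketch with hypothetical names:

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

struct object { int usable; };

static struct object *registry;		/* stands in for pci_set_drvdata() */

/* The init helper reports failure instead of returning void... */
static int object_init(struct object *obj)
{
	if (!obj->usable) {
		free(obj);		/* obj is released on failure */
		return -ENODEV;
	}
	return 0;
}

/* ...so probe publishes the object only when init succeeded. */
static int object_probe(struct object *obj)
{
	int ret = object_init(obj);

	if (!ret)
		registry = obj;		/* store only a still-valid pointer */
	return ret;
}

int main(void)
{
	struct object *obj = malloc(sizeof(*obj));

	if (!obj)
		return 1;
	obj->usable = 0;
	printf("probe: %d, registry: %p\n", object_probe(obj), (void *)registry);
	return 0;
}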
diff --git a/drivers/video/fbdev/macfb.c b/drivers/video/fbdev/macfb.c
index 312e35c9aa6c..44ff860a3f37 100644
--- a/drivers/video/fbdev/macfb.c
+++ b/drivers/video/fbdev/macfb.c
@@ -339,7 +339,7 @@ static int civic_setpalette(unsigned int regno, unsigned int red,
{
unsigned long flags;
int clut_status;
-
+
local_irq_save(flags);
/* Set the register address */
@@ -439,7 +439,7 @@ static int macfb_setcolreg(unsigned regno, unsigned red, unsigned green,
* (according to the entries in the `var' structure).
* Return non-zero for invalid regno.
*/
-
+
if (regno >= fb_info->cmap.len)
return 1;
@@ -548,7 +548,7 @@ static int __init macfb_init(void)
return -ENODEV;
macfb_setup(option);
- if (!MACH_IS_MAC)
+ if (!MACH_IS_MAC)
return -ENODEV;
if (mac_bi_data.id == MAC_MODEL_Q630 ||
@@ -644,7 +644,7 @@ static int __init macfb_init(void)
err = -EINVAL;
goto fail_unmap;
}
-
+
/*
* We take a wild guess that if the video physical address is
* in nubus slot space, that the nubus card is driving video.
@@ -774,7 +774,7 @@ static int __init macfb_init(void)
civic_cmap_regs = ioremap(CIVIC_BASE, 0x1000);
break;
-
+
/*
* Assorted weirdos
* We think this may be like the LC II
diff --git a/drivers/video/fbdev/maxinefb.c b/drivers/video/fbdev/maxinefb.c
index ae1a42bcb0ea..4e6b05232ae2 100644
--- a/drivers/video/fbdev/maxinefb.c
+++ b/drivers/video/fbdev/maxinefb.c
@@ -138,7 +138,7 @@ int __init maxinefb_init(void)
*(volatile unsigned char *)fboff = 0x0;
maxinefb_fix.smem_start = fb_start;
-
+
/* erase hardware cursor */
for (i = 0; i < 512; i++) {
maxinefb_ims332_write_register(IMS332_REG_CURSOR_RAM + i,
diff --git a/drivers/video/fbdev/p9100.c b/drivers/video/fbdev/p9100.c
index 3e44f9516318..0876962c52eb 100644
--- a/drivers/video/fbdev/p9100.c
+++ b/drivers/video/fbdev/p9100.c
@@ -65,7 +65,7 @@ static const struct fb_ops p9100_ops = {
#define P9100_FB_OFF 0x0UL
/* 3 bits: 2=8bpp 3=16bpp 5=32bpp 7=24bpp */
-#define SYS_CONFIG_PIXELSIZE_SHIFT 26
+#define SYS_CONFIG_PIXELSIZE_SHIFT 26
#define SCREENPAINT_TIMECTL1_ENABLE_VIDEO 0x20 /* 0 = off, 1 = on */
@@ -110,7 +110,7 @@ struct p9100_regs {
u32 vram_xxx[25];
/* Registers for IBM RGB528 Palette */
- u32 ramdac_cmap_wridx;
+ u32 ramdac_cmap_wridx;
u32 ramdac_palette_data;
u32 ramdac_pixel_mask;
u32 ramdac_palette_rdaddr;
diff --git a/drivers/video/fbdev/platinumfb.c b/drivers/video/fbdev/platinumfb.c
index 82f019f0a0d6..f8283fcd5edb 100644
--- a/drivers/video/fbdev/platinumfb.c
+++ b/drivers/video/fbdev/platinumfb.c
@@ -52,17 +52,17 @@ struct fb_info_platinum {
__u8 red, green, blue;
} palette[256];
u32 pseudo_palette[16];
-
+
volatile struct cmap_regs __iomem *cmap_regs;
unsigned long cmap_regs_phys;
-
+
volatile struct platinum_regs __iomem *platinum_regs;
unsigned long platinum_regs_phys;
-
+
__u8 __iomem *frame_buffer;
volatile __u8 __iomem *base_frame_buffer;
unsigned long frame_buffer_phys;
-
+
unsigned long total_vram;
int clktype;
int dactype;
@@ -133,7 +133,7 @@ static int platinumfb_set_par (struct fb_info *info)
platinum_set_hardware(pinfo);
init = platinum_reg_init[pinfo->vmode-1];
-
+
if ((pinfo->vmode == VMODE_832_624_75) && (pinfo->cmode > CMODE_8))
offset = 0x10;
@@ -214,7 +214,7 @@ static int platinumfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
break;
}
}
-
+
return 0;
}
@@ -269,7 +269,7 @@ static void platinum_set_hardware(struct fb_info_platinum *pinfo)
struct platinum_regvals *init;
int i;
int vmode, cmode;
-
+
vmode = pinfo->vmode;
cmode = pinfo->cmode;
@@ -436,7 +436,7 @@ static int read_platinum_sense(struct fb_info_platinum *info)
* This routine takes a user-supplied var, and picks the best vmode/cmode from it.
* It also updates the var structure to the actual mode data obtained
*/
-static int platinum_var_to_par(struct fb_var_screeninfo *var,
+static int platinum_var_to_par(struct fb_var_screeninfo *var,
struct fb_info_platinum *pinfo,
int check_only)
{
@@ -478,12 +478,12 @@ static int platinum_var_to_par(struct fb_var_screeninfo *var,
pinfo->yoffset = 0;
pinfo->vxres = pinfo->xres;
pinfo->vyres = pinfo->yres;
-
+
return 0;
}
-/*
+/*
* Parse user specified options (`video=platinumfb:')
*/
static int __init platinumfb_setup(char *options)
@@ -624,7 +624,7 @@ static int platinumfb_probe(struct platform_device* odev)
break;
}
dev_set_drvdata(&odev->dev, info);
-
+
rc = platinum_init_fb(info);
if (rc != 0) {
iounmap(pinfo->frame_buffer);
@@ -640,9 +640,9 @@ static void platinumfb_remove(struct platform_device* odev)
{
struct fb_info *info = dev_get_drvdata(&odev->dev);
struct fb_info_platinum *pinfo = info->par;
-
+
unregister_framebuffer (info);
-
+
/* Unmap frame buffer and registers */
iounmap(pinfo->frame_buffer);
iounmap(pinfo->platinum_regs);
@@ -656,7 +656,7 @@ static void platinumfb_remove(struct platform_device* odev)
framebuffer_release(info);
}
-static struct of_device_id platinumfb_match[] =
+static struct of_device_id platinumfb_match[] =
{
{
.name = "platinum",
@@ -664,7 +664,7 @@ static struct of_device_id platinumfb_match[] =
{},
};
-static struct platform_driver platinum_driver =
+static struct platform_driver platinum_driver =
{
.driver = {
.name = "platinumfb",
diff --git a/drivers/video/fbdev/sa1100fb.c b/drivers/video/fbdev/sa1100fb.c
index b1b8ccdbac4a..a2408bf00ca0 100644
--- a/drivers/video/fbdev/sa1100fb.c
+++ b/drivers/video/fbdev/sa1100fb.c
@@ -57,14 +57,14 @@
* - Driver appears to be working for Brutus 320x200x8bpp mode. Other
* resolutions are working, but only the 8bpp mode is supported.
* Changes need to be made to the palette encode and decode routines
- * to support 4 and 16 bpp modes.
+ * to support 4 and 16 bpp modes.
* Driver is not designed to be a module. The FrameBuffer is statically
- * allocated since dynamic allocation of a 300k buffer cannot be
- * guaranteed.
+ * allocated since dynamic allocation of a 300k buffer cannot be
+ * guaranteed.
*
* 1999/06/17:
* - FrameBuffer memory is now allocated at run-time when the
- * driver is initialized.
+ * driver is initialized.
*
* 2000/04/10: Nicolas Pitre <nico@fluxnic.net>
* - Big cleanup for dynamic selection of machine type at run time.
@@ -74,8 +74,8 @@
*
* 2000/08/07: Tak-Shing Chan <tchan.rd@idthk.com>
* Jeff Sutherland <jsutherland@accelent.com>
- * - Resolved an issue caused by a change made to the Assabet's PLD
- * earlier this year which broke the framebuffer driver for newer
+ * - Resolved an issue caused by a change made to the Assabet's PLD
+ * earlier this year which broke the framebuffer driver for newer
* Phase 4 Assabets. Some other parameters were changed to optimize
* for the Sharp display.
*
@@ -102,7 +102,7 @@
* 2000/11/23: Eric Peng <ericpeng@coventive.com>
* - Freebird add
*
- * 2001/02/07: Jamey Hicks <jamey.hicks@compaq.com>
+ * 2001/02/07: Jamey Hicks <jamey.hicks@compaq.com>
* Cliff Brake <cbrake@accelent.com>
* - Added PM callback
*
@@ -500,7 +500,7 @@ sa1100fb_set_cmap(struct fb_cmap *cmap, int kspc, int con,
* the shortest recovery time
* Suspend
* This refers to a level of power management in which substantial power
- * reduction is achieved by the display. The display can have a longer
+ * reduction is achieved by the display. The display can have a longer
* recovery time from this state than from the Stand-by state
* Off
* This indicates that the display is consuming the lowest level of power
@@ -522,9 +522,9 @@ sa1100fb_set_cmap(struct fb_cmap *cmap, int kspc, int con,
*/
/*
* sa1100fb_blank():
- * Blank the display by setting all palette values to zero. Note, the
+ * Blank the display by setting all palette values to zero. Note, the
* 12 and 16 bpp modes don't really use the palette, so this will not
- * blank the display in all modes.
+ * blank the display in all modes.
*/
static int sa1100fb_blank(int blank, struct fb_info *info)
{
@@ -603,8 +603,8 @@ static inline unsigned int get_pcd(struct sa1100fb_info *fbi,
/*
* sa1100fb_activate_var():
- * Configures LCD Controller based on entries in var parameter. Settings are
- * only written to the controller if changes were made.
+ * Configures LCD Controller based on entries in var parameter. Settings are
+ * only written to the controller if changes were made.
*/
static int sa1100fb_activate_var(struct fb_var_screeninfo *var, struct sa1100fb_info *fbi)
{
@@ -747,7 +747,7 @@ static void sa1100fb_setup_gpio(struct sa1100fb_info *fbi)
*
* SA1110 spec update nr. 25 says we can and should
* clear LDD15 to 12 for 4 or 8bpp modes with active
- * panels.
+ * panels.
*/
if ((fbi->reg_lccr0 & LCCR0_CMS) == LCCR0_Color &&
(fbi->reg_lccr0 & (LCCR0_Dual|LCCR0_Act)) != 0) {
@@ -1020,9 +1020,9 @@ static int sa1100fb_resume(struct platform_device *dev)
/*
* sa1100fb_map_video_memory():
- * Allocates the DRAM memory for the frame buffer. This buffer is
- * remapped into a non-cached, non-buffered, memory region to
- * allow palette and pixel writes to occur without flushing the
+ * Allocates the DRAM memory for the frame buffer. This buffer is
+ * remapped into a non-cached, non-buffered, memory region to
+ * allow palette and pixel writes to occur without flushing the
* cache. Once this area is remapped, all virtual memory
* access to the video memory should occur at the new region.
*/
diff --git a/drivers/video/fbdev/stifb.c b/drivers/video/fbdev/stifb.c
index 01363dccfdaf..66d82f6d17c7 100644
--- a/drivers/video/fbdev/stifb.c
+++ b/drivers/video/fbdev/stifb.c
@@ -1413,6 +1413,7 @@ out_err1:
iounmap(info->screen_base);
out_err0:
kfree(fb);
+ sti->info = NULL;
return -ENXIO;
}
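The stifb one-liner clears sti->info on the error path so that no stale pointer to the just-freed fb_info survives; later code consulting sti->info would otherwise dereference freed memory. Reduced to its essence as a plain C sketch:

#include <stdlib.h>

struct sti_struct_sketch { void *info; };

static void teardown(struct sti_struct_sketch *sti)
{
	free(sti->info);
	sti->info = NULL;	/* later lookups now see "no framebuffer"
				 * instead of a dangling pointer */
}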
diff --git a/drivers/video/fbdev/valkyriefb.c b/drivers/video/fbdev/valkyriefb.c
index 1007023a5e88..b166b7cfe0e5 100644
--- a/drivers/video/fbdev/valkyriefb.c
+++ b/drivers/video/fbdev/valkyriefb.c
@@ -1,7 +1,7 @@
/*
* valkyriefb.c -- frame buffer device for the PowerMac 'valkyrie' display
*
- * Created 8 August 1998 by
+ * Created 8 August 1998 by
* Martin Costabel <costabel@wanadoo.fr> and Kevin Schoedel
*
* Vmode-switching changes and vmode 15/17 modifications created 29 August
@@ -77,13 +77,13 @@ struct fb_info_valkyrie {
struct fb_par_valkyrie par;
struct cmap_regs __iomem *cmap_regs;
unsigned long cmap_regs_phys;
-
+
struct valkyrie_regs __iomem *valkyrie_regs;
unsigned long valkyrie_regs_phys;
-
+
__u8 __iomem *frame_buffer;
unsigned long frame_buffer_phys;
-
+
int sense;
unsigned long total_vram;
@@ -244,7 +244,7 @@ static inline int valkyrie_vram_reqd(int video_mode, int color_mode)
{
int pitch;
struct valkyrie_regvals *init = valkyrie_reg_init[video_mode-1];
-
+
if ((pitch = init->pitch[color_mode]) == 0)
pitch = 2 * init->pitch[0];
return init->vres * pitch;
@@ -467,7 +467,7 @@ static int valkyrie_var_to_par(struct fb_var_screeninfo *var,
printk(KERN_ERR "valkyriefb: vmode %d not valid.\n", vmode);
return -EINVAL;
}
-
+
if (cmode != CMODE_8 && cmode != CMODE_16) {
printk(KERN_ERR "valkyriefb: cmode %d not valid.\n", cmode);
return -EINVAL;
@@ -516,7 +516,7 @@ static void valkyrie_init_fix(struct fb_fix_screeninfo *fix, struct fb_info_valk
fix->ywrapstep = 0;
fix->ypanstep = 0;
fix->xpanstep = 0;
-
+
}
/* Fix must already be inited above */
diff --git a/drivers/video/fbdev/vfb.c b/drivers/video/fbdev/vfb.c
index 7d71a6fd1cdd..cf3c72754ce7 100644
--- a/drivers/video/fbdev/vfb.c
+++ b/drivers/video/fbdev/vfb.c
@@ -111,7 +111,7 @@ static u_long get_line_length(int xres_virtual, int bpp)
* First part, xxxfb_check_var, must not write anything
* to hardware, it should only verify and adjust var.
* This means it doesn't alter par but it does use hardware
- * data from it to check this var.
+ * data from it to check this var.
*/
static int vfb_check_var(struct fb_var_screeninfo *var,
@@ -169,7 +169,7 @@ static int vfb_check_var(struct fb_var_screeninfo *var,
/*
* Now that we checked it we alter var. The reason being is that the video
- * mode passed in might not work but slight changes to it might make it
+ * mode passed in might not work but slight changes to it might make it
* work. This way we let the user know what is acceptable.
*/
switch (var->bits_per_pixel) {
@@ -235,8 +235,8 @@ static int vfb_check_var(struct fb_var_screeninfo *var,
}
/* This routine actually sets the video mode. It's in here where we
- * the hardware state info->par and fix which can be affected by the
- * change in par. For this driver it doesn't do much.
+ * the hardware state info->par and fix which can be affected by the
+ * change in par. For this driver it doesn't do much.
*/
static int vfb_set_par(struct fb_info *info)
{
@@ -379,7 +379,7 @@ static int vfb_pan_display(struct fb_var_screeninfo *var,
}
/*
- * Most drivers don't need their own mmap function
+ * Most drivers don't need their own mmap function
*/
static int vfb_mmap(struct fb_info *info,