author    Linus Torvalds <torvalds@linux-foundation.org>  2021-11-02 16:47:49 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2021-11-02 16:47:49 -0700
commit    56d33754481fe0dc7436dc4ee4fbd44b3039361d (patch)
tree      ef594446d753c31b0a4aee45aa831b834b924326 /include
parent    464fddbba1dfbc219f1e9145127a482d2159dee5 (diff)
parent    d9bd054177fbd2c4762546aec40fc3071bfe4cc0 (diff)
Merge tag 'drm-next-2021-11-03' of git://anongit.freedesktop.org/drm/drm
Pull drm updates from Dave Airlie:
 "Summary below. i915 starts to add support for DG2 GPUs, enables DG1
  and ADL-S support by default, lots of work to enable DisplayPort 2.0
  across drivers. Lots of documentation updates and fixes across the
  board.

  core:
   - improve dma_fence, lease and resv documentation
   - shmem-helpers: allocate WC pages on x86, use vmf_insert_pin
   - sched fixes/improvements
   - allow empty drm leases
   - add dma resv iterator
   - add more DP 2.0 headers
   - DP MST helper improvements for DP2.0

  dma-buf:
   - avoid warnings, remove fence trace macros

  bridge:
   - new helper to get rid of panels
   - probe improvements for it66121
   - enable DSI EOTP for anx7625

  fbdev:
   - efifb: release runtime PM on destroy

  ttm:
   - kerneldoc switch
   - helper to clear all DMA mappings
   - pool shrinker optimization
   - remove ttm_tt_destroy_common
   - update ttm_move_memcpy for async use

  panel:
   - add new panel-edp driver

  amdgpu:
   - Initial DP 2.0 support
   - Initial USB4 DP tunnelling support
   - Aldebaran MCE support
   - Modifier support for DCC image stores for GFX 10.3
   - Display rework for better FP code handling
   - Yellow Carp/Cyan Skillfish updates
   - Cyan Skillfish display support
   - convert vega/navi to IP discovery asic enumeration
   - validate IP discovery table
   - RAS improvements
   - Lots of fixes

  i915:
   - DG1 PCI IDs + LMEM discovery/placement
   - DG1 GuC submission by default
   - ADL-S PCI IDs updated + enabled by default
   - ADL-P (XE_LPD) fixes and updates
   - DG2 display fixes
   - PXP protected object support for Gen12 integrated
   - expose multi-LRC submission interface for GuC
   - export logical engine instance to user
   - Disable engine bonding on Gen12+
   - PSR cleanup
   - PSR2 selective fetch by default
   - DP 2.0 prep work
   - VESA vendor block + MSO use of it
   - FBC refactor
   - try again to fix fast-narrow vs slow-wide eDP training
   - use THP when IOMMU enabled
   - LMEM backup/restore for suspend/resume
   - locking simplification
   - GuC major reworking
   - async flip VT-D workaround changes
   - DP link training improvements
   - misc display refactorings

  bochs:
   - new PCI ID

  rcar-du:
   - Non-contiguous buffer import support for rcar-du
   - r8a779a0 support prep

  omapdrm:
   - COMPILE_TEST fixes

  sti:
   - COMPILE_TEST fixes

  msm:
   - fence ordering improvements
   - eDP support in DP sub-driver
   - dpu irq handling cleanup
   - CRC support for making igt happy
   - NO_CONNECTOR bridge support
   - dsi: 14nm phy support for msm8953
   - mdp5: msm8x53, sdm450, sdm632 support

  stm:
   - layer alpha + zpo support

  v3d:
   - fix Vulkan CTS failure
   - support multiple sync objects

  gud:
   - add R8/RGB332/RGB888 pixel formats

  vc4:
   - convert to new bridge helpers

  vgem:
   - use shmem helpers

  virtio:
   - support mapping exported vram

  zte:
   - remove obsolete driver

  rockchip:
   - use bridge attach no connector for LVDS/RGB"

* tag 'drm-next-2021-11-03' of git://anongit.freedesktop.org/drm/drm: (1259 commits)
  drm/amdgpu/gmc6: fix DMA mask from 44 to 40 bits
  drm/amd/display: MST support for DPIA
  drm/amdgpu: Fix even more out of bound writes from debugfs
  drm/amdgpu/discovery: add SDMA IP instance info for soc15 parts
  drm/amdgpu/discovery: add UVD/VCN IP instance info for soc15 parts
  drm/amdgpu/UAPI: rearrange header to better align related items
  drm/amd/display: Enable dpia in dmub only for DCN31 B0
  drm/amd/display: Fix USB4 hot plug crash issue
  drm/amd/display: Fix deadlock when falling back to v2 from v3
  drm/amd/display: Fallback to clocks which meet requested voltage on DCN31
  drm/amd/display: move FPU associated DCN301 code to DML folder
  drm/amd/display: fix link training regression for 1 or 2 lane
  drm/amd/display: add two lane settings training options
  drm/amd/display: decouple hw_lane_settings from dpcd_lane_settings
  drm/amd/display: implement decide lane settings
  drm/amd/display: adopt DP2.0 LT SCR revision 8
  drm/amd/display: FEC configuration for dpia links in MST mode
  drm/amd/display: FEC configuration for dpia links
  drm/amd/display: Add workaround flag for EDID read on certain docks
  drm/amd/display: Set phy_mux_sel bit in dmub scratch register
  ...
Diffstat (limited to 'include')
-rw-r--r--  include/drm/amd_asic_type.h | 1
-rw-r--r--  include/drm/drm_bridge.h | 23
-rw-r--r--  include/drm/drm_connector.h | 37
-rw-r--r--  include/drm/drm_displayid.h | 101
-rw-r--r--  include/drm/drm_dp_helper.h | 26
-rw-r--r--  include/drm/drm_dp_mst_helper.h | 5
-rw-r--r--  include/drm/drm_edid.h | 47
-rw-r--r--  include/drm/drm_format_helper.h | 4
-rw-r--r--  include/drm/drm_ioctl.h | 1
-rw-r--r--  include/drm/drm_mipi_dsi.h | 4
-rw-r--r--  include/drm/drm_mode_config.h | 13
-rw-r--r--  include/drm/drm_plane.h | 2
-rw-r--r--  include/drm/drm_print.h | 30
-rw-r--r--  include/drm/drm_probe_helper.h | 1
-rw-r--r--  include/drm/gpu_scheduler.h | 188
-rw-r--r--  include/drm/gud.h | 6
-rw-r--r--  include/drm/i915_component.h | 1
-rw-r--r--  include/drm/i915_pciids.h | 8
-rw-r--r--  include/drm/i915_pxp_tee_interface.h | 42
-rw-r--r--  include/drm/ttm/ttm_bo_api.h | 12
-rw-r--r--  include/drm/ttm/ttm_bo_driver.h | 2
-rw-r--r--  include/drm/ttm/ttm_caching.h | 17
-rw-r--r--  include/drm/ttm/ttm_device.h | 79
-rw-r--r--  include/drm/ttm/ttm_placement.h | 1
-rw-r--r--  include/drm/ttm/ttm_pool.h | 5
-rw-r--r--  include/drm/ttm/ttm_range_manager.h | 18
-rw-r--r--  include/drm/ttm/ttm_resource.h | 9
-rw-r--r--  include/drm/ttm/ttm_tt.h | 98
-rw-r--r--  include/linux/dma-buf.h | 9
-rw-r--r--  include/linux/dma-fence.h | 32
-rw-r--r--  include/linux/dma-resv.h | 199
-rw-r--r--  include/linux/io.h | 5
-rw-r--r--  include/linux/seqno-fence.h | 109
-rw-r--r--  include/linux/shrinker.h | 1
-rw-r--r--  include/uapi/drm/amdgpu_drm.h | 13
-rw-r--r--  include/uapi/drm/drm_fourcc.h | 12
-rw-r--r--  include/uapi/drm/drm_mode.h | 4
-rw-r--r--  include/uapi/drm/i915_drm.h | 242
-rw-r--r--  include/uapi/drm/v3d_drm.h | 78
-rw-r--r--  include/uapi/drm/virtgpu_drm.h | 27
-rw-r--r--  include/uapi/linux/virtio_gpu.h | 18
41 files changed, 1207 insertions, 323 deletions
diff --git a/include/drm/amd_asic_type.h b/include/drm/amd_asic_type.h
index 0f66a0d9f06d..90b69270f2fa 100644
--- a/include/drm/amd_asic_type.h
+++ b/include/drm/amd_asic_type.h
@@ -62,6 +62,7 @@ enum amd_asic_type {
CHIP_DIMGREY_CAVEFISH, /* 33 */
CHIP_BEIGE_GOBY, /* 34 */
CHIP_YELLOW_CARP, /* 35 */
+ CHIP_IP_DISCOVERY, /* 36 */
CHIP_LAST,
};
diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h
index 46bdfa48c413..061d87313fac 100644
--- a/include/drm/drm_bridge.h
+++ b/include/drm/drm_bridge.h
@@ -790,11 +790,19 @@ drm_priv_to_bridge(struct drm_private_obj *priv)
void drm_bridge_add(struct drm_bridge *bridge);
void drm_bridge_remove(struct drm_bridge *bridge);
-struct drm_bridge *of_drm_find_bridge(struct device_node *np);
int drm_bridge_attach(struct drm_encoder *encoder, struct drm_bridge *bridge,
struct drm_bridge *previous,
enum drm_bridge_attach_flags flags);
+#ifdef CONFIG_OF
+struct drm_bridge *of_drm_find_bridge(struct device_node *np);
+#else
+static inline struct drm_bridge *of_drm_find_bridge(struct device_node *np)
+{
+ return NULL;
+}
+#endif
+
/**
* drm_bridge_get_next_bridge() - Get the next bridge in the chain
* @bridge: bridge object
@@ -914,4 +922,17 @@ struct drm_bridge *devm_drm_panel_bridge_add_typed(struct device *dev,
struct drm_connector *drm_panel_bridge_connector(struct drm_bridge *bridge);
#endif
+#if defined(CONFIG_OF) && defined(CONFIG_DRM_PANEL_BRIDGE)
+struct drm_bridge *devm_drm_of_get_bridge(struct device *dev, struct device_node *node,
+ u32 port, u32 endpoint);
+#else
+static inline struct drm_bridge *devm_drm_of_get_bridge(struct device *dev,
+ struct device_node *node,
+ u32 port,
+ u32 endpoint)
+{
+ return ERR_PTR(-ENODEV);
+}
+#endif
+
#endif
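
How a driver might consume the new helper; a minimal sketch, assuming a hypothetical encoder driver whose DT port/endpoint numbers are made up:

	static int hypothetical_encoder_probe(struct device *dev)
	{
		struct drm_bridge *bridge;

		/* Find the next bridge (or a panel wrapped as a bridge) at port 1, endpoint 0. */
		bridge = devm_drm_of_get_bridge(dev, dev->of_node, 1, 0);
		if (IS_ERR(bridge))
			return PTR_ERR(bridge); /* -ENODEV stub when !CONFIG_OF or !CONFIG_DRM_PANEL_BRIDGE */

		/* Later, during encoder setup: drm_bridge_attach(encoder, bridge, NULL, 0); */
		return 0;
	}
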
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
index 1647960c9e50..379746d3266f 100644
--- a/include/drm/drm_connector.h
+++ b/include/drm/drm_connector.h
@@ -590,6 +590,18 @@ struct drm_display_info {
* @monitor_range: Frequency range supported by monitor range descriptor
*/
struct drm_monitor_range_info monitor_range;
+
+ /**
+ * @mso_stream_count: eDP Multi-SST Operation (MSO) stream count from
+ * the DisplayID VESA vendor block. 0 for conventional Single-Stream
+ * Transport (SST), or 2 or 4 MSO streams.
+ */
+ u8 mso_stream_count;
+
+ /**
+ * @mso_pixel_overlap: eDP MSO segment pixel overlap, 0-8 pixels.
+ */
+ u8 mso_pixel_overlap;
};
int drm_display_info_set_bus_formats(struct drm_display_info *info,
@@ -1084,6 +1096,14 @@ struct drm_connector_funcs {
*/
void (*atomic_print_state)(struct drm_printer *p,
const struct drm_connector_state *state);
+
+ /**
+ * @oob_hotplug_event:
+ *
+ * This will get called when a hotplug-event for a drm-connector
+ * has been received from a source outside the display driver / device.
+ */
+ void (*oob_hotplug_event)(struct drm_connector *connector);
};
/**
@@ -1228,6 +1248,14 @@ struct drm_connector {
struct device *kdev;
/** @attr: sysfs attributes */
struct device_attribute *attr;
+ /**
+ * @fwnode: associated fwnode supplied by platform firmware
+ *
+ * Drivers can set this to associate a fwnode with a connector, drivers
+ * are expected to get a reference on the fwnode when setting this.
+ * drm_connector_cleanup() will call fwnode_handle_put() on this.
+ */
+ struct fwnode_handle *fwnode;
/**
* @head:
@@ -1239,6 +1267,14 @@ struct drm_connector {
*/
struct list_head head;
+ /**
+ * @global_connector_list_entry:
+ *
+ * Connector entry in the global connector-list, used by
+ * drm_connector_find_by_fwnode().
+ */
+ struct list_head global_connector_list_entry;
+
/** @base: base KMS object */
struct drm_mode_object base;
@@ -1650,6 +1686,7 @@ drm_connector_is_unregistered(struct drm_connector *connector)
DRM_CONNECTOR_UNREGISTERED;
}
+void drm_connector_oob_hotplug_event(struct fwnode_handle *connector_fwnode);
const char *drm_get_connector_type_name(unsigned int connector_type);
const char *drm_get_connector_status_name(enum drm_connector_status status);
const char *drm_get_subpixel_order_name(enum subpixel_order order);
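
The out-of-band hotplug plumbing has two halves: the display driver fills in &drm_connector_funcs.oob_hotplug_event and associates a fwnode with the connector, and outside code (e.g. a USB Type-C controller driver) calls drm_connector_oob_hotplug_event() with that fwnode. A minimal sketch with hypothetical names:

	/* Display driver side: react to an externally reported hotplug. */
	static void hypothetical_oob_hotplug_event(struct drm_connector *connector)
	{
		drm_connector_helper_hpd_irq_event(connector);
	}

	/* Type-C side: kick whichever connector registered this fwnode. */
	drm_connector_oob_hotplug_event(port_fwnode);
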
diff --git a/include/drm/drm_displayid.h b/include/drm/drm_displayid.h
index ec64d141f578..7ffbd9f7bfc7 100644
--- a/include/drm/drm_displayid.h
+++ b/include/drm/drm_displayid.h
@@ -23,38 +23,71 @@
#define DRM_DISPLAYID_H
#include <linux/types.h>
+#include <linux/bits.h>
struct edid;
-#define DATA_BLOCK_PRODUCT_ID 0x00
-#define DATA_BLOCK_DISPLAY_PARAMETERS 0x01
-#define DATA_BLOCK_COLOR_CHARACTERISTICS 0x02
-#define DATA_BLOCK_TYPE_1_DETAILED_TIMING 0x03
-#define DATA_BLOCK_TYPE_2_DETAILED_TIMING 0x04
-#define DATA_BLOCK_TYPE_3_SHORT_TIMING 0x05
-#define DATA_BLOCK_TYPE_4_DMT_TIMING 0x06
-#define DATA_BLOCK_VESA_TIMING 0x07
-#define DATA_BLOCK_CEA_TIMING 0x08
-#define DATA_BLOCK_VIDEO_TIMING_RANGE 0x09
-#define DATA_BLOCK_PRODUCT_SERIAL_NUMBER 0x0a
-#define DATA_BLOCK_GP_ASCII_STRING 0x0b
-#define DATA_BLOCK_DISPLAY_DEVICE_DATA 0x0c
-#define DATA_BLOCK_INTERFACE_POWER_SEQUENCING 0x0d
-#define DATA_BLOCK_TRANSFER_CHARACTERISTICS 0x0e
-#define DATA_BLOCK_DISPLAY_INTERFACE 0x0f
-#define DATA_BLOCK_STEREO_DISPLAY_INTERFACE 0x10
-#define DATA_BLOCK_TILED_DISPLAY 0x12
-#define DATA_BLOCK_CTA 0x81
-
-#define DATA_BLOCK_VENDOR_SPECIFIC 0x7f
-
-#define PRODUCT_TYPE_EXTENSION 0
-#define PRODUCT_TYPE_TEST 1
-#define PRODUCT_TYPE_PANEL 2
-#define PRODUCT_TYPE_MONITOR 3
-#define PRODUCT_TYPE_TV 4
-#define PRODUCT_TYPE_REPEATER 5
-#define PRODUCT_TYPE_DIRECT_DRIVE 6
+#define VESA_IEEE_OUI 0x3a0292
+
+/* DisplayID Structure versions */
+#define DISPLAY_ID_STRUCTURE_VER_12 0x12
+#define DISPLAY_ID_STRUCTURE_VER_20 0x20
+
+/* DisplayID Structure v1r2 Data Blocks */
+#define DATA_BLOCK_PRODUCT_ID 0x00
+#define DATA_BLOCK_DISPLAY_PARAMETERS 0x01
+#define DATA_BLOCK_COLOR_CHARACTERISTICS 0x02
+#define DATA_BLOCK_TYPE_1_DETAILED_TIMING 0x03
+#define DATA_BLOCK_TYPE_2_DETAILED_TIMING 0x04
+#define DATA_BLOCK_TYPE_3_SHORT_TIMING 0x05
+#define DATA_BLOCK_TYPE_4_DMT_TIMING 0x06
+#define DATA_BLOCK_VESA_TIMING 0x07
+#define DATA_BLOCK_CEA_TIMING 0x08
+#define DATA_BLOCK_VIDEO_TIMING_RANGE 0x09
+#define DATA_BLOCK_PRODUCT_SERIAL_NUMBER 0x0a
+#define DATA_BLOCK_GP_ASCII_STRING 0x0b
+#define DATA_BLOCK_DISPLAY_DEVICE_DATA 0x0c
+#define DATA_BLOCK_INTERFACE_POWER_SEQUENCING 0x0d
+#define DATA_BLOCK_TRANSFER_CHARACTERISTICS 0x0e
+#define DATA_BLOCK_DISPLAY_INTERFACE 0x0f
+#define DATA_BLOCK_STEREO_DISPLAY_INTERFACE 0x10
+#define DATA_BLOCK_TILED_DISPLAY 0x12
+#define DATA_BLOCK_VENDOR_SPECIFIC 0x7f
+#define DATA_BLOCK_CTA 0x81
+
+/* DisplayID Structure v2r0 Data Blocks */
+#define DATA_BLOCK_2_PRODUCT_ID 0x20
+#define DATA_BLOCK_2_DISPLAY_PARAMETERS 0x21
+#define DATA_BLOCK_2_TYPE_7_DETAILED_TIMING 0x22
+#define DATA_BLOCK_2_TYPE_8_ENUMERATED_TIMING 0x23
+#define DATA_BLOCK_2_TYPE_9_FORMULA_TIMING 0x24
+#define DATA_BLOCK_2_DYNAMIC_VIDEO_TIMING 0x25
+#define DATA_BLOCK_2_DISPLAY_INTERFACE_FEATURES 0x26
+#define DATA_BLOCK_2_STEREO_DISPLAY_INTERFACE 0x27
+#define DATA_BLOCK_2_TILED_DISPLAY_TOPOLOGY 0x28
+#define DATA_BLOCK_2_CONTAINER_ID 0x29
+#define DATA_BLOCK_2_VENDOR_SPECIFIC 0x7e
+#define DATA_BLOCK_2_CTA_DISPLAY_ID 0x81
+
+/* DisplayID Structure v1r2 Product Type */
+#define PRODUCT_TYPE_EXTENSION 0
+#define PRODUCT_TYPE_TEST 1
+#define PRODUCT_TYPE_PANEL 2
+#define PRODUCT_TYPE_MONITOR 3
+#define PRODUCT_TYPE_TV 4
+#define PRODUCT_TYPE_REPEATER 5
+#define PRODUCT_TYPE_DIRECT_DRIVE 6
+
+/* DisplayID Structure v2r0 Display Product Primary Use Case (~Product Type) */
+#define PRIMARY_USE_EXTENSION 0
+#define PRIMARY_USE_TEST 1
+#define PRIMARY_USE_GENERIC 2
+#define PRIMARY_USE_TV 3
+#define PRIMARY_USE_DESKTOP_PRODUCTIVITY 4
+#define PRIMARY_USE_DESKTOP_GAMING 5
+#define PRIMARY_USE_PRESENTATION 6
+#define PRIMARY_USE_HEAD_MOUNTED_VR 7
+#define PRIMARY_USE_HEAD_MOUNTED_AR 8
struct displayid_header {
u8 rev;
@@ -96,6 +129,16 @@ struct displayid_detailed_timing_block {
struct displayid_detailed_timings_1 timings[];
};
+#define DISPLAYID_VESA_MSO_OVERLAP GENMASK(3, 0)
+#define DISPLAYID_VESA_MSO_MODE GENMASK(6, 5)
+
+struct displayid_vesa_vendor_specific_block {
+ struct displayid_block base;
+ u8 oui[3];
+ u8 data_structure_type;
+ u8 mso;
+} __packed;
+
/* DisplayID iteration */
struct displayid_iter {
const struct edid *edid;
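
The two GENMASK() fields above carve up the single mso byte of the VESA vendor block. A hedged decode sketch (the 0/2/4 stream mapping follows the eDP MSO encoding described for &drm_display_info.mso_stream_count; the helper name is hypothetical):

	#include <linux/bitfield.h>

	static void hypothetical_parse_mso(const struct displayid_vesa_vendor_specific_block *vesa,
					   u8 *stream_count, u8 *pixel_overlap)
	{
		switch (FIELD_GET(DISPLAYID_VESA_MSO_MODE, vesa->mso)) {
		case 1:
			*stream_count = 2;
			break;
		case 2:
			*stream_count = 4;
			break;
		default:	/* 0 = conventional SST, others reserved */
			*stream_count = 0;
			break;
		}
		*pixel_overlap = *stream_count ?
			FIELD_GET(DISPLAYID_VESA_MSO_OVERLAP, vesa->mso) : 0;
	}
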
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index 1d5b3dbb6e56..b52df4db3e8f 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -453,6 +453,7 @@ struct drm_panel;
# define DP_FEC_UNCORR_BLK_ERROR_COUNT_CAP (1 << 1)
# define DP_FEC_CORR_BLK_ERROR_COUNT_CAP (1 << 2)
# define DP_FEC_BIT_ERROR_COUNT_CAP (1 << 3)
+#define DP_FEC_CAPABILITY_1 0x091 /* 2.0 */
/* DP-HDMI2.1 PCON DSC ENCODER SUPPORT */
#define DP_PCON_DSC_ENCODER_CAP_SIZE 0xC /* 0x9E - 0x92 */
@@ -537,6 +538,9 @@ struct drm_panel;
#define DP_DSC_BRANCH_OVERALL_THROUGHPUT_1 0x0a1
#define DP_DSC_BRANCH_MAX_LINE_WIDTH 0x0a2
+/* DFP Capability Extension */
+#define DP_DFP_CAPABILITY_EXTENSION_SUPPORT 0x0a3 /* 2.0 */
+
/* Link Configuration */
#define DP_LINK_BW_SET 0x100
# define DP_LINK_RATE_TABLE 0x00 /* eDP 1.4 */
@@ -688,6 +692,7 @@ struct drm_panel;
#define DP_DSC_ENABLE 0x160 /* DP 1.4 */
# define DP_DECOMPRESSION_EN (1 << 0)
+#define DP_DSC_CONFIGURATION 0x161 /* DP 2.0 */
#define DP_PSR_EN_CFG 0x170 /* XXX 1.2? */
# define DP_PSR_ENABLE BIT(0)
@@ -743,6 +748,7 @@ struct drm_panel;
# define DP_RECEIVE_PORT_0_STATUS (1 << 0)
# define DP_RECEIVE_PORT_1_STATUS (1 << 1)
# define DP_STREAM_REGENERATION_STATUS (1 << 2) /* 2.0 */
+# define DP_INTRA_HOP_AUX_REPLY_INDICATION (1 << 3) /* 2.0 */
#define DP_ADJUST_REQUEST_LANE0_1 0x206
#define DP_ADJUST_REQUEST_LANE2_3 0x207
@@ -865,6 +871,8 @@ struct drm_panel;
# define DP_PHY_TEST_PATTERN_80BIT_CUSTOM 0x4
# define DP_PHY_TEST_PATTERN_CP2520 0x5
+#define DP_PHY_SQUARE_PATTERN 0x249
+
#define DP_TEST_HBR2_SCRAMBLER_RESET 0x24A
#define DP_TEST_80BIT_CUSTOM_PATTERN_7_0 0x250
#define DP_TEST_80BIT_CUSTOM_PATTERN_15_8 0x251
@@ -1109,6 +1117,18 @@ struct drm_panel;
#define DP_128B132B_TRAINING_AUX_RD_INTERVAL 0x2216 /* 2.0 */
# define DP_128B132B_TRAINING_AUX_RD_INTERVAL_MASK 0x7f
+#define DP_TEST_264BIT_CUSTOM_PATTERN_7_0 0x2230
+#define DP_TEST_264BIT_CUSTOM_PATTERN_263_256 0x2250
+
+/* DSC Extended Capability Branch Total DSC Resources */
+#define DP_DSC_SUPPORT_AND_DSC_DECODER_COUNT 0x2260 /* 2.0 */
+# define DP_DSC_DECODER_COUNT_MASK (0b111 << 5)
+# define DP_DSC_DECODER_COUNT_SHIFT 5
+#define DP_DSC_MAX_SLICE_COUNT_AND_AGGREGATION_0 0x2270 /* 2.0 */
+# define DP_DSC_DECODER_0_MAXIMUM_SLICE_COUNT_MASK (1 << 0)
+# define DP_DSC_DECODER_0_AGGREGATION_SUPPORT_MASK (0b111 << 1)
+# define DP_DSC_DECODER_0_AGGREGATION_SUPPORT_SHIFT 1
+
/* Protocol Converter Extension */
/* HDMI CEC tunneling over AUX DP 1.3 section 5.3.3.3.1 DPCD 1.4+ */
#define DP_CEC_TUNNELING_CAPABILITY 0x3000
@@ -1319,6 +1339,10 @@ struct drm_panel;
#define DP_MAX_LANE_COUNT_PHY_REPEATER 0xf0004 /* 1.4a */
#define DP_Repeater_FEC_CAPABILITY 0xf0004 /* 1.4 */
#define DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT 0xf0005 /* 1.4a */
+#define DP_MAIN_LINK_CHANNEL_CODING_PHY_REPEATER 0xf0006 /* 2.0 */
+# define DP_PHY_REPEATER_128B132B_SUPPORTED (1 << 0)
+/* See DP_128B132B_SUPPORTED_LINK_RATES for values */
+#define DP_PHY_REPEATER_128B132B_RATES 0xf0007 /* 2.0 */
enum drm_dp_phy {
DP_PHY_DPRX,
@@ -1490,6 +1514,8 @@ u8 drm_dp_get_adjust_request_voltage(const u8 link_status[DP_LINK_STATUS_SIZE],
int lane);
u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SIZE],
int lane);
+u8 drm_dp_get_adjust_tx_ffe_preset(const u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane);
u8 drm_dp_get_adjust_request_post_cursor(const u8 link_status[DP_LINK_STATUS_SIZE],
unsigned int lane);
diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
index ddb9231d0309..78044ac5b59b 100644
--- a/include/drm/drm_dp_mst_helper.h
+++ b/include/drm/drm_dp_mst_helper.h
@@ -554,6 +554,8 @@ struct drm_dp_mst_topology_state {
struct drm_private_state base;
struct list_head vcpis;
struct drm_dp_mst_topology_mgr *mgr;
+ u8 total_avail_slots;
+ u8 start_slot;
};
#define to_dp_mst_topology_mgr(x) container_of(x, struct drm_dp_mst_topology_mgr, base)
@@ -806,6 +808,7 @@ int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp
void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
+void drm_dp_mst_update_slots(struct drm_dp_mst_topology_state *mst_state, uint8_t link_encoding_cap);
void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port);
@@ -815,7 +818,7 @@ int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
int pbn);
-int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr);
+int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr, int start_slot);
int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr);
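
The new total_avail_slots/start_slot pair exists because 128b/132b channel coding frees up slot 0: with 8b/10b coding one slot carries the MTP header, leaving 63 usable slots from slot 1, whereas 128b/132b gets all 64 slots from slot 0. drm_dp_mst_update_slots() switches between the two; a hedged usage sketch from an atomic_check-style path (DP_CAP_ANSI_* are the existing channel-coding capability bits):

	mst_state = drm_atomic_get_mst_topology_state(state, mgr);
	if (IS_ERR(mst_state))
		return PTR_ERR(mst_state);

	drm_dp_mst_update_slots(mst_state, uses_128b132b ?
				DP_CAP_ANSI_128B132B : DP_CAP_ANSI_8B10B);
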
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index deccfd39e6db..18f6c700f6d0 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -508,6 +508,52 @@ static inline u8 drm_eld_get_conn_type(const uint8_t *eld)
return eld[DRM_ELD_SAD_COUNT_CONN_TYPE] & DRM_ELD_CONN_TYPE_MASK;
}
+/**
+ * drm_edid_encode_panel_id - Encode an ID for matching against drm_edid_get_panel_id()
+ * @vend_chr_0: First character of the vendor string.
+ * @vend_chr_1: Second character of the vendor string.
+ * @vend_chr_2: Third character of the vendor string.
+ * @product_id: The 16-bit product ID.
+ *
+ * This is a macro so that it can be calculated at compile time and used
+ * as an initializer.
+ *
+ * For instance:
+ * drm_edid_encode_panel_id('B', 'O', 'E', 0x2d08) => 0x09e52d08
+ *
+ * Return: a 32-bit ID per panel.
+ */
+#define drm_edid_encode_panel_id(vend_chr_0, vend_chr_1, vend_chr_2, product_id) \
+ ((((u32)(vend_chr_0) - '@') & 0x1f) << 26 | \
+ (((u32)(vend_chr_1) - '@') & 0x1f) << 21 | \
+ (((u32)(vend_chr_2) - '@') & 0x1f) << 16 | \
+ ((product_id) & 0xffff))
+
+/**
+ * drm_edid_decode_panel_id - Decode a panel ID from drm_edid_encode_panel_id()
+ * @panel_id: The panel ID to decode.
+ * @vend: A 4-byte buffer to store the 3-letter vendor string plus a '\0'
+ * termination
+ * @product_id: The product ID will be returned here.
+ *
+ * For instance, after:
+ * drm_edid_decode_panel_id(0x09e52d08, vend, &product_id)
+ * These will be true:
+ * vend[0] = 'B'
+ * vend[1] = 'O'
+ * vend[2] = 'E'
+ * vend[3] = '\0'
+ * product_id = 0x2d08
+ */
+static inline void drm_edid_decode_panel_id(u32 panel_id, char vend[4], u16 *product_id)
+{
+ *product_id = (u16)(panel_id & 0xffff);
+ vend[0] = '@' + ((panel_id >> 26) & 0x1f);
+ vend[1] = '@' + ((panel_id >> 21) & 0x1f);
+ vend[2] = '@' + ((panel_id >> 16) & 0x1f);
+ vend[3] = '\0';
+}
+
bool drm_probe_ddc(struct i2c_adapter *adapter);
struct edid *drm_do_get_edid(struct drm_connector *connector,
int (*get_edid_block)(void *data, u8 *buf, unsigned int block,
@@ -515,6 +561,7 @@ struct edid *drm_do_get_edid(struct drm_connector *connector,
void *data);
struct edid *drm_get_edid(struct drm_connector *connector,
struct i2c_adapter *adapter);
+u32 drm_edid_get_panel_id(struct i2c_adapter *adapter);
struct edid *drm_get_edid_switcheroo(struct drm_connector *connector,
struct i2c_adapter *adapter);
struct edid *drm_edid_duplicate(const struct edid *edid);
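
drm_edid_get_panel_id() and the encode/decode pair are aimed at ID-keyed quirk tables (the new panel-edp driver is the first user): the ID can be read over DDC from the base EDID block alone and matched against compile-time constants. A hedged fragment, with hypothetical table and field names:

	static const struct hypothetical_panel_entry {
		u32 panel_id;
		unsigned int enable_delay_ms;
	} hypothetical_panel_table[] = {
		{ drm_edid_encode_panel_id('B', 'O', 'E', 0x2d08), 80 },
	};

	u32 panel_id = drm_edid_get_panel_id(ddc);
	char vend[4];
	u16 product_id;

	drm_edid_decode_panel_id(panel_id, vend, &product_id);
	pr_debug("detected panel %s 0x%04x\n", vend, product_id);
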
diff --git a/include/drm/drm_format_helper.h b/include/drm/drm_format_helper.h
index 4e0258a61311..e86925cf07b9 100644
--- a/include/drm/drm_format_helper.h
+++ b/include/drm/drm_format_helper.h
@@ -16,12 +16,16 @@ void drm_fb_memcpy_dstclip(void __iomem *dst, unsigned int dst_pitch, void *vadd
struct drm_rect *clip);
void drm_fb_swab(void *dst, void *src, struct drm_framebuffer *fb,
struct drm_rect *clip, bool cached);
+void drm_fb_xrgb8888_to_rgb332(void *dst, void *vaddr, struct drm_framebuffer *fb,
+ struct drm_rect *clip);
void drm_fb_xrgb8888_to_rgb565(void *dst, void *vaddr,
struct drm_framebuffer *fb,
struct drm_rect *clip, bool swab);
void drm_fb_xrgb8888_to_rgb565_dstclip(void __iomem *dst, unsigned int dst_pitch,
void *vaddr, struct drm_framebuffer *fb,
struct drm_rect *clip, bool swab);
+void drm_fb_xrgb8888_to_rgb888(void *dst, void *src, struct drm_framebuffer *fb,
+ struct drm_rect *clip);
void drm_fb_xrgb8888_to_rgb888_dstclip(void __iomem *dst, unsigned int dst_pitch,
void *vaddr, struct drm_framebuffer *fb,
struct drm_rect *clip);
diff --git a/include/drm/drm_ioctl.h b/include/drm/drm_ioctl.h
index afb27cb6a7bd..6ed61c371f6c 100644
--- a/include/drm/drm_ioctl.h
+++ b/include/drm/drm_ioctl.h
@@ -167,7 +167,6 @@ struct drm_ioctl_desc {
.name = #ioctl \
}
-int drm_ioctl_permit(u32 flags, struct drm_file *file_priv);
long drm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
long drm_ioctl_kernel(struct file *, drm_ioctl_t, void *, u32);
#ifdef CONFIG_COMPAT
diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h
index af7ba8071eb0..147e51b6d241 100644
--- a/include/drm/drm_mipi_dsi.h
+++ b/include/drm/drm_mipi_dsi.h
@@ -227,9 +227,13 @@ struct mipi_dsi_device *
mipi_dsi_device_register_full(struct mipi_dsi_host *host,
const struct mipi_dsi_device_info *info);
void mipi_dsi_device_unregister(struct mipi_dsi_device *dsi);
+struct mipi_dsi_device *
+devm_mipi_dsi_device_register_full(struct device *dev, struct mipi_dsi_host *host,
+ const struct mipi_dsi_device_info *info);
struct mipi_dsi_device *of_find_mipi_dsi_device_by_node(struct device_node *np);
int mipi_dsi_attach(struct mipi_dsi_device *dsi);
int mipi_dsi_detach(struct mipi_dsi_device *dsi);
+int devm_mipi_dsi_attach(struct device *dev, struct mipi_dsi_device *dsi);
int mipi_dsi_shutdown_peripheral(struct mipi_dsi_device *dsi);
int mipi_dsi_turn_on_peripheral(struct mipi_dsi_device *dsi);
int mipi_dsi_set_maximum_return_packet_size(struct mipi_dsi_device *dsi,
diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h
index 1ddf7783fdf7..48b7de80daf5 100644
--- a/include/drm/drm_mode_config.h
+++ b/include/drm/drm_mode_config.h
@@ -103,14 +103,13 @@ struct drm_mode_config_funcs {
* Callback used by helpers to inform the driver of output configuration
* changes.
*
- * Drivers implementing fbdev emulation with the helpers can call
- * drm_fb_helper_hotplug_changed from this hook to inform the fbdev
- * helper of output changes.
+ * Drivers implementing fbdev emulation use drm_kms_helper_hotplug_event()
+ * to call this hook to inform the fbdev helper of output changes.
*
- * FIXME:
- *
- * Except that there's no vtable for device-level helper callbacks
- * there's no reason this is a core function.
+ * This hook is deprecated, drivers should instead use
+ * drm_fbdev_generic_setup() which takes care of any necessary
+ * hotplug event forwarding already without further involvement by
+ * the driver.
*/
void (*output_poll_changed)(struct drm_device *dev);
diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h
index fed97e35626f..0c1102dc4d88 100644
--- a/include/drm/drm_plane.h
+++ b/include/drm/drm_plane.h
@@ -43,7 +43,7 @@ enum drm_scaling_filter {
/**
* struct drm_plane_state - mutable plane state
*
- * Please not that the destination coordinates @crtc_x, @crtc_y, @crtc_h and
+ * Please note that the destination coordinates @crtc_x, @crtc_y, @crtc_h and
* @crtc_w and the source coordinates @src_x, @src_y, @src_h and @src_w are the
* raw coordinates provided by userspace. Drivers should use
* drm_atomic_helper_check_plane_state() and only use the derived rectangles in
diff --git a/include/drm/drm_print.h b/include/drm/drm_print.h
index 15a089a87c22..22fabdeed297 100644
--- a/include/drm/drm_print.h
+++ b/include/drm/drm_print.h
@@ -340,6 +340,8 @@ void drm_dev_dbg(const struct device *dev, enum drm_debug_category category,
/**
* DRM_DEV_ERROR() - Error output.
*
+ * NOTE: this is deprecated in favor of drm_err() or dev_err().
+ *
* @dev: device pointer
* @fmt: printf() like format string.
*/
@@ -349,6 +351,9 @@ void drm_dev_dbg(const struct device *dev, enum drm_debug_category category,
/**
* DRM_DEV_ERROR_RATELIMITED() - Rate limited error output.
*
+ * NOTE: this is deprecated in favor of drm_err_ratelimited() or
+ * dev_err_ratelimited().
+ *
* @dev: device pointer
* @fmt: printf() like format string.
*
@@ -364,9 +369,11 @@ void drm_dev_dbg(const struct device *dev, enum drm_debug_category category,
DRM_DEV_ERROR(dev, fmt, ##__VA_ARGS__); \
})
+/* NOTE: this is deprecated in favor of drm_info() or dev_info(). */
#define DRM_DEV_INFO(dev, fmt, ...) \
drm_dev_printk(dev, KERN_INFO, fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of drm_info_once() or dev_info_once(). */
#define DRM_DEV_INFO_ONCE(dev, fmt, ...) \
({ \
static bool __print_once __read_mostly; \
@@ -379,6 +386,8 @@ void drm_dev_dbg(const struct device *dev, enum drm_debug_category category,
/**
* DRM_DEV_DEBUG() - Debug output for generic drm code
*
+ * NOTE: this is deprecated in favor of drm_dbg_core().
+ *
* @dev: device pointer
* @fmt: printf() like format string.
*/
@@ -387,6 +396,8 @@ void drm_dev_dbg(const struct device *dev, enum drm_debug_category category,
/**
* DRM_DEV_DEBUG_DRIVER() - Debug output for vendor specific part of the driver
*
+ * NOTE: this is deprecated in favor of drm_dbg() or dev_dbg().
+ *
* @dev: device pointer
* @fmt: printf() like format string.
*/
@@ -395,6 +406,8 @@ void drm_dev_dbg(const struct device *dev, enum drm_debug_category category,
/**
* DRM_DEV_DEBUG_KMS() - Debug output for modesetting code
*
+ * NOTE: this is deprecated in favor of drm_dbg_kms().
+ *
* @dev: device pointer
* @fmt: printf() like format string.
*/
@@ -480,47 +493,63 @@ void __drm_err(const char *format, ...);
#define _DRM_PRINTK(once, level, fmt, ...) \
printk##once(KERN_##level "[" DRM_NAME "] " fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of pr_info(). */
#define DRM_INFO(fmt, ...) \
_DRM_PRINTK(, INFO, fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of pr_notice(). */
#define DRM_NOTE(fmt, ...) \
_DRM_PRINTK(, NOTICE, fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of pr_warn(). */
#define DRM_WARN(fmt, ...) \
_DRM_PRINTK(, WARNING, fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of pr_info_once(). */
#define DRM_INFO_ONCE(fmt, ...) \
_DRM_PRINTK(_once, INFO, fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of pr_notice_once(). */
#define DRM_NOTE_ONCE(fmt, ...) \
_DRM_PRINTK(_once, NOTICE, fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of pr_warn_once(). */
#define DRM_WARN_ONCE(fmt, ...) \
_DRM_PRINTK(_once, WARNING, fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of pr_err(). */
#define DRM_ERROR(fmt, ...) \
__drm_err(fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of pr_err_ratelimited(). */
#define DRM_ERROR_RATELIMITED(fmt, ...) \
DRM_DEV_ERROR_RATELIMITED(NULL, fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of drm_dbg_core(NULL, ...). */
#define DRM_DEBUG(fmt, ...) \
__drm_dbg(DRM_UT_CORE, fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of drm_dbg(NULL, ...). */
#define DRM_DEBUG_DRIVER(fmt, ...) \
__drm_dbg(DRM_UT_DRIVER, fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of drm_dbg_kms(NULL, ...). */
#define DRM_DEBUG_KMS(fmt, ...) \
__drm_dbg(DRM_UT_KMS, fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of drm_dbg_prime(NULL, ...). */
#define DRM_DEBUG_PRIME(fmt, ...) \
__drm_dbg(DRM_UT_PRIME, fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of drm_dbg_atomic(NULL, ...). */
#define DRM_DEBUG_ATOMIC(fmt, ...) \
__drm_dbg(DRM_UT_ATOMIC, fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of drm_dbg_vbl(NULL, ...). */
#define DRM_DEBUG_VBL(fmt, ...) \
__drm_dbg(DRM_UT_VBL, fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of drm_dbg_lease(NULL, ...). */
#define DRM_DEBUG_LEASE(fmt, ...) \
__drm_dbg(DRM_UT_LEASE, fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of drm_dbg_dp(NULL, ...). */
#define DRM_DEBUG_DP(fmt, ...) \
__drm_dbg(DRM_UT_DP, fmt, ## __VA_ARGS__)
@@ -536,6 +565,7 @@ void __drm_err(const char *format, ...);
#define drm_dbg_kms_ratelimited(drm, fmt, ...) \
__DRM_DEFINE_DBG_RATELIMITED(KMS, drm, fmt, ## __VA_ARGS__)
+/* NOTE: this is deprecated in favor of drm_dbg_kms_ratelimited(NULL, ...). */
#define DRM_DEBUG_KMS_RATELIMITED(fmt, ...) drm_dbg_kms_ratelimited(NULL, fmt, ## __VA_ARGS__)
/*
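
All of the NOTE annotations above point the same way: away from the global printk-style macros toward the device-aware drm_* helpers, so that messages get tied to a specific drm_device. A before/after sketch:

	/* deprecated */
	DRM_ERROR("link training failed\n");
	DRM_DEBUG_KMS("hpd on connector %u\n", id);

	/* preferred */
	drm_err(drm, "link training failed\n");
	drm_dbg_kms(drm, "hpd on connector %u\n", id);
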
diff --git a/include/drm/drm_probe_helper.h b/include/drm/drm_probe_helper.h
index 8d3ed2834d34..04c57564c397 100644
--- a/include/drm/drm_probe_helper.h
+++ b/include/drm/drm_probe_helper.h
@@ -18,6 +18,7 @@ int drm_helper_probe_detect(struct drm_connector *connector,
void drm_kms_helper_poll_init(struct drm_device *dev);
void drm_kms_helper_poll_fini(struct drm_device *dev);
bool drm_helper_hpd_irq_event(struct drm_device *dev);
+bool drm_connector_helper_hpd_irq_event(struct drm_connector *connector);
void drm_kms_helper_hotplug_event(struct drm_device *dev);
void drm_kms_helper_poll_disable(struct drm_device *dev);
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index 88ae7f331bb1..f011e4c407f2 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -27,9 +27,12 @@
#include <drm/spsc_queue.h>
#include <linux/dma-fence.h>
#include <linux/completion.h>
+#include <linux/xarray.h>
#define MAX_WAIT_SCHED_ENTITY_Q_EMPTY msecs_to_jiffies(1000)
+struct drm_gem_object;
+
struct drm_gpu_scheduler;
struct drm_sched_rq;
@@ -50,56 +53,147 @@ enum drm_sched_priority {
* struct drm_sched_entity - A wrapper around a job queue (typically
* attached to the DRM file_priv).
*
- * @list: used to append this struct to the list of entities in the
- * runqueue.
- * @rq: runqueue on which this entity is currently scheduled.
- * @sched_list: A list of schedulers (drm_gpu_schedulers).
- * Jobs from this entity can be scheduled on any scheduler
- * on this list.
- * @num_sched_list: number of drm_gpu_schedulers in the sched_list.
- * @priority: priority of the entity
- * @rq_lock: lock to modify the runqueue to which this entity belongs.
- * @job_queue: the list of jobs of this entity.
- * @fence_seq: a linearly increasing seqno incremented with each
- * new &drm_sched_fence which is part of the entity.
- * @fence_context: a unique context for all the fences which belong
- * to this entity.
- * The &drm_sched_fence.scheduled uses the
- * fence_context but &drm_sched_fence.finished uses
- * fence_context + 1.
- * @dependency: the dependency fence of the job which is on the top
- * of the job queue.
- * @cb: callback for the dependency fence above.
- * @guilty: points to ctx's guilty.
- * @fini_status: contains the exit status in case the process was signalled.
- * @last_scheduled: points to the finished fence of the last scheduled job.
- * @last_user: last group leader pushing a job into the entity.
- * @stopped: Marks the enity as removed from rq and destined for termination.
- * @entity_idle: Signals when enityt is not in use
- *
* Entities will emit jobs in order to their corresponding hardware
* ring, and the scheduler will alternate between entities based on
* scheduling policy.
*/
struct drm_sched_entity {
+ /**
+ * @list:
+ *
+ * Used to append this struct to the list of entities in the runqueue
+ * @rq under &drm_sched_rq.entities.
+ *
+ * Protected by &drm_sched_rq.lock of @rq.
+ */
struct list_head list;
+
+ /**
+ * @rq:
+ *
+ * Runqueue on which this entity is currently scheduled.
+ *
+ * FIXME: Locking is very unclear for this. Writers are protected by
+ * @rq_lock, but readers are generally lockless and seem to just race
+ * with not even a READ_ONCE.
+ */
struct drm_sched_rq *rq;
+
+ /**
+ * @sched_list:
+ *
+ * A list of schedulers (struct drm_gpu_scheduler). Jobs from this entity can
+ * be scheduled on any scheduler on this list.
+ *
+ * This can be modified by calling drm_sched_entity_modify_sched().
+ * Locking is entirely up to the driver, see the above function for more
+ * details.
+ *
+ * This will be set to NULL if &num_sched_list equals 1 and @rq has been
+ * set already.
+ *
+ * FIXME: This means priority changes through
+ * drm_sched_entity_set_priority() will be lost henceforth in this case.
+ */
struct drm_gpu_scheduler **sched_list;
+
+ /**
+ * @num_sched_list:
+ *
+ * Number of drm_gpu_schedulers in the @sched_list.
+ */
unsigned int num_sched_list;
+
+ /**
+ * @priority:
+ *
+ * Priority of the entity. This can be modified by calling
+ * drm_sched_entity_set_priority(). Protected by &rq_lock.
+ */
enum drm_sched_priority priority;
+
+ /**
+ * @rq_lock:
+ *
+ * Lock to modify the runqueue to which this entity belongs.
+ */
spinlock_t rq_lock;
+ /**
+ * @job_queue: the list of jobs of this entity.
+ */
struct spsc_queue job_queue;
+ /**
+ * @fence_seq:
+ *
+ * A linearly increasing seqno incremented with each new
+ * &drm_sched_fence which is part of the entity.
+ *
+ * FIXME: Callers of drm_sched_job_arm() need to ensure correct locking,
+ * this doesn't need to be atomic.
+ */
atomic_t fence_seq;
+
+ /**
+ * @fence_context:
+ *
+ * A unique context for all the fences which belong to this entity. The
+ * &drm_sched_fence.scheduled uses the fence_context but
+ * &drm_sched_fence.finished uses fence_context + 1.
+ */
uint64_t fence_context;
+ /**
+ * @dependency:
+ *
+ * The dependency fence of the job which is on the top of the job queue.
+ */
struct dma_fence *dependency;
+
+ /**
+ * @cb:
+ *
+ * Callback for the dependency fence above.
+ */
struct dma_fence_cb cb;
+
+ /**
+ * @guilty:
+ *
+ * Points to entities' guilty.
+ */
atomic_t *guilty;
+
+ /**
+ * @last_scheduled:
+ *
+ * Points to the finished fence of the last scheduled job. Only written
+ * by the scheduler thread, can be accessed locklessly from
+ * drm_sched_job_arm() iff the queue is empty.
+ */
struct dma_fence *last_scheduled;
+
+ /**
+ * @last_user: last group leader pushing a job into the entity.
+ */
struct task_struct *last_user;
+
+ /**
+ * @stopped:
+ *
+ * Marks the entity as removed from rq and destined for
+ * termination. This is set by calling drm_sched_entity_flush() and by
+ * drm_sched_fini().
+ */
bool stopped;
+
+ /**
+ * @entity_idle:
+ *
+ * Signals when entity is not in use, used to sequence entity cleanup in
+ * drm_sched_entity_fini().
+ */
struct completion entity_idle;
};
@@ -198,6 +292,17 @@ struct drm_sched_job {
enum drm_sched_priority s_priority;
struct drm_sched_entity *entity;
struct dma_fence_cb cb;
+ /**
+ * @dependencies:
+ *
+ * Contains the dependencies as struct dma_fence for this job, see
+ * drm_sched_job_add_dependency() and
+ * drm_sched_job_add_implicit_dependencies().
+ */
+ struct xarray dependencies;
+
+ /** @last_dependency: tracks @dependencies as they signal */
+ unsigned long last_dependency;
};
static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
@@ -220,9 +325,15 @@ enum drm_gpu_sched_stat {
*/
struct drm_sched_backend_ops {
/**
- * @dependency: Called when the scheduler is considering scheduling
- * this job next, to get another struct dma_fence for this job to
- * block on. Once it returns NULL, run_job() may be called.
+ * @dependency:
+ *
+ * Called when the scheduler is considering scheduling this job next, to
+ * get another struct dma_fence for this job to block on. Once it
+ * returns NULL, run_job() may be called.
+ *
+ * If a driver exclusively uses drm_sched_job_add_dependency() and
+ * drm_sched_job_add_implicit_dependencies() this can be omitted and
+ * left as NULL.
*/
struct dma_fence *(*dependency)(struct drm_sched_job *sched_job,
struct drm_sched_entity *s_entity);
@@ -348,6 +459,14 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched);
int drm_sched_job_init(struct drm_sched_job *job,
struct drm_sched_entity *entity,
void *owner);
+void drm_sched_job_arm(struct drm_sched_job *job);
+int drm_sched_job_add_dependency(struct drm_sched_job *job,
+ struct dma_fence *fence);
+int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
+ struct drm_gem_object *obj,
+ bool write);
+
+
void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
struct drm_gpu_scheduler **sched_list,
unsigned int num_sched_list);
@@ -381,14 +500,17 @@ void drm_sched_entity_fini(struct drm_sched_entity *entity);
void drm_sched_entity_destroy(struct drm_sched_entity *entity);
void drm_sched_entity_select_rq(struct drm_sched_entity *entity);
struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity);
-void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
- struct drm_sched_entity *entity);
+void drm_sched_entity_push_job(struct drm_sched_job *sched_job);
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
enum drm_sched_priority priority);
bool drm_sched_entity_is_ready(struct drm_sched_entity *entity);
-struct drm_sched_fence *drm_sched_fence_create(
+struct drm_sched_fence *drm_sched_fence_alloc(
struct drm_sched_entity *s_entity, void *owner);
+void drm_sched_fence_init(struct drm_sched_fence *fence,
+ struct drm_sched_entity *entity);
+void drm_sched_fence_free(struct drm_sched_fence *fence);
+
void drm_sched_fence_scheduled(struct drm_sched_fence *fence);
void drm_sched_fence_finished(struct drm_sched_fence *fence);
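
The drm_sched_job_init()/drm_sched_job_arm() split plus the xarray-backed dependency helpers give drivers one common submission skeleton, and drm_sched_entity_push_job() loses its entity argument because the entity is now fixed at init time. A hedged sketch of the resulting flow (error unwinding elided):

	ret = drm_sched_job_init(&job->base, entity, owner);
	if (ret)
		return ret;

	/* pull implicit fences out of each GEM object's dma_resv */
	for (i = 0; i < num_bos; i++) {
		ret = drm_sched_job_add_implicit_dependencies(&job->base,
							      bos[i],
							      bo_writes[i]);
		if (ret)
			return ret;
	}

	drm_sched_job_arm(&job->base);		/* fences become valid here */
	drm_sched_entity_push_job(&job->base);
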
diff --git a/include/drm/gud.h b/include/drm/gud.h
index 0b46b54fe56e..c52a8ba4ae4e 100644
--- a/include/drm/gud.h
+++ b/include/drm/gud.h
@@ -246,10 +246,12 @@ struct gud_state_req {
/* Get supported pixel formats as a byte array of GUD_PIXEL_FORMAT_* */
#define GUD_REQ_GET_FORMATS 0x40
#define GUD_FORMATS_MAX_NUM 32
- /* R1 is a 1-bit monochrome transfer format presented to userspace as XRGB8888 */
- #define GUD_PIXEL_FORMAT_R1 0x01
+ #define GUD_PIXEL_FORMAT_R1 0x01 /* 1-bit monochrome */
+ #define GUD_PIXEL_FORMAT_R8 0x08 /* 8-bit greyscale */
#define GUD_PIXEL_FORMAT_XRGB1111 0x20
+ #define GUD_PIXEL_FORMAT_RGB332 0x30
#define GUD_PIXEL_FORMAT_RGB565 0x40
+ #define GUD_PIXEL_FORMAT_RGB888 0x50
#define GUD_PIXEL_FORMAT_XRGB8888 0x80
#define GUD_PIXEL_FORMAT_ARGB8888 0x81
diff --git a/include/drm/i915_component.h b/include/drm/i915_component.h
index 55c3b123581b..c1e2a43d2d1e 100644
--- a/include/drm/i915_component.h
+++ b/include/drm/i915_component.h
@@ -29,6 +29,7 @@
enum i915_component_type {
I915_COMPONENT_AUDIO = 1,
I915_COMPONENT_HDCP,
+ I915_COMPONENT_PXP
};
/* MAX_PORT is the number of port
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
index eee18fa53b54..c00ac54692d7 100644
--- a/include/drm/i915_pciids.h
+++ b/include/drm/i915_pciids.h
@@ -632,18 +632,16 @@
INTEL_VGA_DEVICE(0x4905, info), \
INTEL_VGA_DEVICE(0x4906, info), \
INTEL_VGA_DEVICE(0x4907, info), \
- INTEL_VGA_DEVICE(0x4908, info)
+ INTEL_VGA_DEVICE(0x4908, info), \
+ INTEL_VGA_DEVICE(0x4909, info)
/* ADL-S */
#define INTEL_ADLS_IDS(info) \
INTEL_VGA_DEVICE(0x4680, info), \
- INTEL_VGA_DEVICE(0x4681, info), \
INTEL_VGA_DEVICE(0x4682, info), \
- INTEL_VGA_DEVICE(0x4683, info), \
INTEL_VGA_DEVICE(0x4688, info), \
- INTEL_VGA_DEVICE(0x4689, info), \
+ INTEL_VGA_DEVICE(0x468A, info), \
INTEL_VGA_DEVICE(0x4690, info), \
- INTEL_VGA_DEVICE(0x4691, info), \
INTEL_VGA_DEVICE(0x4692, info), \
INTEL_VGA_DEVICE(0x4693, info)
diff --git a/include/drm/i915_pxp_tee_interface.h b/include/drm/i915_pxp_tee_interface.h
new file mode 100644
index 000000000000..af593ec64469
--- /dev/null
+++ b/include/drm/i915_pxp_tee_interface.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef _I915_PXP_TEE_INTERFACE_H_
+#define _I915_PXP_TEE_INTERFACE_H_
+
+#include <linux/mutex.h>
+#include <linux/device.h>
+
+/**
+ * struct i915_pxp_component_ops - ops for PXP services.
+ * @owner: Module providing the ops
+ * @send: sends data to PXP
+ * @receive: receives data from PXP
+ */
+struct i915_pxp_component_ops {
+ /**
+ * @owner: owner of the module providing the ops
+ */
+ struct module *owner;
+
+ int (*send)(struct device *dev, const void *message, size_t size);
+ int (*recv)(struct device *dev, void *buffer, size_t size);
+};
+
+/**
+ * struct i915_pxp_component - Used for communication between i915 and TEE
+ * drivers for the PXP services
+ * @tee_dev: device that provides the PXP service from the TEE bus.
+ * @pxp_ops: Ops implemented by TEE driver, used by i915 driver.
+ */
+struct i915_pxp_component {
+ struct device *tee_dev;
+ const struct i915_pxp_component_ops *ops;
+
+ /* To protect the above members. */
+ struct mutex mutex;
+};
+
+#endif /* _I915_PXP_TEE_INTERFACE_H_ */
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index f681bbdbc698..76d7c33884da 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -265,18 +265,6 @@ static inline int ttm_bo_wait_ctx(struct ttm_buffer_object *bo, struct ttm_opera
}
/**
- * ttm_bo_mem_compat - Check if proposed placement is compatible with a bo
- *
- * @placement: Return immediately if buffer is busy.
- * @mem: The struct ttm_resource indicating the region where the bo resides
- * @new_flags: Describes compatible placement found
- *
- * Returns true if the placement is compatible
- */
-bool ttm_bo_mem_compat(struct ttm_placement *placement, struct ttm_resource *mem,
- uint32_t *new_flags);
-
-/**
* ttm_bo_validate
*
* @bo: The buffer object.
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 68d6069572aa..5f087575194b 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -322,7 +322,7 @@ int ttm_bo_tt_bind(struct ttm_buffer_object *bo, struct ttm_resource *mem);
*/
void ttm_bo_tt_destroy(struct ttm_buffer_object *bo);
-void ttm_move_memcpy(struct ttm_buffer_object *bo,
+void ttm_move_memcpy(bool clear,
u32 num_pages,
struct ttm_kmap_iter *dst_iter,
struct ttm_kmap_iter *src_iter);
diff --git a/include/drm/ttm/ttm_caching.h b/include/drm/ttm/ttm_caching.h
index 3c9dd65f5aaf..235a743d90e1 100644
--- a/include/drm/ttm/ttm_caching.h
+++ b/include/drm/ttm/ttm_caching.h
@@ -27,9 +27,26 @@
#define TTM_NUM_CACHING_TYPES 3
+/**
+ * enum ttm_caching - CPU caching and BUS snooping behavior.
+ */
enum ttm_caching {
+ /**
+ * @ttm_uncached: Most defensive option for device mappings,
+ * don't even allow write combining.
+ */
ttm_uncached,
+
+ /**
+ * @ttm_write_combined: Don't cache read accesses, but allow at least
+ * writes to be combined.
+ */
ttm_write_combined,
+
+ /**
+ * @ttm_cached: Fully cached like normal system memory, requires that
+ * devices snoop the CPU cache on accesses.
+ */
ttm_cached
};
diff --git a/include/drm/ttm/ttm_device.h b/include/drm/ttm/ttm_device.h
index cd592f8e941b..0a4ddec78d8f 100644
--- a/include/drm/ttm/ttm_device.h
+++ b/include/drm/ttm/ttm_device.h
@@ -39,31 +39,23 @@ struct ttm_operation_ctx;
/**
* struct ttm_global - Buffer object driver global data.
- *
- * @dummy_read_page: Pointer to a dummy page used for mapping requests
- * of unpopulated pages.
- * @shrink: A shrink callback object used for buffer object swap.
- * @device_list_mutex: Mutex protecting the device list.
- * This mutex is held while traversing the device list for pm options.
- * @lru_lock: Spinlock protecting the bo subsystem lru lists.
- * @device_list: List of buffer object devices.
- * @swap_lru: Lru list of buffer objects used for swapping.
*/
extern struct ttm_global {
/**
- * Constant after init.
+ * @dummy_read_page: Pointer to a dummy page used for mapping requests
+ * of unpopulated pages. Constant after init.
*/
-
struct page *dummy_read_page;
/**
- * Protected by ttm_global_mutex.
+ * @device_list: List of buffer object devices. Protected by
+ * ttm_global_mutex.
*/
struct list_head device_list;
/**
- * Internal protection.
+ * @bo_count: Number of buffer objects allocated by devices.
*/
atomic_t bo_count;
} ttm_glob;
@@ -73,7 +65,7 @@ struct ttm_device_funcs {
* ttm_tt_create
*
* @bo: The buffer object to create the ttm for.
- * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
+ * @page_flags: Page flags as identified by TTM_TT_FLAG_XX flags.
*
* Create a struct ttm_tt to back data with system memory pages.
* No pages are actually allocated.
@@ -230,49 +222,64 @@ struct ttm_device_funcs {
/**
* struct ttm_device - Buffer object driver device-specific data.
- *
- * @device_list: Our entry in the global device list.
- * @funcs: Function table for the device.
- * @sysman: Resource manager for the system domain.
- * @man_drv: An array of resource_managers.
- * @vma_manager: Address space manager.
- * @pool: page pool for the device.
- * @dev_mapping: A pointer to the struct address_space representing the
- * device address space.
- * @wq: Work queue structure for the delayed delete workqueue.
*/
struct ttm_device {
- /*
+ /**
+ * @device_list: Our entry in the global device list.
* Constant after bo device init
*/
struct list_head device_list;
+
+ /**
+ * @funcs: Function table for the device.
+ * Constant after bo device init
+ */
struct ttm_device_funcs *funcs;
- /*
+ /**
+ * @sysman: Resource manager for the system domain.
* Access via ttm_manager_type.
*/
struct ttm_resource_manager sysman;
+
+ /**
+ * @man_drv: An array of resource_managers, one per resource type.
+ */
struct ttm_resource_manager *man_drv[TTM_NUM_MEM_TYPES];
- /*
- * Protected by internal locks.
+ /**
+ * @vma_manager: Address space manager for finding BOs to mmap.
*/
struct drm_vma_offset_manager *vma_manager;
+
+ /**
+ * @pool: page pool for the device.
+ */
struct ttm_pool pool;
- /*
- * Protection for the per manager LRU and ddestroy lists.
+ /**
+ * @lru_lock: Protection for the per manager LRU and ddestroy lists.
*/
spinlock_t lru_lock;
+
+ /**
+ * @ddestroy: Destroyed but not yet cleaned up buffer objects.
+ */
struct list_head ddestroy;
- /*
- * Protected by load / firstopen / lastclose /unload sync.
+ /**
+ * @pinned: Buffer objects which are pinned and so not on any LRU list.
+ */
+ struct list_head pinned;
+
+ /**
+ * @dev_mapping: A pointer to the struct address_space for invalidating
+ * CPU mappings on buffer move. Protected by load/unload sync.
*/
struct address_space *dev_mapping;
- /*
- * Internal protection.
+ /**
+ * @wq: Work queue structure for the delayed delete workqueue.
*/
struct delayed_work wq;
};
@@ -284,12 +291,15 @@ int ttm_device_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
static inline struct ttm_resource_manager *
ttm_manager_type(struct ttm_device *bdev, int mem_type)
{
+ BUILD_BUG_ON(__builtin_constant_p(mem_type)
+ && mem_type >= TTM_NUM_MEM_TYPES);
return bdev->man_drv[mem_type];
}
static inline void ttm_set_driver_manager(struct ttm_device *bdev, int type,
struct ttm_resource_manager *manager)
{
+ BUILD_BUG_ON(__builtin_constant_p(type) && type >= TTM_NUM_MEM_TYPES);
bdev->man_drv[type] = manager;
}
@@ -298,5 +308,6 @@ int ttm_device_init(struct ttm_device *bdev, struct ttm_device_funcs *funcs,
struct drm_vma_offset_manager *vma_manager,
bool use_dma_alloc, bool use_dma32);
void ttm_device_fini(struct ttm_device *bdev);
+void ttm_device_clear_dma_mappings(struct ttm_device *bdev);
#endif
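
The BUILD_BUG_ON() in ttm_manager_type()/ttm_set_driver_manager() only evaluates when the type is a compile-time constant, so variable-indexed callers compile unchanged while constant out-of-range lookups now fail the build:

	man = ttm_manager_type(bdev, TTM_PL_VRAM);		/* fine */
	man = ttm_manager_type(bdev, bo->resource->mem_type);	/* runtime value, unchecked */
	man = ttm_manager_type(bdev, TTM_NUM_MEM_TYPES);	/* compile error now */
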
diff --git a/include/drm/ttm/ttm_placement.h b/include/drm/ttm/ttm_placement.h
index 8995c9e4ec1b..76d1b9119a2b 100644
--- a/include/drm/ttm/ttm_placement.h
+++ b/include/drm/ttm/ttm_placement.h
@@ -58,6 +58,7 @@
*
* @fpfn: first valid page frame number to put the object
* @lpfn: last valid page frame number to put the object
+ * @mem_type: One of TTM_PL_* where the resource should be allocated from.
* @flags: memory domain and caching flags for the object
*
* Structure indicating a possible place to put an object.
diff --git a/include/drm/ttm/ttm_pool.h b/include/drm/ttm/ttm_pool.h
index 4321728bdd11..ef09b23d29e3 100644
--- a/include/drm/ttm/ttm_pool.h
+++ b/include/drm/ttm/ttm_pool.h
@@ -37,7 +37,7 @@ struct ttm_pool;
struct ttm_operation_ctx;
/**
- * ttm_pool_type - Pool for a certain memory type
+ * struct ttm_pool_type - Pool for a certain memory type
*
* @pool: the pool we belong to, might be NULL for the global ones
* @order: the allocation order our pages have
@@ -58,8 +58,9 @@ struct ttm_pool_type {
};
/**
- * ttm_pool - Pool for all caching and orders
+ * struct ttm_pool - Pool for all caching and orders
*
+ * @dev: the device we allocate pages for
* @use_dma_alloc: if coherent DMA allocations should be used
* @use_dma32: if GFP_DMA32 should be used
* @caching: pools for each caching/order
diff --git a/include/drm/ttm/ttm_range_manager.h b/include/drm/ttm/ttm_range_manager.h
index 22b6fa42ac20..7963b957e9ef 100644
--- a/include/drm/ttm/ttm_range_manager.h
+++ b/include/drm/ttm/ttm_range_manager.h
@@ -4,6 +4,7 @@
#define _TTM_RANGE_MANAGER_H_
#include <drm/ttm/ttm_resource.h>
+#include <drm/ttm/ttm_device.h>
#include <drm/drm_mm.h>
/**
@@ -33,10 +34,23 @@ to_ttm_range_mgr_node(struct ttm_resource *res)
return container_of(res, struct ttm_range_mgr_node, base);
}
-int ttm_range_man_init(struct ttm_device *bdev,
+int ttm_range_man_init_nocheck(struct ttm_device *bdev,
unsigned type, bool use_tt,
unsigned long p_size);
-int ttm_range_man_fini(struct ttm_device *bdev,
+int ttm_range_man_fini_nocheck(struct ttm_device *bdev,
unsigned type);
+static __always_inline int ttm_range_man_init(struct ttm_device *bdev,
+ unsigned int type, bool use_tt,
+ unsigned long p_size)
+{
+ BUILD_BUG_ON(__builtin_constant_p(type) && type >= TTM_NUM_MEM_TYPES);
+ return ttm_range_man_init_nocheck(bdev, type, use_tt, p_size);
+}
+static __always_inline int ttm_range_man_fini(struct ttm_device *bdev,
+ unsigned int type)
+{
+ BUILD_BUG_ON(__builtin_constant_p(type) && type >= TTM_NUM_MEM_TYPES);
+ return ttm_range_man_fini_nocheck(bdev, type);
+}
#endif
diff --git a/include/drm/ttm/ttm_resource.h b/include/drm/ttm/ttm_resource.h
index 140b6b9a8bbe..5952051091cd 100644
--- a/include/drm/ttm/ttm_resource.h
+++ b/include/drm/ttm/ttm_resource.h
@@ -40,6 +40,7 @@ struct ttm_resource_manager;
struct ttm_resource;
struct ttm_place;
struct ttm_buffer_object;
+struct ttm_placement;
struct dma_buf_map;
struct io_mapping;
struct sg_table;
@@ -102,10 +103,7 @@ struct ttm_resource_manager_func {
* struct ttm_resource_manager
*
* @use_type: The memory type is enabled.
- * @flags: TTM_MEMTYPE_XX flags identifying the traits of the memory
- * managed by this memory type.
- * @gpu_offset: If used, the GPU offset of the first managed page of
- * fixed memory or the first managed location in an aperture.
+ * @use_tt: If a TT object should be used for the backing store.
* @size: Size of the managed region.
* @func: structure pointer implementing the range manager. See above
* @move_lock: lock for move fence
@@ -143,6 +141,7 @@ struct ttm_resource_manager {
* @addr: mapped virtual address
* @offset: physical addr
* @is_iomem: is this io memory ?
+ * @caching: See enum ttm_caching
*
* Structure indicating the bus placement of an object.
*/
@@ -266,6 +265,8 @@ int ttm_resource_alloc(struct ttm_buffer_object *bo,
const struct ttm_place *place,
struct ttm_resource **res);
void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource **res);
+bool ttm_resource_compat(struct ttm_resource *res,
+ struct ttm_placement *placement);
void ttm_resource_manager_init(struct ttm_resource_manager *man,
unsigned long p_size);
diff --git a/include/drm/ttm/ttm_tt.h b/include/drm/ttm/ttm_tt.h
index b20e89d321b0..f20832139815 100644
--- a/include/drm/ttm/ttm_tt.h
+++ b/include/drm/ttm/ttm_tt.h
@@ -38,36 +38,70 @@ struct ttm_resource;
struct ttm_buffer_object;
struct ttm_operation_ctx;
-#define TTM_PAGE_FLAG_SWAPPED (1 << 4)
-#define TTM_PAGE_FLAG_ZERO_ALLOC (1 << 6)
-#define TTM_PAGE_FLAG_SG (1 << 8)
-#define TTM_PAGE_FLAG_NO_RETRY (1 << 9)
-
-#define TTM_PAGE_FLAG_PRIV_POPULATED (1 << 31)
-
/**
- * struct ttm_tt
- *
- * @pages: Array of pages backing the data.
- * @page_flags: see TTM_PAGE_FLAG_*
- * @num_pages: Number of pages in the page array.
- * @sg: for SG objects via dma-buf
- * @dma_address: The DMA (bus) addresses of the pages
- * @swap_storage: Pointer to shmem struct file for swap storage.
- * @pages_list: used by some page allocation backend
- * @caching: The current caching state of the pages.
- *
- * This is a structure holding the pages, caching- and aperture binding
- * status for a buffer object that isn't backed by fixed (VRAM / AGP)
+ * struct ttm_tt - This is a structure holding the pages, caching- and aperture
+ * binding status for a buffer object that isn't backed by fixed (VRAM / AGP)
* memory.
*/
struct ttm_tt {
+ /** @pages: Array of pages backing the data. */
struct page **pages;
+ /**
+ * @page_flags: The page flags.
+ *
+ * Supported values:
+ *
+ * TTM_TT_FLAG_SWAPPED: Set by TTM when the pages have been unpopulated
+ * and swapped out by TTM. Calling ttm_tt_populate() will then swap the
+ * pages back in, and unset the flag. Drivers should in general never
+ * need to touch this.
+ *
+ * TTM_TT_FLAG_ZERO_ALLOC: Set if the pages will be zeroed on
+ * allocation.
+ *
+ * TTM_TT_FLAG_EXTERNAL: Set if the underlying pages were allocated
+ * externally, like with dma-buf or userptr. This effectively disables
+ * TTM swapping out such pages. Also important is to prevent TTM from
+ * ever directly mapping these pages.
+ *
+ * Note that enum ttm_bo_type.ttm_bo_type_sg objects will always enable
+ * this flag.
+ *
+ * TTM_TT_FLAG_EXTERNAL_MAPPABLE: Same behaviour as
+ * TTM_TT_FLAG_EXTERNAL, but with the reduced restriction that it is
+ * still valid to use TTM to map the pages directly. This is useful when
+ * implementing a ttm_tt backend which still allocates driver owned
+ * pages underneath (say with shmem).
+ *
+ * Note that since this also implies TTM_TT_FLAG_EXTERNAL, the usage
+ * here should always be:
+ *
+ * page_flags = TTM_TT_FLAG_EXTERNAL |
+ * TTM_TT_FLAG_EXTERNAL_MAPPABLE;
+ *
+ * TTM_TT_FLAG_PRIV_POPULATED: TTM internal only. DO NOT USE. This is
+ * set by TTM after ttm_tt_populate() has successfully returned, and is
+ * then unset when TTM calls ttm_tt_unpopulate().
+ */
+#define TTM_TT_FLAG_SWAPPED (1 << 0)
+#define TTM_TT_FLAG_ZERO_ALLOC (1 << 1)
+#define TTM_TT_FLAG_EXTERNAL (1 << 2)
+#define TTM_TT_FLAG_EXTERNAL_MAPPABLE (1 << 3)
+
+#define TTM_TT_FLAG_PRIV_POPULATED (1 << 31)
uint32_t page_flags;
+ /** @num_pages: Number of pages in the page array. */
uint32_t num_pages;
+ /** @sg: for SG objects via dma-buf. */
struct sg_table *sg;
+ /** @dma_address: The DMA (bus) addresses of the pages. */
dma_addr_t *dma_address;
+ /** @swap_storage: Pointer to shmem struct file for swap storage. */
struct file *swap_storage;
+ /**
+ * @caching: The current caching state of the pages, see enum
+ * ttm_caching.
+ */
enum ttm_caching caching;
};
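
For illustration (not part of this patch): a driver backend that keeps ownership of its shmem-allocated pages might create its ttm_tt as sketched below; the mydrv_* name is hypothetical.

static struct ttm_tt *mydrv_ttm_tt_create(struct ttm_buffer_object *bo,
                                          uint32_t page_flags)
{
        struct ttm_tt *tt = kzalloc(sizeof(*tt), GFP_KERNEL);

        if (!tt)
                return NULL;

        /* Pages stay driver owned, but TTM is still allowed to map them. */
        page_flags |= TTM_TT_FLAG_EXTERNAL | TTM_TT_FLAG_EXTERNAL_MAPPABLE;

        if (ttm_tt_init(tt, bo, page_flags, ttm_cached)) {
                kfree(tt);
                return NULL;
        }
        return tt;
}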
@@ -85,7 +119,7 @@ struct ttm_kmap_iter_tt {
static inline bool ttm_tt_is_populated(struct ttm_tt *tt)
{
- return tt->page_flags & TTM_PAGE_FLAG_PRIV_POPULATED;
+ return tt->page_flags & TTM_TT_FLAG_PRIV_POPULATED;
}
/**
@@ -104,7 +138,7 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc);
*
* @ttm: The struct ttm_tt.
* @bo: The buffer object we create the ttm for.
- * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
+ * @page_flags: Page flags as identified by TTM_TT_FLAG_XX flags.
* @caching: the desired caching state of the pages
*
* Create a struct ttm_tt to back data with system memory pages.
@@ -127,8 +161,9 @@ int ttm_sg_tt_init(struct ttm_tt *ttm_dma, struct ttm_buffer_object *bo,
void ttm_tt_fini(struct ttm_tt *ttm);
/**
- * ttm_ttm_destroy:
+ * ttm_tt_destroy:
*
+ * @bdev: the ttm_device this object belongs to
* @ttm: The struct ttm_tt.
*
* Unbind, unpopulate and destroy common struct ttm_tt.
@@ -136,13 +171,6 @@ void ttm_tt_fini(struct ttm_tt *ttm);
void ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm);
/**
- * ttm_tt_destroy_common:
- *
- * Called from driver to destroy common path.
- */
-void ttm_tt_destroy_common(struct ttm_device *bdev, struct ttm_tt *ttm);
-
-/**
* ttm_tt_swapin:
*
* @ttm: The struct ttm_tt.
@@ -156,15 +184,19 @@ int ttm_tt_swapout(struct ttm_device *bdev, struct ttm_tt *ttm,
/**
* ttm_tt_populate - allocate pages for a ttm
*
+ * @bdev: the ttm_device this object belongs to
* @ttm: Pointer to the ttm_tt structure
+ * @ctx: operation context for populating the tt object.
*
* Calls the driver method to allocate pages for a ttm
*/
-int ttm_tt_populate(struct ttm_device *bdev, struct ttm_tt *ttm, struct ttm_operation_ctx *ctx);
+int ttm_tt_populate(struct ttm_device *bdev, struct ttm_tt *ttm,
+ struct ttm_operation_ctx *ctx);
/**
* ttm_tt_unpopulate - free pages from a ttm
*
+ * @bdev: the ttm_device this object belongs to
* @ttm: Pointer to the ttm_tt structure
*
* Calls the driver method to free all pages from a ttm
@@ -181,7 +213,7 @@ void ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm);
*/
static inline void ttm_tt_mark_for_clear(struct ttm_tt *ttm)
{
- ttm->page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
+ ttm->page_flags |= TTM_TT_FLAG_ZERO_ALLOC;
}
void ttm_tt_mgr_init(unsigned long num_pages, unsigned long num_dma32_pages);
@@ -197,7 +229,7 @@ struct ttm_kmap_iter *ttm_kmap_iter_tt_init(struct ttm_kmap_iter_tt *iter_tt,
*
* @bo: Buffer object we allocate the ttm for.
* @bridge: The agp bridge this device is sitting on.
- * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
+ * @page_flags: Page flags as identified by TTM_TT_FLAG_XX flags.
*
*
* Create a TTM backend that uses the indicated AGP bridge as an aperture
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index 8b32b4bdd590..02c2eb874da6 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -420,6 +420,13 @@ struct dma_buf {
* - Dynamic importers should set fences for any access that they can't
* disable immediately from their &dma_buf_attach_ops.move_notify
* callback.
+ *
+ * IMPORTANT:
+ *
+ * All drivers must obey the struct dma_resv rules, specifically the
+ * rules for updating fences, see &dma_resv.fence_excl and
+ * &dma_resv.fence. If these dependency rules are broken, access tracking
+ * can be lost, resulting in use-after-free issues.
*/
struct dma_resv *resv;
@@ -433,7 +440,7 @@ struct dma_buf {
wait_queue_head_t *poll;
__poll_t active;
- } cb_excl, cb_shared;
+ } cb_in, cb_out;
#ifdef CONFIG_DMABUF_SYSFS_STATS
/**
* @sysfs_entry:
diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
index 6ffb4b2c6371..a706b7bf51d7 100644
--- a/include/linux/dma-fence.h
+++ b/include/linux/dma-fence.h
@@ -214,19 +214,15 @@ struct dma_fence_ops {
* Custom wait implementation, defaults to dma_fence_default_wait() if
* not set.
*
- * The dma_fence_default_wait implementation should work for any fence, as long
- * as @enable_signaling works correctly. This hook allows drivers to
- * have an optimized version for the case where a process context is
- * already available, e.g. if @enable_signaling for the general case
- * needs to set up a worker thread.
+ * Deprecated and should not be used by new implementations. Only used
+ * by existing implementations which need special handling for their
+ * hardware reset procedure.
*
* Must return -ERESTARTSYS if the wait is intr = true and the wait was
* interrupted, and remaining jiffies if fence has signaled, or 0 if wait
* timed out. Can also return other error values on custom implementations,
* which should be treated as if the fence is signaled. For example a hardware
* lockup could be reported like that.
- *
- * This callback is optional.
*/
signed long (*wait)(struct dma_fence *fence,
bool intr, signed long timeout);
@@ -590,26 +586,4 @@ struct dma_fence *dma_fence_get_stub(void);
struct dma_fence *dma_fence_allocate_private_stub(void);
u64 dma_fence_context_alloc(unsigned num);
-#define DMA_FENCE_TRACE(f, fmt, args...) \
- do { \
- struct dma_fence *__ff = (f); \
- if (IS_ENABLED(CONFIG_DMA_FENCE_TRACE)) \
- pr_info("f %llu#%llu: " fmt, \
- __ff->context, __ff->seqno, ##args); \
- } while (0)
-
-#define DMA_FENCE_WARN(f, fmt, args...) \
- do { \
- struct dma_fence *__ff = (f); \
- pr_warn("f %llu#%llu: " fmt, __ff->context, __ff->seqno,\
- ##args); \
- } while (0)
-
-#define DMA_FENCE_ERR(f, fmt, args...) \
- do { \
- struct dma_fence *__ff = (f); \
- pr_err("f %llu#%llu: " fmt, __ff->context, __ff->seqno, \
- ##args); \
- } while (0)
-
#endif /* __LINUX_DMA_FENCE_H */
diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
index 39fefb86780b..8b6c20636a79 100644
--- a/include/linux/dma-resv.h
+++ b/include/linux/dma-resv.h
@@ -62,19 +62,188 @@ struct dma_resv_list {
/**
* struct dma_resv - a reservation object manages fences for a buffer
- * @lock: update side lock
- * @seq: sequence count for managing RCU read-side synchronization
- * @fence_excl: the exclusive fence, if there is one currently
- * @fence: list of current shared fences
+ *
+ * There are multiple uses for this, with sometimes slightly different rules in
+ * how the fence slots are used.
+ *
+ * One use is to synchronize cross-driver access to a struct dma_buf, either for
+ * dynamic buffer management or just to handle implicit synchronization between
+ * different users of the buffer in userspace. See &dma_buf.resv for a more
+ * in-depth discussion.
+ *
+ * The other major use is to manage access and locking within a driver in a
+ * buffer based memory manager. struct ttm_buffer_object is the canonical
+ * example here, since this is where reservation objects originated from. But
+ * use in drivers is spreading and some drivers also manage struct
+ * drm_gem_object with the same scheme.
*/
struct dma_resv {
+ /**
+ * @lock:
+ *
+ * Update side lock. Don't use directly, instead use the wrapper
+ * functions like dma_resv_lock() and dma_resv_unlock().
+ *
+ * Drivers which use the reservation object to manage memory dynamically
+ * also use this lock to protect buffer object state like placement,
+ * allocation policies or throughout command submission.
+ */
struct ww_mutex lock;
+
+ /**
+ * @seq:
+ *
+ * Sequence count for managing RCU read-side synchronization, allows
+ * read-only access to @fence_excl and @fence while ensuring we take a
+ * consistent snapshot.
+ */
seqcount_ww_mutex_t seq;
+ /**
+ * @fence_excl:
+ *
+ * The exclusive fence, if there is one currently.
+ *
+ * There are two ways to update this fence:
+ *
+ * - First by calling dma_resv_add_excl_fence(), which replaces all
+ * fences attached to the reservation object. To guarantee that no
+ * fences are lost, this new fence must signal only after all previous
+ * fences, both shared and exclusive, have signalled. In some cases it
+ * is convenient to achieve that by attaching a struct dma_fence_array
+ * with all the new and old fences.
+ *
+ * - Alternatively the fence can be set directly, which leaves the
+ * shared fences unchanged. To guarantee that no fences are lost, this
+ * new fence must signal only after the previous exclusive fence has
+ * signalled. Since the shared fences are staying intact, it is not
+ * necessary to maintain any ordering against those. If semantically
+ * only a new access is added without actually treating the previous
+ * one as a dependency the exclusive fences can be strung together
+ * using struct dma_fence_chain.
+ *
+ * Note that the actual semantics of what an exclusive or shared fence
+ * means are defined by the user; for reservation objects shared across
+ * drivers see &dma_buf.resv.
+ */
struct dma_fence __rcu *fence_excl;
+
+ /**
+ * @fence:
+ *
+ * List of current shared fences.
+ *
+ * There are no ordering constraints of shared fences against the
+ * exclusive fence slot. If a waiter needs to wait for all access, it
+ * has to wait for both sets of fences to signal.
+ *
+ * A new fence is added by calling dma_resv_add_shared_fence(). Since
+ * this often needs to be done past the point of no return in command
+ * submission it cannot fail, and therefore sufficient slots need to be
+ * reserved by calling dma_resv_reserve_shared().
+ *
+ * Note that the actual semantics of what an exclusive or shared fence
+ * means are defined by the user; for reservation objects shared across
+ * drivers see &dma_buf.resv.
+ */
struct dma_resv_list __rcu *fence;
};
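
A minimal sketch (assuming the caller already holds references to the fences, with error handling reduced to the essentials) of the two attachment paths described above:

static int example_attach_fences(struct dma_resv *obj,
                                 struct dma_fence *read_fence,
                                 struct dma_fence *write_fence)
{
        int ret;

        ret = dma_resv_lock(obj, NULL);
        if (ret)
                return ret;

        /* Shared slots must be reserved before the point of no return. */
        ret = dma_resv_reserve_shared(obj, 1);
        if (!ret)
                dma_resv_add_shared_fence(obj, read_fence);

        /*
         * The exclusive fence replaces all previously attached fences, so
         * per the rules above it must only signal after they have.
         */
        dma_resv_add_excl_fence(obj, write_fence);

        dma_resv_unlock(obj);
        return ret;
}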
+/**
+ * struct dma_resv_iter - current position into the dma_resv fences
+ *
+ * Don't touch this directly in the driver; use the accessor functions instead.
+ */
+struct dma_resv_iter {
+ /** @obj: The dma_resv object we iterate over */
+ struct dma_resv *obj;
+
+ /** @all_fences: If all fences should be returned */
+ bool all_fences;
+
+ /** @fence: the currently handled fence */
+ struct dma_fence *fence;
+
+ /** @seq: sequence number to check for modifications */
+ unsigned int seq;
+
+ /** @index: index into the shared fences */
+ unsigned int index;
+
+ /** @fences: the shared fences */
+ struct dma_resv_list *fences;
+
+ /** @is_restarted: true if this is the first returned fence */
+ bool is_restarted;
+};
+
+struct dma_fence *dma_resv_iter_first_unlocked(struct dma_resv_iter *cursor);
+struct dma_fence *dma_resv_iter_next_unlocked(struct dma_resv_iter *cursor);
+
+/**
+ * dma_resv_iter_begin - initialize a dma_resv_iter object
+ * @cursor: The dma_resv_iter object to initialize
+ * @obj: The dma_resv object which we want to iterate over
+ * @all_fences: If all fences should be returned or just the exclusive one
+ */
+static inline void dma_resv_iter_begin(struct dma_resv_iter *cursor,
+ struct dma_resv *obj,
+ bool all_fences)
+{
+ cursor->obj = obj;
+ cursor->all_fences = all_fences;
+ cursor->fence = NULL;
+}
+
+/**
+ * dma_resv_iter_end - cleanup a dma_resv_iter object
+ * @cursor: the dma_resv_iter object which should be cleaned up
+ *
+ * Make sure that the reference to the fence in the cursor is properly
+ * dropped.
+ */
+static inline void dma_resv_iter_end(struct dma_resv_iter *cursor)
+{
+ dma_fence_put(cursor->fence);
+}
+
+/**
+ * dma_resv_iter_is_exclusive - test if the current fence is the exclusive one
+ * @cursor: the cursor of the current position
+ *
+ * Returns true if the currently returned fence is the exclusive one.
+ */
+static inline bool dma_resv_iter_is_exclusive(struct dma_resv_iter *cursor)
+{
+ return cursor->index == 0;
+}
+
+/**
+ * dma_resv_iter_is_restarted - test if this is the first fence after a restart
+ * @cursor: the cursor with the current position
+ *
+ * Returns true if this is the first fence in an iteration after a restart.
+ */
+static inline bool dma_resv_iter_is_restarted(struct dma_resv_iter *cursor)
+{
+ return cursor->is_restarted;
+}
+
+/**
+ * dma_resv_for_each_fence_unlocked - unlocked fence iterator
+ * @cursor: a struct dma_resv_iter pointer
+ * @fence: the current fence
+ *
+ * Iterate over the fences in a struct dma_resv object without holding the
+ * &dma_resv.lock and using RCU instead. The cursor needs to be initialized
+ * with dma_resv_iter_begin() and cleaned up with dma_resv_iter_end(). Inside
+ * the iterator a reference to the dma_fence is held and the RCU lock dropped.
+ * When the dma_resv is modified the iteration starts over again.
+ */
+#define dma_resv_for_each_fence_unlocked(cursor, fence) \
+ for (fence = dma_resv_iter_first_unlocked(cursor); \
+ fence; fence = dma_resv_iter_next_unlocked(cursor))
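
As a usage sketch of the iterator contract described above (not taken from this patch): waiting on every fence of a reservation object without holding its lock.

static int example_wait_all_fences(struct dma_resv *resv)
{
        struct dma_resv_iter cursor;
        struct dma_fence *fence;
        int ret = 0;

        dma_resv_iter_begin(&cursor, resv, true /* all_fences */);
        dma_resv_for_each_fence_unlocked(&cursor, fence) {
                /* The iterator holds a reference on @fence for us. */
                ret = dma_fence_wait(fence, true);
                if (ret)
                        break;
        }
        dma_resv_iter_end(&cursor);
        return ret;
}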
+
#define dma_resv_held(obj) lockdep_is_held(&(obj)->lock.base)
#define dma_resv_assert_held(obj) lockdep_assert_held(&(obj)->lock.base)
@@ -98,6 +267,13 @@ static inline void dma_resv_reset_shared_max(struct dma_resv *obj) {}
* undefined order, a #ww_acquire_ctx is passed to unwind if a cycle
* is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation
* object may be locked by itself by passing NULL as @ctx.
+ *
+ * When a die situation is indicated by returning -EDEADLK, all locks held by
+ * @ctx must be unlocked and then dma_resv_lock_slow() called on @obj.
+ *
+ * Unlocked by calling dma_resv_unlock().
+ *
+ * See also dma_resv_lock_interruptible() for the interruptible variant.
*/
static inline int dma_resv_lock(struct dma_resv *obj,
struct ww_acquire_ctx *ctx)
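
A sketch of the -EDEADLK backoff described above, locking two reservation objects with one acquire context (reservation_ww_class is the ww_class backing &dma_resv.lock); illustrative only.

static void example_lock_two(struct dma_resv *a, struct dma_resv *b,
                             struct ww_acquire_ctx *ctx)
{
        ww_acquire_init(ctx, &reservation_ww_class);

        /* The first lock in a fresh context cannot return -EDEADLK. */
        dma_resv_lock(a, ctx);
        while (dma_resv_lock(b, ctx) == -EDEADLK) {
                /* Drop everything held, then sleep on the contended lock. */
                dma_resv_unlock(a);
                dma_resv_lock_slow(b, ctx);
                swap(a, b);
        }
        ww_acquire_done(ctx);
}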
@@ -119,6 +295,12 @@ static inline int dma_resv_lock(struct dma_resv *obj,
* undefined order, a #ww_acquire_ctx is passed to unwind if a cycle
* is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation
* object may be locked by itself by passing NULL as @ctx.
+ *
+ * When a die situation is indicated by returning -EDEADLK, all locks held by
+ * @ctx must be unlocked and then dma_resv_lock_slow_interruptible() called on
+ * @obj.
+ *
+ * Unlocked by calling dma_resv_unlock().
*/
static inline int dma_resv_lock_interruptible(struct dma_resv *obj,
struct ww_acquire_ctx *ctx)
@@ -134,6 +316,8 @@ static inline int dma_resv_lock_interruptible(struct dma_resv *obj,
* Acquires the reservation object after a die case. This function
* will sleep until the lock becomes available. See dma_resv_lock() as
* well.
+ *
+ * See also dma_resv_lock_slow_interruptible() for the interruptible variant.
*/
static inline void dma_resv_lock_slow(struct dma_resv *obj,
struct ww_acquire_ctx *ctx)
@@ -167,7 +351,7 @@ static inline int dma_resv_lock_slow_interruptible(struct dma_resv *obj,
* if they overlap with a writer.
*
* Also note that since no context is provided, no deadlock protection is
- * possible.
+ * possible, which is also not needed for a trylock.
*
* Returns true if the lock was acquired, false otherwise.
*/
@@ -193,6 +377,11 @@ static inline bool dma_resv_is_locked(struct dma_resv *obj)
*
* Returns the context used to lock a reservation object or NULL if no context
* was used or the object is not locked at all.
+ *
+ * WARNING: This interface is pretty horrible, but TTM needs it because it
+ * doesn't pass the struct ww_acquire_ctx around in some very long callchains.
+ * Everyone else just uses it to check whether they're holding a reservation or
+ * not.
*/
static inline struct ww_acquire_ctx *dma_resv_locking_ctx(struct dma_resv *obj)
{
diff --git a/include/linux/io.h b/include/linux/io.h
index 9595151d800d..5fc800390fe4 100644
--- a/include/linux/io.h
+++ b/include/linux/io.h
@@ -132,6 +132,8 @@ static inline int arch_phys_wc_index(int handle)
#endif
#endif
+int devm_arch_phys_wc_add(struct device *dev, unsigned long base, unsigned long size);
+
enum {
/* See memremap() kernel-doc for usage description... */
MEMREMAP_WB = 1 << 0,
@@ -166,4 +168,7 @@ static inline void arch_io_free_memtype_wc(resource_size_t base,
}
#endif
+int devm_arch_io_reserve_memtype_wc(struct device *dev, resource_size_t start,
+ resource_size_t size);
+
#endif /* _LINUX_IO_H */
diff --git a/include/linux/seqno-fence.h b/include/linux/seqno-fence.h
deleted file mode 100644
index 3cca2b8fac43..000000000000
--- a/include/linux/seqno-fence.h
+++ /dev/null
@@ -1,109 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * seqno-fence, using a dma-buf to synchronize fencing
- *
- * Copyright (C) 2012 Texas Instruments
- * Copyright (C) 2012 Canonical Ltd
- * Authors:
- * Rob Clark <robdclark@gmail.com>
- * Maarten Lankhorst <maarten.lankhorst@canonical.com>
- */
-
-#ifndef __LINUX_SEQNO_FENCE_H
-#define __LINUX_SEQNO_FENCE_H
-
-#include <linux/dma-fence.h>
-#include <linux/dma-buf.h>
-
-enum seqno_fence_condition {
- SEQNO_FENCE_WAIT_GEQUAL,
- SEQNO_FENCE_WAIT_NONZERO
-};
-
-struct seqno_fence {
- struct dma_fence base;
-
- const struct dma_fence_ops *ops;
- struct dma_buf *sync_buf;
- uint32_t seqno_ofs;
- enum seqno_fence_condition condition;
-};
-
-extern const struct dma_fence_ops seqno_fence_ops;
-
-/**
- * to_seqno_fence - cast a fence to a seqno_fence
- * @fence: fence to cast to a seqno_fence
- *
- * Returns NULL if the fence is not a seqno_fence,
- * or the seqno_fence otherwise.
- */
-static inline struct seqno_fence *
-to_seqno_fence(struct dma_fence *fence)
-{
- if (fence->ops != &seqno_fence_ops)
- return NULL;
- return container_of(fence, struct seqno_fence, base);
-}
-
-/**
- * seqno_fence_init - initialize a seqno fence
- * @fence: seqno_fence to initialize
- * @lock: pointer to spinlock to use for fence
- * @sync_buf: buffer containing the memory location to signal on
- * @context: the execution context this fence is a part of
- * @seqno_ofs: the offset within @sync_buf
- * @seqno: the sequence # to signal on
- * @cond: fence wait condition
- * @ops: the fence_ops for operations on this seqno fence
- *
- * This function initializes a struct seqno_fence with passed parameters,
- * and takes a reference on sync_buf which is released on fence destruction.
- *
- * A seqno_fence is a dma_fence which can complete in software when
- * enable_signaling is called, but it also completes when
- * (s32)((sync_buf)[seqno_ofs] - seqno) >= 0 is true
- *
- * The seqno_fence will take a refcount on the sync_buf until it's
- * destroyed, but actual lifetime of sync_buf may be longer if one of the
- * callers take a reference to it.
- *
- * Certain hardware have instructions to insert this type of wait condition
- * in the command stream, so no intervention from software would be needed.
- * This type of fence can be destroyed before completed, however a reference
- * on the sync_buf dma-buf can be taken. It is encouraged to re-use the same
- * dma-buf for sync_buf, since mapping or unmapping the sync_buf to the
- * device's vm can be expensive.
- *
- * It is recommended for creators of seqno_fence to call dma_fence_signal()
- * before destruction. This will prevent possible issues from wraparound at
- * time of issue vs time of check, since users can check dma_fence_is_signaled()
- * before submitting instructions for the hardware to wait on the fence.
- * However, when ops.enable_signaling is not called, it doesn't have to be
- * done as soon as possible, just before there's any real danger of seqno
- * wraparound.
- */
-static inline void
-seqno_fence_init(struct seqno_fence *fence, spinlock_t *lock,
- struct dma_buf *sync_buf, uint32_t context,
- uint32_t seqno_ofs, uint32_t seqno,
- enum seqno_fence_condition cond,
- const struct dma_fence_ops *ops)
-{
- BUG_ON(!fence || !sync_buf || !ops);
- BUG_ON(!ops->wait || !ops->enable_signaling ||
- !ops->get_driver_name || !ops->get_timeline_name);
-
- /*
- * ops is used in dma_fence_init for get_driver_name, so needs to be
- * initialized first
- */
- fence->ops = ops;
- dma_fence_init(&fence->base, &seqno_fence_ops, lock, context, seqno);
- get_dma_buf(sync_buf);
- fence->sync_buf = sync_buf;
- fence->seqno_ofs = seqno_ofs;
- fence->condition = cond;
-}
-
-#endif /* __LINUX_SEQNO_FENCE_H */
diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h
index 9814fff58a69..76fbf92b04d9 100644
--- a/include/linux/shrinker.h
+++ b/include/linux/shrinker.h
@@ -93,4 +93,5 @@ extern void register_shrinker_prepared(struct shrinker *shrinker);
extern int register_shrinker(struct shrinker *shrinker);
extern void unregister_shrinker(struct shrinker *shrinker);
extern void free_prealloced_shrinker(struct shrinker *shrinker);
+extern void synchronize_shrinkers(void);
#endif
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index 0cbd1540aeac..26e45fc5eb1a 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -786,13 +786,6 @@ struct drm_amdgpu_cs_chunk_data {
#define AMDGPU_INFO_VRAM_LOST_COUNTER 0x1F
/* query ras mask of enabled features*/
#define AMDGPU_INFO_RAS_ENABLED_FEATURES 0x20
-/* query video encode/decode caps */
-#define AMDGPU_INFO_VIDEO_CAPS 0x21
- /* Subquery id: Decode */
- #define AMDGPU_INFO_VIDEO_CAPS_DECODE 0
- /* Subquery id: Encode */
- #define AMDGPU_INFO_VIDEO_CAPS_ENCODE 1
-
/* RAS MASK: UMC (VRAM) */
#define AMDGPU_INFO_RAS_ENABLED_UMC (1 << 0)
/* RAS MASK: SDMA */
@@ -821,6 +814,12 @@ struct drm_amdgpu_cs_chunk_data {
#define AMDGPU_INFO_RAS_ENABLED_MP1 (1 << 12)
/* RAS MASK: FUSE */
#define AMDGPU_INFO_RAS_ENABLED_FUSE (1 << 13)
+/* query video encode/decode caps */
+#define AMDGPU_INFO_VIDEO_CAPS 0x21
+ /* Subquery id: Decode */
+ #define AMDGPU_INFO_VIDEO_CAPS_DECODE 0
+ /* Subquery id: Encode */
+ #define AMDGPU_INFO_VIDEO_CAPS_ENCODE 1
#define AMDGPU_INFO_MMR_SE_INDEX_SHIFT 0
#define AMDGPU_INFO_MMR_SE_INDEX_MASK 0xff
diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h
index 9f4bb4a6f358..7f652c96845b 100644
--- a/include/uapi/drm/drm_fourcc.h
+++ b/include/uapi/drm/drm_fourcc.h
@@ -104,6 +104,12 @@ extern "C" {
/* 8 bpp Red */
#define DRM_FORMAT_R8 fourcc_code('R', '8', ' ', ' ') /* [7:0] R */
+/* 10 bpp Red */
+#define DRM_FORMAT_R10 fourcc_code('R', '1', '0', ' ') /* [15:0] x:R 6:10 little endian */
+
+/* 12 bpp Red */
+#define DRM_FORMAT_R12 fourcc_code('R', '1', '2', ' ') /* [15:0] x:R 4:12 little endian */
+
/* 16 bpp Red */
#define DRM_FORMAT_R16 fourcc_code('R', '1', '6', ' ') /* [15:0] R little endian */
@@ -373,6 +379,12 @@ extern "C" {
#define DRM_FORMAT_RESERVED ((1ULL << 56) - 1)
+#define fourcc_mod_get_vendor(modifier) \
+ (((modifier) >> 56) & 0xff)
+
+#define fourcc_mod_is_vendor(modifier, vendor) \
+ (fourcc_mod_get_vendor(modifier) == DRM_FORMAT_MOD_VENDOR_## vendor)
+
#define fourcc_mod_code(vendor, val) \
((((__u64)DRM_FORMAT_MOD_VENDOR_## vendor) << 56) | ((val) & 0x00ffffffffffffffULL))
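
An illustrative use of the new vendor helpers (not part of this patch): dispatch on a modifier's vendor before decoding the vendor-specific bits.

static bool is_amd_modifier(__u64 modifier)
{
        /* Compares the top 8 vendor bits against DRM_FORMAT_MOD_VENDOR_AMD. */
        return fourcc_mod_is_vendor(modifier, AMD);
}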
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index 90c55383f1ee..e1e351682872 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -1110,6 +1110,10 @@ struct drm_mode_destroy_blob {
* struct drm_mode_create_lease - Create lease
*
* Lease mode resources, creating another drm_master.
+ *
+ * The @object_ids array must reference at least one CRTC, one connector and
+ * one plane if &DRM_CLIENT_CAP_UNIVERSAL_PLANES is enabled. Alternatively,
+ * the lease can be completely empty.
*/
struct drm_mode_create_lease {
/** @object_ids: Pointer to array of object ids (__u32) */
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index bde5860b3686..914ebd9290e5 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -1522,6 +1522,12 @@ struct drm_i915_gem_caching {
#define I915_TILING_NONE 0
#define I915_TILING_X 1
#define I915_TILING_Y 2
+/*
+ * Do not add new tiling types here. The I915_TILING_* values are for
+ * de-tiling fence registers that no longer exist on modern platforms. Although
+ * the hardware may support new types of tiling in general (e.g., Tile4), we
+ * do not need to add them to the uapi that is specific to now-defunct ioctls.
+ */
#define I915_TILING_LAST I915_TILING_Y
#define I915_BIT_6_SWIZZLE_NONE 0
@@ -1824,6 +1830,7 @@ struct drm_i915_gem_context_param {
* Extensions:
* i915_context_engines_load_balance (I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE)
* i915_context_engines_bond (I915_CONTEXT_ENGINES_EXT_BOND)
+ * i915_context_engines_parallel_submit (I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT)
*/
#define I915_CONTEXT_PARAM_ENGINES 0xa
@@ -1846,6 +1853,55 @@ struct drm_i915_gem_context_param {
* attempted to use it, never re-use this context param number.
*/
#define I915_CONTEXT_PARAM_RINGSIZE 0xc
+
+/*
+ * I915_CONTEXT_PARAM_PROTECTED_CONTENT:
+ *
+ * Mark that the context makes use of protected content, which will result
+ * in the context being invalidated when the protected content session is.
+ * Given that the protected content session is killed on suspend, the device
+ * is kept awake for the lifetime of a protected context, so the user should
+ * make sure to dispose of such contexts once done.
+ * This flag can only be set at context creation time and, when set to true,
+ * must be preceded by an explicit setting of I915_CONTEXT_PARAM_RECOVERABLE
+ * to false. This flag can't be set to true in conjunction with setting the
+ * I915_CONTEXT_PARAM_BANNABLE flag to false. Creation example:
+ *
+ * .. code-block:: C
+ *
+ * struct drm_i915_gem_context_create_ext_setparam p_protected = {
+ * .base = {
+ * .name = I915_CONTEXT_CREATE_EXT_SETPARAM,
+ * },
+ * .param = {
+ * .param = I915_CONTEXT_PARAM_PROTECTED_CONTENT,
+ * .value = 1,
+ * }
+ * };
+ * struct drm_i915_gem_context_create_ext_setparam p_norecover = {
+ * .base = {
+ * .name = I915_CONTEXT_CREATE_EXT_SETPARAM,
+ * .next_extension = to_user_pointer(&p_protected),
+ * },
+ * .param = {
+ * .param = I915_CONTEXT_PARAM_RECOVERABLE,
+ * .value = 0,
+ * }
+ * };
+ * struct drm_i915_gem_context_create_ext create = {
+ * .flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
+ * .extensions = to_user_pointer(&p_norecover);
+ * };
+ *
+ * ctx_id = gem_context_create_ext(drm_fd, &create);
+ *
+ * In addition to the normal failure cases, setting this flag during context
+ * creation can result in the following errors:
+ *
+ * -ENODEV: feature not available
+ * -EPERM: trying to mark a recoverable or not bannable context as protected
+ */
+#define I915_CONTEXT_PARAM_PROTECTED_CONTENT 0xd
/* Must be kept compact -- no holes and well documented */
__u64 value;
@@ -2050,6 +2106,135 @@ struct i915_context_engines_bond {
} __attribute__((packed)) name__
/**
+ * struct i915_context_engines_parallel_submit - Configure engine for
+ * parallel submission.
+ *
+ * Set up a slot in the context engine map to allow multiple BBs to be submitted
+ * in a single execbuf IOCTL. Those BBs will then be scheduled to run on the GPU
+ * in parallel. Multiple hardware contexts are created internally in the i915 to
+ * run these BBs. Once a slot is configured for N BBs only N BBs can be
+ * submitted in each execbuf IOCTL and this is implicit behavior e.g. The user
+ * doesn't tell the execbuf IOCTL there are N BBs, the execbuf IOCTL knows how
+ * many BBs there are based on the slot's configuration. The N BBs are the last
+ * N buffer objects or first N if I915_EXEC_BATCH_FIRST is set.
+ *
+ * The default placement behavior is to create implicit bonds between each
+ * context if each context maps to more than 1 physical engine (e.g. the
+ * context is a virtual engine). Also, we only allow contexts of the same
+ * engine class, and these contexts must be in logically contiguous order.
+ * Examples of the placement behavior are described below. Lastly, the
+ * default is to not allow BBs to be preempted mid-batch; rather, coordinated
+ * preemption points are inserted on all hardware contexts between each set
+ * of BBs. Flags could be added in the future to change both of these default
+ * behaviors.
+ *
+ * Returns -EINVAL if hardware context placement configuration is invalid or if
+ * the placement configuration isn't supported on the platform / submission
+ * interface.
+ * Returns -ENODEV if extension isn't supported on the platform / submission
+ * interface.
+ *
+ * .. code-block:: none
+ *
+ * Examples syntax:
+ * CS[X] = generic engine of same class, logical instance X
+ * INVALID = I915_ENGINE_CLASS_INVALID, I915_ENGINE_CLASS_INVALID_NONE
+ *
+ * Example 1 pseudo code:
+ * set_engines(INVALID)
+ * set_parallel(engine_index=0, width=2, num_siblings=1,
+ * engines=CS[0],CS[1])
+ *
+ * Results in the following valid placement:
+ * CS[0], CS[1]
+ *
+ * Example 2 pseudo code:
+ * set_engines(INVALID)
+ * set_parallel(engine_index=0, width=2, num_siblings=2,
+ * engines=CS[0],CS[2],CS[1],CS[3])
+ *
+ * Results in the following valid placements:
+ * CS[0], CS[1]
+ * CS[2], CS[3]
+ *
+ * This can be thought of as two virtual engines, each containing two
+ * engines thereby making a 2D array. However, there are bonds tying the
+ * entries together and placing restrictions on how they can be scheduled.
+ * Specifically, the scheduler can choose only vertical columns from the 2D
+ * array. That is, CS[0] is bonded to CS[1] and CS[2] to CS[3]. So if the
+ * scheduler wants to submit to CS[0], it must also choose CS[1] and vice
+ * versa. Likewise, using CS[2] requires also using CS[3].
+ * VE[0] = CS[0], CS[2]
+ * VE[1] = CS[1], CS[3]
+ *
+ * Example 3 pseudo code:
+ * set_engines(INVALID)
+ * set_parallel(engine_index=0, width=2, num_siblings=2,
+ * engines=CS[0],CS[1],CS[1],CS[3])
+ *
+ * Results in the following valid and invalid placements:
+ * CS[0], CS[1]
+ * CS[1], CS[3] - Not logically contiguous, return -EINVAL
+ */
+struct i915_context_engines_parallel_submit {
+ /**
+ * @base: base user extension.
+ */
+ struct i915_user_extension base;
+
+ /**
+ * @engine_index: slot for parallel engine
+ */
+ __u16 engine_index;
+
+ /**
+ * @width: number of contexts per parallel engine or in other words the
+ * number of batches in each submission
+ */
+ __u16 width;
+
+ /**
+ * @num_siblings: number of siblings per context or in other words the
+ * number of possible placements for each submission
+ */
+ __u16 num_siblings;
+
+ /**
+ * @mbz16: reserved for future use; must be zero
+ */
+ __u16 mbz16;
+
+ /**
+ * @flags: all undefined flags must be zero; currently no flags are defined
+ */
+ __u64 flags;
+
+ /**
+ * @mbz64: reserved for future use; must be zero
+ */
+ __u64 mbz64[3];
+
+ /**
+ * @engines: 2-d array of engine instances to configure parallel engine
+ *
+ * length = width (i) * num_siblings (j)
+ * index = j + i * num_siblings
+ */
+ struct i915_engine_class_instance engines[0];
+
+} __packed;
+
+#define I915_DEFINE_CONTEXT_ENGINES_PARALLEL_SUBMIT(name__, N__) struct { \
+ struct i915_user_extension base; \
+ __u16 engine_index; \
+ __u16 width; \
+ __u16 num_siblings; \
+ __u16 mbz16; \
+ __u64 flags; \
+ __u64 mbz64[3]; \
+ struct i915_engine_class_instance engines[N__]; \
+} __attribute__((packed)) name__
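
For illustration only, assuming render engines with logical instances 0-3: a 2-wide slot with two possible placements each, matching Example 2 above, declared via the helper macro.

I915_DEFINE_CONTEXT_ENGINES_PARALLEL_SUBMIT(parallel2x2, 4) = {
        .base = { .name = I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT },
        .engine_index = 0,
        .width = 2,
        .num_siblings = 2,
        .engines = {
                /* index = j + i * num_siblings */
                { I915_ENGINE_CLASS_RENDER, 0 },        /* i=0, j=0 */
                { I915_ENGINE_CLASS_RENDER, 2 },        /* i=0, j=1 */
                { I915_ENGINE_CLASS_RENDER, 1 },        /* i=1, j=0 */
                { I915_ENGINE_CLASS_RENDER, 3 },        /* i=1, j=1 */
        },
};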
+
+/**
* DOC: Context Engine Map uAPI
*
* Context engine map is a new way of addressing engines when submitting batch-
@@ -2108,6 +2293,7 @@ struct i915_context_param_engines {
__u64 extensions; /* linked chain of extension blocks, 0 terminates */
#define I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE 0 /* see i915_context_engines_load_balance */
#define I915_CONTEXT_ENGINES_EXT_BOND 1 /* see i915_context_engines_bond */
+#define I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT 2 /* see i915_context_engines_parallel_submit */
struct i915_engine_class_instance engines[0];
} __attribute__((packed));
@@ -2726,14 +2912,20 @@ struct drm_i915_engine_info {
/** @flags: Engine flags. */
__u64 flags;
+#define I915_ENGINE_INFO_HAS_LOGICAL_INSTANCE (1 << 0)
/** @capabilities: Capabilities of this engine. */
__u64 capabilities;
#define I915_VIDEO_CLASS_CAPABILITY_HEVC (1 << 0)
#define I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC (1 << 1)
+ /** @logical_instance: Logical instance of engine */
+ __u16 logical_instance;
+
/** @rsvd1: Reserved fields. */
- __u64 rsvd1[4];
+ __u16 rsvd1[3];
+ /** @rsvd2: Reserved fields. */
+ __u64 rsvd2[3];
};
/**
@@ -2979,8 +3171,12 @@ struct drm_i915_gem_create_ext {
*
* For I915_GEM_CREATE_EXT_MEMORY_REGIONS usage see
* struct drm_i915_gem_create_ext_memory_regions.
+ *
+ * For I915_GEM_CREATE_EXT_PROTECTED_CONTENT usage see
+ * struct drm_i915_gem_create_ext_protected_content.
*/
#define I915_GEM_CREATE_EXT_MEMORY_REGIONS 0
+#define I915_GEM_CREATE_EXT_PROTECTED_CONTENT 1
__u64 extensions;
};
@@ -3038,6 +3234,50 @@ struct drm_i915_gem_create_ext_memory_regions {
__u64 regions;
};
+/**
+ * struct drm_i915_gem_create_ext_protected_content - The
+ * I915_OBJECT_PARAM_PROTECTED_CONTENT extension.
+ *
+ * If this extension is provided, buffer contents are expected to be protected
+ * by PXP encryption and require decryption for scan out and processing. This
+ * is only possible on platforms that have PXP enabled; in all other scenarios
+ * using this extension will cause the ioctl to fail and return -ENODEV. The
+ * flags parameter is reserved for future expansion and must currently be set
+ * to zero.
+ *
+ * The buffer contents are considered invalid after a PXP session teardown.
+ *
+ * The encryption is guaranteed to be processed correctly only if the object
+ * is submitted with a context created using the
+ * I915_CONTEXT_PARAM_PROTECTED_CONTENT flag. This will also enable extra checks
+ * at submission time on the validity of the objects involved.
+ *
+ * Below is an example on how to create a protected object:
+ *
+ * .. code-block:: C
+ *
+ * struct drm_i915_gem_create_ext_protected_content protected_ext = {
+ * .base = { .name = I915_GEM_CREATE_EXT_PROTECTED_CONTENT },
+ * .flags = 0,
+ * };
+ * struct drm_i915_gem_create_ext create_ext = {
+ * .size = PAGE_SIZE,
+ * .extensions = (uintptr_t)&protected_ext,
+ * };
+ *
+ * int err = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create_ext);
+ * if (err) ...
+ */
+struct drm_i915_gem_create_ext_protected_content {
+ /** @base: Extension link. See struct i915_user_extension. */
+ struct i915_user_extension base;
+ /** @flags: reserved for future usage, currently MBZ */
+ __u32 flags;
+};
+
+/* ID of the protected content session managed by i915 when PXP is active */
+#define I915_PROTECTED_CONTENT_DEFAULT_SESSION 0xf
+
#if defined(__cplusplus)
}
#endif
diff --git a/include/uapi/drm/v3d_drm.h b/include/uapi/drm/v3d_drm.h
index 4104f22fb3d3..3dfc0af8756a 100644
--- a/include/uapi/drm/v3d_drm.h
+++ b/include/uapi/drm/v3d_drm.h
@@ -58,6 +58,67 @@ extern "C" {
struct drm_v3d_perfmon_get_values)
#define DRM_V3D_SUBMIT_CL_FLUSH_CACHE 0x01
+#define DRM_V3D_SUBMIT_EXTENSION 0x02
+
+/* struct drm_v3d_extension - ioctl extensions
+ *
+ * Linked list of generic extensions where the id identifies which struct is
+ * pointed to by ext_data. Therefore, DRM_V3D_EXT_ID_* is used in id to
+ * identify the extension type.
+ */
+struct drm_v3d_extension {
+ __u64 next;
+ __u32 id;
+#define DRM_V3D_EXT_ID_MULTI_SYNC 0x01
+ __u32 flags; /* mbz */
+};
+
+/* struct drm_v3d_sem - wait/signal semaphore
+ *
+ * For a binary semaphore, only the syncobj handle is used; the flags and
+ * point fields are ignored. Point is defined for the timeline syncobj
+ * feature.
+ */
+struct drm_v3d_sem {
+ __u32 handle; /* syncobj */
+ /* rsv below, for future uses */
+ __u32 flags;
+ __u64 point; /* for timeline sem support */
+ __u64 mbz[2]; /* must be zero, rsv */
+};
+
+/* Enum for each of the V3D queues. */
+enum v3d_queue {
+ V3D_BIN,
+ V3D_RENDER,
+ V3D_TFU,
+ V3D_CSD,
+ V3D_CACHE_CLEAN,
+};
+
+/**
+ * struct drm_v3d_multi_sync - ioctl extension to add support for multiple
+ * syncobjs on command submission.
+ *
+ * When an extension with the DRM_V3D_EXT_ID_MULTI_SYNC id is defined, it
+ * points to this extension to define wait and signal dependencies, instead
+ * of the single in/out sync entries on submitting commands. The flags field
+ * is used to determine the stage at which to set wait dependencies.
+ */
+struct drm_v3d_multi_sync {
+ struct drm_v3d_extension base;
+ /* Array of wait and signal semaphores */
+ __u64 in_syncs;
+ __u64 out_syncs;
+
+ /* Number of entries */
+ __u32 in_sync_count;
+ __u32 out_sync_count;
+
+ /* set the stage (v3d_queue) to sync */
+ __u32 wait_stage;
+
+ __u32 pad; /* mbz */
+};
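
A hypothetical userspace sketch (the syncobj handles and elided fields are assumptions) chaining the multi-sync extension into a CL submission:

struct drm_v3d_sem in_sem = { .handle = wait_syncobj_handle };
struct drm_v3d_sem out_sem = { .handle = signal_syncobj_handle };
struct drm_v3d_multi_sync ms = {
        .base = { .id = DRM_V3D_EXT_ID_MULTI_SYNC },
        .in_syncs = (uintptr_t)&in_sem,
        .out_syncs = (uintptr_t)&out_sem,
        .in_sync_count = 1,
        .out_sync_count = 1,
        .wait_stage = V3D_BIN,
};
struct drm_v3d_submit_cl submit = {
        /* ... the usual CL fields ... */
        .flags = DRM_V3D_SUBMIT_EXTENSION,
        .extensions = (uintptr_t)&ms,
};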
/**
* struct drm_v3d_submit_cl - ioctl argument for submitting commands to the 3D
@@ -135,12 +196,16 @@ struct drm_v3d_submit_cl {
/* Number of BO handles passed in (size is that times 4). */
__u32 bo_handle_count;
+ /* DRM_V3D_SUBMIT_* properties */
__u32 flags;
/* ID of the perfmon to attach to this job. 0 means no perfmon. */
__u32 perfmon_id;
__u32 pad;
+
+ /* Pointer to an array of ioctl extensions */
+ __u64 extensions;
};
/**
@@ -210,6 +275,7 @@ enum drm_v3d_param {
DRM_V3D_PARAM_SUPPORTS_CSD,
DRM_V3D_PARAM_SUPPORTS_CACHE_FLUSH,
DRM_V3D_PARAM_SUPPORTS_PERFMON,
+ DRM_V3D_PARAM_SUPPORTS_MULTISYNC_EXT,
};
struct drm_v3d_get_param {
@@ -248,6 +314,11 @@ struct drm_v3d_submit_tfu {
__u32 in_sync;
/* Sync object to signal when the TFU job is done. */
__u32 out_sync;
+
+ __u32 flags;
+
+ /* Pointer to an array of ioctl extensions */
+ __u64 extensions;
};
/* Submits a compute shader for dispatch. This job will block on any
@@ -276,6 +347,13 @@ struct drm_v3d_submit_csd {
/* ID of the perfmon to attach to this job. 0 means no perfmon. */
__u32 perfmon_id;
+
+ /* Pointer to an array of ioctl extensions */
+ __u64 extensions;
+
+ __u32 flags;
+
+ __u32 pad;
};
enum {
diff --git a/include/uapi/drm/virtgpu_drm.h b/include/uapi/drm/virtgpu_drm.h
index b9ec26e9c646..a13e20cc66b4 100644
--- a/include/uapi/drm/virtgpu_drm.h
+++ b/include/uapi/drm/virtgpu_drm.h
@@ -47,12 +47,15 @@ extern "C" {
#define DRM_VIRTGPU_WAIT 0x08
#define DRM_VIRTGPU_GET_CAPS 0x09
#define DRM_VIRTGPU_RESOURCE_CREATE_BLOB 0x0a
+#define DRM_VIRTGPU_CONTEXT_INIT 0x0b
#define VIRTGPU_EXECBUF_FENCE_FD_IN 0x01
#define VIRTGPU_EXECBUF_FENCE_FD_OUT 0x02
+#define VIRTGPU_EXECBUF_RING_IDX 0x04
#define VIRTGPU_EXECBUF_FLAGS (\
VIRTGPU_EXECBUF_FENCE_FD_IN |\
VIRTGPU_EXECBUF_FENCE_FD_OUT |\
+ VIRTGPU_EXECBUF_RING_IDX |\
0)
struct drm_virtgpu_map {
@@ -68,6 +71,8 @@ struct drm_virtgpu_execbuffer {
__u64 bo_handles;
__u32 num_bo_handles;
__s32 fence_fd; /* in/out fence fd (see VIRTGPU_EXECBUF_FENCE_FD_IN/OUT) */
+ __u32 ring_idx; /* command ring index (see VIRTGPU_EXECBUF_RING_IDX) */
+ __u32 pad;
};
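
Hypothetical userspace sketch of submitting on a specific ring; cmds/cmds_size and the ring index are assumptions, and the context must have been initialized with multiple rings (see DRM_VIRTGPU_CONTEXT_INIT below).

struct drm_virtgpu_execbuffer eb = {
        .flags = VIRTGPU_EXECBUF_RING_IDX,
        .size = cmds_size,
        .command = (uintptr_t)cmds,
        .ring_idx = 1,  /* fence on command ring 1 of this context */
};

ioctl(drm_fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &eb);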
#define VIRTGPU_PARAM_3D_FEATURES 1 /* do we have 3D features in the hw */
@@ -75,6 +80,8 @@ struct drm_virtgpu_execbuffer {
#define VIRTGPU_PARAM_RESOURCE_BLOB 3 /* DRM_VIRTGPU_RESOURCE_CREATE_BLOB */
#define VIRTGPU_PARAM_HOST_VISIBLE 4 /* Host blob resources are mappable */
#define VIRTGPU_PARAM_CROSS_DEVICE 5 /* Cross virtio-device resource sharing */
+#define VIRTGPU_PARAM_CONTEXT_INIT 6 /* DRM_VIRTGPU_CONTEXT_INIT */
+#define VIRTGPU_PARAM_SUPPORTED_CAPSET_IDs 7 /* Bitmask of supported capability set ids */
struct drm_virtgpu_getparam {
__u64 param;
@@ -173,6 +180,22 @@ struct drm_virtgpu_resource_create_blob {
__u64 blob_id;
};
+#define VIRTGPU_CONTEXT_PARAM_CAPSET_ID 0x0001
+#define VIRTGPU_CONTEXT_PARAM_NUM_RINGS 0x0002
+#define VIRTGPU_CONTEXT_PARAM_POLL_RINGS_MASK 0x0003
+struct drm_virtgpu_context_set_param {
+ __u64 param;
+ __u64 value;
+};
+
+struct drm_virtgpu_context_init {
+ __u32 num_params;
+ __u32 pad;
+
+ /* pointer to drm_virtgpu_context_set_param array */
+ __u64 ctx_set_params;
+};
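
A hedged sketch of initializing a context with these structures via the DRM_IOCTL_VIRTGPU_CONTEXT_INIT ioctl defined below; the capset id and ring count are assumptions.

struct drm_virtgpu_context_set_param params[] = {
        { .param = VIRTGPU_CONTEXT_PARAM_CAPSET_ID, .value = 1 },
        { .param = VIRTGPU_CONTEXT_PARAM_NUM_RINGS, .value = 2 },
};
struct drm_virtgpu_context_init init = {
        .num_params = 2,
        .ctx_set_params = (uintptr_t)params,
};

ioctl(drm_fd, DRM_IOCTL_VIRTGPU_CONTEXT_INIT, &init);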
+
#define DRM_IOCTL_VIRTGPU_MAP \
DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_MAP, struct drm_virtgpu_map)
@@ -212,6 +235,10 @@ struct drm_virtgpu_resource_create_blob {
DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_RESOURCE_CREATE_BLOB, \
struct drm_virtgpu_resource_create_blob)
+#define DRM_IOCTL_VIRTGPU_CONTEXT_INIT \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_CONTEXT_INIT, \
+ struct drm_virtgpu_context_init)
+
#if defined(__cplusplus)
}
#endif
diff --git a/include/uapi/linux/virtio_gpu.h b/include/uapi/linux/virtio_gpu.h
index 97523a95781d..f556fde07b76 100644
--- a/include/uapi/linux/virtio_gpu.h
+++ b/include/uapi/linux/virtio_gpu.h
@@ -59,6 +59,11 @@
* VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB
*/
#define VIRTIO_GPU_F_RESOURCE_BLOB 3
+/*
+ * VIRTIO_GPU_CMD_CREATE_CONTEXT with
+ * context_init and multiple timelines
+ */
+#define VIRTIO_GPU_F_CONTEXT_INIT 4
enum virtio_gpu_ctrl_type {
VIRTIO_GPU_UNDEFINED = 0,
@@ -122,14 +127,20 @@ enum virtio_gpu_shm_id {
VIRTIO_GPU_SHM_ID_HOST_VISIBLE = 1
};
-#define VIRTIO_GPU_FLAG_FENCE (1 << 0)
+#define VIRTIO_GPU_FLAG_FENCE (1 << 0)
+/*
+ * If the following flag is set, then ring_idx contains the index
+ * of the command ring that needs to be used when creating the fence.
+ */
+#define VIRTIO_GPU_FLAG_INFO_RING_IDX (1 << 1)
struct virtio_gpu_ctrl_hdr {
__le32 type;
__le32 flags;
__le64 fence_id;
__le32 ctx_id;
- __le32 padding;
+ __u8 ring_idx;
+ __u8 padding[3];
};
/* data passed in the cursor vq */
@@ -269,10 +280,11 @@ struct virtio_gpu_resource_create_3d {
};
/* VIRTIO_GPU_CMD_CTX_CREATE */
+#define VIRTIO_GPU_CONTEXT_INIT_CAPSET_ID_MASK 0x000000ff
struct virtio_gpu_ctx_create {
struct virtio_gpu_ctrl_hdr hdr;
__le32 nlen;
- __le32 padding;
+ __le32 context_init;
char debug_name[64];
};