author     Jani Nikula <jani.nikula@intel.com>   2017-10-18 12:37:31 +0300
committer  Jani Nikula <jani.nikula@intel.com>   2017-10-18 12:37:31 +0300
commit     526b96c4f8dceb0104e267c61e5a3b22c538570c
tree       8825258c4f678f5ad9fddc0afaf72d640458481a /include
parent     cb8d50dfb341e9615c8d203a3e6513dae9ff901d
parent     40d86701a625eed9e644281b9af228d6a52d8ed9
Merge drm-upstream/drm-next into drm-intel-next-queued
Needed for timer_setup() and drm_dev_{get,put}() conversions in i915.
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Diffstat (limited to 'include')
32 files changed, 319 insertions, 147 deletions
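
The merge message above names timer_setup() and drm_dev_{get,put}(), both of which are introduced by hunks in this diff (include/linux/timer.h and include/drm/drm_drv.h). The sketch below only illustrates the conversion pattern those hunks enable; struct my_gpu, its hangcheck_timer field and the surrounding functions are hypothetical and not part of this commit.

#include <linux/jiffies.h>
#include <linux/timer.h>
#include <drm/drm_drv.h>

struct my_gpu {
	struct drm_device *ddev;
	struct timer_list hangcheck_timer;
};

/* With timer_setup(), the callback receives the timer itself and recovers
 * its container via from_timer() instead of casting an unsigned long cookie. */
static void my_hangcheck(struct timer_list *t)
{
	struct my_gpu *gpu = from_timer(gpu, t, hangcheck_timer);

	/* drm_dev_get()/drm_dev_put() are the get/put-style names that replace
	 * drm_dev_ref()/drm_dev_unref(); shown here purely for the calling
	 * convention. */
	drm_dev_get(gpu->ddev);
	/* ... handle the timeout ... */
	drm_dev_put(gpu->ddev);
}

static void my_gpu_start(struct my_gpu *gpu)
{
	/* Replaces setup_timer(&gpu->hangcheck_timer, fn, (unsigned long)gpu). */
	timer_setup(&gpu->hangcheck_timer, my_hangcheck, 0);
	mod_timer(&gpu->hangcheck_timer, jiffies + HZ);
}
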
diff --git a/include/drm/bridge/mhl.h b/include/drm/bridge/mhl.h index fbdfc8d7f3c7..96a5e0f6ff12 100644 --- a/include/drm/bridge/mhl.h +++ b/include/drm/bridge/mhl.h @@ -262,6 +262,10 @@ enum { #define MHL_RAPK_UNSUPPORTED 0x02 /* Rcvd RAP action code not supported */ #define MHL_RAPK_BUSY 0x03 /* Responder too busy to respond */ +/* Bit masks for RCP messages */ +#define MHL_RCP_KEY_RELEASED_MASK 0x80 +#define MHL_RCP_KEY_ID_MASK 0x7F + /* * Error status codes for RCPE messages */ diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h index 5834580d75bc..5afd6e364fb6 100644 --- a/include/drm/drm_atomic.h +++ b/include/drm/drm_atomic.h @@ -585,12 +585,12 @@ void drm_state_dump(struct drm_device *dev, struct drm_printer *p); */ #define for_each_oldnew_connector_in_state(__state, connector, old_connector_state, new_connector_state, __i) \ for ((__i) = 0; \ - (__i) < (__state)->num_connector && \ - ((connector) = (__state)->connectors[__i].ptr, \ - (old_connector_state) = (__state)->connectors[__i].old_state, \ - (new_connector_state) = (__state)->connectors[__i].new_state, 1); \ - (__i)++) \ - for_each_if (connector) + (__i) < (__state)->num_connector; \ + (__i)++) \ + for_each_if ((__state)->connectors[__i].ptr && \ + ((connector) = (__state)->connectors[__i].ptr, \ + (old_connector_state) = (__state)->connectors[__i].old_state, \ + (new_connector_state) = (__state)->connectors[__i].new_state, 1)) /** * for_each_old_connector_in_state - iterate over all connectors in an atomic update @@ -606,11 +606,11 @@ void drm_state_dump(struct drm_device *dev, struct drm_printer *p); */ #define for_each_old_connector_in_state(__state, connector, old_connector_state, __i) \ for ((__i) = 0; \ - (__i) < (__state)->num_connector && \ - ((connector) = (__state)->connectors[__i].ptr, \ - (old_connector_state) = (__state)->connectors[__i].old_state, 1); \ - (__i)++) \ - for_each_if (connector) + (__i) < (__state)->num_connector; \ + (__i)++) \ + for_each_if ((__state)->connectors[__i].ptr && \ + ((connector) = (__state)->connectors[__i].ptr, \ + (old_connector_state) = (__state)->connectors[__i].old_state, 1)) /** * for_each_new_connector_in_state - iterate over all connectors in an atomic update @@ -626,11 +626,11 @@ void drm_state_dump(struct drm_device *dev, struct drm_printer *p); */ #define for_each_new_connector_in_state(__state, connector, new_connector_state, __i) \ for ((__i) = 0; \ - (__i) < (__state)->num_connector && \ - ((connector) = (__state)->connectors[__i].ptr, \ - (new_connector_state) = (__state)->connectors[__i].new_state, 1); \ - (__i)++) \ - for_each_if (connector) + (__i) < (__state)->num_connector; \ + (__i)++) \ + for_each_if ((__state)->connectors[__i].ptr && \ + ((connector) = (__state)->connectors[__i].ptr, \ + (new_connector_state) = (__state)->connectors[__i].new_state, 1)) /** * for_each_oldnew_crtc_in_state - iterate over all CRTCs in an atomic update @@ -646,12 +646,12 @@ void drm_state_dump(struct drm_device *dev, struct drm_printer *p); */ #define for_each_oldnew_crtc_in_state(__state, crtc, old_crtc_state, new_crtc_state, __i) \ for ((__i) = 0; \ - (__i) < (__state)->dev->mode_config.num_crtc && \ - ((crtc) = (__state)->crtcs[__i].ptr, \ - (old_crtc_state) = (__state)->crtcs[__i].old_state, \ - (new_crtc_state) = (__state)->crtcs[__i].new_state, 1); \ + (__i) < (__state)->dev->mode_config.num_crtc; \ (__i)++) \ - for_each_if (crtc) + for_each_if ((__state)->crtcs[__i].ptr && \ + ((crtc) = (__state)->crtcs[__i].ptr, \ + (old_crtc_state) = 
(__state)->crtcs[__i].old_state, \ + (new_crtc_state) = (__state)->crtcs[__i].new_state, 1)) /** * for_each_old_crtc_in_state - iterate over all CRTCs in an atomic update @@ -666,11 +666,11 @@ void drm_state_dump(struct drm_device *dev, struct drm_printer *p); */ #define for_each_old_crtc_in_state(__state, crtc, old_crtc_state, __i) \ for ((__i) = 0; \ - (__i) < (__state)->dev->mode_config.num_crtc && \ - ((crtc) = (__state)->crtcs[__i].ptr, \ - (old_crtc_state) = (__state)->crtcs[__i].old_state, 1); \ + (__i) < (__state)->dev->mode_config.num_crtc; \ (__i)++) \ - for_each_if (crtc) + for_each_if ((__state)->crtcs[__i].ptr && \ + ((crtc) = (__state)->crtcs[__i].ptr, \ + (old_crtc_state) = (__state)->crtcs[__i].old_state, 1)) /** * for_each_new_crtc_in_state - iterate over all CRTCs in an atomic update @@ -685,11 +685,11 @@ void drm_state_dump(struct drm_device *dev, struct drm_printer *p); */ #define for_each_new_crtc_in_state(__state, crtc, new_crtc_state, __i) \ for ((__i) = 0; \ - (__i) < (__state)->dev->mode_config.num_crtc && \ - ((crtc) = (__state)->crtcs[__i].ptr, \ - (new_crtc_state) = (__state)->crtcs[__i].new_state, 1); \ + (__i) < (__state)->dev->mode_config.num_crtc; \ (__i)++) \ - for_each_if (crtc) + for_each_if ((__state)->crtcs[__i].ptr && \ + ((crtc) = (__state)->crtcs[__i].ptr, \ + (new_crtc_state) = (__state)->crtcs[__i].new_state, 1)) /** * for_each_oldnew_plane_in_state - iterate over all planes in an atomic update @@ -705,12 +705,12 @@ void drm_state_dump(struct drm_device *dev, struct drm_printer *p); */ #define for_each_oldnew_plane_in_state(__state, plane, old_plane_state, new_plane_state, __i) \ for ((__i) = 0; \ - (__i) < (__state)->dev->mode_config.num_total_plane && \ - ((plane) = (__state)->planes[__i].ptr, \ - (old_plane_state) = (__state)->planes[__i].old_state, \ - (new_plane_state) = (__state)->planes[__i].new_state, 1); \ + (__i) < (__state)->dev->mode_config.num_total_plane; \ (__i)++) \ - for_each_if (plane) + for_each_if ((__state)->planes[__i].ptr && \ + ((plane) = (__state)->planes[__i].ptr, \ + (old_plane_state) = (__state)->planes[__i].old_state,\ + (new_plane_state) = (__state)->planes[__i].new_state, 1)) /** * for_each_old_plane_in_state - iterate over all planes in an atomic update @@ -725,12 +725,11 @@ void drm_state_dump(struct drm_device *dev, struct drm_printer *p); */ #define for_each_old_plane_in_state(__state, plane, old_plane_state, __i) \ for ((__i) = 0; \ - (__i) < (__state)->dev->mode_config.num_total_plane && \ - ((plane) = (__state)->planes[__i].ptr, \ - (old_plane_state) = (__state)->planes[__i].old_state, 1); \ + (__i) < (__state)->dev->mode_config.num_total_plane; \ (__i)++) \ - for_each_if (plane) - + for_each_if ((__state)->planes[__i].ptr && \ + ((plane) = (__state)->planes[__i].ptr, \ + (old_plane_state) = (__state)->planes[__i].old_state, 1)) /** * for_each_new_plane_in_state - iterate over all planes in an atomic update * @__state: &struct drm_atomic_state pointer @@ -744,11 +743,11 @@ void drm_state_dump(struct drm_device *dev, struct drm_printer *p); */ #define for_each_new_plane_in_state(__state, plane, new_plane_state, __i) \ for ((__i) = 0; \ - (__i) < (__state)->dev->mode_config.num_total_plane && \ - ((plane) = (__state)->planes[__i].ptr, \ - (new_plane_state) = (__state)->planes[__i].new_state, 1); \ + (__i) < (__state)->dev->mode_config.num_total_plane; \ (__i)++) \ - for_each_if (plane) + for_each_if ((__state)->planes[__i].ptr && \ + ((plane) = (__state)->planes[__i].ptr, \ + (new_plane_state) = 
(__state)->planes[__i].new_state, 1)) /** * for_each_oldnew_private_obj_in_state - iterate over all private objects in an atomic update @@ -768,8 +767,7 @@ void drm_state_dump(struct drm_device *dev, struct drm_printer *p); ((obj) = (__state)->private_objs[__i].ptr, \ (old_obj_state) = (__state)->private_objs[__i].old_state, \ (new_obj_state) = (__state)->private_objs[__i].new_state, 1); \ - (__i)++) \ - for_each_if (obj) + (__i)++) /** * for_each_old_private_obj_in_state - iterate over all private objects in an atomic update @@ -787,8 +785,7 @@ void drm_state_dump(struct drm_device *dev, struct drm_printer *p); (__i) < (__state)->num_private_objs && \ ((obj) = (__state)->private_objs[__i].ptr, \ (old_obj_state) = (__state)->private_objs[__i].old_state, 1); \ - (__i)++) \ - for_each_if (obj) + (__i)++) /** * for_each_new_private_obj_in_state - iterate over all private objects in an atomic update @@ -806,8 +803,7 @@ void drm_state_dump(struct drm_device *dev, struct drm_printer *p); (__i) < (__state)->num_private_objs && \ ((obj) = (__state)->private_objs[__i].ptr, \ (new_obj_state) = (__state)->private_objs[__i].new_state, 1); \ - (__i)++) \ - for_each_if (obj) + (__i)++) /** * drm_atomic_crtc_needs_modeset - compute combined modeset need diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h index b34904dc8b9b..b4285c40e1e4 100644 --- a/include/drm/drm_connector.h +++ b/include/drm/drm_connector.h @@ -939,10 +939,11 @@ static inline unsigned drm_connector_index(struct drm_connector *connector) * add takes a reference to it. */ static inline struct drm_connector *drm_connector_lookup(struct drm_device *dev, + struct drm_file *file_priv, uint32_t id) { struct drm_mode_object *mo; - mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_CONNECTOR); + mo = drm_mode_object_find(dev, file_priv, id, DRM_MODE_OBJECT_CONNECTOR); return mo ? obj_to_connector(mo) : NULL; } diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index 80c97210eda5..f7fcceef46d9 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -959,10 +959,11 @@ struct drm_crtc *drm_crtc_from_index(struct drm_device *dev, int idx); * userspace interface should be done using &drm_property. */ static inline struct drm_crtc *drm_crtc_find(struct drm_device *dev, - uint32_t id) + struct drm_file *file_priv, + uint32_t id) { struct drm_mode_object *mo; - mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_CRTC); + mo = drm_mode_object_find(dev, file_priv, id, DRM_MODE_OBJECT_CRTC); return mo ? obj_to_crtc(mo) : NULL; } diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h index 71bbaaec836d..412e83a4d3db 100644 --- a/include/drm/drm_drv.h +++ b/include/drm/drm_drv.h @@ -155,7 +155,7 @@ struct drm_driver { * reverse order of the initialization. Similarly to the load * hook, this handler is deprecated and its usage should be * dropped in favor of an open-coded teardown function at the - * driver layer. See drm_dev_unregister() and drm_dev_unref() + * driver layer. See drm_dev_unregister() and drm_dev_put() * for the proper way to remove a &struct drm_device. 
* * The unload() hook is called right after unregistering @@ -324,7 +324,7 @@ struct drm_driver { */ bool (*get_vblank_timestamp) (struct drm_device *dev, unsigned int pipe, int *max_error, - struct timeval *vblank_time, + ktime_t *vblank_time, bool in_vblank_irq); /** @@ -611,7 +611,8 @@ struct drm_device *drm_dev_alloc(struct drm_driver *driver, int drm_dev_register(struct drm_device *dev, unsigned long flags); void drm_dev_unregister(struct drm_device *dev); -void drm_dev_ref(struct drm_device *dev); +void drm_dev_get(struct drm_device *dev); +void drm_dev_put(struct drm_device *dev); void drm_dev_unref(struct drm_device *dev); void drm_put_dev(struct drm_device *dev); void drm_dev_unplug(struct drm_device *dev); diff --git a/include/drm/drm_encoder.h b/include/drm/drm_encoder.h index 8d8245ec0181..86db0da8bdcb 100644 --- a/include/drm/drm_encoder.h +++ b/include/drm/drm_encoder.h @@ -214,11 +214,12 @@ static inline bool drm_encoder_crtc_ok(struct drm_encoder *encoder, * drm_mode_object_find(). */ static inline struct drm_encoder *drm_encoder_find(struct drm_device *dev, + struct drm_file *file_priv, uint32_t id) { struct drm_mode_object *mo; - mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_ENCODER); + mo = drm_mode_object_find(dev, file_priv, id, DRM_MODE_OBJECT_ENCODER); return mo ? obj_to_encoder(mo) : NULL; } diff --git a/include/drm/drm_fb_cma_helper.h b/include/drm/drm_fb_cma_helper.h index a323781afc3f..023f052a5873 100644 --- a/include/drm/drm_fb_cma_helper.h +++ b/include/drm/drm_fb_cma_helper.h @@ -28,16 +28,6 @@ void drm_fbdev_cma_set_suspend(struct drm_fbdev_cma *fbdev_cma, bool state); void drm_fbdev_cma_set_suspend_unlocked(struct drm_fbdev_cma *fbdev_cma, bool state); -void drm_fb_cma_destroy(struct drm_framebuffer *fb); -int drm_fb_cma_create_handle(struct drm_framebuffer *fb, - struct drm_file *file_priv, unsigned int *handle); - -struct drm_framebuffer *drm_fb_cma_create_with_funcs(struct drm_device *dev, - struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd, - const struct drm_framebuffer_funcs *funcs); -struct drm_framebuffer *drm_fb_cma_create(struct drm_device *dev, - struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd); - struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb, unsigned int plane); @@ -45,9 +35,6 @@ dma_addr_t drm_fb_cma_get_gem_addr(struct drm_framebuffer *fb, struct drm_plane_state *state, unsigned int plane); -int drm_fb_cma_prepare_fb(struct drm_plane *plane, - struct drm_plane_state *state); - #ifdef CONFIG_DEBUG_FS struct seq_file; diff --git a/include/drm/drm_framebuffer.h b/include/drm/drm_framebuffer.h index b6996ddb19d6..4c5ee4ae54df 100644 --- a/include/drm/drm_framebuffer.h +++ b/include/drm/drm_framebuffer.h @@ -205,6 +205,7 @@ int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb, const struct drm_framebuffer_funcs *funcs); struct drm_framebuffer *drm_framebuffer_lookup(struct drm_device *dev, + struct drm_file *file_priv, uint32_t id); void drm_framebuffer_remove(struct drm_framebuffer *fb); void drm_framebuffer_cleanup(struct drm_framebuffer *fb); diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h index 1b37368416c8..0b4ac2ebc610 100644 --- a/include/drm/drm_mode_config.h +++ b/include/drm/drm_mode_config.h @@ -430,19 +430,6 @@ struct drm_mode_config { struct list_head encoder_list; /** - * @num_overlay_plane: - * - * Number of overlay planes on this device, excluding primary and cursor - * planes. 
- * - * Track number of overlay planes separately from number of total - * planes. By default we only advertise overlay planes to userspace; if - * userspace sets the "universal plane" capability bit, we'll go ahead - * and expose all planes. This is invariant over the lifetime of a - * device and hence doesn't need any locks. - */ - int num_overlay_plane; - /** * @num_total_plane: * * Number of universal (i.e. with primary/curso) planes on this device. diff --git a/include/drm/drm_mode_object.h b/include/drm/drm_mode_object.h index a767b4a30a6d..b2f920b518e3 100644 --- a/include/drm/drm_mode_object.h +++ b/include/drm/drm_mode_object.h @@ -27,6 +27,7 @@ struct drm_object_properties; struct drm_property; struct drm_device; +struct drm_file; /** * struct drm_mode_object - base structure for modeset objects @@ -113,6 +114,7 @@ struct drm_object_properties { } struct drm_mode_object *drm_mode_object_find(struct drm_device *dev, + struct drm_file *file_priv, uint32_t id, uint32_t type); void drm_mode_object_get(struct drm_mode_object *obj); void drm_mode_object_put(struct drm_mode_object *obj); diff --git a/include/drm/drm_of.h b/include/drm/drm_of.h index 104dd517fdbe..d20ec4e0431d 100644 --- a/include/drm/drm_of.h +++ b/include/drm/drm_of.h @@ -2,6 +2,9 @@ #define __DRM_OF_H__ #include <linux/of_graph.h> +#if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_DRM_PANEL_BRIDGE) +#include <drm/drm_bridge.h> +#endif struct component_master_ops; struct component_match; @@ -67,6 +70,34 @@ static inline int drm_of_find_panel_or_bridge(const struct device_node *np, } #endif +/* + * drm_of_panel_bridge_remove - remove panel bridge + * @np: device tree node containing panel bridge output ports + * + * Remove the panel bridge of a given DT node's port and endpoint number + * + * Returns zero if successful, or one of the standard error codes if it fails. + */ +static inline int drm_of_panel_bridge_remove(const struct device_node *np, + int port, int endpoint) +{ +#if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_DRM_PANEL_BRIDGE) + struct drm_bridge *bridge; + struct device_node *remote; + + remote = of_graph_get_remote_node(np, port, endpoint); + if (!remote) + return -ENODEV; + + bridge = of_drm_find_bridge(remote); + drm_panel_bridge_remove(bridge); + + return 0; +#else + return -EINVAL; +#endif +} + static inline int drm_of_encoder_active_endpoint_id(struct device_node *node, struct drm_encoder *encoder) { diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h index 82a217bd77f0..069c4c8ce360 100644 --- a/include/drm/drm_plane.h +++ b/include/drm/drm_plane.h @@ -597,10 +597,11 @@ int drm_mode_plane_set_obj_prop(struct drm_plane *plane, * drm_mode_object_find(). */ static inline struct drm_plane *drm_plane_find(struct drm_device *dev, + struct drm_file *file_priv, uint32_t id) { struct drm_mode_object *mo; - mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_PLANE); + mo = drm_mode_object_find(dev, file_priv, id, DRM_MODE_OBJECT_PLANE); return mo ? obj_to_plane(mo) : NULL; } diff --git a/include/drm/drm_property.h b/include/drm/drm_property.h index 37355c623e6c..429d8218f740 100644 --- a/include/drm/drm_property.h +++ b/include/drm/drm_property.h @@ -312,10 +312,11 @@ drm_property_unreference_blob(struct drm_property_blob *blob) * This function looks up the property object specified by id and returns it. 
*/ static inline struct drm_property *drm_property_find(struct drm_device *dev, + struct drm_file *file_priv, uint32_t id) { struct drm_mode_object *mo; - mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_PROPERTY); + mo = drm_mode_object_find(dev, file_priv, id, DRM_MODE_OBJECT_PROPERTY); return mo ? obj_to_property(mo) : NULL; } diff --git a/include/drm/drm_syncobj.h b/include/drm/drm_syncobj.h index c00fee539822..43e2f382d2f0 100644 --- a/include/drm/drm_syncobj.h +++ b/include/drm/drm_syncobj.h @@ -136,5 +136,10 @@ int drm_syncobj_find_fence(struct drm_file *file_private, u32 handle, struct dma_fence **fence); void drm_syncobj_free(struct kref *kref); +int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags, + struct dma_fence *fence); +int drm_syncobj_get_handle(struct drm_file *file_private, + struct drm_syncobj *syncobj, u32 *handle); +int drm_syncobj_get_fd(struct drm_syncobj *syncobj, int *p_fd); #endif diff --git a/include/drm/drm_vblank.h b/include/drm/drm_vblank.h index 7fba9efe4951..6a58e2e91a0f 100644 --- a/include/drm/drm_vblank.h +++ b/include/drm/drm_vblank.h @@ -92,7 +92,7 @@ struct drm_vblank_crtc { /** * @time: Vblank timestamp corresponding to @count. */ - struct timeval time; + ktime_t time; /** * @refcount: Number of users/waiters of the vblank interrupt. Only when @@ -154,7 +154,7 @@ struct drm_vblank_crtc { int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs); u32 drm_crtc_vblank_count(struct drm_crtc *crtc); u32 drm_crtc_vblank_count_and_time(struct drm_crtc *crtc, - struct timeval *vblanktime); + ktime_t *vblanktime); void drm_crtc_send_vblank_event(struct drm_crtc *crtc, struct drm_pending_vblank_event *e); void drm_crtc_arm_vblank_event(struct drm_crtc *crtc, @@ -172,7 +172,7 @@ u32 drm_crtc_accurate_vblank_count(struct drm_crtc *crtc); bool drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, unsigned int pipe, int *max_error, - struct timeval *vblank_time, + ktime_t *vblank_time, bool in_vblank_irq); void drm_calc_timestamping_constants(struct drm_crtc *crtc, const struct drm_display_mode *mode); diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h index c4520890f267..2c1e3598effe 100644 --- a/include/drm/ttm/ttm_memory.h +++ b/include/drm/ttm/ttm_memory.h @@ -150,10 +150,9 @@ extern int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory, extern void ttm_mem_global_free(struct ttm_mem_global *glob, uint64_t amount); extern int ttm_mem_global_alloc_page(struct ttm_mem_global *glob, - struct page *page, - bool no_wait, bool interruptible); + struct page *page, uint64_t size); extern void ttm_mem_global_free_page(struct ttm_mem_global *glob, - struct page *page); + struct page *page, uint64_t size); extern size_t ttm_round_pot(size_t size); extern uint64_t ttm_get_kernel_zone_memory_size(struct ttm_mem_global *glob); #endif diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 460294bb0fa5..02fa42d24b52 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -551,6 +551,7 @@ struct request_queue { int node; #ifdef CONFIG_BLK_DEV_IO_TRACE struct blk_trace *blk_trace; + struct mutex blk_trace_mutex; #endif /* * for flush operations diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index f24bfb2b9a2d..6d508767e144 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -3,8 +3,27 @@ #include <linux/types.h> +/* + * CPU-up CPU-down + * + * BP AP BP AP + * + * OFFLINE OFFLINE + * | ^ + * v | + * BRINGUP_CPU->AP_OFFLINE 
BRINGUP_CPU <- AP_IDLE_DEAD (idle thread/play_dead) + * | AP_OFFLINE + * v (IRQ-off) ,---------------^ + * AP_ONLNE | (stop_machine) + * | TEARDOWN_CPU <- AP_ONLINE_IDLE + * | ^ + * v | + * AP_ACTIVE AP_ACTIVE + */ + enum cpuhp_state { - CPUHP_OFFLINE, + CPUHP_INVALID = -1, + CPUHP_OFFLINE = 0, CPUHP_CREATE_THREADS, CPUHP_PERF_PREPARE, CPUHP_PERF_X86_PREPARE, diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h index 171895072435..ca974224d92e 100644 --- a/include/linux/dma-fence.h +++ b/include/linux/dma-fence.h @@ -248,9 +248,12 @@ dma_fence_get_rcu_safe(struct dma_fence * __rcu *fencep) struct dma_fence *fence; fence = rcu_dereference(*fencep); - if (!fence || !dma_fence_get_rcu(fence)) + if (!fence) return NULL; + if (!dma_fence_get_rcu(fence)) + continue; + /* The atomic_inc_not_zero() inside dma_fence_get_rcu() * provides a full memory barrier upon success (such as now). * This is paired with the write barrier from assigning diff --git a/include/linux/iommu.h b/include/linux/iommu.h index a7f2ac689d29..41b8c5757859 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -167,11 +167,11 @@ struct iommu_resv_region { * @map: map a physically contiguous memory region to an iommu domain * @unmap: unmap a physically contiguous memory region from an iommu domain * @map_sg: map a scatter-gather list of physically contiguous memory chunks + * to an iommu domain * @flush_tlb_all: Synchronously flush all hardware TLBs for this domain * @tlb_range_add: Add a given iova range to the flush queue for this domain * @tlb_sync: Flush all queued ranges from the hardware TLBs and empty flush * queue - * to an iommu domain * @iova_to_phys: translate iova to physical address * @add_device: add device to iommu grouping * @remove_device: remove device from iommu grouping diff --git a/include/linux/key.h b/include/linux/key.h index 044114185120..e315e16b6ff8 100644 --- a/include/linux/key.h +++ b/include/linux/key.h @@ -187,6 +187,7 @@ struct key { #define KEY_FLAG_BUILTIN 8 /* set if key is built in to the kernel */ #define KEY_FLAG_ROOT_CAN_INVAL 9 /* set if key can be invalidated by root without permission */ #define KEY_FLAG_KEEP 10 /* set if key should not be removed */ +#define KEY_FLAG_UID_KEYRING 11 /* set if key is a user or user session keyring */ /* the key type and key description string * - the desc is used to match a key against search criteria @@ -243,6 +244,7 @@ extern struct key *key_alloc(struct key_type *type, #define KEY_ALLOC_NOT_IN_QUOTA 0x0002 /* not in quota */ #define KEY_ALLOC_BUILT_IN 0x0004 /* Key is built into kernel */ #define KEY_ALLOC_BYPASS_RESTRICTION 0x0008 /* Override the check on restricted keyrings */ +#define KEY_ALLOC_UID_KEYRING 0x0010 /* allocating a user or user session keyring */ extern void key_revoke(struct key *key); extern void key_invalidate(struct key *key); diff --git a/include/linux/nvme-fc-driver.h b/include/linux/nvme-fc-driver.h index 9c5cb4480806..a726f96010d5 100644 --- a/include/linux/nvme-fc-driver.h +++ b/include/linux/nvme-fc-driver.h @@ -346,11 +346,6 @@ struct nvme_fc_remote_port { * indicating an FC transport Aborted status. * Entrypoint is Mandatory. * - * @defer_rcv: Called by the transport to signal the LLLD that it has - * begun processing of a previously received NVME CMD IU. The LLDD - * is now free to re-use the rcv buffer associated with the - * nvmefc_tgt_fcp_req. - * * @max_hw_queues: indicates the maximum number of hw queues the LLDD * supports for cpu affinitization. * Value is Mandatory. Must be at least 1. 
@@ -806,11 +801,19 @@ struct nvmet_fc_target_port { * outstanding operation (if there was one) to complete, then will * call the fcp_req_release() callback to return the command's * exchange context back to the LLDD. + * Entrypoint is Mandatory. * * @fcp_req_release: Called by the transport to return a nvmefc_tgt_fcp_req * to the LLDD after all operations on the fcp operation are complete. * This may be due to the command completing or upon completion of * abort cleanup. + * Entrypoint is Mandatory. + * + * @defer_rcv: Called by the transport to signal the LLLD that it has + * begun processing of a previously received NVME CMD IU. The LLDD + * is now free to re-use the rcv buffer associated with the + * nvmefc_tgt_fcp_req. + * Entrypoint is Optional. * * @max_hw_queues: indicates the maximum number of hw queues the LLDD * supports for cpu affinitization. diff --git a/include/linux/nvme.h b/include/linux/nvme.h index 87723c86f136..9310ce77d8e1 100644 --- a/include/linux/nvme.h +++ b/include/linux/nvme.h @@ -471,12 +471,14 @@ enum nvme_opcode { * * @NVME_SGL_FMT_ADDRESS: absolute address of the data block * @NVME_SGL_FMT_OFFSET: relative offset of the in-capsule data block + * @NVME_SGL_FMT_TRANSPORT_A: transport defined format, value 0xA * @NVME_SGL_FMT_INVALIDATE: RDMA transport specific remote invalidation * request subtype */ enum { NVME_SGL_FMT_ADDRESS = 0x00, NVME_SGL_FMT_OFFSET = 0x01, + NVME_SGL_FMT_TRANSPORT_A = 0x0A, NVME_SGL_FMT_INVALIDATE = 0x0f, }; @@ -490,12 +492,16 @@ enum { * * For struct nvme_keyed_sgl_desc: * @NVME_KEY_SGL_FMT_DATA_DESC: keyed data block descriptor + * + * Transport-specific SGL types: + * @NVME_TRANSPORT_SGL_DATA_DESC: Transport SGL data dlock descriptor */ enum { NVME_SGL_FMT_DATA_DESC = 0x00, NVME_SGL_FMT_SEG_DESC = 0x02, NVME_SGL_FMT_LAST_SEG_DESC = 0x03, NVME_KEY_SGL_FMT_DATA_DESC = 0x04, + NVME_TRANSPORT_SGL_DATA_DESC = 0x05, }; struct nvme_sgl_desc { @@ -1127,19 +1133,6 @@ enum { NVME_SC_UNWRITTEN_BLOCK = 0x287, NVME_SC_DNR = 0x4000, - - - /* - * FC Transport-specific error status values for NVME commands - * - * Transport-specific status code values must be in the range 0xB0..0xBF - */ - - /* Generic FC failure - catchall */ - NVME_SC_FC_TRANSPORT_ERROR = 0x00B0, - - /* I/O failure due to FC ABTS'd */ - NVME_SC_FC_TRANSPORT_ABORTED = 0x00B1, }; struct nvme_completion { diff --git a/include/linux/pci.h b/include/linux/pci.h index f68c58a93dd0..f4f8ee5a7362 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -1685,6 +1685,8 @@ static inline int pci_get_new_domain_nr(void) { return -ENOSYS; } #define dev_is_pci(d) (false) #define dev_is_pf(d) (false) +static inline bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags) +{ return false; } #endif /* CONFIG_PCI */ /* Include architecture-dependent settings and functions */ diff --git a/include/linux/regmap.h b/include/linux/regmap.h index 978abfbac617..93a4663d7acb 100644 --- a/include/linux/regmap.h +++ b/include/linux/regmap.h @@ -139,6 +139,45 @@ struct reg_sequence { pollret ?: ((cond) ? 0 : -ETIMEDOUT); \ }) +/** + * regmap_field_read_poll_timeout - Poll until a condition is met or timeout + * + * @field: Regmap field to read from + * @val: Unsigned integer variable to read the value into + * @cond: Break condition (usually involving @val) + * @sleep_us: Maximum time to sleep between reads in us (0 + * tight-loops). Should be less than ~20ms since usleep_range + * is used (see Documentation/timers/timers-howto.txt). 
+ * @timeout_us: Timeout in us, 0 means never timeout + * + * Returns 0 on success and -ETIMEDOUT upon a timeout or the regmap_field_read + * error return value in case of a error read. In the two former cases, + * the last read value at @addr is stored in @val. Must not be called + * from atomic context if sleep_us or timeout_us are used. + * + * This is modelled after the readx_poll_timeout macros in linux/iopoll.h. + */ +#define regmap_field_read_poll_timeout(field, val, cond, sleep_us, timeout_us) \ +({ \ + ktime_t timeout = ktime_add_us(ktime_get(), timeout_us); \ + int pollret; \ + might_sleep_if(sleep_us); \ + for (;;) { \ + pollret = regmap_field_read((field), &(val)); \ + if (pollret) \ + break; \ + if (cond) \ + break; \ + if (timeout_us && ktime_compare(ktime_get(), timeout) > 0) { \ + pollret = regmap_field_read((field), &(val)); \ + break; \ + } \ + if (sleep_us) \ + usleep_range((sleep_us >> 2) + 1, sleep_us); \ + } \ + pollret ?: ((cond) ? 0 : -ETIMEDOUT); \ +}) + #ifdef CONFIG_REGMAP enum regmap_endian { diff --git a/include/linux/sched.h b/include/linux/sched.h index 92fb8dd5a9e4..26a7df4e558c 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -65,25 +65,23 @@ struct task_group; */ /* Used in tsk->state: */ -#define TASK_RUNNING 0 -#define TASK_INTERRUPTIBLE 1 -#define TASK_UNINTERRUPTIBLE 2 -#define __TASK_STOPPED 4 -#define __TASK_TRACED 8 +#define TASK_RUNNING 0x0000 +#define TASK_INTERRUPTIBLE 0x0001 +#define TASK_UNINTERRUPTIBLE 0x0002 +#define __TASK_STOPPED 0x0004 +#define __TASK_TRACED 0x0008 /* Used in tsk->exit_state: */ -#define EXIT_DEAD 16 -#define EXIT_ZOMBIE 32 +#define EXIT_DEAD 0x0010 +#define EXIT_ZOMBIE 0x0020 #define EXIT_TRACE (EXIT_ZOMBIE | EXIT_DEAD) /* Used in tsk->state again: */ -#define TASK_DEAD 64 -#define TASK_WAKEKILL 128 -#define TASK_WAKING 256 -#define TASK_PARKED 512 -#define TASK_NOLOAD 1024 -#define TASK_NEW 2048 -#define TASK_STATE_MAX 4096 - -#define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWPNn" +#define TASK_PARKED 0x0040 +#define TASK_DEAD 0x0080 +#define TASK_WAKEKILL 0x0100 +#define TASK_WAKING 0x0200 +#define TASK_NOLOAD 0x0400 +#define TASK_NEW 0x0800 +#define TASK_STATE_MAX 0x1000 /* Convenience macros for the sake of set_current_state: */ #define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE) @@ -99,7 +97,8 @@ struct task_group; /* get_task_state(): */ #define TASK_REPORT (TASK_RUNNING | TASK_INTERRUPTIBLE | \ TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \ - __TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD) + __TASK_TRACED | EXIT_DEAD | EXIT_ZOMBIE | \ + TASK_PARKED) #define task_is_traced(task) ((task->state & __TASK_TRACED) != 0) @@ -1243,17 +1242,34 @@ static inline pid_t task_pgrp_nr(struct task_struct *tsk) return task_pgrp_nr_ns(tsk, &init_pid_ns); } -static inline char task_state_to_char(struct task_struct *task) +#define TASK_REPORT_IDLE (TASK_REPORT + 1) +#define TASK_REPORT_MAX (TASK_REPORT_IDLE << 1) + +static inline unsigned int __get_task_state(struct task_struct *tsk) +{ + unsigned int tsk_state = READ_ONCE(tsk->state); + unsigned int state = (tsk_state | tsk->exit_state) & TASK_REPORT; + + BUILD_BUG_ON_NOT_POWER_OF_2(TASK_REPORT_MAX); + + if (tsk_state == TASK_IDLE) + state = TASK_REPORT_IDLE; + + return fls(state); +} + +static inline char __task_state_to_char(unsigned int state) { - const char stat_nam[] = TASK_STATE_TO_CHAR_STR; - unsigned long state = task->state; + static const char state_char[] = "RSDTtXZPI"; - state = state ? 
__ffs(state) + 1 : 0; + BUILD_BUG_ON(1 + ilog2(TASK_REPORT_MAX) != sizeof(state_char) - 1); - /* Make sure the string lines up properly with the number of task states: */ - BUILD_BUG_ON(sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1); + return state_char[state]; +} - return state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?'; +static inline char task_state_to_char(struct task_struct *tsk) +{ + return __task_state_to_char(__get_task_state(tsk)); } /** diff --git a/include/linux/timer.h b/include/linux/timer.h index e6789b8757d5..6383c528b148 100644 --- a/include/linux/timer.h +++ b/include/linux/timer.h @@ -168,6 +168,20 @@ static inline void init_timer_on_stack_key(struct timer_list *timer, #define setup_pinned_deferrable_timer_on_stack(timer, fn, data) \ __setup_timer_on_stack((timer), (fn), (data), TIMER_DEFERRABLE | TIMER_PINNED) +#define TIMER_DATA_TYPE unsigned long +#define TIMER_FUNC_TYPE void (*)(TIMER_DATA_TYPE) + +static inline void timer_setup(struct timer_list *timer, + void (*callback)(struct timer_list *), + unsigned int flags) +{ + __setup_timer(timer, (TIMER_FUNC_TYPE)callback, + (TIMER_DATA_TYPE)timer, flags); +} + +#define from_timer(var, callback_timer, timer_fieldname) \ + container_of(callback_timer, typeof(*var), timer_fieldname) + /** * timer_pending - is a timer pending? * @timer: the timer in question diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index bdb1279a415b..e8608b2dc844 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -285,7 +285,7 @@ enum ib_tm_cap_flags { IB_TM_CAP_RC = 1 << 0, }; -struct ib_xrq_caps { +struct ib_tm_caps { /* Max size of RNDV header */ u32 max_rndv_hdr_size; /* Max number of entries in tag matching list */ @@ -358,7 +358,7 @@ struct ib_device_attr { struct ib_rss_caps rss_caps; u32 max_wq_type_rq; u32 raw_packet_caps; /* Use ib_raw_packet_caps enum */ - struct ib_xrq_caps xrq_caps; + struct ib_tm_caps tm_caps; }; enum ib_mtu { @@ -1739,7 +1739,7 @@ struct ib_mr { u32 lkey; u32 rkey; u64 iova; - u32 length; + u64 length; unsigned int page_size; bool need_inval; union { diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h index ae1409ffe99a..3c8b7f625670 100644 --- a/include/trace/events/sched.h +++ b/include/trace/events/sched.h @@ -114,7 +114,10 @@ static inline long __trace_sched_switch_state(bool preempt, struct task_struct * * Preemption ignores task state, therefore preempted tasks are always * RUNNING (we will not have dequeued if state != RUNNING). */ - return preempt ? TASK_RUNNING | TASK_STATE_MAX : p->state; + if (preempt) + return TASK_STATE_MAX; + + return __get_task_state(p); } #endif /* CREATE_TRACE_POINTS */ @@ -152,12 +155,14 @@ TRACE_EVENT(sched_switch, TP_printk("prev_comm=%s prev_pid=%d prev_prio=%d prev_state=%s%s ==> next_comm=%s next_pid=%d next_prio=%d", __entry->prev_comm, __entry->prev_pid, __entry->prev_prio, - __entry->prev_state & (TASK_STATE_MAX-1) ? - __print_flags(__entry->prev_state & (TASK_STATE_MAX-1), "|", - { 1, "S"} , { 2, "D" }, { 4, "T" }, { 8, "t" }, - { 16, "Z" }, { 32, "X" }, { 64, "x" }, - { 128, "K" }, { 256, "W" }, { 512, "P" }, - { 1024, "N" }) : "R", + + (__entry->prev_state & (TASK_REPORT_MAX - 1)) ? + __print_flags(__entry->prev_state & (TASK_REPORT_MAX - 1), "|", + { 0x01, "S" }, { 0x02, "D" }, { 0x04, "T" }, + { 0x08, "t" }, { 0x10, "X" }, { 0x20, "Z" }, + { 0x40, "P" }, { 0x80, "I" }) : + "R", + __entry->prev_state & TASK_STATE_MAX ? 
"+" : "", __entry->next_comm, __entry->next_pid, __entry->next_prio) ); diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h index e055776f2f4c..4c6e8c482ee4 100644 --- a/include/uapi/drm/amdgpu_drm.h +++ b/include/uapi/drm/amdgpu_drm.h @@ -52,6 +52,7 @@ extern "C" { #define DRM_AMDGPU_GEM_USERPTR 0x11 #define DRM_AMDGPU_WAIT_FENCES 0x12 #define DRM_AMDGPU_VM 0x13 +#define DRM_AMDGPU_FENCE_TO_HANDLE 0x14 #define DRM_IOCTL_AMDGPU_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_CREATE, union drm_amdgpu_gem_create) #define DRM_IOCTL_AMDGPU_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_MMAP, union drm_amdgpu_gem_mmap) @@ -67,6 +68,7 @@ extern "C" { #define DRM_IOCTL_AMDGPU_GEM_USERPTR DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_USERPTR, struct drm_amdgpu_gem_userptr) #define DRM_IOCTL_AMDGPU_WAIT_FENCES DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_WAIT_FENCES, union drm_amdgpu_wait_fences) #define DRM_IOCTL_AMDGPU_VM DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_VM, union drm_amdgpu_vm) +#define DRM_IOCTL_AMDGPU_FENCE_TO_HANDLE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_FENCE_TO_HANDLE, union drm_amdgpu_fence_to_handle) #define AMDGPU_GEM_DOMAIN_CPU 0x1 #define AMDGPU_GEM_DOMAIN_GTT 0x2 @@ -515,6 +517,20 @@ struct drm_amdgpu_cs_chunk_sem { __u32 handle; }; +#define AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ 0 +#define AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD 1 +#define AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD 2 + +union drm_amdgpu_fence_to_handle { + struct { + struct drm_amdgpu_fence fence; + __u32 what; + } in; + struct { + __u32 handle; + } out; +}; + struct drm_amdgpu_cs_chunk_data { union { struct drm_amdgpu_cs_chunk_ib ib_data; diff --git a/include/uapi/drm/etnaviv_drm.h b/include/uapi/drm/etnaviv_drm.h index 76f6f78a352b..110cc73bf549 100644 --- a/include/uapi/drm/etnaviv_drm.h +++ b/include/uapi/drm/etnaviv_drm.h @@ -150,6 +150,19 @@ struct drm_etnaviv_gem_submit_bo { __u64 presumed; /* in/out, presumed buffer address */ }; +/* performance monitor request (pmr) */ +#define ETNA_PM_PROCESS_PRE 0x0001 +#define ETNA_PM_PROCESS_POST 0x0002 +struct drm_etnaviv_gem_submit_pmr { + __u32 flags; /* in, when to process request (ETNA_PM_PROCESS_x) */ + __u8 domain; /* in, pm domain */ + __u8 pad; + __u16 signal; /* in, pm signal */ + __u32 sequence; /* in, sequence number */ + __u32 read_offset; /* in, offset from read_bo */ + __u32 read_idx; /* in, index of read_bo buffer */ +}; + /* Each cmdstream submit consists of a table of buffers involved, and * one or more cmdstream buffers. This allows for conditional execution * (context-restore), and IB buffers needed for per tile/bin draw cmds. 
@@ -175,6 +188,9 @@ struct drm_etnaviv_gem_submit { __u64 stream; /* in, ptr to cmdstream */ __u32 flags; /* in, mask of ETNA_SUBMIT_x */ __s32 fence_fd; /* in/out, fence fd (see ETNA_SUBMIT_FENCE_FD_x) */ + __u64 pmrs; /* in, ptr to array of submit_pmr's */ + __u32 nr_pmrs; /* in, number of submit_pmr's */ + __u32 pad; }; /* The normal way to synchronize with the GPU is just to CPU_PREP on @@ -210,6 +226,27 @@ struct drm_etnaviv_gem_wait { struct drm_etnaviv_timespec timeout; /* in */ }; +/* + * Performance Monitor (PM): + */ + +struct drm_etnaviv_pm_domain { + __u32 pipe; /* in */ + __u8 iter; /* in/out, select pm domain at index iter */ + __u8 id; /* out, id of domain */ + __u16 nr_signals; /* out, how many signals does this domain provide */ + char name[64]; /* out, name of domain */ +}; + +struct drm_etnaviv_pm_signal { + __u32 pipe; /* in */ + __u8 domain; /* in, pm domain index */ + __u8 pad; + __u16 iter; /* in/out, select pm source at index iter */ + __u16 id; /* out, id of signal */ + char name[64]; /* out, name of domain */ +}; + #define DRM_ETNAVIV_GET_PARAM 0x00 /* placeholder: #define DRM_ETNAVIV_SET_PARAM 0x01 @@ -222,7 +259,9 @@ struct drm_etnaviv_gem_wait { #define DRM_ETNAVIV_WAIT_FENCE 0x07 #define DRM_ETNAVIV_GEM_USERPTR 0x08 #define DRM_ETNAVIV_GEM_WAIT 0x09 -#define DRM_ETNAVIV_NUM_IOCTLS 0x0a +#define DRM_ETNAVIV_PM_QUERY_DOM 0x0a +#define DRM_ETNAVIV_PM_QUERY_SIG 0x0b +#define DRM_ETNAVIV_NUM_IOCTLS 0x0c #define DRM_IOCTL_ETNAVIV_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_GET_PARAM, struct drm_etnaviv_param) #define DRM_IOCTL_ETNAVIV_GEM_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_NEW, struct drm_etnaviv_gem_new) @@ -233,6 +272,8 @@ struct drm_etnaviv_gem_wait { #define DRM_IOCTL_ETNAVIV_WAIT_FENCE DRM_IOW(DRM_COMMAND_BASE + DRM_ETNAVIV_WAIT_FENCE, struct drm_etnaviv_wait_fence) #define DRM_IOCTL_ETNAVIV_GEM_USERPTR DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_USERPTR, struct drm_etnaviv_gem_userptr) #define DRM_IOCTL_ETNAVIV_GEM_WAIT DRM_IOW(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_WAIT, struct drm_etnaviv_gem_wait) +#define DRM_IOCTL_ETNAVIV_PM_QUERY_DOM DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_PM_QUERY_DOM, struct drm_etnaviv_pm_domain) +#define DRM_IOCTL_ETNAVIV_PM_QUERY_SIG DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_PM_QUERY_SIG, struct drm_etnaviv_pm_signal) #if defined(__cplusplus) } diff --git a/include/uapi/rdma/ib_user_verbs.h b/include/uapi/rdma/ib_user_verbs.h index 9a0b6479fe0c..d4e0b53bfc75 100644 --- a/include/uapi/rdma/ib_user_verbs.h +++ b/include/uapi/rdma/ib_user_verbs.h @@ -261,7 +261,7 @@ struct ib_uverbs_ex_query_device_resp { struct ib_uverbs_rss_caps rss_caps; __u32 max_wq_type_rq; __u32 raw_packet_caps; - struct ib_uverbs_tm_caps xrq_caps; + struct ib_uverbs_tm_caps tm_caps; }; struct ib_uverbs_query_port { |
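
A change that recurs throughout the drm headers above is that the modeset object lookup helpers (drm_mode_object_find(), drm_crtc_find(), drm_connector_lookup(), drm_encoder_find(), drm_framebuffer_lookup(), drm_plane_find(), drm_property_find()) now also take the struct drm_file on whose behalf the lookup is performed. A minimal sketch of the updated calling convention, as seen from a driver ioctl handler, is given below; the handler and its argument structure are hypothetical and only show where the new file_priv argument comes from.

#include <linux/errno.h>
#include <linux/types.h>
#include <drm/drm_crtc.h>
#include <drm/drm_file.h>

/* Hypothetical ioctl payload carrying an object ID from userspace. */
struct my_ioctl_args {
	u32 crtc_id;
};

static int my_driver_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct my_ioctl_args *args = data;
	struct drm_crtc *crtc;

	/* Previously drm_crtc_find(dev, args->crtc_id); the lookup now also
	 * carries the drm_file doing the lookup. */
	crtc = drm_crtc_find(dev, file_priv, args->crtc_id);
	if (!crtc)
		return -ENOENT;

	/* ... operate on crtc ... */
	return 0;
}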