From 52791eeec1d9f4a7e7fe08aaba0b1553149d93bc Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Christian=20K=C3=B6nig?=
Date: Sun, 11 Aug 2019 10:06:32 +0200
Subject: dma-buf: rename reservation_object to dma_resv
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Be more consistent with the naming of the other DMA-buf objects.

Signed-off-by: Christian König
Reviewed-by: Chris Wilson
Link: https://patchwork.freedesktop.org/patch/323401/
---
 include/drm/drmP.h              |   2 +-
 include/drm/drm_gem.h           |   8 +-
 include/drm/ttm/ttm_bo_api.h    |  12 +-
 include/drm/ttm/ttm_bo_driver.h |  14 +-
 include/linux/dma-buf.h         |   4 +-
 include/linux/dma-fence.h       |   4 +-
 include/linux/dma-resv.h        | 312 +++++++++++++++++++++++++++++++++++++
 include/linux/reservation.h     | 332 ----------------------------------------
 8 files changed, 334 insertions(+), 354 deletions(-)
 create mode 100644 include/linux/dma-resv.h
 delete mode 100644 include/linux/reservation.h
(limited to 'include')

diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 94aae87b1138..037b1f7a87a5 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -87,7 +87,7 @@ struct module; struct device_node; struct videomode; -struct reservation_object; +struct dma_resv; struct dma_buf_attachment; struct pci_dev;

diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h index ae693c0666cd..6aaba14f5972 100644 --- a/include/drm/drm_gem.h +++ b/include/drm/drm_gem.h @@ -35,7 +35,7 @@ */ #include -#include +#include #include @@ -276,7 +276,7 @@ struct drm_gem_object { * * Normally (@resv == &@_resv) except for imported GEM objects. */ - struct reservation_object *resv; + struct dma_resv *resv; /** * @_resv: @@ -285,7 +285,7 @@ * * This is unused for imported GEM objects. */ - struct reservation_object _resv; + struct dma_resv _resv; /** * @funcs: @@ -390,7 +390,7 @@ void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages, int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles, int count, struct drm_gem_object ***objs_out); struct drm_gem_object *drm_gem_object_lookup(struct drm_file *filp, u32 handle); -long drm_gem_reservation_object_wait(struct drm_file *filep, u32 handle, +long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle, bool wait_all, unsigned long timeout); int drm_gem_lock_reservations(struct drm_gem_object **objs, int count, struct ww_acquire_ctx *acquire_ctx);

diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h index 65ef5376de59..43c4929a2171 100644 --- a/include/drm/ttm/ttm_bo_api.h +++ b/include/drm/ttm/ttm_bo_api.h @@ -40,7 +40,7 @@ #include #include #include -#include +#include struct ttm_bo_global; @@ -273,7 +273,7 @@ struct ttm_bo_kmap_obj { struct ttm_operation_ctx { bool interruptible; bool no_wait_gpu; - struct reservation_object *resv; + struct dma_resv *resv; uint64_t bytes_moved; uint32_t flags; }; @@ -493,7 +493,7 @@ size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev, * @page_alignment: Data alignment in pages. * @ctx: TTM operation context for memory allocation. * @acc_size: Accounted size for this object. - * @resv: Pointer to a reservation_object, or NULL to let ttm allocate one. + * @resv: Pointer to a dma_resv, or NULL to let ttm allocate one. * @destroy: Destroy function. Use NULL for kfree(). * * This function initializes a pre-allocated struct ttm_buffer_object.
@@ -526,7 +526,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev, struct ttm_operation_ctx *ctx, size_t acc_size, struct sg_table *sg, - struct reservation_object *resv, + struct dma_resv *resv, void (*destroy) (struct ttm_buffer_object *)); /** @@ -545,7 +545,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev, * point to the shmem object backing a GEM object if TTM is used to back a * GEM user interface. * @acc_size: Accounted size for this object. - * @resv: Pointer to a reservation_object, or NULL to let ttm allocate one. + * @resv: Pointer to a dma_resv, or NULL to let ttm allocate one. * @destroy: Destroy function. Use NULL for kfree(). * * This function initializes a pre-allocated struct ttm_buffer_object. @@ -570,7 +570,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev, struct ttm_buffer_object *bo, unsigned long size, enum ttm_bo_type type, struct ttm_placement *placement, uint32_t page_alignment, bool interrubtible, size_t acc_size, - struct sg_table *sg, struct reservation_object *resv, + struct sg_table *sg, struct dma_resv *resv, void (*destroy) (struct ttm_buffer_object *)); /** diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index 3f1935c19a66..e88e00c6cbf2 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -35,7 +35,7 @@ #include #include #include -#include +#include #include "ttm_bo_api.h" #include "ttm_memory.h" @@ -654,14 +654,14 @@ static inline int __ttm_bo_reserve(struct ttm_buffer_object *bo, if (WARN_ON(ticket)) return -EBUSY; - success = reservation_object_trylock(bo->base.resv); + success = dma_resv_trylock(bo->base.resv); return success ? 0 : -EBUSY; } if (interruptible) - ret = reservation_object_lock_interruptible(bo->base.resv, ticket); + ret = dma_resv_lock_interruptible(bo->base.resv, ticket); else - ret = reservation_object_lock(bo->base.resv, ticket); + ret = dma_resv_lock(bo->base.resv, ticket); if (ret == -EINTR) return -ERESTARTSYS; return ret; @@ -745,10 +745,10 @@ static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo, WARN_ON(!kref_read(&bo->kref)); if (interruptible) - ret = reservation_object_lock_slow_interruptible(bo->base.resv, + ret = dma_resv_lock_slow_interruptible(bo->base.resv, ticket); else - reservation_object_lock_slow(bo->base.resv, ticket); + dma_resv_lock_slow(bo->base.resv, ticket); if (likely(ret == 0)) ttm_bo_del_sub_from_lru(bo); @@ -773,7 +773,7 @@ static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo) else ttm_bo_move_to_lru_tail(bo, NULL); spin_unlock(&bo->bdev->glob->lru_lock); - reservation_object_unlock(bo->base.resv); + dma_resv_unlock(bo->base.resv); } /* diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h index bae060fae862..ec212cb27fdc 100644 --- a/include/linux/dma-buf.h +++ b/include/linux/dma-buf.h @@ -306,7 +306,7 @@ struct dma_buf { struct module *owner; struct list_head list_node; void *priv; - struct reservation_object *resv; + struct dma_resv *resv; /* poll support */ wait_queue_head_t poll; @@ -365,7 +365,7 @@ struct dma_buf_export_info { const struct dma_buf_ops *ops; size_t size; int flags; - struct reservation_object *resv; + struct dma_resv *resv; void *priv; }; diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h index bea1d05cf51e..404aa748eda6 100644 --- a/include/linux/dma-fence.h +++ b/include/linux/dma-fence.h @@ -279,7 +279,7 @@ static inline struct dma_fence *dma_fence_get(struct dma_fence *fence) } /** - * dma_fence_get_rcu - get a fence from a reservation_object_list with 
+ * dma_fence_get_rcu - get a fence from a dma_resv_list with * rcu read lock * @fence: fence to increase refcount of * @@ -303,7 +303,7 @@ static inline struct dma_fence *dma_fence_get_rcu(struct dma_fence *fence) * so long as the caller is using RCU on the pointer to the fence. * * An alternative mechanism is to employ a seqlock to protect a bunch of - * fences, such as used by struct reservation_object. When using a seqlock, + * fences, such as used by struct dma_resv. When using a seqlock, * the seqlock must be taken before and checked after a reference to the * fence is acquired (as shown here). * diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h new file mode 100644 index 000000000000..38f2802afabb --- /dev/null +++ b/include/linux/dma-resv.h @@ -0,0 +1,312 @@ +/* + * Header file for reservations for dma-buf and ttm + * + * Copyright(C) 2011 Linaro Limited. All rights reserved. + * Copyright (C) 2012-2013 Canonical Ltd + * Copyright (C) 2012 Texas Instruments + * + * Authors: + * Rob Clark + * Maarten Lankhorst + * Thomas Hellstrom + * + * Based on bo.c which bears the following copyright notice, + * but is dual licensed: + * + * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ +#ifndef _LINUX_RESERVATION_H +#define _LINUX_RESERVATION_H + +#include +#include +#include +#include +#include + +extern struct ww_class reservation_ww_class; + +/** + * struct dma_resv_list - a list of shared fences + * @rcu: for internal use + * @shared_count: table of shared fences + * @shared_max: for growing shared fence table + * @shared: shared fence table + */ +struct dma_resv_list { + struct rcu_head rcu; + u32 shared_count, shared_max; + struct dma_fence __rcu *shared[]; +}; + +/** + * struct dma_resv - a reservation object manages fences for a buffer + * @lock: update side lock + * @seq: sequence count for managing RCU read-side synchronization + * @fence_excl: the exclusive fence, if there is one currently + * @fence: list of current shared fences + */ +struct dma_resv { + struct ww_mutex lock; + + struct dma_fence __rcu *fence_excl; + struct dma_resv_list __rcu *fence; +}; + +#define dma_resv_held(obj) lockdep_is_held(&(obj)->lock.base) +#define dma_resv_assert_held(obj) lockdep_assert_held(&(obj)->lock.base) + +/** + * dma_resv_get_excl - get the reservation object's + * exclusive fence, with update-side lock held + * @obj: the reservation object + * + * Returns the exclusive fence (if any). Does NOT take a + * reference. Writers must hold obj->lock, readers may only + * hold a RCU read side lock. + * + * RETURNS + * The exclusive fence or NULL + */ +static inline struct dma_fence *dma_resv_get_excl(struct dma_resv *obj) +{ + return rcu_dereference_protected(obj->fence_excl, + dma_resv_held(obj)); +} + +/** + * dma_resv_get_list - get the reservation object's + * shared fence list, with update-side lock held + * @obj: the reservation object + * + * Returns the shared fence list. Does NOT take references to + * the fence. The obj->lock must be held. + */ +static inline struct dma_resv_list *dma_resv_get_list(struct dma_resv *obj) +{ + return rcu_dereference_protected(obj->fence, + dma_resv_held(obj)); +} + +/** + * dma_resv_fences - read consistent fence pointers + * @obj: reservation object where we get the fences from + * @excl: pointer for the exclusive fence + * @list: pointer for the shared fence list + * + * Make sure we have a consisten exclusive fence and shared fence list. + * Must be called with rcu read side lock held. + */ +static inline void dma_resv_fences(struct dma_resv *obj, + struct dma_fence **excl, + struct dma_resv_list **list, + u32 *shared_count) +{ + do { + *excl = rcu_dereference(obj->fence_excl); + *list = rcu_dereference(obj->fence); + *shared_count = *list ? (*list)->shared_count : 0; + smp_rmb(); /* See dma_resv_add_excl_fence */ + } while (rcu_access_pointer(obj->fence_excl) != *excl); +} + +/** + * dma_resv_get_excl_rcu - get the reservation object's + * exclusive fence, without lock held. + * @obj: the reservation object + * + * If there is an exclusive fence, this atomically increments it's + * reference count and returns it. + * + * RETURNS + * The exclusive fence or NULL if none + */ +static inline struct dma_fence *dma_resv_get_excl_rcu(struct dma_resv *obj) +{ + struct dma_fence *fence; + + if (!rcu_access_pointer(obj->fence_excl)) + return NULL; + + rcu_read_lock(); + fence = dma_fence_get_rcu_safe(&obj->fence_excl); + rcu_read_unlock(); + + return fence; +} + +/** + * dma_resv_lock - lock the reservation object + * @obj: the reservation object + * @ctx: the locking context + * + * Locks the reservation object for exclusive access and modification. 
Note, + * that the lock is only against other writers, readers will run concurrently + * with a writer under RCU. The seqlock is used to notify readers if they + * overlap with a writer. + * + * As the reservation object may be locked by multiple parties in an + * undefined order, a #ww_acquire_ctx is passed to unwind if a cycle + * is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation + * object may be locked by itself by passing NULL as @ctx. + */ +static inline int dma_resv_lock(struct dma_resv *obj, + struct ww_acquire_ctx *ctx) +{ + return ww_mutex_lock(&obj->lock, ctx); +} + +/** + * dma_resv_lock_interruptible - lock the reservation object + * @obj: the reservation object + * @ctx: the locking context + * + * Locks the reservation object interruptible for exclusive access and + * modification. Note, that the lock is only against other writers, readers + * will run concurrently with a writer under RCU. The seqlock is used to + * notify readers if they overlap with a writer. + * + * As the reservation object may be locked by multiple parties in an + * undefined order, a #ww_acquire_ctx is passed to unwind if a cycle + * is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation + * object may be locked by itself by passing NULL as @ctx. + */ +static inline int dma_resv_lock_interruptible(struct dma_resv *obj, + struct ww_acquire_ctx *ctx) +{ + return ww_mutex_lock_interruptible(&obj->lock, ctx); +} + +/** + * dma_resv_lock_slow - slowpath lock the reservation object + * @obj: the reservation object + * @ctx: the locking context + * + * Acquires the reservation object after a die case. This function + * will sleep until the lock becomes available. See dma_resv_lock() as + * well. + */ +static inline void dma_resv_lock_slow(struct dma_resv *obj, + struct ww_acquire_ctx *ctx) +{ + ww_mutex_lock_slow(&obj->lock, ctx); +} + +/** + * dma_resv_lock_slow_interruptible - slowpath lock the reservation + * object, interruptible + * @obj: the reservation object + * @ctx: the locking context + * + * Acquires the reservation object interruptible after a die case. This function + * will sleep until the lock becomes available. See + * dma_resv_lock_interruptible() as well. + */ +static inline int dma_resv_lock_slow_interruptible(struct dma_resv *obj, + struct ww_acquire_ctx *ctx) +{ + return ww_mutex_lock_slow_interruptible(&obj->lock, ctx); +} + +/** + * dma_resv_trylock - trylock the reservation object + * @obj: the reservation object + * + * Tries to lock the reservation object for exclusive access and modification. + * Note, that the lock is only against other writers, readers will run + * concurrently with a writer under RCU. The seqlock is used to notify readers + * if they overlap with a writer. + * + * Also note that since no context is provided, no deadlock protection is + * possible. + * + * Returns true if the lock was acquired, false otherwise. + */ +static inline bool __must_check dma_resv_trylock(struct dma_resv *obj) +{ + return ww_mutex_trylock(&obj->lock); +} + +/** + * dma_resv_is_locked - is the reservation object locked + * @obj: the reservation object + * + * Returns true if the mutex is locked, false if unlocked. 
+ */ +static inline bool dma_resv_is_locked(struct dma_resv *obj) +{ + return ww_mutex_is_locked(&obj->lock); +} + +/** + * dma_resv_locking_ctx - returns the context used to lock the object + * @obj: the reservation object + * + * Returns the context used to lock a reservation object or NULL if no context + * was used or the object is not locked at all. + */ +static inline struct ww_acquire_ctx *dma_resv_locking_ctx(struct dma_resv *obj) +{ + return READ_ONCE(obj->lock.ctx); +} + +/** + * dma_resv_unlock - unlock the reservation object + * @obj: the reservation object + * + * Unlocks the reservation object following exclusive access. + */ +static inline void dma_resv_unlock(struct dma_resv *obj) +{ +#ifdef CONFIG_DEBUG_MUTEXES + /* Test shared fence slot reservation */ + if (rcu_access_pointer(obj->fence)) { + struct dma_resv_list *fence = dma_resv_get_list(obj); + + fence->shared_max = fence->shared_count; + } +#endif + ww_mutex_unlock(&obj->lock); +} + +void dma_resv_init(struct dma_resv *obj); +void dma_resv_fini(struct dma_resv *obj); +int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences); +void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence); + +void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence); + +int dma_resv_get_fences_rcu(struct dma_resv *obj, + struct dma_fence **pfence_excl, + unsigned *pshared_count, + struct dma_fence ***pshared); + +int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src); + +long dma_resv_wait_timeout_rcu(struct dma_resv *obj, bool wait_all, bool intr, + unsigned long timeout); + +bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all); + +#endif /* _LINUX_RESERVATION_H */ diff --git a/include/linux/reservation.h b/include/linux/reservation.h deleted file mode 100644 index acddefea694f..000000000000 --- a/include/linux/reservation.h +++ /dev/null @@ -1,332 +0,0 @@ -/* - * Header file for reservations for dma-buf and ttm - * - * Copyright(C) 2011 Linaro Limited. All rights reserved. - * Copyright (C) 2012-2013 Canonical Ltd - * Copyright (C) 2012 Texas Instruments - * - * Authors: - * Rob Clark - * Maarten Lankhorst - * Thomas Hellstrom - * - * Based on bo.c which bears the following copyright notice, - * but is dual licensed: - * - * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA - * All Rights Reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sub license, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial portions - * of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE - * USE OR OTHER DEALINGS IN THE SOFTWARE. 
- */ -#ifndef _LINUX_RESERVATION_H -#define _LINUX_RESERVATION_H - -#include -#include -#include -#include -#include - -extern struct ww_class reservation_ww_class; - -/** - * struct reservation_object_list - a list of shared fences - * @rcu: for internal use - * @shared_count: table of shared fences - * @shared_max: for growing shared fence table - * @shared: shared fence table - */ -struct reservation_object_list { - struct rcu_head rcu; - u32 shared_count, shared_max; - struct dma_fence __rcu *shared[]; -}; - -/** - * struct reservation_object - a reservation object manages fences for a buffer - * @lock: update side lock - * @seq: sequence count for managing RCU read-side synchronization - * @fence_excl: the exclusive fence, if there is one currently - * @fence: list of current shared fences - */ -struct reservation_object { - struct ww_mutex lock; - - struct dma_fence __rcu *fence_excl; - struct reservation_object_list __rcu *fence; -}; - -#define reservation_object_held(obj) lockdep_is_held(&(obj)->lock.base) -#define reservation_object_assert_held(obj) \ - lockdep_assert_held(&(obj)->lock.base) - -/** - * reservation_object_get_excl - get the reservation object's - * exclusive fence, with update-side lock held - * @obj: the reservation object - * - * Returns the exclusive fence (if any). Does NOT take a - * reference. Writers must hold obj->lock, readers may only - * hold a RCU read side lock. - * - * RETURNS - * The exclusive fence or NULL - */ -static inline struct dma_fence * -reservation_object_get_excl(struct reservation_object *obj) -{ - return rcu_dereference_protected(obj->fence_excl, - reservation_object_held(obj)); -} - -/** - * reservation_object_get_list - get the reservation object's - * shared fence list, with update-side lock held - * @obj: the reservation object - * - * Returns the shared fence list. Does NOT take references to - * the fence. The obj->lock must be held. - */ -static inline struct reservation_object_list * -reservation_object_get_list(struct reservation_object *obj) -{ - return rcu_dereference_protected(obj->fence, - reservation_object_held(obj)); -} - -/** - * reservation_object_fences - read consistent fence pointers - * @obj: reservation object where we get the fences from - * @excl: pointer for the exclusive fence - * @list: pointer for the shared fence list - * - * Make sure we have a consisten exclusive fence and shared fence list. - * Must be called with rcu read side lock held. - */ -static inline void -reservation_object_fences(struct reservation_object *obj, - struct dma_fence **excl, - struct reservation_object_list **list, - u32 *shared_count) -{ - do { - *excl = rcu_dereference(obj->fence_excl); - *list = rcu_dereference(obj->fence); - *shared_count = *list ? (*list)->shared_count : 0; - smp_rmb(); /* See reservation_object_add_excl_fence */ - } while (rcu_access_pointer(obj->fence_excl) != *excl); -} - -/** - * reservation_object_get_excl_rcu - get the reservation object's - * exclusive fence, without lock held. - * @obj: the reservation object - * - * If there is an exclusive fence, this atomically increments it's - * reference count and returns it. 
- * - * RETURNS - * The exclusive fence or NULL if none - */ -static inline struct dma_fence * -reservation_object_get_excl_rcu(struct reservation_object *obj) -{ - struct dma_fence *fence; - - if (!rcu_access_pointer(obj->fence_excl)) - return NULL; - - rcu_read_lock(); - fence = dma_fence_get_rcu_safe(&obj->fence_excl); - rcu_read_unlock(); - - return fence; -} - -/** - * reservation_object_lock - lock the reservation object - * @obj: the reservation object - * @ctx: the locking context - * - * Locks the reservation object for exclusive access and modification. Note, - * that the lock is only against other writers, readers will run concurrently - * with a writer under RCU. The seqlock is used to notify readers if they - * overlap with a writer. - * - * As the reservation object may be locked by multiple parties in an - * undefined order, a #ww_acquire_ctx is passed to unwind if a cycle - * is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation - * object may be locked by itself by passing NULL as @ctx. - */ -static inline int -reservation_object_lock(struct reservation_object *obj, - struct ww_acquire_ctx *ctx) -{ - return ww_mutex_lock(&obj->lock, ctx); -} - -/** - * reservation_object_lock_interruptible - lock the reservation object - * @obj: the reservation object - * @ctx: the locking context - * - * Locks the reservation object interruptible for exclusive access and - * modification. Note, that the lock is only against other writers, readers - * will run concurrently with a writer under RCU. The seqlock is used to - * notify readers if they overlap with a writer. - * - * As the reservation object may be locked by multiple parties in an - * undefined order, a #ww_acquire_ctx is passed to unwind if a cycle - * is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation - * object may be locked by itself by passing NULL as @ctx. - */ -static inline int -reservation_object_lock_interruptible(struct reservation_object *obj, - struct ww_acquire_ctx *ctx) -{ - return ww_mutex_lock_interruptible(&obj->lock, ctx); -} - -/** - * reservation_object_lock_slow - slowpath lock the reservation object - * @obj: the reservation object - * @ctx: the locking context - * - * Acquires the reservation object after a die case. This function - * will sleep until the lock becomes available. See reservation_object_lock() as - * well. - */ -static inline void -reservation_object_lock_slow(struct reservation_object *obj, - struct ww_acquire_ctx *ctx) -{ - ww_mutex_lock_slow(&obj->lock, ctx); -} - -/** - * reservation_object_lock_slow_interruptible - slowpath lock the reservation - * object, interruptible - * @obj: the reservation object - * @ctx: the locking context - * - * Acquires the reservation object interruptible after a die case. This function - * will sleep until the lock becomes available. See - * reservation_object_lock_interruptible() as well. - */ -static inline int -reservation_object_lock_slow_interruptible(struct reservation_object *obj, - struct ww_acquire_ctx *ctx) -{ - return ww_mutex_lock_slow_interruptible(&obj->lock, ctx); -} - -/** - * reservation_object_trylock - trylock the reservation object - * @obj: the reservation object - * - * Tries to lock the reservation object for exclusive access and modification. - * Note, that the lock is only against other writers, readers will run - * concurrently with a writer under RCU. The seqlock is used to notify readers - * if they overlap with a writer. 
- * - * Also note that since no context is provided, no deadlock protection is - * possible. - * - * Returns true if the lock was acquired, false otherwise. - */ -static inline bool __must_check -reservation_object_trylock(struct reservation_object *obj) -{ - return ww_mutex_trylock(&obj->lock); -} - -/** - * reservation_object_is_locked - is the reservation object locked - * @obj: the reservation object - * - * Returns true if the mutex is locked, false if unlocked. - */ -static inline bool -reservation_object_is_locked(struct reservation_object *obj) -{ - return ww_mutex_is_locked(&obj->lock); -} - -/** - * reservation_object_locking_ctx - returns the context used to lock the object - * @obj: the reservation object - * - * Returns the context used to lock a reservation object or NULL if no context - * was used or the object is not locked at all. - */ -static inline struct ww_acquire_ctx * -reservation_object_locking_ctx(struct reservation_object *obj) -{ - return READ_ONCE(obj->lock.ctx); -} - -/** - * reservation_object_unlock - unlock the reservation object - * @obj: the reservation object - * - * Unlocks the reservation object following exclusive access. - */ -static inline void -reservation_object_unlock(struct reservation_object *obj) -{ -#ifdef CONFIG_DEBUG_MUTEXES - /* Test shared fence slot reservation */ - if (rcu_access_pointer(obj->fence)) { - struct reservation_object_list *fence = - reservation_object_get_list(obj); - - fence->shared_max = fence->shared_count; - } -#endif - ww_mutex_unlock(&obj->lock); -} - -void reservation_object_init(struct reservation_object *obj); -void reservation_object_fini(struct reservation_object *obj); -int reservation_object_reserve_shared(struct reservation_object *obj, - unsigned int num_fences); -void reservation_object_add_shared_fence(struct reservation_object *obj, - struct dma_fence *fence); - -void reservation_object_add_excl_fence(struct reservation_object *obj, - struct dma_fence *fence); - -int reservation_object_get_fences_rcu(struct reservation_object *obj, - struct dma_fence **pfence_excl, - unsigned *pshared_count, - struct dma_fence ***pshared); - -int reservation_object_copy_fences(struct reservation_object *dst, - struct reservation_object *src); - -long reservation_object_wait_timeout_rcu(struct reservation_object *obj, - bool wait_all, bool intr, - unsigned long timeout); - -bool reservation_object_test_signaled_rcu(struct reservation_object *obj, - bool test_all); - -#endif /* _LINUX_RESERVATION_H */ -- cgit v1.2.3
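
Editorial note: the sketches below are not part of the patch; they only illustrate how the renamed dma_resv API declared above is meant to be used. The example_* function names and the surrounding driver context are hypothetical, and error handling is trimmed.

Several reservation objects may be locked in an undefined order, so the locks go through a ww_acquire_ctx and back off with the slowpath when a cycle is detected, as described in the dma_resv_lock() comment. A minimal sketch for two objects:

#include <linux/dma-resv.h>

static int example_lock_pair(struct dma_resv *a, struct dma_resv *b)
{
        struct ww_acquire_ctx ctx;
        int ret;

        ww_acquire_init(&ctx, &reservation_ww_class);

        ret = dma_resv_lock(a, &ctx);
        if (ret)
                goto out_fini;

        ret = dma_resv_lock(b, &ctx);
        if (ret == -EDEADLK) {
                /*
                 * Cycle with another task detected: drop what we hold, sleep
                 * on the contended object via the slowpath, then take the
                 * remaining lock again.  A complete implementation would
                 * loop until every lock is held.
                 */
                dma_resv_unlock(a);
                dma_resv_lock_slow(b, &ctx);
                ret = dma_resv_lock(a, &ctx);
                if (ret) {
                        dma_resv_unlock(b);
                        goto out_fini;
                }
        } else if (ret) {
                dma_resv_unlock(a);
                goto out_fini;
        }

        ww_acquire_done(&ctx);

        /* ... both reservation objects are locked here ... */

        dma_resv_unlock(b);
        dma_resv_unlock(a);
out_fini:
        ww_acquire_fini(&ctx);
        return ret;
}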
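
While the object is locked, a new fence is published either as the exclusive fence or as one of the shared fences; a shared slot has to be reserved before the shared fence is added, matching dma_resv_reserve_shared() above. The read/write policy and the fence come from a hypothetical caller:

static int example_publish_fence(struct dma_resv *resv,
                                 struct dma_fence *fence, bool is_write)
{
        int ret = 0;

        dma_resv_assert_held(resv);     /* caller used dma_resv_lock() */

        if (is_write) {
                dma_resv_add_excl_fence(resv, fence);
        } else {
                ret = dma_resv_reserve_shared(resv, 1);
                if (!ret)
                        dma_resv_add_shared_fence(resv, fence);
        }
        return ret;
}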
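
Readers that only need to know whether a buffer is idle, wait for it, or grab the exclusive fence can use the RCU-based entry points without taking the ww_mutex at all. MAX_SCHEDULE_TIMEOUT comes from linux/sched.h; "bo_resv" is hypothetical:

static bool example_is_busy(struct dma_resv *bo_resv)
{
        /* test_all = true also considers the shared fences */
        return !dma_resv_test_signaled_rcu(bo_resv, true);
}

static long example_wait_idle(struct dma_resv *bo_resv)
{
        /* wait_all = true, interruptible wait, no timeout cap */
        return dma_resv_wait_timeout_rcu(bo_resv, true, true,
                                         MAX_SCHEDULE_TIMEOUT);
}

static struct dma_fence *example_get_excl(struct dma_resv *bo_resv)
{
        /* returns a referenced exclusive fence or NULL, no lock needed */
        return dma_resv_get_excl_rcu(bo_resv);
}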
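
For callers that walk the fences themselves, dma_resv_fences() provides a consistent snapshot of the exclusive fence and the shared list. The returned pointers are only stable inside the RCU read-side section, so a reference has to be taken before a fence is used beyond a quick check. A rough sketch that counts unsignaled shared fences, using dma_fence_get_rcu()/dma_fence_is_signaled() from linux/dma-fence.h:

static unsigned int example_count_busy(struct dma_resv *bo_resv)
{
        struct dma_resv_list *list;
        struct dma_fence *excl;
        u32 shared_count, i;
        unsigned int busy = 0;

        rcu_read_lock();
        dma_resv_fences(bo_resv, &excl, &list, &shared_count);
        for (i = 0; i < shared_count; ++i) {
                /* take a reference before looking at the fence in detail */
                struct dma_fence *fence =
                        dma_fence_get_rcu(rcu_dereference(list->shared[i]));

                if (fence) {
                        if (!dma_fence_is_signaled(fence))
                                busy++;
                        dma_fence_put(fence);
                }
        }
        rcu_read_unlock();

        /* the exclusive fence can be handled via dma_resv_get_excl_rcu() */
        return busy;
}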