author    | Chris Wilson <chris@chris-wilson.co.uk>  | 2016-10-25 13:00:45 +0100
committer | Daniel Vetter <daniel.vetter@ffwll.ch>   | 2016-10-25 14:40:39 +0200
commit    | f54d1867005c3323f5d8ad83eed823e84226c429 (patch)
tree      | 026c3f57bc546d3a0205389d0f8e0d02ce8a76ac /drivers/dma-buf
parent    | 0fc4f78f44e6c6148cee32456f0d0023ec1c1fd8 (diff)
download  | linux-f54d1867005c3323f5d8ad83eed823e84226c429.tar.gz
          | linux-f54d1867005c3323f5d8ad83eed823e84226c429.tar.bz2
          | linux-f54d1867005c3323f5d8ad83eed823e84226c429.zip
dma-buf: Rename struct fence to dma_fence
I plan to usurp the short name of struct fence for a core kernel struct,
and so I need to rename the specialised fence/timeline for DMA
operations to make room.
A consensus was reached in
https://lists.freedesktop.org/archives/dri-devel/2016-July/113083.html
that making it clear this fence applies to DMA operations was a good thing.
Since then the patch has grown a bit as usage has increased, so hopefully it
remains a good thing!
(v2...: rebase, rerun spatch)
v3: Compile on msm, spotted a manual fixup that I broke.
v4: Try again for msm, sorry Daniel
coccinelle script:
@@
@@
- struct fence
+ struct dma_fence
@@
@@
- struct fence_ops
+ struct dma_fence_ops
@@
@@
- struct fence_cb
+ struct dma_fence_cb
@@
@@
- struct fence_array
+ struct dma_fence_array
@@
@@
- enum fence_flag_bits
+ enum dma_fence_flag_bits
@@
@@
(
- fence_init
+ dma_fence_init
|
- fence_release
+ dma_fence_release
|
- fence_free
+ dma_fence_free
|
- fence_get
+ dma_fence_get
|
- fence_get_rcu
+ dma_fence_get_rcu
|
- fence_put
+ dma_fence_put
|
- fence_signal
+ dma_fence_signal
|
- fence_signal_locked
+ dma_fence_signal_locked
|
- fence_default_wait
+ dma_fence_default_wait
|
- fence_add_callback
+ dma_fence_add_callback
|
- fence_remove_callback
+ dma_fence_remove_callback
|
- fence_enable_sw_signaling
+ dma_fence_enable_sw_signaling
|
- fence_is_signaled_locked
+ dma_fence_is_signaled_locked
|
- fence_is_signaled
+ dma_fence_is_signaled
|
- fence_is_later
+ dma_fence_is_later
|
- fence_later
+ dma_fence_later
|
- fence_wait_timeout
+ dma_fence_wait_timeout
|
- fence_wait_any_timeout
+ dma_fence_wait_any_timeout
|
- fence_wait
+ dma_fence_wait
|
- fence_context_alloc
+ dma_fence_context_alloc
|
- fence_array_create
+ dma_fence_array_create
|
- to_fence_array
+ to_dma_fence_array
|
- fence_is_array
+ dma_fence_is_array
|
- trace_fence_emit
+ trace_dma_fence_emit
|
- FENCE_TRACE
+ DMA_FENCE_TRACE
|
- FENCE_WARN
+ DMA_FENCE_WARN
|
- FENCE_ERR
+ DMA_FENCE_ERR
)
(
...
)
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Gustavo Padovan <gustavo.padovan@collabora.co.uk>
Acked-by: Sumit Semwal <sumit.semwal@linaro.org>
Acked-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: http://patchwork.freedesktop.org/patch/msgid/20161025120045.28839-1-chris@chris-wilson.co.uk
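The semantic patch above is purely mechanical: every fence symbol gains a dma_ prefix with no behavioural change, and it can be re-applied tree-wide with Coccinelle's spatch (e.g. roughly spatch --sp-file rename.cocci --in-place --dir .; the script file name here is illustrative). For driver authors the shape of the API is unchanged. The sketch below is not part of this patch; it is a minimal illustration of a trivial fence provider and consumer after the rename, using only the renamed entry points shown above (dma_fence_init(), dma_fence_add_callback(), dma_fence_signal(), dma_fence_put()), with all example_* identifiers invented for the example.

#include <linux/dma-fence.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Irqsafe lock shared by all fences created below. */
static DEFINE_SPINLOCK(example_fence_lock);

static const char *example_get_driver_name(struct dma_fence *fence)
{
        return "example";
}

static const char *example_get_timeline_name(struct dma_fence *fence)
{
        return "example-timeline";
}

static bool example_enable_signaling(struct dma_fence *fence)
{
        /* A real driver would arm its completion interrupt here and call
         * dma_fence_signal() from the interrupt handler. */
        return true;
}

static const struct dma_fence_ops example_fence_ops = {
        .get_driver_name   = example_get_driver_name,
        .get_timeline_name = example_get_timeline_name,
        .enable_signaling  = example_enable_signaling,
        .wait              = dma_fence_default_wait,
};

static void example_done_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
        pr_info("fence %llu:%u signaled\n", fence->context, fence->seqno);
}

static int example_use_fence(void)
{
        struct dma_fence_cb cb;         /* must stay alive until signaled */
        struct dma_fence *fence;
        u64 context = dma_fence_context_alloc(1);

        fence = kzalloc(sizeof(*fence), GFP_KERNEL);
        if (!fence)
                return -ENOMEM;

        dma_fence_init(fence, &example_fence_ops, &example_fence_lock,
                       context, 1);

        /* Returns -ENOENT if the fence has already signaled. */
        if (dma_fence_add_callback(fence, &cb, example_done_cb))
                pr_info("fence already signaled\n");

        dma_fence_signal(fence);        /* runs example_done_cb */
        dma_fence_put(fence);           /* drop the initial reference */
        return 0;
}

As in the renamed dma-fence-array.c below, .wait can simply point at dma_fence_default_wait when the driver needs no special wait path.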
Diffstat (limited to 'drivers/dma-buf')
-rw-r--r-- | drivers/dma-buf/Kconfig | 2
-rw-r--r-- | drivers/dma-buf/Makefile | 2
-rw-r--r-- | drivers/dma-buf/dma-buf.c | 28
-rw-r--r-- | drivers/dma-buf/dma-fence-array.c (renamed from drivers/dma-buf/fence-array.c) | 91
-rw-r--r-- | drivers/dma-buf/dma-fence.c (renamed from drivers/dma-buf/fence.c) | 199
-rw-r--r-- | drivers/dma-buf/reservation.c | 94
-rw-r--r-- | drivers/dma-buf/seqno-fence.c | 18
-rw-r--r-- | drivers/dma-buf/sw_sync.c | 48
-rw-r--r-- | drivers/dma-buf/sync_debug.c | 13
-rw-r--r-- | drivers/dma-buf/sync_debug.h | 9
-rw-r--r-- | drivers/dma-buf/sync_file.c | 63
11 files changed, 288 insertions, 279 deletions
diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig index 2585821b24ab..ed3b785bae37 100644 --- a/drivers/dma-buf/Kconfig +++ b/drivers/dma-buf/Kconfig @@ -7,7 +7,7 @@ config SYNC_FILE select DMA_SHARED_BUFFER ---help--- The Sync File Framework adds explicit syncronization via - userspace. It enables send/receive 'struct fence' objects to/from + userspace. It enables send/receive 'struct dma_fence' objects to/from userspace via Sync File fds for synchronization between drivers via userspace components. It has been ported from Android. diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile index 210a10bfad2b..c33bf8863147 100644 --- a/drivers/dma-buf/Makefile +++ b/drivers/dma-buf/Makefile @@ -1,3 +1,3 @@ -obj-y := dma-buf.o fence.o reservation.o seqno-fence.o fence-array.o +obj-y := dma-buf.o dma-fence.o dma-fence-array.o reservation.o seqno-fence.o obj-$(CONFIG_SYNC_FILE) += sync_file.o obj-$(CONFIG_SW_SYNC) += sw_sync.o sync_debug.o diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c index cf04d249a6a4..e72e64484131 100644 --- a/drivers/dma-buf/dma-buf.c +++ b/drivers/dma-buf/dma-buf.c @@ -25,7 +25,7 @@ #include <linux/fs.h> #include <linux/slab.h> #include <linux/dma-buf.h> -#include <linux/fence.h> +#include <linux/dma-fence.h> #include <linux/anon_inodes.h> #include <linux/export.h> #include <linux/debugfs.h> @@ -124,7 +124,7 @@ static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence) return base + offset; } -static void dma_buf_poll_cb(struct fence *fence, struct fence_cb *cb) +static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb) { struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb; unsigned long flags; @@ -140,7 +140,7 @@ static unsigned int dma_buf_poll(struct file *file, poll_table *poll) struct dma_buf *dmabuf; struct reservation_object *resv; struct reservation_object_list *fobj; - struct fence *fence_excl; + struct dma_fence *fence_excl; unsigned long events; unsigned shared_count, seq; @@ -187,20 +187,20 @@ retry: spin_unlock_irq(&dmabuf->poll.lock); if (events & pevents) { - if (!fence_get_rcu(fence_excl)) { + if (!dma_fence_get_rcu(fence_excl)) { /* force a recheck */ events &= ~pevents; dma_buf_poll_cb(NULL, &dcb->cb); - } else if (!fence_add_callback(fence_excl, &dcb->cb, - dma_buf_poll_cb)) { + } else if (!dma_fence_add_callback(fence_excl, &dcb->cb, + dma_buf_poll_cb)) { events &= ~pevents; - fence_put(fence_excl); + dma_fence_put(fence_excl); } else { /* * No callback queued, wake up any additional * waiters. */ - fence_put(fence_excl); + dma_fence_put(fence_excl); dma_buf_poll_cb(NULL, &dcb->cb); } } @@ -222,9 +222,9 @@ retry: goto out; for (i = 0; i < shared_count; ++i) { - struct fence *fence = rcu_dereference(fobj->shared[i]); + struct dma_fence *fence = rcu_dereference(fobj->shared[i]); - if (!fence_get_rcu(fence)) { + if (!dma_fence_get_rcu(fence)) { /* * fence refcount dropped to zero, this means * that fobj has been freed @@ -235,13 +235,13 @@ retry: dma_buf_poll_cb(NULL, &dcb->cb); break; } - if (!fence_add_callback(fence, &dcb->cb, - dma_buf_poll_cb)) { - fence_put(fence); + if (!dma_fence_add_callback(fence, &dcb->cb, + dma_buf_poll_cb)) { + dma_fence_put(fence); events &= ~POLLOUT; break; } - fence_put(fence); + dma_fence_put(fence); } /* No callback queued, wake up any additional waiters. 
*/ diff --git a/drivers/dma-buf/fence-array.c b/drivers/dma-buf/dma-fence-array.c index f1989fcaf354..67eb7c8fb88c 100644 --- a/drivers/dma-buf/fence-array.c +++ b/drivers/dma-buf/dma-fence-array.c @@ -1,5 +1,5 @@ /* - * fence-array: aggregate fences to be waited together + * dma-fence-array: aggregate fences to be waited together * * Copyright (C) 2016 Collabora Ltd * Copyright (C) 2016 Advanced Micro Devices, Inc. @@ -19,35 +19,34 @@ #include <linux/export.h> #include <linux/slab.h> -#include <linux/fence-array.h> +#include <linux/dma-fence-array.h> -static void fence_array_cb_func(struct fence *f, struct fence_cb *cb); - -static const char *fence_array_get_driver_name(struct fence *fence) +static const char *dma_fence_array_get_driver_name(struct dma_fence *fence) { - return "fence_array"; + return "dma_fence_array"; } -static const char *fence_array_get_timeline_name(struct fence *fence) +static const char *dma_fence_array_get_timeline_name(struct dma_fence *fence) { return "unbound"; } -static void fence_array_cb_func(struct fence *f, struct fence_cb *cb) +static void dma_fence_array_cb_func(struct dma_fence *f, + struct dma_fence_cb *cb) { - struct fence_array_cb *array_cb = - container_of(cb, struct fence_array_cb, cb); - struct fence_array *array = array_cb->array; + struct dma_fence_array_cb *array_cb = + container_of(cb, struct dma_fence_array_cb, cb); + struct dma_fence_array *array = array_cb->array; if (atomic_dec_and_test(&array->num_pending)) - fence_signal(&array->base); - fence_put(&array->base); + dma_fence_signal(&array->base); + dma_fence_put(&array->base); } -static bool fence_array_enable_signaling(struct fence *fence) +static bool dma_fence_array_enable_signaling(struct dma_fence *fence) { - struct fence_array *array = to_fence_array(fence); - struct fence_array_cb *cb = (void *)(&array[1]); + struct dma_fence_array *array = to_dma_fence_array(fence); + struct dma_fence_array_cb *cb = (void *)(&array[1]); unsigned i; for (i = 0; i < array->num_fences; ++i) { @@ -60,10 +59,10 @@ static bool fence_array_enable_signaling(struct fence *fence) * until we signal the array as complete (but that is now * insufficient). 
*/ - fence_get(&array->base); - if (fence_add_callback(array->fences[i], &cb[i].cb, - fence_array_cb_func)) { - fence_put(&array->base); + dma_fence_get(&array->base); + if (dma_fence_add_callback(array->fences[i], &cb[i].cb, + dma_fence_array_cb_func)) { + dma_fence_put(&array->base); if (atomic_dec_and_test(&array->num_pending)) return false; } @@ -72,69 +71,71 @@ static bool fence_array_enable_signaling(struct fence *fence) return true; } -static bool fence_array_signaled(struct fence *fence) +static bool dma_fence_array_signaled(struct dma_fence *fence) { - struct fence_array *array = to_fence_array(fence); + struct dma_fence_array *array = to_dma_fence_array(fence); return atomic_read(&array->num_pending) <= 0; } -static void fence_array_release(struct fence *fence) +static void dma_fence_array_release(struct dma_fence *fence) { - struct fence_array *array = to_fence_array(fence); + struct dma_fence_array *array = to_dma_fence_array(fence); unsigned i; for (i = 0; i < array->num_fences; ++i) - fence_put(array->fences[i]); + dma_fence_put(array->fences[i]); kfree(array->fences); - fence_free(fence); + dma_fence_free(fence); } -const struct fence_ops fence_array_ops = { - .get_driver_name = fence_array_get_driver_name, - .get_timeline_name = fence_array_get_timeline_name, - .enable_signaling = fence_array_enable_signaling, - .signaled = fence_array_signaled, - .wait = fence_default_wait, - .release = fence_array_release, +const struct dma_fence_ops dma_fence_array_ops = { + .get_driver_name = dma_fence_array_get_driver_name, + .get_timeline_name = dma_fence_array_get_timeline_name, + .enable_signaling = dma_fence_array_enable_signaling, + .signaled = dma_fence_array_signaled, + .wait = dma_fence_default_wait, + .release = dma_fence_array_release, }; -EXPORT_SYMBOL(fence_array_ops); +EXPORT_SYMBOL(dma_fence_array_ops); /** - * fence_array_create - Create a custom fence array + * dma_fence_array_create - Create a custom fence array * @num_fences: [in] number of fences to add in the array * @fences: [in] array containing the fences * @context: [in] fence context to use * @seqno: [in] sequence number to use * @signal_on_any: [in] signal on any fence in the array * - * Allocate a fence_array object and initialize the base fence with fence_init(). + * Allocate a dma_fence_array object and initialize the base fence with + * dma_fence_init(). * In case of error it returns NULL. * * The caller should allocate the fences array with num_fences size * and fill it with the fences it wants to add to the object. Ownership of this - * array is taken and fence_put() is used on each fence on release. + * array is taken and dma_fence_put() is used on each fence on release. * * If @signal_on_any is true the fence array signals if any fence in the array * signals, otherwise it signals when all fences in the array signal. */ -struct fence_array *fence_array_create(int num_fences, struct fence **fences, - u64 context, unsigned seqno, - bool signal_on_any) +struct dma_fence_array *dma_fence_array_create(int num_fences, + struct dma_fence **fences, + u64 context, unsigned seqno, + bool signal_on_any) { - struct fence_array *array; + struct dma_fence_array *array; size_t size = sizeof(*array); /* Allocate the callback structures behind the array. 
*/ - size += num_fences * sizeof(struct fence_array_cb); + size += num_fences * sizeof(struct dma_fence_array_cb); array = kzalloc(size, GFP_KERNEL); if (!array) return NULL; spin_lock_init(&array->lock); - fence_init(&array->base, &fence_array_ops, &array->lock, - context, seqno); + dma_fence_init(&array->base, &dma_fence_array_ops, &array->lock, + context, seqno); array->num_fences = num_fences; atomic_set(&array->num_pending, signal_on_any ? 1 : num_fences); @@ -142,4 +143,4 @@ struct fence_array *fence_array_create(int num_fences, struct fence **fences, return array; } -EXPORT_SYMBOL(fence_array_create); +EXPORT_SYMBOL(dma_fence_array_create); diff --git a/drivers/dma-buf/fence.c b/drivers/dma-buf/dma-fence.c index cc05dddc77a6..3a7bf009c21c 100644 --- a/drivers/dma-buf/fence.c +++ b/drivers/dma-buf/dma-fence.c @@ -21,13 +21,13 @@ #include <linux/slab.h> #include <linux/export.h> #include <linux/atomic.h> -#include <linux/fence.h> +#include <linux/dma-fence.h> #define CREATE_TRACE_POINTS -#include <trace/events/fence.h> +#include <trace/events/dma_fence.h> -EXPORT_TRACEPOINT_SYMBOL(fence_annotate_wait_on); -EXPORT_TRACEPOINT_SYMBOL(fence_emit); +EXPORT_TRACEPOINT_SYMBOL(dma_fence_annotate_wait_on); +EXPORT_TRACEPOINT_SYMBOL(dma_fence_emit); /* * fence context counter: each execution context should have its own @@ -35,37 +35,37 @@ EXPORT_TRACEPOINT_SYMBOL(fence_emit); * context or not. One device can have multiple separate contexts, * and they're used if some engine can run independently of another. */ -static atomic64_t fence_context_counter = ATOMIC64_INIT(0); +static atomic64_t dma_fence_context_counter = ATOMIC64_INIT(0); /** - * fence_context_alloc - allocate an array of fence contexts + * dma_fence_context_alloc - allocate an array of fence contexts * @num: [in] amount of contexts to allocate * * This function will return the first index of the number of fences allocated. * The fence context is used for setting fence->context to a unique number. */ -u64 fence_context_alloc(unsigned num) +u64 dma_fence_context_alloc(unsigned num) { BUG_ON(!num); - return atomic64_add_return(num, &fence_context_counter) - num; + return atomic64_add_return(num, &dma_fence_context_counter) - num; } -EXPORT_SYMBOL(fence_context_alloc); +EXPORT_SYMBOL(dma_fence_context_alloc); /** - * fence_signal_locked - signal completion of a fence + * dma_fence_signal_locked - signal completion of a fence * @fence: the fence to signal * * Signal completion for software callbacks on a fence, this will unblock - * fence_wait() calls and run all the callbacks added with - * fence_add_callback(). Can be called multiple times, but since a fence + * dma_fence_wait() calls and run all the callbacks added with + * dma_fence_add_callback(). Can be called multiple times, but since a fence * can only go from unsignaled to signaled state, it will only be effective * the first time. * - * Unlike fence_signal, this function must be called with fence->lock held. + * Unlike dma_fence_signal, this function must be called with fence->lock held. 
*/ -int fence_signal_locked(struct fence *fence) +int dma_fence_signal_locked(struct dma_fence *fence) { - struct fence_cb *cur, *tmp; + struct dma_fence_cb *cur, *tmp; int ret = 0; lockdep_assert_held(fence->lock); @@ -78,15 +78,15 @@ int fence_signal_locked(struct fence *fence) smp_mb__before_atomic(); } - if (test_and_set_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) { + if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) { ret = -EINVAL; /* - * we might have raced with the unlocked fence_signal, + * we might have raced with the unlocked dma_fence_signal, * still run through all callbacks */ } else - trace_fence_signaled(fence); + trace_dma_fence_signaled(fence); list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) { list_del_init(&cur->node); @@ -94,19 +94,19 @@ int fence_signal_locked(struct fence *fence) } return ret; } -EXPORT_SYMBOL(fence_signal_locked); +EXPORT_SYMBOL(dma_fence_signal_locked); /** - * fence_signal - signal completion of a fence + * dma_fence_signal - signal completion of a fence * @fence: the fence to signal * * Signal completion for software callbacks on a fence, this will unblock - * fence_wait() calls and run all the callbacks added with - * fence_add_callback(). Can be called multiple times, but since a fence + * dma_fence_wait() calls and run all the callbacks added with + * dma_fence_add_callback(). Can be called multiple times, but since a fence * can only go from unsignaled to signaled state, it will only be effective * the first time. */ -int fence_signal(struct fence *fence) +int dma_fence_signal(struct dma_fence *fence) { unsigned long flags; @@ -118,13 +118,13 @@ int fence_signal(struct fence *fence) smp_mb__before_atomic(); } - if (test_and_set_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) + if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) return -EINVAL; - trace_fence_signaled(fence); + trace_dma_fence_signaled(fence); - if (test_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags)) { - struct fence_cb *cur, *tmp; + if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags)) { + struct dma_fence_cb *cur, *tmp; spin_lock_irqsave(fence->lock, flags); list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) { @@ -135,10 +135,10 @@ int fence_signal(struct fence *fence) } return 0; } -EXPORT_SYMBOL(fence_signal); +EXPORT_SYMBOL(dma_fence_signal); /** - * fence_wait_timeout - sleep until the fence gets signaled + * dma_fence_wait_timeout - sleep until the fence gets signaled * or until timeout elapses * @fence: [in] the fence to wait on * @intr: [in] if true, do an interruptible wait @@ -154,7 +154,7 @@ EXPORT_SYMBOL(fence_signal); * freed before return, resulting in undefined behavior. 
*/ signed long -fence_wait_timeout(struct fence *fence, bool intr, signed long timeout) +dma_fence_wait_timeout(struct dma_fence *fence, bool intr, signed long timeout) { signed long ret; @@ -162,70 +162,71 @@ fence_wait_timeout(struct fence *fence, bool intr, signed long timeout) return -EINVAL; if (timeout == 0) - return fence_is_signaled(fence); + return dma_fence_is_signaled(fence); - trace_fence_wait_start(fence); + trace_dma_fence_wait_start(fence); ret = fence->ops->wait(fence, intr, timeout); - trace_fence_wait_end(fence); + trace_dma_fence_wait_end(fence); return ret; } -EXPORT_SYMBOL(fence_wait_timeout); +EXPORT_SYMBOL(dma_fence_wait_timeout); -void fence_release(struct kref *kref) +void dma_fence_release(struct kref *kref) { - struct fence *fence = - container_of(kref, struct fence, refcount); + struct dma_fence *fence = + container_of(kref, struct dma_fence, refcount); - trace_fence_destroy(fence); + trace_dma_fence_destroy(fence); BUG_ON(!list_empty(&fence->cb_list)); if (fence->ops->release) fence->ops->release(fence); else - fence_free(fence); + dma_fence_free(fence); } -EXPORT_SYMBOL(fence_release); +EXPORT_SYMBOL(dma_fence_release); -void fence_free(struct fence *fence) +void dma_fence_free(struct dma_fence *fence) { kfree_rcu(fence, rcu); } -EXPORT_SYMBOL(fence_free); +EXPORT_SYMBOL(dma_fence_free); /** - * fence_enable_sw_signaling - enable signaling on fence + * dma_fence_enable_sw_signaling - enable signaling on fence * @fence: [in] the fence to enable * * this will request for sw signaling to be enabled, to make the fence * complete as soon as possible */ -void fence_enable_sw_signaling(struct fence *fence) +void dma_fence_enable_sw_signaling(struct dma_fence *fence) { unsigned long flags; - if (!test_and_set_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags) && - !test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) { - trace_fence_enable_signal(fence); + if (!test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, + &fence->flags) && + !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) { + trace_dma_fence_enable_signal(fence); spin_lock_irqsave(fence->lock, flags); if (!fence->ops->enable_signaling(fence)) - fence_signal_locked(fence); + dma_fence_signal_locked(fence); spin_unlock_irqrestore(fence->lock, flags); } } -EXPORT_SYMBOL(fence_enable_sw_signaling); +EXPORT_SYMBOL(dma_fence_enable_sw_signaling); /** - * fence_add_callback - add a callback to be called when the fence + * dma_fence_add_callback - add a callback to be called when the fence * is signaled * @fence: [in] the fence to wait on * @cb: [in] the callback to register * @func: [in] the function to call * - * cb will be initialized by fence_add_callback, no initialization + * cb will be initialized by dma_fence_add_callback, no initialization * by the caller is required. Any number of callbacks can be registered * to a fence, but a callback can only be registered to one fence at a time. * @@ -234,15 +235,15 @@ EXPORT_SYMBOL(fence_enable_sw_signaling); * *not* call the callback) * * Add a software callback to the fence. Same restrictions apply to - * refcount as it does to fence_wait, however the caller doesn't need to + * refcount as it does to dma_fence_wait, however the caller doesn't need to * keep a refcount to fence afterwards: when software access is enabled, * the creator of the fence is required to keep the fence alive until - * after it signals with fence_signal. The callback itself can be called + * after it signals with dma_fence_signal. The callback itself can be called * from irq context. 
* */ -int fence_add_callback(struct fence *fence, struct fence_cb *cb, - fence_func_t func) +int dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb, + dma_fence_func_t func) { unsigned long flags; int ret = 0; @@ -251,22 +252,23 @@ int fence_add_callback(struct fence *fence, struct fence_cb *cb, if (WARN_ON(!fence || !func)) return -EINVAL; - if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) { + if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) { INIT_LIST_HEAD(&cb->node); return -ENOENT; } spin_lock_irqsave(fence->lock, flags); - was_set = test_and_set_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags); + was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, + &fence->flags); - if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) + if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) ret = -ENOENT; else if (!was_set) { - trace_fence_enable_signal(fence); + trace_dma_fence_enable_signal(fence); if (!fence->ops->enable_signaling(fence)) { - fence_signal_locked(fence); + dma_fence_signal_locked(fence); ret = -ENOENT; } } @@ -280,10 +282,10 @@ int fence_add_callback(struct fence *fence, struct fence_cb *cb, return ret; } -EXPORT_SYMBOL(fence_add_callback); +EXPORT_SYMBOL(dma_fence_add_callback); /** - * fence_remove_callback - remove a callback from the signaling list + * dma_fence_remove_callback - remove a callback from the signaling list * @fence: [in] the fence to wait on * @cb: [in] the callback to remove * @@ -298,7 +300,7 @@ EXPORT_SYMBOL(fence_add_callback); * with a reference held to the fence. */ bool -fence_remove_callback(struct fence *fence, struct fence_cb *cb) +dma_fence_remove_callback(struct dma_fence *fence, struct dma_fence_cb *cb) { unsigned long flags; bool ret; @@ -313,15 +315,15 @@ fence_remove_callback(struct fence *fence, struct fence_cb *cb) return ret; } -EXPORT_SYMBOL(fence_remove_callback); +EXPORT_SYMBOL(dma_fence_remove_callback); struct default_wait_cb { - struct fence_cb base; + struct dma_fence_cb base; struct task_struct *task; }; static void -fence_default_wait_cb(struct fence *fence, struct fence_cb *cb) +dma_fence_default_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb) { struct default_wait_cb *wait = container_of(cb, struct default_wait_cb, base); @@ -330,7 +332,7 @@ fence_default_wait_cb(struct fence *fence, struct fence_cb *cb) } /** - * fence_default_wait - default sleep until the fence gets signaled + * dma_fence_default_wait - default sleep until the fence gets signaled * or until timeout elapses * @fence: [in] the fence to wait on * @intr: [in] if true, do an interruptible wait @@ -340,14 +342,14 @@ fence_default_wait_cb(struct fence *fence, struct fence_cb *cb) * remaining timeout in jiffies on success. 
*/ signed long -fence_default_wait(struct fence *fence, bool intr, signed long timeout) +dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout) { struct default_wait_cb cb; unsigned long flags; signed long ret = timeout; bool was_set; - if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) + if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) return timeout; spin_lock_irqsave(fence->lock, flags); @@ -357,25 +359,26 @@ fence_default_wait(struct fence *fence, bool intr, signed long timeout) goto out; } - was_set = test_and_set_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags); + was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, + &fence->flags); - if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) + if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) goto out; if (!was_set) { - trace_fence_enable_signal(fence); + trace_dma_fence_enable_signal(fence); if (!fence->ops->enable_signaling(fence)) { - fence_signal_locked(fence); + dma_fence_signal_locked(fence); goto out; } } - cb.base.func = fence_default_wait_cb; + cb.base.func = dma_fence_default_wait_cb; cb.task = current; list_add(&cb.base.node, &fence->cb_list); - while (!test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags) && ret > 0) { + while (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) && ret > 0) { if (intr) __set_current_state(TASK_INTERRUPTIBLE); else @@ -397,23 +400,23 @@ out: spin_unlock_irqrestore(fence->lock, flags); return ret; } -EXPORT_SYMBOL(fence_default_wait); +EXPORT_SYMBOL(dma_fence_default_wait); static bool -fence_test_signaled_any(struct fence **fences, uint32_t count) +dma_fence_test_signaled_any(struct dma_fence **fences, uint32_t count) { int i; for (i = 0; i < count; ++i) { - struct fence *fence = fences[i]; - if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) + struct dma_fence *fence = fences[i]; + if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) return true; } return false; } /** - * fence_wait_any_timeout - sleep until any fence gets signaled + * dma_fence_wait_any_timeout - sleep until any fence gets signaled * or until timeout elapses * @fences: [in] array of fences to wait on * @count: [in] number of fences to wait on @@ -429,8 +432,8 @@ fence_test_signaled_any(struct fence **fences, uint32_t count) * fence might be freed before return, resulting in undefined behavior. 
*/ signed long -fence_wait_any_timeout(struct fence **fences, uint32_t count, - bool intr, signed long timeout) +dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t count, + bool intr, signed long timeout) { struct default_wait_cb *cb; signed long ret = timeout; @@ -441,7 +444,7 @@ fence_wait_any_timeout(struct fence **fences, uint32_t count, if (timeout == 0) { for (i = 0; i < count; ++i) - if (fence_is_signaled(fences[i])) + if (dma_fence_is_signaled(fences[i])) return 1; return 0; @@ -454,16 +457,16 @@ fence_wait_any_timeout(struct fence **fences, uint32_t count, } for (i = 0; i < count; ++i) { - struct fence *fence = fences[i]; + struct dma_fence *fence = fences[i]; - if (fence->ops->wait != fence_default_wait) { + if (fence->ops->wait != dma_fence_default_wait) { ret = -EINVAL; goto fence_rm_cb; } cb[i].task = current; - if (fence_add_callback(fence, &cb[i].base, - fence_default_wait_cb)) { + if (dma_fence_add_callback(fence, &cb[i].base, + dma_fence_default_wait_cb)) { /* This fence is already signaled */ goto fence_rm_cb; } @@ -475,7 +478,7 @@ fence_wait_any_timeout(struct fence **fences, uint32_t count, else set_current_state(TASK_UNINTERRUPTIBLE); - if (fence_test_signaled_any(fences, count)) + if (dma_fence_test_signaled_any(fences, count)) break; ret = schedule_timeout(ret); @@ -488,34 +491,34 @@ fence_wait_any_timeout(struct fence **fences, uint32_t count, fence_rm_cb: while (i-- > 0) - fence_remove_callback(fences[i], &cb[i].base); + dma_fence_remove_callback(fences[i], &cb[i].base); err_free_cb: kfree(cb); return ret; } -EXPORT_SYMBOL(fence_wait_any_timeout); +EXPORT_SYMBOL(dma_fence_wait_any_timeout); /** - * fence_init - Initialize a custom fence. + * dma_fence_init - Initialize a custom fence. * @fence: [in] the fence to initialize - * @ops: [in] the fence_ops for operations on this fence + * @ops: [in] the dma_fence_ops for operations on this fence * @lock: [in] the irqsafe spinlock to use for locking this fence * @context: [in] the execution context this fence is run on * @seqno: [in] a linear increasing sequence number for this context * * Initializes an allocated fence, the caller doesn't have to keep its * refcount after committing with this fence, but it will need to hold a - * refcount again if fence_ops.enable_signaling gets called. This can + * refcount again if dma_fence_ops.enable_signaling gets called. This can * be used for other implementing other types of fence. * * context and seqno are used for easy comparison between fences, allowing - * to check which fence is later by simply using fence_later. + * to check which fence is later by simply using dma_fence_later. 
*/ void -fence_init(struct fence *fence, const struct fence_ops *ops, - spinlock_t *lock, u64 context, unsigned seqno) +dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops, + spinlock_t *lock, u64 context, unsigned seqno) { BUG_ON(!lock); BUG_ON(!ops || !ops->wait || !ops->enable_signaling || @@ -529,6 +532,6 @@ fence_init(struct fence *fence, const struct fence_ops *ops, fence->seqno = seqno; fence->flags = 0UL; - trace_fence_init(fence); + trace_dma_fence_init(fence); } -EXPORT_SYMBOL(fence_init); +EXPORT_SYMBOL(dma_fence_init); diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c index 82de59f7cbbd..7ed56f3edfb7 100644 --- a/drivers/dma-buf/reservation.c +++ b/drivers/dma-buf/reservation.c @@ -102,17 +102,17 @@ EXPORT_SYMBOL(reservation_object_reserve_shared); static void reservation_object_add_shared_inplace(struct reservation_object *obj, struct reservation_object_list *fobj, - struct fence *fence) + struct dma_fence *fence) { u32 i; - fence_get(fence); + dma_fence_get(fence); preempt_disable(); write_seqcount_begin(&obj->seq); for (i = 0; i < fobj->shared_count; ++i) { - struct fence *old_fence; + struct dma_fence *old_fence; old_fence = rcu_dereference_protected(fobj->shared[i], reservation_object_held(obj)); @@ -123,7 +123,7 @@ reservation_object_add_shared_inplace(struct reservation_object *obj, write_seqcount_end(&obj->seq); preempt_enable(); - fence_put(old_fence); + dma_fence_put(old_fence); return; } } @@ -143,12 +143,12 @@ static void reservation_object_add_shared_replace(struct reservation_object *obj, struct reservation_object_list *old, struct reservation_object_list *fobj, - struct fence *fence) + struct dma_fence *fence) { unsigned i; - struct fence *old_fence = NULL; + struct dma_fence *old_fence = NULL; - fence_get(fence); + dma_fence_get(fence); if (!old) { RCU_INIT_POINTER(fobj->shared[0], fence); @@ -165,7 +165,7 @@ reservation_object_add_shared_replace(struct reservation_object *obj, fobj->shared_count = old->shared_count; for (i = 0; i < old->shared_count; ++i) { - struct fence *check; + struct dma_fence *check; check = rcu_dereference_protected(old->shared[i], reservation_object_held(obj)); @@ -196,7 +196,7 @@ done: kfree_rcu(old, rcu); if (old_fence) - fence_put(old_fence); + dma_fence_put(old_fence); } /** @@ -208,7 +208,7 @@ done: * reservation_object_reserve_shared() has been called. */ void reservation_object_add_shared_fence(struct reservation_object *obj, - struct fence *fence) + struct dma_fence *fence) { struct reservation_object_list *old, *fobj = obj->staged; @@ -231,9 +231,9 @@ EXPORT_SYMBOL(reservation_object_add_shared_fence); * Add a fence to the exclusive slot. The obj->lock must be held. 
*/ void reservation_object_add_excl_fence(struct reservation_object *obj, - struct fence *fence) + struct dma_fence *fence) { - struct fence *old_fence = reservation_object_get_excl(obj); + struct dma_fence *old_fence = reservation_object_get_excl(obj); struct reservation_object_list *old; u32 i = 0; @@ -242,7 +242,7 @@ void reservation_object_add_excl_fence(struct reservation_object *obj, i = old->shared_count; if (fence) - fence_get(fence); + dma_fence_get(fence); preempt_disable(); write_seqcount_begin(&obj->seq); @@ -255,11 +255,11 @@ void reservation_object_add_excl_fence(struct reservation_object *obj, /* inplace update, no shared fences */ while (i--) - fence_put(rcu_dereference_protected(old->shared[i], + dma_fence_put(rcu_dereference_protected(old->shared[i], reservation_object_held(obj))); if (old_fence) - fence_put(old_fence); + dma_fence_put(old_fence); } EXPORT_SYMBOL(reservation_object_add_excl_fence); @@ -276,12 +276,12 @@ EXPORT_SYMBOL(reservation_object_add_excl_fence); * Zero or -errno */ int reservation_object_get_fences_rcu(struct reservation_object *obj, - struct fence **pfence_excl, + struct dma_fence **pfence_excl, unsigned *pshared_count, - struct fence ***pshared) + struct dma_fence ***pshared) { - struct fence **shared = NULL; - struct fence *fence_excl; + struct dma_fence **shared = NULL; + struct dma_fence *fence_excl; unsigned int shared_count; int ret = 1; @@ -296,12 +296,12 @@ int reservation_object_get_fences_rcu(struct reservation_object *obj, seq = read_seqcount_begin(&obj->seq); fence_excl = rcu_dereference(obj->fence_excl); - if (fence_excl && !fence_get_rcu(fence_excl)) + if (fence_excl && !dma_fence_get_rcu(fence_excl)) goto unlock; fobj = rcu_dereference(obj->fence); if (fobj) { - struct fence **nshared; + struct dma_fence **nshared; size_t sz = sizeof(*shared) * fobj->shared_max; nshared = krealloc(shared, sz, @@ -322,15 +322,15 @@ int reservation_object_get_fences_rcu(struct reservation_object *obj, for (i = 0; i < shared_count; ++i) { shared[i] = rcu_dereference(fobj->shared[i]); - if (!fence_get_rcu(shared[i])) + if (!dma_fence_get_rcu(shared[i])) break; } } if (i != shared_count || read_seqcount_retry(&obj->seq, seq)) { while (i--) - fence_put(shared[i]); - fence_put(fence_excl); + dma_fence_put(shared[i]); + dma_fence_put(fence_excl); goto unlock; } @@ -368,7 +368,7 @@ long reservation_object_wait_timeout_rcu(struct reservation_object *obj, bool wait_all, bool intr, unsigned long timeout) { - struct fence *fence; + struct dma_fence *fence; unsigned seq, shared_count, i = 0; long ret = timeout; @@ -389,16 +389,17 @@ retry: shared_count = fobj->shared_count; for (i = 0; i < shared_count; ++i) { - struct fence *lfence = rcu_dereference(fobj->shared[i]); + struct dma_fence *lfence = rcu_dereference(fobj->shared[i]); - if (test_bit(FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) + if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, + &lfence->flags)) continue; - if (!fence_get_rcu(lfence)) + if (!dma_fence_get_rcu(lfence)) goto unlock_retry; - if (fence_is_signaled(lfence)) { - fence_put(lfence); + if (dma_fence_is_signaled(lfence)) { + dma_fence_put(lfence); continue; } @@ -408,15 +409,16 @@ retry: } if (!shared_count) { - struct fence *fence_excl = rcu_dereference(obj->fence_excl); + struct dma_fence *fence_excl = rcu_dereference(obj->fence_excl); if (fence_excl && - !test_bit(FENCE_FLAG_SIGNALED_BIT, &fence_excl->flags)) { - if (!fence_get_rcu(fence_excl)) + !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, + &fence_excl->flags)) { + if (!dma_fence_get_rcu(fence_excl)) goto 
unlock_retry; - if (fence_is_signaled(fence_excl)) - fence_put(fence_excl); + if (dma_fence_is_signaled(fence_excl)) + dma_fence_put(fence_excl); else fence = fence_excl; } @@ -425,12 +427,12 @@ retry: rcu_read_unlock(); if (fence) { if (read_seqcount_retry(&obj->seq, seq)) { - fence_put(fence); + dma_fence_put(fence); goto retry; } - ret = fence_wait_timeout(fence, intr, ret); - fence_put(fence); + ret = dma_fence_wait_timeout(fence, intr, ret); + dma_fence_put(fence); if (ret > 0 && wait_all && (i + 1 < shared_count)) goto retry; } @@ -444,18 +446,18 @@ EXPORT_SYMBOL_GPL(reservation_object_wait_timeout_rcu); static inline int -reservation_object_test_signaled_single(struct fence *passed_fence) +reservation_object_test_signaled_single(struct dma_fence *passed_fence) { - struct fence *fence, *lfence = passed_fence; + struct dma_fence *fence, *lfence = passed_fence; int ret = 1; - if (!test_bit(FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) { - fence = fence_get_rcu(lfence); + if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) { + fence = dma_fence_get_rcu(lfence); if (!fence) return -1; - ret = !!fence_is_signaled(fence); - fence_put(fence); + ret = !!dma_fence_is_signaled(fence); + dma_fence_put(fence); } return ret; } @@ -492,7 +494,7 @@ retry: shared_count = fobj->shared_count; for (i = 0; i < shared_count; ++i) { - struct fence *fence = rcu_dereference(fobj->shared[i]); + struct dma_fence *fence = rcu_dereference(fobj->shared[i]); ret = reservation_object_test_signaled_single(fence); if (ret < 0) @@ -506,7 +508,7 @@ retry: } if (!shared_count) { - struct fence *fence_excl = rcu_dereference(obj->fence_excl); + struct dma_fence *fence_excl = rcu_dereference(obj->fence_excl); if (fence_excl) { ret = reservation_object_test_signaled_single( diff --git a/drivers/dma-buf/seqno-fence.c b/drivers/dma-buf/seqno-fence.c index 71127f8f1626..f47112a64763 100644 --- a/drivers/dma-buf/seqno-fence.c +++ b/drivers/dma-buf/seqno-fence.c @@ -21,35 +21,35 @@ #include <linux/export.h> #include <linux/seqno-fence.h> -static const char *seqno_fence_get_driver_name(struct fence *fence) +static const char *seqno_fence_get_driver_name(struct dma_fence *fence) { struct seqno_fence *seqno_fence = to_seqno_fence(fence); return seqno_fence->ops->get_driver_name(fence); } -static const char *seqno_fence_get_timeline_name(struct fence *fence) +static const char *seqno_fence_get_timeline_name(struct dma_fence *fence) { struct seqno_fence *seqno_fence = to_seqno_fence(fence); return seqno_fence->ops->get_timeline_name(fence); } -static bool seqno_enable_signaling(struct fence *fence) +static bool seqno_enable_signaling(struct dma_fence *fence) { struct seqno_fence *seqno_fence = to_seqno_fence(fence); return seqno_fence->ops->enable_signaling(fence); } -static bool seqno_signaled(struct fence *fence) +static bool seqno_signaled(struct dma_fence *fence) { struct seqno_fence *seqno_fence = to_seqno_fence(fence); return seqno_fence->ops->signaled && seqno_fence->ops->signaled(fence); } -static void seqno_release(struct fence *fence) +static void seqno_release(struct dma_fence *fence) { struct seqno_fence *f = to_seqno_fence(fence); @@ -57,18 +57,18 @@ static void seqno_release(struct fence *fence) if (f->ops->release) f->ops->release(fence); else - fence_free(&f->base); + dma_fence_free(&f->base); } -static signed long seqno_wait(struct fence *fence, bool intr, - signed long timeout) +static signed long seqno_wait(struct dma_fence *fence, bool intr, + signed long timeout) { struct seqno_fence *f = 
to_seqno_fence(fence); return f->ops->wait(fence, intr, timeout); } -const struct fence_ops seqno_fence_ops = { +const struct dma_fence_ops seqno_fence_ops = { .get_driver_name = seqno_fence_get_driver_name, .get_timeline_name = seqno_fence_get_timeline_name, .enable_signaling = seqno_enable_signaling, diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c index 62e8e6dc7953..82e0ca4dd0c1 100644 --- a/drivers/dma-buf/sw_sync.c +++ b/drivers/dma-buf/sw_sync.c @@ -68,9 +68,9 @@ struct sw_sync_create_fence_data { #define SW_SYNC_IOC_INC _IOW(SW_SYNC_IOC_MAGIC, 1, __u32) -static const struct fence_ops timeline_fence_ops; +static const struct dma_fence_ops timeline_fence_ops; -static inline struct sync_pt *fence_to_sync_pt(struct fence *fence) +static inline struct sync_pt *dma_fence_to_sync_pt(struct dma_fence *fence) { if (fence->ops != &timeline_fence_ops) return NULL; @@ -93,7 +93,7 @@ struct sync_timeline *sync_timeline_create(const char *name) return NULL; kref_init(&obj->kref); - obj->context = fence_context_alloc(1); + obj->context = dma_fence_context_alloc(1); strlcpy(obj->name, name, sizeof(obj->name)); INIT_LIST_HEAD(&obj->child_list_head); @@ -146,7 +146,7 @@ static void sync_timeline_signal(struct sync_timeline *obj, unsigned int inc) list_for_each_entry_safe(pt, next, &obj->active_list_head, active_list) { - if (fence_is_signaled_locked(&pt->base)) + if (dma_fence_is_signaled_locked(&pt->base)) list_del_init(&pt->active_list); } @@ -179,30 +179,30 @@ static struct sync_pt *sync_pt_create(struct sync_timeline *obj, int size, spin_lock_irqsave(&obj->child_list_lock, flags); sync_timeline_get(obj); - fence_init(&pt->base, &timeline_fence_ops, &obj->child_list_lock, - obj->context, value); + dma_fence_init(&pt->base, &timeline_fence_ops, &obj->child_list_lock, + obj->context, value); list_add_tail(&pt->child_list, &obj->child_list_head); INIT_LIST_HEAD(&pt->active_list); spin_unlock_irqrestore(&obj->child_list_lock, flags); return pt; } -static const char *timeline_fence_get_driver_name(struct fence *fence) +static const char *timeline_fence_get_driver_name(struct dma_fence *fence) { return "sw_sync"; } -static const char *timeline_fence_get_timeline_name(struct fence *fence) +static const char *timeline_fence_get_timeline_name(struct dma_fence *fence) { - struct sync_timeline *parent = fence_parent(fence); + struct sync_timeline *parent = dma_fence_parent(fence); return parent->name; } -static void timeline_fence_release(struct fence *fence) +static void timeline_fence_release(struct dma_fence *fence) { - struct sync_pt *pt = fence_to_sync_pt(fence); - struct sync_timeline *parent = fence_parent(fence); + struct sync_pt *pt = dma_fence_to_sync_pt(fence); + struct sync_timeline *parent = dma_fence_parent(fence); unsigned long flags; spin_lock_irqsave(fence->lock, flags); @@ -212,20 +212,20 @@ static void timeline_fence_release(struct fence *fence) spin_unlock_irqrestore(fence->lock, flags); sync_timeline_put(parent); - fence_free(fence); + dma_fence_free(fence); } -static bool timeline_fence_signaled(struct fence *fence) +static bool timeline_fence_signaled(struct dma_fence *fence) { - struct sync_timeline *parent = fence_parent(fence); + struct sync_timeline *parent = dma_fence_parent(fence); return (fence->seqno > parent->value) ? 
false : true; } -static bool timeline_fence_enable_signaling(struct fence *fence) +static bool timeline_fence_enable_signaling(struct dma_fence *fence) { - struct sync_pt *pt = fence_to_sync_pt(fence); - struct sync_timeline *parent = fence_parent(fence); + struct sync_pt *pt = dma_fence_to_sync_pt(fence); + struct sync_timeline *parent = dma_fence_parent(fence); if (timeline_fence_signaled(fence)) return false; @@ -234,26 +234,26 @@ static bool timeline_fence_enable_signaling(struct fence *fence) return true; } -static void timeline_fence_value_str(struct fence *fence, +static void timeline_fence_value_str(struct dma_fence *fence, char *str, int size) { snprintf(str, size, "%d", fence->seqno); } -static void timeline_fence_timeline_value_str(struct fence *fence, +static void timeline_fence_timeline_value_str(struct dma_fence *fence, char *str, int size) { - struct sync_timeline *parent = fence_parent(fence); + struct sync_timeline *parent = dma_fence_parent(fence); snprintf(str, size, "%d", parent->value); } -static const struct fence_ops timeline_fence_ops = { +static const struct dma_fence_ops timeline_fence_ops = { .get_driver_name = timeline_fence_get_driver_name, .get_timeline_name = timeline_fence_get_timeline_name, .enable_signaling = timeline_fence_enable_signaling, .signaled = timeline_fence_signaled, - .wait = fence_default_wait, + .wait = dma_fence_default_wait, .release = timeline_fence_release, .fence_value_str = timeline_fence_value_str, .timeline_value_str = timeline_fence_timeline_value_str, @@ -317,7 +317,7 @@ static long sw_sync_ioctl_create_fence(struct sync_timeline *obj, sync_file = sync_file_create(&pt->base); if (!sync_file) { - fence_put(&pt->base); + dma_fence_put(&pt->base); err = -ENOMEM; goto err; } diff --git a/drivers/dma-buf/sync_debug.c b/drivers/dma-buf/sync_debug.c index 2dd4c3db6caa..48b20e34fb6d 100644 --- a/drivers/dma-buf/sync_debug.c +++ b/drivers/dma-buf/sync_debug.c @@ -71,12 +71,13 @@ static const char *sync_status_str(int status) return "error"; } -static void sync_print_fence(struct seq_file *s, struct fence *fence, bool show) +static void sync_print_fence(struct seq_file *s, + struct dma_fence *fence, bool show) { int status = 1; - struct sync_timeline *parent = fence_parent(fence); + struct sync_timeline *parent = dma_fence_parent(fence); - if (fence_is_signaled_locked(fence)) + if (dma_fence_is_signaled_locked(fence)) status = fence->status; seq_printf(s, " %s%sfence %s", @@ -135,10 +136,10 @@ static void sync_print_sync_file(struct seq_file *s, int i; seq_printf(s, "[%p] %s: %s\n", sync_file, sync_file->name, - sync_status_str(!fence_is_signaled(sync_file->fence))); + sync_status_str(!dma_fence_is_signaled(sync_file->fence))); - if (fence_is_array(sync_file->fence)) { - struct fence_array *array = to_fence_array(sync_file->fence); + if (dma_fence_is_array(sync_file->fence)) { + struct dma_fence_array *array = to_dma_fence_array(sync_file->fence); for (i = 0; i < array->num_fences; ++i) sync_print_fence(s, array->fences[i], true); diff --git a/drivers/dma-buf/sync_debug.h b/drivers/dma-buf/sync_debug.h index d269aa6783aa..26fe8b9907b3 100644 --- a/drivers/dma-buf/sync_debug.h +++ b/drivers/dma-buf/sync_debug.h @@ -15,7 +15,7 @@ #include <linux/list.h> #include <linux/spinlock.h> -#include <linux/fence.h> +#include <linux/dma-fence.h> #include <linux/sync_file.h> #include <uapi/linux/sync_file.h> @@ -45,10 +45,9 @@ struct sync_timeline { struct list_head sync_timeline_list; }; -static inline struct sync_timeline *fence_parent(struct fence 
*fence) +static inline struct sync_timeline *dma_fence_parent(struct dma_fence *fence) { - return container_of(fence->lock, struct sync_timeline, - child_list_lock); + return container_of(fence->lock, struct sync_timeline, child_list_lock); } /** @@ -58,7 +57,7 @@ static inline struct sync_timeline *fence_parent(struct fence *fence) * @active_list: sync timeline active child's list */ struct sync_pt { - struct fence base; + struct dma_fence base; struct list_head child_list; struct list_head active_list; }; diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c index 235f8ac113cc..69d8ef98d34c 100644 --- a/drivers/dma-buf/sync_file.c +++ b/drivers/dma-buf/sync_file.c @@ -54,7 +54,7 @@ err: return NULL; } -static void fence_check_cb_func(struct fence *f, struct fence_cb *cb) +static void fence_check_cb_func(struct dma_fence *f, struct dma_fence_cb *cb) { struct sync_file *sync_file; @@ -71,7 +71,7 @@ static void fence_check_cb_func(struct fence *f, struct fence_cb *cb) * takes ownership of @fence. The sync_file can be released with * fput(sync_file->file). Returns the sync_file or NULL in case of error. */ -struct sync_file *sync_file_create(struct fence *fence) +struct sync_file *sync_file_create(struct dma_fence *fence) { struct sync_file *sync_file; @@ -79,7 +79,7 @@ struct sync_file *sync_file_create(struct fence *fence) if (!sync_file) return NULL; - sync_file->fence = fence_get(fence); + sync_file->fence = dma_fence_get(fence); snprintf(sync_file->name, sizeof(sync_file->name), "%s-%s%llu-%d", fence->ops->get_driver_name(fence), @@ -121,16 +121,16 @@ err: * Ensures @fd references a valid sync_file and returns a fence that * represents all fence in the sync_file. On error NULL is returned. */ -struct fence *sync_file_get_fence(int fd) +struct dma_fence *sync_file_get_fence(int fd) { struct sync_file *sync_file; - struct fence *fence; + struct dma_fence *fence; sync_file = sync_file_fdget(fd); if (!sync_file) return NULL; - fence = fence_get(sync_file->fence); + fence = dma_fence_get(sync_file->fence); fput(sync_file->file); return fence; @@ -138,22 +138,23 @@ struct fence *sync_file_get_fence(int fd) EXPORT_SYMBOL(sync_file_get_fence); static int sync_file_set_fence(struct sync_file *sync_file, - struct fence **fences, int num_fences) + struct dma_fence **fences, int num_fences) { - struct fence_array *array; + struct dma_fence_array *array; /* * The reference for the fences in the new sync_file and held * in add_fence() during the merge procedure, so for num_fences == 1 * we already own a new reference to the fence. For num_fence > 1 - * we own the reference of the fence_array creation. + * we own the reference of the dma_fence_array creation. 
*/ if (num_fences == 1) { sync_file->fence = fences[0]; kfree(fences); } else { - array = fence_array_create(num_fences, fences, - fence_context_alloc(1), 1, false); + array = dma_fence_array_create(num_fences, fences, + dma_fence_context_alloc(1), + 1, false); if (!array) return -ENOMEM; @@ -163,10 +164,11 @@ static int sync_file_set_fence(struct sync_file *sync_file, return 0; } -static struct fence **get_fences(struct sync_file *sync_file, int *num_fences) +static struct dma_fence **get_fences(struct sync_file *sync_file, + int *num_fences) { - if (fence_is_array(sync_file->fence)) { - struct fence_array *array = to_fence_array(sync_file->fence); + if (dma_fence_is_array(sync_file->fence)) { + struct dma_fence_array *array = to_dma_fence_array(sync_file->fence); *num_fences = array->num_fences; return array->fences; @@ -176,12 +178,13 @@ static struct fence **get_fences(struct sync_file *sync_file, int *num_fences) return &sync_file->fence; } -static void add_fence(struct fence **fences, int *i, struct fence *fence) +static void add_fence(struct dma_fence **fences, + int *i, struct dma_fence *fence) { fences[*i] = fence; - if (!fence_is_signaled(fence)) { - fence_get(fence); + if (!dma_fence_is_signaled(fence)) { + dma_fence_get(fence); (*i)++; } } @@ -200,7 +203,7 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a, struct sync_file *b) { struct sync_file *sync_file; - struct fence **fences, **nfences, **a_fences, **b_fences; + struct dma_fence **fences, **nfences, **a_fences, **b_fences; int i, i_a, i_b, num_fences, a_num_fences, b_num_fences; sync_file = sync_file_alloc(); @@ -226,8 +229,8 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a, * and sync_file_create, this is a reasonable assumption. */ for (i = i_a = i_b = 0; i_a < a_num_fences && i_b < b_num_fences; ) { - struct fence *pt_a = a_fences[i_a]; - struct fence *pt_b = b_fences[i_b]; + struct dma_fence *pt_a = a_fences[i_a]; + struct dma_fence *pt_b = b_fences[i_b]; if (pt_a->context < pt_b->context) { add_fence(fences, &i, pt_a); @@ -255,7 +258,7 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a, add_fence(fences, &i, b_fences[i_b]); if (i == 0) - fences[i++] = fence_get(a_fences[0]); + fences[i++] = dma_fence_get(a_fences[0]); if (num_fences > i) { nfences = krealloc(fences, i * sizeof(*fences), @@ -286,8 +289,8 @@ static void sync_file_free(struct kref *kref) kref); if (test_bit(POLL_ENABLED, &sync_file->fence->flags)) - fence_remove_callback(sync_file->fence, &sync_file->cb); - fence_put(sync_file->fence); + dma_fence_remove_callback(sync_file->fence, &sync_file->cb); + dma_fence_put(sync_file->fence); kfree(sync_file); } @@ -307,12 +310,12 @@ static unsigned int sync_file_poll(struct file *file, poll_table *wait) if (!poll_does_not_wait(wait) && !test_and_set_bit(POLL_ENABLED, &sync_file->fence->flags)) { - if (fence_add_callback(sync_file->fence, &sync_file->cb, - fence_check_cb_func) < 0) + if (dma_fence_add_callback(sync_file->fence, &sync_file->cb, + fence_check_cb_func) < 0) wake_up_all(&sync_file->wq); } - return fence_is_signaled(sync_file->fence) ? POLLIN : 0; + return dma_fence_is_signaled(sync_file->fence) ? 
POLLIN : 0; } static long sync_file_ioctl_merge(struct sync_file *sync_file, @@ -370,14 +373,14 @@ err_put_fd: return err; } -static void sync_fill_fence_info(struct fence *fence, +static void sync_fill_fence_info(struct dma_fence *fence, struct sync_fence_info *info) { strlcpy(info->obj_name, fence->ops->get_timeline_name(fence), sizeof(info->obj_name)); strlcpy(info->driver_name, fence->ops->get_driver_name(fence), sizeof(info->driver_name)); - if (fence_is_signaled(fence)) + if (dma_fence_is_signaled(fence)) info->status = fence->status >= 0 ? 1 : fence->status; else info->status = 0; @@ -389,7 +392,7 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file, { struct sync_file_info info; struct sync_fence_info *fence_info = NULL; - struct fence **fences; + struct dma_fence **fences; __u32 size; int num_fences, ret, i; @@ -429,7 +432,7 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file, no_fences: strlcpy(info.name, sync_file->name, sizeof(info.name)); - info.status = fence_is_signaled(sync_file->fence); + info.status = dma_fence_is_signaled(sync_file->fence); info.num_fences = num_fences; if (copy_to_user((void __user *)arg, &info, sizeof(info))) |