-rw-r--r--  drivers/dma-buf/dma-buf.c                 |  5 ++---
-rw-r--r--  drivers/dma-buf/dma-resv.c                | 10 +++++-----
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c   |  2 +-
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c  |  2 +-
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gem.c     |  2 +-
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_busy.c  |  3 +--
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c             |  4 ++--
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c      |  2 +-
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fence.c   |  2 +-
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c   |  2 +-
-rw-r--r--  drivers/gpu/drm/radeon/radeon_sync.c      |  2 +-
-rw-r--r--  drivers/gpu/drm/radeon/radeon_uvd.c       |  2 +-
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c              |  2 +-
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c  |  2 +-
-rw-r--r--  include/linux/dma-resv.h                  | 14 ++++++--------
15 files changed, 26 insertions(+), 30 deletions(-)
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index ee04fb442015..d419cf90ee73 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -234,7 +234,7 @@ retry:
shared_count = fobj->shared_count;
else
shared_count = 0;
- fence_excl = rcu_dereference(resv->fence_excl);
+ fence_excl = dma_resv_excl_fence(resv);
if (read_seqcount_retry(&resv->seq, seq)) {
rcu_read_unlock();
goto retry;
@@ -1382,8 +1382,7 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
buf_obj->name ?: "");
robj = buf_obj->resv;
- fence = rcu_dereference_protected(robj->fence_excl,
- dma_resv_held(robj));
+ fence = dma_resv_excl_fence(robj);
if (fence)
seq_printf(s, "\tExclusive fence: %s %s %ssignalled\n",
fence->ops->get_driver_name(fence),
diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
index 6132ba631991..ed7b4e8f002f 100644
--- a/drivers/dma-buf/dma-resv.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -284,7 +284,7 @@ EXPORT_SYMBOL(dma_resv_add_shared_fence);
*/
void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
{
- struct dma_fence *old_fence = dma_resv_get_excl(obj);
+ struct dma_fence *old_fence = dma_resv_excl_fence(obj);
struct dma_resv_list *old;
u32 i = 0;
@@ -380,7 +380,7 @@ retry:
rcu_read_unlock();
src_list = dma_resv_get_list(dst);
- old = dma_resv_get_excl(dst);
+ old = dma_resv_excl_fence(dst);
write_seqcount_begin(&dst->seq);
/* write_seqcount_begin provides the necessary memory barrier */
@@ -428,7 +428,7 @@ int dma_resv_get_fences_rcu(struct dma_resv *obj,
rcu_read_lock();
seq = read_seqcount_begin(&obj->seq);
- fence_excl = rcu_dereference(obj->fence_excl);
+ fence_excl = dma_resv_excl_fence(obj);
if (fence_excl && !dma_fence_get_rcu(fence_excl))
goto unlock;
@@ -523,7 +523,7 @@ retry:
rcu_read_lock();
i = -1;
- fence = rcu_dereference(obj->fence_excl);
+ fence = dma_resv_excl_fence(obj);
if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
if (!dma_fence_get_rcu(fence))
goto unlock_retry;
@@ -645,7 +645,7 @@ retry:
}
if (!shared_count) {
- struct dma_fence *fence_excl = rcu_dereference(obj->fence_excl);
+ struct dma_fence *fence_excl = dma_resv_excl_fence(obj);
if (fence_excl) {
ret = dma_resv_test_signaled_single(fence_excl);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 73c76a3e2b12..7d5aaf584634 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -226,7 +226,7 @@ static void amdgpu_gem_object_close(struct drm_gem_object *obj,
if (!amdgpu_vm_ready(vm))
goto out_unlock;
- fence = dma_resv_get_excl(bo->tbo.base.resv);
+ fence = dma_resv_excl_fence(bo->tbo.base.resv);
if (fence) {
amdgpu_bo_fence(bo, fence, true);
fence = NULL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index 4e558632a5d2..2bdc9df5c6b9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -210,7 +210,7 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
return -EINVAL;
/* always sync to the exclusive fence */
- f = dma_resv_get_excl(resv);
+ f = dma_resv_excl_fence(resv);
r = amdgpu_sync_fence(sync, f);
flist = dma_resv_get_list(resv);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index db69f19ab5bc..2237fe5204d0 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -471,7 +471,7 @@ static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
}
}
- fence = rcu_dereference(robj->fence_excl);
+ fence = dma_resv_excl_fence(robj);
if (fence)
etnaviv_gem_describe_fence(fence, "Exclusive", m);
rcu_read_unlock();
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_busy.c b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
index 25235ef630c1..088d375b3395 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_busy.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
@@ -113,8 +113,7 @@ retry:
seq = raw_read_seqcount(&obj->base.resv->seq);
/* Translate the exclusive fence to the READ *and* WRITE engine */
- args->busy =
- busy_check_writer(rcu_dereference(obj->base.resv->fence_excl));
+ args->busy = busy_check_writer(dma_resv_excl_fence(obj->base.resv));
/* Translate shared fences to READ set of engines */
list = rcu_dereference(obj->base.resv->fence);
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 56df86e5f740..a5a2a922e3e8 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -819,7 +819,7 @@ int msm_gem_sync_object(struct drm_gem_object *obj,
fobj = dma_resv_get_list(obj->resv);
if (!fobj || (fobj->shared_count == 0)) {
- fence = dma_resv_get_excl(obj->resv);
+ fence = dma_resv_excl_fence(obj->resv);
/* don't need to wait on our own fences, since ring is fifo */
if (fence && (fence->context != fctx->context)) {
ret = dma_fence_wait(fence, true);
@@ -1035,7 +1035,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
}
}
- fence = rcu_dereference(robj->fence_excl);
+ fence = dma_resv_excl_fence(robj);
if (fence)
describe_fence(fence, "Exclusive", m);
rcu_read_unlock();
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index c3d20bc80022..520b1ea9d16c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -951,7 +951,7 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
{
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct drm_device *dev = drm->dev;
- struct dma_fence *fence = dma_resv_get_excl(bo->base.resv);
+ struct dma_fence *fence = dma_resv_excl_fence(bo->base.resv);
nv10_bo_put_tile_region(dev, *old_tile, fence);
*old_tile = new_tile;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index e5dcbf67de7e..19c096de5bdc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -356,7 +356,7 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e
}
fobj = dma_resv_get_list(resv);
- fence = dma_resv_get_excl(resv);
+ fence = dma_resv_excl_fence(resv);
if (fence && (!exclusive || !fobj || !fobj->shared_count)) {
struct nouveau_channel *prev = NULL;
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 652af7a134bd..406681317419 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -533,7 +533,7 @@ static int radeon_crtc_page_flip_target(struct drm_crtc *crtc,
DRM_ERROR("failed to pin new rbo buffer before flip\n");
goto cleanup;
}
- work->fence = dma_fence_get(dma_resv_get_excl(new_rbo->tbo.base.resv));
+ work->fence = dma_fence_get(dma_resv_excl_fence(new_rbo->tbo.base.resv));
radeon_bo_get_tiling_flags(new_rbo, &tiling_flags, NULL);
radeon_bo_unreserve(new_rbo);
diff --git a/drivers/gpu/drm/radeon/radeon_sync.c b/drivers/gpu/drm/radeon/radeon_sync.c
index 5d3302945076..c8a1711325de 100644
--- a/drivers/gpu/drm/radeon/radeon_sync.c
+++ b/drivers/gpu/drm/radeon/radeon_sync.c
@@ -98,7 +98,7 @@ int radeon_sync_resv(struct radeon_device *rdev,
int r = 0;
/* always sync to the exclusive fence */
- f = dma_resv_get_excl(resv);
+ f = dma_resv_excl_fence(resv);
fence = f ? to_radeon_fence(f) : NULL;
if (fence && fence->rdev == rdev)
radeon_sync_fence(sync, fence);
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index dfa9fdbe98da..1f5b1a5c0a09 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -477,7 +477,7 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
return -EINVAL;
}
- f = dma_resv_get_excl(bo->tbo.base.resv);
+ f = dma_resv_excl_fence(bo->tbo.base.resv);
if (f) {
r = radeon_fence_wait((struct radeon_fence *)f, false);
if (r) {
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 4ed56520b81d..1752f8e523e7 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -262,7 +262,7 @@ static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
rcu_read_lock();
fobj = rcu_dereference(resv->fence);
- fence = rcu_dereference(resv->fence_excl);
+ fence = dma_resv_excl_fence(resv);
if (fence && !fence->ops->signaled)
dma_fence_enable_sw_signaling(fence);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 62ea920addc3..7b45393ad98e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -1166,7 +1166,7 @@ int vmw_resources_clean(struct vmw_buffer_object *vbo, pgoff_t start,
if (bo->moving)
dma_fence_put(bo->moving);
bo->moving = dma_fence_get
- (dma_resv_get_excl(bo->base.resv));
+ (dma_resv_excl_fence(bo->base.resv));
}
return 0;
diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
index f32a3d176513..e3a7f740bb06 100644
--- a/include/linux/dma-resv.h
+++ b/include/linux/dma-resv.h
@@ -226,22 +226,20 @@ static inline void dma_resv_unlock(struct dma_resv *obj)
}
/**
- * dma_resv_get_excl - get the reservation object's
- * exclusive fence, with update-side lock held
+ * dma_resv_excl_fence - return the object's exclusive fence
* @obj: the reservation object
*
- * Returns the exclusive fence (if any). Does NOT take a
- * reference. Writers must hold obj->lock, readers may only
- * hold a RCU read side lock.
+ * Returns the exclusive fence (if any). Callers must either hold the object
+ * locked through dma_resv_lock() or the RCU read side lock through
+ * rcu_read_lock(), or one of the variants of each.
*
* RETURNS
* The exclusive fence or NULL
*/
static inline struct dma_fence *
-dma_resv_get_excl(struct dma_resv *obj)
+dma_resv_excl_fence(struct dma_resv *obj)
{
- return rcu_dereference_protected(obj->fence_excl,
- dma_resv_held(obj));
+ return rcu_dereference_check(obj->fence_excl, dma_resv_held(obj));
}
/**
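
For illustration only (not part of the patch), a minimal sketch of how an RCU-side reader might use the renamed helper. The wrapper name my_driver_wait_excl is hypothetical; dma_resv_excl_fence(), dma_fence_get_rcu(), dma_fence_wait() and dma_fence_put() are the kernel APIs seen in the hunks above:

/* Hypothetical sketch: dma_resv_excl_fence() does NOT take a reference,
 * so an RCU reader must pin the fence with dma_fence_get_rcu() before
 * leaving the read-side critical section. Core code additionally pairs
 * such reads with a seqcount retry loop (see dma_resv_get_fences_rcu()
 * above); this sketch tolerates a momentarily stale fence.
 */
#include <linux/dma-fence.h>
#include <linux/dma-resv.h>

static int my_driver_wait_excl(struct dma_resv *resv)
{
	struct dma_fence *fence;

	rcu_read_lock();
	fence = dma_resv_excl_fence(resv);
	if (fence && !dma_fence_get_rcu(fence))
		fence = NULL;	/* fence was already being freed */
	rcu_read_unlock();

	if (fence) {
		long ret = dma_fence_wait(fence, true);	/* interruptible */

		dma_fence_put(fence);
		if (ret < 0)
			return ret;
	}
	return 0;
}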