author    Eric Anholt <eric@anholt.net>    2019-03-08 08:17:13 -0800
committer Eric Anholt <eric@anholt.net>    2019-03-14 09:22:57 -0700
commit    7edc3e3b975b5b3b1de313f43670738acbcc8e1d
tree      6638e02736531cf213248528b8362277c83fa72a
parent    f435fe83d56b8b804c4204246bccba7749f605f9
drm: Add helpers for locking an array of BO reservations.
Now that we have the reservation object in the GEM object, it's easy
to provide a helper for this common case. Noticed while reviewing
panfrost and lima drivers. This particular version came out of v3d,
which in turn was a copy from vc4.

v2: Fix kerneldoc warnings.

Signed-off-by: Eric Anholt <eric@anholt.net>
Link: https://patchwork.freedesktop.org/patch/msgid/20190308161716.2466-2-eric@anholt.net
Acked-by: Rob Herring <robh@kernel.org> (v1)
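As a quick illustration (not part of the patch), a driver submit path built on the new helpers could look like the sketch below; my_driver_submit, my_job, job->bos and job->bo_count are hypothetical names:

/* Hypothetical submit path using the new helpers. */
static int my_driver_submit(struct my_job *job)
{
	struct ww_acquire_ctx acquire_ctx;
	int ret;

	/* Lock every BO's reservation, with ww-mutex deadlock backoff. */
	ret = drm_gem_lock_reservations(job->bos, job->bo_count,
					&acquire_ctx);
	if (ret)
		return ret;	/* helper returns with nothing held */

	/* ... reserve fence slots, queue the job to the hardware ... */

	drm_gem_unlock_reservations(job->bos, job->bo_count,
				    &acquire_ctx);
	return 0;
}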
Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--  drivers/gpu/drm/drm_gem.c | 78
1 file changed, 78 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index ad124f5a6f4d..388b3742e562 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -1233,3 +1233,81 @@ void drm_gem_vunmap(struct drm_gem_object *obj, void *vaddr)
obj->dev->driver->gem_prime_vunmap(obj, vaddr);
}
EXPORT_SYMBOL(drm_gem_vunmap);
+
+/**
+ * drm_gem_lock_reservations - Sets up the ww context and acquires
+ * the lock on an array of GEM objects.
+ *
+ * Once you've locked your reservations, you'll want to set up space
+ * for your shared fences (if applicable), submit your job, then
+ * drm_gem_unlock_reservations().
+ *
+ * @objs: drm_gem_objects to lock
+ * @count: Number of objects in @objs
+ * @acquire_ctx: struct ww_acquire_ctx that will be initialized as
+ * part of tracking this set of locked reservations.
+ */
+int
+drm_gem_lock_reservations(struct drm_gem_object **objs, int count,
+ struct ww_acquire_ctx *acquire_ctx)
+{
+ int contended = -1;
+ int i, ret;
+
+ ww_acquire_init(acquire_ctx, &reservation_ww_class);
+
+retry:
+ if (contended != -1) {
+ struct drm_gem_object *obj = objs[contended];
+
+ ret = ww_mutex_lock_slow_interruptible(&obj->resv->lock,
+ acquire_ctx);
+ if (ret) {
+ ww_acquire_done(acquire_ctx);
+ return ret;
+ }
+ }
+
+ for (i = 0; i < count; i++) {
+ if (i == contended)
+ continue;
+
+ ret = ww_mutex_lock_interruptible(&objs[i]->resv->lock,
+ acquire_ctx);
+ if (ret) {
+ int j;
+
+ for (j = 0; j < i; j++)
+ ww_mutex_unlock(&objs[j]->resv->lock);
+
+ if (contended != -1 && contended >= i)
+ ww_mutex_unlock(&objs[contended]->resv->lock);
+
+ if (ret == -EDEADLK) {
+ contended = i;
+ goto retry;
+ }
+
+ ww_acquire_done(acquire_ctx);
+ return ret;
+ }
+ }
+
+ ww_acquire_done(acquire_ctx);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_gem_lock_reservations);
+
+void
+drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
+ struct ww_acquire_ctx *acquire_ctx)
+{
+ int i;
+
+ for (i = 0; i < count; i++)
+ ww_mutex_unlock(&objs[i]->resv->lock);
+
+ ww_acquire_fini(acquire_ctx);
+}
+EXPORT_SYMBOL(drm_gem_unlock_reservations);
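The kerneldoc's "set up space for your shared fences" step corresponds, in this era of the kernel, to the reservation_object API (later renamed dma_resv); a minimal sketch of that bookkeeping between the lock and unlock calls, where job and job->done_fence are hypothetical driver state:

/*
 * Hedged sketch: reserve one shared-fence slot per BO while the
 * reservations are locked, then publish the job's completion fence.
 * "job" and "done_fence" are illustrative, not from this patch.
 */
int i, ret;

for (i = 0; i < job->bo_count; i++) {
	ret = reservation_object_reserve_shared(job->bos[i]->resv, 1);
	if (ret)
		goto unlock;	/* drm_gem_unlock_reservations() path */
}

/* ... push the job; hardware signals job->done_fence when done ... */

for (i = 0; i < job->bo_count; i++)
	reservation_object_add_shared_fence(job->bos[i]->resv,
					    job->done_fence);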