author    Rob Clark <robdclark@chromium.org>  2023-03-20 07:43:31 -0700
committer Rob Clark <robdclark@chromium.org>  2023-03-25 16:31:44 -0700
commit    17b704f1c0fb9150551567cb7a5414fb761b57ea (patch)
tree      04a5576c88cec987b31d6ebdb549ce893367174d /drivers/gpu/drm/msm/msm_gem.c
parent    6c7c8fb863f7c3137ce5fc4b435af3891083abeb (diff)
drm/msm/gem: Avoid obj lock in job_run()
Now that everything that controls which LRU an obj lives in *except* the
backing pages is protected by the LRU lock, add a special path to unpin
in the job_run() path, where we are assured that we already have backing
pages and will not be racing against eviction (because the GEM object's
dma_resv contains the fence that will be signaled when the submit/job
completes).

Signed-off-by: Rob Clark <robdclark@chromium.org>
Patchwork: https://patchwork.freedesktop.org/patch/527845/
Link: https://lore.kernel.org/r/20230320144356.803762-10-robdclark@gmail.com
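For context, here is a minimal caller-side sketch of the fence-signaling (job_run/retire) path this enables. It is not part of the patch: the real caller change lives elsewhere in the series, and the function and parameter names below (retire_job_bos(), bos, nr) are assumptions for illustration only.

#include "msm_gem.h"

/* Illustrative only: unpin a job's buffers from the fence-signaling path
 * without taking any obj (dma_resv) lock.  retire_job_bos(), bos and nr
 * are hypothetical names, not the driver's actual structures.
 */
static void retire_job_bos(struct drm_gem_object **bos, unsigned int nr)
{
	unsigned int i;

	for (i = 0; i < nr; i++) {
		/* Safe without the obj lock: the job's fence is already in
		 * bos[i]->resv, so the shrinker cannot evict the backing
		 * pages, and pin_count plus LRU placement are updated under
		 * priv->lru.lock inside msm_gem_unpin_active().
		 */
		msm_gem_unpin_active(bos[i]);
	}
}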
Diffstat (limited to 'drivers/gpu/drm/msm/msm_gem.c')
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c | 44
1 file changed, 37 insertions(+), 7 deletions(-)
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index b9171ac1a79a..9008f967637a 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -61,18 +61,14 @@ static void sync_for_cpu(struct msm_gem_object *msm_obj)
 	dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
 }
 
-static void update_lru_locked(struct drm_gem_object *obj)
+static void update_lru_active(struct drm_gem_object *obj)
 {
 	struct msm_drm_private *priv = obj->dev->dev_private;
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 
-	msm_gem_assert_locked(&msm_obj->base);
-
-	if (!msm_obj->pages) {
-		GEM_WARN_ON(msm_obj->pin_count);
+	GEM_WARN_ON(!msm_obj->pages);
 
-		drm_gem_lru_move_tail_locked(&priv->lru.unbacked, obj);
-	} else if (msm_obj->pin_count) {
+	if (msm_obj->pin_count) {
 		drm_gem_lru_move_tail_locked(&priv->lru.pinned, obj);
 	} else if (msm_obj->madv == MSM_MADV_WILLNEED) {
 		drm_gem_lru_move_tail_locked(&priv->lru.willneed, obj);
@@ -83,6 +79,22 @@ static void update_lru_locked(struct drm_gem_object *obj)
 	}
 }
 
+static void update_lru_locked(struct drm_gem_object *obj)
+{
+	struct msm_drm_private *priv = obj->dev->dev_private;
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+	msm_gem_assert_locked(&msm_obj->base);
+
+	if (!msm_obj->pages) {
+		GEM_WARN_ON(msm_obj->pin_count);
+
+		drm_gem_lru_move_tail_locked(&priv->lru.unbacked, obj);
+	} else {
+		update_lru_active(obj);
+	}
+}
+
 static void update_lru(struct drm_gem_object *obj)
 {
 	struct msm_drm_private *priv = obj->dev->dev_private;
@@ -489,6 +501,24 @@ void msm_gem_unpin_locked(struct drm_gem_object *obj)
 	mutex_unlock(&priv->lru.lock);
 }
 
+/* Special unpin path for use in the fence-signaling path, avoiding the
+ * need to hold the obj lock by only depending on things that are
+ * protected by the LRU lock.  In particular we know that we already have
+ * backing and that the object's dma_resv has the fence for the current
+ * submit/job, which will prevent us racing against page eviction.
+ */
+void msm_gem_unpin_active(struct drm_gem_object *obj)
+{
+	struct msm_drm_private *priv = obj->dev->dev_private;
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+	mutex_lock(&priv->lru.lock);
+	msm_obj->pin_count--;
+	GEM_WARN_ON(msm_obj->pin_count < 0);
+	update_lru_active(obj);
+	mutex_unlock(&priv->lru.lock);
+}
+
 struct msm_gem_vma *msm_gem_get_vma_locked(struct drm_gem_object *obj,
 		struct msm_gem_address_space *aspace)
 {
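
For comparison, a hedged sketch of the before/after locking pattern on the job_run() side. The actual caller is not shown in this diff, so the wrapper names below are assumptions; only the locking pattern is the point.

#include "msm_gem.h"

/* Hypothetical wrappers; only the locking pattern matters here. */
static void unpin_fence_path_before(struct drm_gem_object *obj)
{
	msm_gem_lock(obj);		/* dma_resv lock taken in a fence-signaling path */
	msm_gem_unpin_locked(obj);
	msm_gem_unlock(obj);
}

static void unpin_fence_path_after(struct drm_gem_object *obj)
{
	msm_gem_unpin_active(obj);	/* only priv->lru.lock, taken inside the helper */
}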