author | Rodrigo Vivi <rodrigo.vivi@intel.com> | 2024-04-17 16:39:43 -0400
committer | Rodrigo Vivi <rodrigo.vivi@intel.com> | 2024-04-18 08:31:39 -0400
commit | cbb6a7413b174637f35354675ecd7e1183091bfa (patch)
tree | d9a5cd3a1142b0e3a21bb848c5944c65942e9c41 /drivers
parent | 2817a1f1bfb1a2e8a4fb16dd307980216f831c46 (diff)
drm/xe: Introduce xe_pm_runtime_get_noresume for inner callers
Let's ensure that we have an option for inner callers that will
raise a WARN if the device is not active and not protected by outer
callers. Make this a void function as well, forcing every caller to
unconditionally put the reference back afterwards.
This will be very important for cases where we want to hold a
reference before scheduling work on a queue; the work item is then
responsible for putting the reference back.
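A minimal sketch of that deferred-work pattern, assuming a hypothetical
context struct and work handler (only xe_pm_runtime_get_noresume() and
xe_pm_runtime_put() come from this patch; INIT_WORK setup is elided):

	struct my_ctx {
		struct xe_device *xe;
		struct work_struct work;
	};

	static void my_work_fn(struct work_struct *w)
	{
		struct my_ctx *ctx = container_of(w, struct my_ctx, work);

		/* ... deferred processing ... */

		/* The work item owns the reference taken at schedule time. */
		xe_pm_runtime_put(ctx->xe);
	}

	static void my_schedule(struct my_ctx *ctx)
	{
		/*
		 * Called from a path already holding an outer runtime PM
		 * reference, so the device cannot suspend here; bump the
		 * usage counter without resuming.
		 */
		xe_pm_runtime_get_noresume(ctx->xe);
		queue_work(system_wq, &ctx->work);
	}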
While at it, convert one existing xe_device_mem_access_get_if_ongoing
case where the reference was taken only conditionally while the
corresponding put was unconditional, which would cause a
reference-count underflow.
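Condensed from the diff below, the asymmetry being fixed looked like this:

	/* init: reference taken only if the device was already active... */
	drm_WARN_ON(&xe->drm, !xe_device_mem_access_get_if_ongoing(xe));
	...
	/* ...but fini always drops one, underflowing when the get failed */
	xe_device_mem_access_put(gt_to_xe(q->gt));

After the conversion the get is unconditional, so the matching
xe_pm_runtime_put() is always balanced.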
v2: Fix indentation.
v3: Convert the equivalent missing put from mem_access to pm_runtime.
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240417203952.25503-1-rodrigo.vivi@intel.com
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/gpu/drm/xe/xe_exec_queue.c |  4
-rw-r--r-- | drivers/gpu/drm/xe/xe_pm.c | 20
-rw-r--r-- | drivers/gpu/drm/xe/xe_pm.h |  1
3 files changed, 23 insertions(+), 2 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c
index 71bd52dfebcf..50ec661116a2 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue.c
+++ b/drivers/gpu/drm/xe/xe_exec_queue.c
@@ -128,7 +128,7 @@ static int __xe_exec_queue_init(struct xe_exec_queue *q)
	 * already grabbed the rpm ref outside any sensitive locks.
	 */
	if (!(q->flags & EXEC_QUEUE_FLAG_PERMANENT) && (q->flags & EXEC_QUEUE_FLAG_VM || !q->vm))
-		drm_WARN_ON(&xe->drm, !xe_device_mem_access_get_if_ongoing(xe));
+		xe_pm_runtime_get_noresume(xe);

	return 0;

@@ -217,7 +217,7 @@ void xe_exec_queue_fini(struct xe_exec_queue *q)
	for (i = 0; i < q->width; ++i)
		xe_lrc_finish(q->lrc + i);
	if (!(q->flags & EXEC_QUEUE_FLAG_PERMANENT) && (q->flags & EXEC_QUEUE_FLAG_VM || !q->vm))
-		xe_device_mem_access_put(gt_to_xe(q->gt));
+		xe_pm_runtime_put(gt_to_xe(q->gt));

	__xe_exec_queue_free(q);
 }
diff --git a/drivers/gpu/drm/xe/xe_pm.c b/drivers/gpu/drm/xe/xe_pm.c
index f3fd003b6944..37339bb57229 100644
--- a/drivers/gpu/drm/xe/xe_pm.c
+++ b/drivers/gpu/drm/xe/xe_pm.c
@@ -486,6 +486,26 @@ bool xe_pm_runtime_get_if_in_use(struct xe_device *xe)
 }

 /**
+ * xe_pm_runtime_get_noresume - Bump runtime PM usage counter without resuming
+ * @xe: xe device instance
+ *
+ * This function should be used in inner places where it is surely already
+ * protected by outer-bound callers of `xe_pm_runtime_get`.
+ * It will warn if not protected.
+ * The reference should be put back after this function regardless, since it
+ * will always bump the usage counter, regardless.
+ */
+void xe_pm_runtime_get_noresume(struct xe_device *xe)
+{
+	bool ref;
+
+	ref = xe_pm_runtime_get_if_in_use(xe);
+
+	if (drm_WARN(&xe->drm, !ref, "Missing outer runtime PM protection\n"))
+		pm_runtime_get_noresume(xe->drm.dev);
+}
+
+/**
  * xe_pm_runtime_resume_and_get - Resume, then get a runtime_pm ref if awake.
  * @xe: xe device instance
  *
diff --git a/drivers/gpu/drm/xe/xe_pm.h b/drivers/gpu/drm/xe/xe_pm.h
index 0cb38ca244fe..119b630ad1d1 100644
--- a/drivers/gpu/drm/xe/xe_pm.h
+++ b/drivers/gpu/drm/xe/xe_pm.h
@@ -31,6 +31,7 @@ int xe_pm_runtime_get_ioctl(struct xe_device *xe);
 void xe_pm_runtime_put(struct xe_device *xe);
 int xe_pm_runtime_get_if_active(struct xe_device *xe);
 bool xe_pm_runtime_get_if_in_use(struct xe_device *xe);
+void xe_pm_runtime_get_noresume(struct xe_device *xe);
 bool xe_pm_runtime_resume_and_get(struct xe_device *xe);
 void xe_pm_assert_unbounded_bridge(struct xe_device *xe);
 int xe_pm_set_vram_threshold(struct xe_device *xe, u32 threshold);