author	Vinay Belgaumkar <vinay.belgaumkar@intel.com>	2022-10-24 10:11:08 -0700
committer	John Harrison <John.C.Harrison@Intel.com>	2022-10-26 12:20:25 -0700
commit	f864a29afc32d3c0c2d7a34d71b49a8f92306aaa (patch)
tree	94b9e5309ac61f7726f5953e81143efee46cf570
parent	e62f31e1739d33a1a377cc77b0e89aff21f23c4c (diff)
drm/i915/slpc: Optimize waitboost for SLPC
Waitboost (when SLPC is enabled) results in an H2G message. This can
result in thousands of messages during a stress test and fill up an
already full CTB. There is no need to request a boost if the min
softlimit is already greater than or equal to the boost frequency.

v2: Add the tracing back, and check requested freq in the worker
    thread (Tvrtko)
v3: Check requested freq in dec_waiters as well
v4: Only check min_softlimit against boost_freq. Limit this
    optimization to server parts for now.
v5: min_softlimit can be greater than boost (Ashutosh)

Reviewed-by: Ashutosh Dixit <ashutosh.dixit@intel.com>
Signed-off-by: Vinay Belgaumkar <vinay.belgaumkar@intel.com>
Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20221024171108.14373-1-vinay.belgaumkar@intel.com
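For context, the optimization reduces to two guards, sketched below as a
self-contained user-space C program using C11 atomics. This is an
illustration only: struct slpc_state and should_request_boost() are
hypothetical names, and the real driver operates on struct intel_guc_slpc
and issues the boost via schedule_work() and an H2G message.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the driver's struct intel_guc_slpc. */
struct slpc_state {
	unsigned int min_freq_softlimit; /* current min frequency softlimit */
	unsigned int boost_freq;         /* frequency a waitboost would request */
	atomic_int num_waiters;          /* outstanding boosting waiters */
};

/*
 * Mirror of the early-out and first-waiter logic in intel_rps_boost():
 * returns true only when this caller should issue the expensive boost
 * request (the H2G message in the real driver).
 */
static bool should_request_boost(struct slpc_state *slpc)
{
	/* Boosting cannot raise the frequency any further: skip it. */
	if (slpc->min_freq_softlimit >= slpc->boost_freq)
		return false;

	/*
	 * atomic_fetch_add() returns the old value, so only the waiter
	 * that moves the count from 0 to 1 requests the boost; later
	 * waiters merely bump the count.
	 */
	return atomic_fetch_add(&slpc->num_waiters, 1) == 0;
}

int main(void)
{
	struct slpc_state slpc = {
		.min_freq_softlimit = 300,
		.boost_freq = 1200,
	};

	printf("first waiter boosts:  %d\n", should_request_boost(&slpc)); /* 1 */
	printf("second waiter boosts: %d\n", should_request_boost(&slpc)); /* 0 */

	/* A part whose min softlimit is pinned at/above boost never boosts. */
	slpc.min_freq_softlimit = 1200;
	printf("pinned part boosts:   %d\n", should_request_boost(&slpc)); /* 0 */
	return 0;
}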
-rw-r--r--	drivers/gpu/drm/i915/gt/intel_rps.c	8
1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
index 070005dd0da4..6c34a83c24b3 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.c
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -1014,9 +1014,15 @@ void intel_rps_boost(struct i915_request *rq)
 	if (rps_uses_slpc(rps)) {
 		slpc = rps_to_slpc(rps);
 
+		if (slpc->min_freq_softlimit >= slpc->boost_freq)
+			return;
+
 		/* Return if old value is non zero */
-		if (!atomic_fetch_inc(&slpc->num_waiters))
+		if (!atomic_fetch_inc(&slpc->num_waiters)) {
+			GT_TRACE(rps_to_gt(rps), "boost fence:%llx:%llx\n",
+				 rq->fence.context, rq->fence.seqno);
 			schedule_work(&slpc->boost_work);
+		}
 
 		return;
 	}
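A note on the fast path: atomic_fetch_inc() returns the pre-increment
value, so the GT_TRACE() and schedule_work() calls run only for the
waiter that takes num_waiters from zero to one; all later waiters just
bump the counter and return. Together with the new softlimit check, a
stress test that previously queued thousands of H2G boost requests
should now queue far fewer, and none at all on parts whose min
softlimit is already at or above the boost frequency.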