author		Jie Zhan <zhanjie9@hisilicon.com>	2024-09-29 11:32:13 +0800
committer	Viresh Kumar <viresh.kumar@linaro.org>	2024-10-03 11:07:53 +0530
commit	c47195631960b626058c335aec31f186fa854f97 (patch)
tree	f4d8e6f0b9c2353bba211e3c36320c4f138cf6fb /drivers/cpufreq
parent	dfb6e2f51beeaa77b2c07aabe58b833eb02ace81 (diff)
cppc_cpufreq: Use desired perf if feedback ctrs are 0 or unchanged
The CPPC performance feedback counters could be 0 or unchanged when the
target CPU is in a low-power idle state, e.g. power-gated or clock-gated.

When the counters are 0, cppc_cpufreq_get_rate() returns 0 KHz, which makes
cpufreq_online() see a false error and fail to generate a cpufreq policy.

When the counters are unchanged, the existing cppc_perf_from_fbctrs()
returns a cached desired perf, but some platforms may update the real
frequency back to the desired perf register.

For the above cases in cppc_cpufreq_get_rate(), get the latest desired perf
from the CPPC register to reflect the frequency, because some platforms may
update the actual frequency back there; if that fails, use the cached
desired perf.

Fixes: 6a4fec4f6d30 ("cpufreq: cppc: cppc_cpufreq_get_rate() returns zero in all error cases.")
Signed-off-by: Jie Zhan <zhanjie9@hisilicon.com>
Reviewed-by: Zeng Heng <zengheng4@huawei.com>
Reviewed-by: Ionela Voinescu <ionela.voinescu@arm.com>
Reviewed-by: Huisong Li <lihuisong@huawei.com>
Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
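In short, cppc_cpufreq_get_rate() now falls back to the desired perf register
whenever the feedback counters are unusable. The following is a condensed
sketch of the resulting flow, paraphrased from the diff below rather than a
literal excerpt (only helpers that appear in the diff are used; the policy
lookup and some error handling are abbreviated):

	/*
	 * Condensed sketch of the new cppc_cpufreq_get_rate() flow,
	 * paraphrased from the diff below.
	 */
	ret = cppc_get_perf_ctrs_sample(cpu, &fb_ctrs_t0, &fb_ctrs_t1);
	if (ret && ret != -EFAULT)
		return 0;	/* genuine read failure */

	/* -EFAULT means one of the CPPC regs read as 0, so skip the counter math. */
	delivered_perf = ret ? 0 : cppc_perf_from_fbctrs(cpu_data, &fb_ctrs_t0,
							 &fb_ctrs_t1);
	if (!delivered_perf) {
		/* Counters were 0 or unchanged: prefer the desired perf register. */
		if (cppc_get_desired_perf(cpu, &delivered_perf))
			delivered_perf = cpu_data->perf_ctrls.desired_perf;
	}

	return cppc_perf_to_khz(&cpu_data->perf_caps, delivered_perf);

The key point is that an -EFAULT from the sampling helper and a zero result
from cppc_perf_from_fbctrs() are folded into the same fallback path, so both
the "counters are 0" and "counters are unchanged" cases end up reporting the
desired perf instead of a bogus 0 KHz rate.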
Diffstat (limited to 'drivers/cpufreq')
-rw-r--r--	drivers/cpufreq/cppc_cpufreq.c	57
1 file changed, 46 insertions(+), 11 deletions(-)
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index 1a5ad184d28f..646b466002c6 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -118,6 +118,9 @@ static void cppc_scale_freq_workfn(struct kthread_work *work)
 
 	perf = cppc_perf_from_fbctrs(cpu_data, &cppc_fi->prev_perf_fb_ctrs,
 				     &fb_ctrs);
+	if (!perf)
+		return;
+
 	cppc_fi->prev_perf_fb_ctrs = fb_ctrs;
 
 	perf <<= SCHED_CAPACITY_SHIFT;
@@ -724,13 +727,31 @@ static int cppc_perf_from_fbctrs(struct cppc_cpudata *cpu_data,
 	delta_delivered = get_delta(fb_ctrs_t1->delivered,
 				    fb_ctrs_t0->delivered);
 
-	/* Check to avoid divide-by zero and invalid delivered_perf */
+	/*
+	 * Avoid divide-by zero and unchanged feedback counters.
+	 * Leave it for callers to handle.
+	 */
 	if (!delta_reference || !delta_delivered)
-		return cpu_data->perf_ctrls.desired_perf;
+		return 0;
 
 	return (reference_perf * delta_delivered) / delta_reference;
 }
 
+static int cppc_get_perf_ctrs_sample(int cpu,
+				     struct cppc_perf_fb_ctrs *fb_ctrs_t0,
+				     struct cppc_perf_fb_ctrs *fb_ctrs_t1)
+{
+	int ret;
+
+	ret = cppc_get_perf_ctrs(cpu, fb_ctrs_t0);
+	if (ret)
+		return ret;
+
+	udelay(2); /* 2usec delay between sampling */
+
+	return cppc_get_perf_ctrs(cpu, fb_ctrs_t1);
+}
+
 static unsigned int cppc_cpufreq_get_rate(unsigned int cpu)
 {
 	struct cppc_perf_fb_ctrs fb_ctrs_t0 = {0}, fb_ctrs_t1 = {0};
@@ -746,18 +767,32 @@ static unsigned int cppc_cpufreq_get_rate(unsigned int cpu)
 
 	cpufreq_cpu_put(policy);
 
-	ret = cppc_get_perf_ctrs(cpu, &fb_ctrs_t0);
-	if (ret)
-		return 0;
-
-	udelay(2); /* 2usec delay between sampling */
-
-	ret = cppc_get_perf_ctrs(cpu, &fb_ctrs_t1);
-	if (ret)
-		return 0;
+	ret = cppc_get_perf_ctrs_sample(cpu, &fb_ctrs_t0, &fb_ctrs_t1);
+	if (ret) {
+		if (ret == -EFAULT)
+			/* Any of the associated CPPC regs is 0. */
+			goto out_invalid_counters;
+		else
+			return 0;
+	}
 
 	delivered_perf = cppc_perf_from_fbctrs(cpu_data, &fb_ctrs_t0,
 					       &fb_ctrs_t1);
+	if (!delivered_perf)
+		goto out_invalid_counters;
+
+	return cppc_perf_to_khz(&cpu_data->perf_caps, delivered_perf);
+
+out_invalid_counters:
+	/*
+	 * Feedback counters could be unchanged or 0 when a cpu enters a
+	 * low-power idle state, e.g. clock-gated or power-gated.
+	 * Use desired perf for reflecting frequency. Get the latest register
+	 * value first as some platforms may update the actual delivered perf
+	 * there; if failed, resort to the cached desired perf.
+	 */
+	if (cppc_get_desired_perf(cpu, &delivered_perf))
+		delivered_perf = cpu_data->perf_ctrls.desired_perf;
 
 	return cppc_perf_to_khz(&cpu_data->perf_caps, delivered_perf);
 }