Diffstat (limited to 'drivers/cpufreq')
 drivers/cpufreq/amd-pstate.c   | 36
 drivers/cpufreq/cpufreq.c      | 32
 drivers/cpufreq/intel_pstate.c |  2
 3 files changed, 41 insertions(+), 29 deletions(-)
diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
index 6789eed1bb5b..b961f3a3b580 100644
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -607,13 +607,16 @@ static void amd_pstate_update_min_max_limit(struct cpufreq_policy *policy)
 	union perf_cached perf = READ_ONCE(cpudata->perf);

 	perf.max_limit_perf = freq_to_perf(perf, cpudata->nominal_freq, policy->max);
-	perf.min_limit_perf = freq_to_perf(perf, cpudata->nominal_freq, policy->min);
+	WRITE_ONCE(cpudata->max_limit_freq, policy->max);

-	if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
+	if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE) {
 		perf.min_limit_perf = min(perf.nominal_perf, perf.max_limit_perf);
+		WRITE_ONCE(cpudata->min_limit_freq, min(cpudata->nominal_freq, cpudata->max_limit_freq));
+	} else {
+		perf.min_limit_perf = freq_to_perf(perf, cpudata->nominal_freq, policy->min);
+		WRITE_ONCE(cpudata->min_limit_freq, policy->min);
+	}

-	WRITE_ONCE(cpudata->max_limit_freq, policy->max);
-	WRITE_ONCE(cpudata->min_limit_freq, policy->min);
 	WRITE_ONCE(cpudata->perf, perf);
 }
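
The interesting part of this hunk is the performance-governor branch: min_limit_freq is now derived from nominal_freq yet capped by the freshly stored max_limit_freq, rather than taken straight from policy->min. A minimal userspace sketch of that flow (hypothetical toy_cpudata type and helper names; plain assignments stand in for WRITE_ONCE):

    /* Toy model, not kernel code: mirrors the new limit-update flow above. */
    #include <stdio.h>

    #define POLICY_PERFORMANCE 1

    struct toy_cpudata {
        unsigned int nominal_freq;   /* kHz */
        unsigned int max_limit_freq; /* kHz */
        unsigned int min_limit_freq; /* kHz */
        int policy;
    };

    static unsigned int min_u(unsigned int a, unsigned int b)
    {
        return a < b ? a : b;
    }

    static void update_min_max_limit(struct toy_cpudata *d,
                                     unsigned int pol_min, unsigned int pol_max)
    {
        d->max_limit_freq = pol_max;

        if (d->policy == POLICY_PERFORMANCE)
            /* floor at nominal, but never above the max limit */
            d->min_limit_freq = min_u(d->nominal_freq, d->max_limit_freq);
        else
            d->min_limit_freq = pol_min;
    }

    int main(void)
    {
        struct toy_cpudata d = { .nominal_freq = 3000000,
                                 .policy = POLICY_PERFORMANCE };

        update_min_max_limit(&d, 400000, 2000000);
        /* nominal (3 GHz) exceeds policy max (2 GHz): min collapses to max */
        printf("min=%u max=%u\n", d.min_limit_freq, d.max_limit_freq);
        return 0;
    }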
@@ -791,16 +794,6 @@ static void amd_perf_ctl_reset(unsigned int cpu)
 	wrmsrl_on_cpu(cpu, MSR_AMD_PERF_CTL, 0);
 }

-/*
- * Set amd-pstate preferred core enable can't be done directly from cpufreq callbacks
- * due to locking, so queue the work for later.
- */
-static void amd_pstste_sched_prefcore_workfn(struct work_struct *work)
-{
-	sched_set_itmt_support();
-}
-static DECLARE_WORK(sched_prefcore_work, amd_pstste_sched_prefcore_workfn);
-
 #define CPPC_MAX_PERF U8_MAX

 static void amd_pstate_init_prefcore(struct amd_cpudata *cpudata)
@@ -811,14 +804,8 @@ static void amd_pstate_init_prefcore(struct amd_cpudata *cpudata)

 	cpudata->hw_prefcore = true;

-	/*
-	 * The priorities can be set regardless of whether or not
-	 * sched_set_itmt_support(true) has been called and it is valid to
-	 * update them at any time after it has been called.
-	 */
+	/* Priorities must be initialized before ITMT support can be toggled on. */
 	sched_set_itmt_core_prio((int)READ_ONCE(cpudata->prefcore_ranking), cpudata->cpu);
-
-	schedule_work(&sched_prefcore_work);
 }

 static void amd_pstate_update_limits(unsigned int cpu)
@@ -1193,6 +1180,9 @@ static ssize_t show_energy_performance_preference(

 static void amd_pstate_driver_cleanup(void)
 {
+	if (amd_pstate_prefcore)
+		sched_clear_itmt_support();
+
 	cppc_state = AMD_PSTATE_DISABLE;
 	current_pstate_driver = NULL;
 }
@@ -1235,6 +1225,10 @@ static int amd_pstate_register_driver(int mode)
 		return ret;
 	}

+	/* Enable ITMT support once all CPUs have initialized their asym priorities. */
+	if (amd_pstate_prefcore)
+		sched_set_itmt_support();
+
 	return 0;
 }
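
Taken together, the amd-pstate hunks replace the old fire-and-forget workqueue with an explicit ordering contract: each CPU publishes its preferred-core ranking via sched_set_itmt_core_prio() during init, the driver then enables ITMT support exactly once at registration, and sched_clear_itmt_support() undoes it on cleanup. A standalone sketch of that contract (stub functions with hypothetical names, not the scheduler API):

    /* Ordering sketch with stubs: priorities are pushed per CPU first,
     * ITMT support is toggled once afterwards, and cleanup undoes it. */
    #include <stdio.h>

    #define NR_TOY_CPUS 4

    static int itmt_prio[NR_TOY_CPUS];
    static int itmt_enabled;

    static void stub_set_core_prio(int prio, int cpu) { itmt_prio[cpu] = prio; }
    static void stub_set_itmt_support(void)   { itmt_enabled = 1; }
    static void stub_clear_itmt_support(void) { itmt_enabled = 0; }

    static void toy_register_driver(void)
    {
        /* per-CPU init runs first, as amd_pstate_init_prefcore() does */
        for (int cpu = 0; cpu < NR_TOY_CPUS; cpu++)
            stub_set_core_prio(100 + cpu, cpu);

        /* only then is ITMT support enabled, once, by the registrant */
        stub_set_itmt_support();
    }

    static void toy_driver_cleanup(void)
    {
        stub_clear_itmt_support();
    }

    int main(void)
    {
        toy_register_driver();
        printf("enabled=%d prio0=%d\n", itmt_enabled, itmt_prio[0]);
        toy_driver_cleanup();
        printf("enabled=%d\n", itmt_enabled);
        return 0;
    }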
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 3841c9da6cac..acf19b0042bb 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -540,8 +540,6 @@ static unsigned int __resolve_freq(struct cpufreq_policy *policy,
 {
 	unsigned int idx;

-	target_freq = clamp_val(target_freq, policy->min, policy->max);
-
 	if (!policy->freq_table)
 		return target_freq;
@@ -565,7 +563,22 @@ unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy,
 unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy,
 					 unsigned int target_freq)
 {
-	return __resolve_freq(policy, target_freq, CPUFREQ_RELATION_LE);
+	unsigned int min = READ_ONCE(policy->min);
+	unsigned int max = READ_ONCE(policy->max);
+
+	/*
+	 * If this function runs in parallel with cpufreq_set_policy(), it may
+	 * read policy->min before the update and policy->max after the update
+	 * or the other way around, so there is no ordering guarantee.
+	 *
+	 * Resolve this by always honoring the max (in case it comes from
+	 * thermal throttling or similar).
+	 */
+	if (unlikely(min > max))
+		min = max;
+
+	return __resolve_freq(policy, clamp_val(target_freq, min, max),
+			      CPUFREQ_RELATION_LE);
 }
 EXPORT_SYMBOL_GPL(cpufreq_driver_resolve_freq);
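
The new comment describes the race being guarded against: a lockless reader may combine policy->min from one update with policy->max from another. The sketch below (hypothetical toy_policy type; ordinary loads standing in for READ_ONCE) shows why collapsing min onto max keeps even a torn snapshot safe:

    /* Standalone sketch: a reader that pairs policy->min and policy->max
     * from two different updates still never resolves above max. */
    #include <stdio.h>

    struct toy_policy {
        unsigned int min; /* kHz */
        unsigned int max; /* kHz */
    };

    static unsigned int clamp_u(unsigned int v, unsigned int lo, unsigned int hi)
    {
        return v < lo ? lo : (v > hi ? hi : v);
    }

    static unsigned int toy_resolve_freq(const struct toy_policy *p,
                                         unsigned int target)
    {
        unsigned int min = p->min;
        unsigned int max = p->max;

        if (min > max) /* torn snapshot across a concurrent update: honor max */
            min = max;

        return clamp_u(target, min, max);
    }

    int main(void)
    {
        /* inconsistent pair: min from the new policy, max from the old one */
        struct toy_policy torn = { .min = 1800000, .max = 1200000 };

        /* prints 1200000: the request is capped, never raised above max */
        printf("%u\n", toy_resolve_freq(&torn, 2200000));
        return 0;
    }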
@@ -2384,6 +2397,7 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
 	if (cpufreq_disabled())
 		return -ENODEV;

+	target_freq = clamp_val(target_freq, policy->min, policy->max);
 	target_freq = __resolve_freq(policy, target_freq, relation);

 	pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
@@ -2708,11 +2722,15 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
 	 * Resolve policy min/max to available frequencies. It ensures
 	 * no frequency resolution will neither overshoot the requested maximum
 	 * nor undershoot the requested minimum.
+	 *
+	 * Avoid storing intermediate values in policy->max or policy->min and
+	 * compiler optimizations around them because they may be accessed
+	 * concurrently by cpufreq_driver_resolve_freq() during the update.
 	 */
-	policy->min = new_data.min;
-	policy->max = new_data.max;
-	policy->min = __resolve_freq(policy, policy->min, CPUFREQ_RELATION_L);
-	policy->max = __resolve_freq(policy, policy->max, CPUFREQ_RELATION_H);
+	WRITE_ONCE(policy->max, __resolve_freq(policy, new_data.max, CPUFREQ_RELATION_H));
+	new_data.min = __resolve_freq(policy, new_data.min, CPUFREQ_RELATION_L);
+	WRITE_ONCE(policy->min, new_data.min > policy->max ? policy->max : new_data.min);
+
 	trace_cpu_frequency_limits(policy);

 	cpufreq_update_pressure(policy);
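
This is the writer side of the race handled in cpufreq_driver_resolve_freq() above: the resolved max is published first, and the min stored afterwards is already capped against it, so once both stores land the pair always satisfies min <= max and no unresolved intermediate value is ever visible. A compact sketch of that store order (hypothetical toy_policy type; plain stores standing in for WRITE_ONCE):

    /* Writer-side sketch of the store order above: max goes out first,
     * min is capped against it before being stored. */
    #include <stdio.h>

    struct toy_policy {
        unsigned int min; /* kHz */
        unsigned int max; /* kHz */
    };

    static void toy_set_policy(struct toy_policy *p,
                               unsigned int new_min, unsigned int new_max)
    {
        p->max = new_max;                             /* published first */
        p->min = new_min > p->max ? p->max : new_min; /* never above max */
    }

    int main(void)
    {
        struct toy_policy p = { .min = 400000, .max = 3000000 };

        toy_set_policy(&p, 2000000, 1500000); /* requested min > new max */
        printf("min=%u max=%u\n", p.min, p.max); /* min=1500000 max=1500000 */
        return 0;
    }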
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 4aad79d26c64..f41ed0b9e610 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -2209,7 +2209,7 @@ static int knl_get_turbo_pstate(int cpu)
 static int hwp_get_cpu_scaling(int cpu)
 {
 	if (hybrid_scaling_factor) {
-		struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
+		struct cpuinfo_x86 *c = &cpu_data(cpu);
 		u8 cpu_type = c->topo.intel_type;

 		/*
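
The one-line intel_pstate fix is a classic per-CPU lookup bug: hwp_get_cpu_scaling() is asked about a particular cpu, but the old code read cpu_data() for whichever CPU happened to be executing it. A toy illustration of the bug class (hypothetical names; a plain array in place of per-CPU data):

    /* The lookup must use the CPU being asked about, not the CPU
     * running the code. */
    #include <stdio.h>

    #define NR_TOY_CPUS 4

    struct toy_cpuinfo { int cpu_type; };

    static struct toy_cpuinfo toy_cpu_info[NR_TOY_CPUS] = {
        { .cpu_type = 0x20 }, { .cpu_type = 0x40 },
        { .cpu_type = 0x20 }, { .cpu_type = 0x40 },
    };

    static int current_cpu; /* stands in for smp_processor_id() */

    static int cpu_type_buggy(int cpu)
    {
        (void)cpu;                               /* queried CPU ignored! */
        return toy_cpu_info[current_cpu].cpu_type;
    }

    static int cpu_type_fixed(int cpu)
    {
        return toy_cpu_info[cpu].cpu_type;       /* use the argument */
    }

    int main(void)
    {
        /* asking about CPU 1 while "running" on CPU 0 */
        printf("buggy=0x%x fixed=0x%x\n", cpu_type_buggy(1), cpu_type_fixed(1));
        return 0;
    }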