Diffstat (limited to 'drivers/cpufreq/intel_pstate.c')
-rw-r--r-- | drivers/cpufreq/intel_pstate.c | 53
1 file changed, 51 insertions(+), 2 deletions(-)
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 349ddbaef796..dec2a5649ac1 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -338,6 +338,8 @@ static void intel_pstste_sched_itmt_work_fn(struct work_struct *work)
 
 static DECLARE_WORK(sched_itmt_work, intel_pstste_sched_itmt_work_fn);
 
+#define CPPC_MAX_PERF	U8_MAX
+
 static void intel_pstate_set_itmt_prio(int cpu)
 {
 	struct cppc_perf_caps cppc_perf;
@@ -349,6 +351,14 @@ static void intel_pstate_set_itmt_prio(int cpu)
 		return;
 
 	/*
+	 * On some systems with overclocking enabled, CPPC.highest_perf is hardcoded to 0xff.
+	 * In this case we can't use CPPC.highest_perf to enable ITMT.
+	 * In this case we can look at MSR_HWP_CAPABILITIES bits [8:0] to decide.
+	 */
+	if (cppc_perf.highest_perf == CPPC_MAX_PERF)
+		cppc_perf.highest_perf = HWP_HIGHEST_PERF(READ_ONCE(all_cpu_data[cpu]->hwp_cap_cached));
+
+	/*
 	 * The priorities can be set regardless of whether or not
 	 * sched_set_itmt_support(true) has been called and it is valid to
 	 * update them at any time after it has been called.
@@ -1006,9 +1016,22 @@ static void intel_pstate_hwp_offline(struct cpudata *cpu)
 		 */
 		value &= ~GENMASK_ULL(31, 24);
 		value |= HWP_ENERGY_PERF_PREFERENCE(cpu->epp_cached);
-		WRITE_ONCE(cpu->hwp_req_cached, value);
+		/*
+		 * However, make sure that EPP will be set to "performance" when
+		 * the CPU is brought back online again and the "performance"
+		 * scaling algorithm is still in effect.
+		 */
+		cpu->epp_policy = CPUFREQ_POLICY_UNKNOWN;
 	}
 
+	/*
+	 * Clear the desired perf field in the cached HWP request value to
+	 * prevent nonzero desired values from being leaked into the active
+	 * mode.
+	 */
+	value &= ~HWP_DESIRED_PERF(~0L);
+	WRITE_ONCE(cpu->hwp_req_cached, value);
+
 	value &= ~GENMASK_ULL(31, 0);
 	min_perf = HWP_LOWEST_PERF(READ_ONCE(cpu->hwp_cap_cached));
 
@@ -1620,6 +1643,9 @@ static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata)
 {
 	unsigned long flags;
 
+	if (!boot_cpu_has(X86_FEATURE_HWP_NOTIFY))
+		return;
+
 	/* wrmsrl_on_cpu has to be outside spinlock as this can result in IPC */
 	wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
 
@@ -1642,6 +1668,7 @@ static void intel_pstate_enable_hwp_interrupt(struct cpudata *cpudata)
 
 		/* wrmsrl_on_cpu has to be outside spinlock as this can result in IPC */
 		wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x01);
+		wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_STATUS, 0);
 	}
 }
 
@@ -2342,6 +2369,7 @@ static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = {
 	X86_MATCH(BROADWELL_D, core_funcs),
 	X86_MATCH(BROADWELL_X, core_funcs),
 	X86_MATCH(SKYLAKE_X, core_funcs),
+	X86_MATCH(ICELAKE_X, core_funcs),
 	{}
 };
 
@@ -3003,6 +3031,27 @@ static int intel_cpufreq_cpu_exit(struct cpufreq_policy *policy)
 	return intel_pstate_cpu_exit(policy);
 }
 
+static int intel_cpufreq_suspend(struct cpufreq_policy *policy)
+{
+	intel_pstate_suspend(policy);
+
+	if (hwp_active) {
+		struct cpudata *cpu = all_cpu_data[policy->cpu];
+		u64 value = READ_ONCE(cpu->hwp_req_cached);
+
+		/*
+		 * Clear the desired perf field in MSR_HWP_REQUEST in case
+		 * intel_cpufreq_adjust_perf() is in use and the last value
+		 * written by it may not be suitable.
+		 */
+		value &= ~HWP_DESIRED_PERF(~0L);
+		wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
+		WRITE_ONCE(cpu->hwp_req_cached, value);
+	}
+
+	return 0;
+}
+
 static struct cpufreq_driver intel_cpufreq = {
 	.flags		= CPUFREQ_CONST_LOOPS,
 	.verify		= intel_cpufreq_verify_policy,
@@ -3012,7 +3061,7 @@ static struct cpufreq_driver intel_cpufreq = {
 	.exit		= intel_cpufreq_cpu_exit,
 	.offline	= intel_cpufreq_cpu_offline,
 	.online		= intel_pstate_cpu_online,
-	.suspend	= intel_pstate_suspend,
+	.suspend	= intel_cpufreq_suspend,
 	.resume		= intel_pstate_resume,
 	.update_limits	= intel_pstate_update_limits,
 	.name		= "intel_cpufreq",
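A note on the value &= ~HWP_DESIRED_PERF(~0L) pattern that appears in both the offline and the suspend hunks above: it zeroes only the Desired_Performance field (bits 23:16) of the IA32_HWP_REQUEST value, leaving the min, max and EPP fields intact. Below is a minimal user-space sketch of the mask arithmetic; the macros are stand-ins written to mirror the kernel's field helpers and the register values are purely hypothetical, chosen only for illustration.

#include <stdint.h>
#include <stdio.h>

/*
 * Stand-in macros mirroring the kernel's IA32_HWP_REQUEST field helpers
 * (arch/x86/include/asm/msr-index.h); reproduced here only to make the
 * sketch self-contained.
 */
#define HWP_MIN_PERF(x)		((uint64_t)(x) & 0xff)
#define HWP_MAX_PERF(x)		(((uint64_t)(x) & 0xff) << 8)
#define HWP_DESIRED_PERF(x)	(((uint64_t)(x) & 0xff) << 16)

int main(void)
{
	/* Hypothetical cached request: min = 8, max = 40, desired = 32. */
	uint64_t value = HWP_MIN_PERF(8) | HWP_MAX_PERF(40) | HWP_DESIRED_PERF(32);

	/*
	 * The pattern from the patch: HWP_DESIRED_PERF(~0L) builds an
	 * all-ones desired-perf field (0xff0000), so masking with its
	 * complement clears bits 23:16 and nothing else.
	 */
	value &= ~HWP_DESIRED_PERF(~0L);

	printf("request after clearing desired perf: %#llx\n",
	       (unsigned long long)value);	/* prints 0x2808 */
	return 0;
}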