author    Linus Torvalds <torvalds@linux-foundation.org>  2020-03-15 12:44:23 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2020-03-15 12:44:23 -0700
commit    52ac3777fc4514b17c0c4c548f941207a0fc06a7 (patch)
tree      1ecbbee5fa941796ab65b609ed0a864f86a5378c
parent    b67775e124572a7028e930c306ed68cc2f90b29b (diff)
parent    59b5809655bdafb0767d3fd00a3e41711aab07e6 (diff)
Merge tag 'ras-urgent-2020-03-15' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull RAS fixes from Thomas Gleixner:
 "Two RAS related fixes:

   - Shut down the per-CPU thermal throttling poll work properly when a
     CPU goes offline. The missing shutdown caused the poll work to be
     migrated to an unbound worker, which triggered warnings about the
     usage of smp_processor_id() in preemptible context.

   - Fix the PPIN feature initialization, which failed to enable the
     functionality when PPIN_CTL was enabled but the MSR was locked
     against updates."

* tag 'ras-urgent-2020-03-15' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/mce: Fix logic and comments around MSR_PPIN_CTL
  x86/mce/therm_throt: Undo thermal polling properly on CPU offline
-rw-r--r--  arch/x86/kernel/cpu/mce/intel.c        9
-rw-r--r--  arch/x86/kernel/cpu/mce/therm_throt.c  9
2 files changed, 12 insertions, 6 deletions
diff --git a/arch/x86/kernel/cpu/mce/intel.c b/arch/x86/kernel/cpu/mce/intel.c
index 5627b1091b85..f996ffb887bc 100644
--- a/arch/x86/kernel/cpu/mce/intel.c
+++ b/arch/x86/kernel/cpu/mce/intel.c
@@ -493,17 +493,18 @@ static void intel_ppin_init(struct cpuinfo_x86 *c)
 			return;
 
 		if ((val & 3UL) == 1UL) {
-			/* PPIN available but disabled: */
+			/* PPIN locked in disabled mode */
 			return;
 		}
 
-		/* If PPIN is disabled, but not locked, try to enable: */
-		if (!(val & 3UL)) {
+		/* If PPIN is disabled, try to enable */
+		if (!(val & 2UL)) {
 			wrmsrl_safe(MSR_PPIN_CTL, val | 2UL);
 			rdmsrl_safe(MSR_PPIN_CTL, &val);
 		}
 
-		if ((val & 3UL) == 2UL)
+		/* Is the enable bit set? */
+		if (val & 2UL)
 			set_cpu_cap(c, X86_FEATURE_INTEL_PPIN);
 	}
 }
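Editor's note on the hunk above: in MSR_PPIN_CTL, bit 0 is the lock bit and bit 1 is the enable bit. The pre-patch feature test (val & 3UL) == 2UL only accepted "enabled and not locked", so firmware that enabled PPIN and then locked the MSR (low bits == 3) was wrongly treated as having no PPIN; the post-patch test val & 2UL looks only at the enable bit. A minimal userspace sketch comparing the two checks (illustration only, no MSR access; the loop values stand for the low two bits of PPIN_CTL):

/* Illustration only: compare the pre- and post-patch PPIN_CTL feature checks. */
#include <stdio.h>

int main(void)
{
	for (unsigned long val = 0; val < 4; val++) {
		int old_ok = (val & 3UL) == 2UL;   /* pre-patch: enabled and not locked */
		int new_ok = (val & 2UL) != 0;     /* post-patch: enable bit set */

		printf("PPIN_CTL lock=%lu enable=%lu  old check: %s  new check: %s\n",
		       val & 1UL, (val >> 1) & 1UL,
		       old_ok ? "PPIN" : "no PPIN",
		       new_ok ? "PPIN" : "no PPIN");
	}
	return 0;
}

The "lock=1 enable=1" row is the configuration the patch recovers: the old check reported no PPIN there, the new one reports PPIN.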
diff --git a/arch/x86/kernel/cpu/mce/therm_throt.c b/arch/x86/kernel/cpu/mce/therm_throt.c
index 58b4ee3cda77..f36dc0742085 100644
--- a/arch/x86/kernel/cpu/mce/therm_throt.c
+++ b/arch/x86/kernel/cpu/mce/therm_throt.c
@@ -486,9 +486,14 @@ static int thermal_throttle_offline(unsigned int cpu)
 {
 	struct thermal_state *state = &per_cpu(thermal_state, cpu);
 	struct device *dev = get_cpu_device(cpu);
+	u32 l;
+
+	/* Mask the thermal vector before draining evtl. pending work */
+	l = apic_read(APIC_LVTTHMR);
+	apic_write(APIC_LVTTHMR, l | APIC_LVT_MASKED);
 
-	cancel_delayed_work(&state->package_throttle.therm_work);
-	cancel_delayed_work(&state->core_throttle.therm_work);
+	cancel_delayed_work_sync(&state->package_throttle.therm_work);
+	cancel_delayed_work_sync(&state->core_throttle.therm_work);
 
 	state->package_throttle.rate_control_active = false;
 	state->core_throttle.rate_control_active = false;
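Editor's note on the second hunk: cancel_delayed_work() only removes a pending work item and returns immediately, while cancel_delayed_work_sync() additionally waits for an already-running instance to finish; combined with masking the thermal LVT entry first, nothing can re-arm or keep running once the CPU is offline. A rough userspace analogy (a sketch only, not kernel code; the worker thread and stop flag are made up for illustration) is the difference between merely setting a stop flag and actually joining the worker:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_bool stop_requested;        /* stand-in for "work cancelled" */

static void *poll_worker(void *arg)
{
	(void)arg;
	while (!atomic_load(&stop_requested))
		usleep(1000);             /* stand-in for the periodic poll work */
	return NULL;
}

int main(void)
{
	pthread_t worker;

	pthread_create(&worker, NULL, poll_worker, NULL);
	usleep(10000);

	/* "cancel": ask the worker to stop; it may still be mid-iteration here */
	atomic_store(&stop_requested, true);

	/* "sync": wait until the worker has really finished */
	pthread_join(worker, NULL);

	puts("worker is guaranteed to be done");
	return 0;
}

Without the join (the _sync part), main() could proceed while poll_worker() is still executing, which mirrors how the un-synced cancel left the poll work able to run and be migrated to an unbound worker after the CPU went offline, triggering the smp_processor_id() warnings described in the pull message.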