author    | Linus Torvalds <torvalds@linux-foundation.org> | 2018-08-22 07:42:36 -0700
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2018-08-22 07:42:36 -0700
commit    | dfec4a8478e8e81483a94b663f13153bb7796800
tree      | f78f0cac1f93b6a3b2a8f3a2a78fb84c04be97d3
parent    | 159127ea8318974b76542f62d6d23ec8bec91a28
parent    | 01ac7c4c2e035bc8d0d47dc880bbc25bf562a648
Merge tag 'pm-4.19-rc1-2' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
Pull more power management updates from Rafael Wysocki:
"These fix the main idle loop and the menu cpuidle governor, clean up
the latter, fix a mistake in the PCI bus type's support for system
suspend and resume, fix the ondemand and conservative cpufreq
governors, address a build issue in the system wakeup framework and
make the ACPI C-states descriptions less confusing.
Specifics:
- Make the idle loop handle stopped scheduler tick correctly (Rafael
Wysocki).
- Prevent the menu cpuidle governor from letting CPUs spend too much
time in shallow idle states when it is invoked with scheduler tick
stopped and clean it up somewhat (Rafael Wysocki).
- Avoid invoking the platform firmware to make the platform enter the
ACPI S3 sleep state with suspended PCIe root ports which may
confuse the firmware and cause it to crash (Rafael Wysocki).
- Fix sysfs-related race in the ondemand and conservative cpufreq
governors which may cause the system to crash if the governor
module is removed during an update of CPU frequency limits (Henry
Willard).
- Select SRCU when building the system wakeup framework to avoid a
build issue in it (zhangyi).
- Make the descriptions of ACPI C-states vendor-neutral to avoid
confusion (Prarit Bhargava)"
* tag 'pm-4.19-rc1-2' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm:
cpuidle: menu: Handle stopped tick more aggressively
sched: idle: Avoid retaining the tick when it has been stopped
PCI / ACPI / PM: Resume all bridges on suspend-to-RAM
cpuidle: menu: Update stale polling override comment
cpufreq: governor: Avoid accessing invalid governor_data
x86/ACPI/cstate: Make ACPI C1 FFH MWAIT C-state description vendor-neutral
cpuidle: menu: Fix white space
PM / sleep: wakeup: Fix build error caused by missing SRCU support
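The sched/idle and cpuidle changes below share one rule: once the tick has been stopped for an idle CPU, the idle loop must not try to retain it, and the menu governor must not pick a shallow state on the strength of a short prediction. As a rough illustration of the first half of that rule, here is a small userspace model of the decision now made in cpuidle_idle_call(); the function names mirror the kernel ones in the diff, but the stand-in bodies and the scenario are invented for this example.

/* Illustration only: a userspace model, not kernel code. */
#include <stdbool.h>
#include <stdio.h>

static bool tick_already_stopped;

/* stand-in for tick_nohz_tick_stopped() */
static bool tick_nohz_tick_stopped(void)
{
	return tick_already_stopped;
}

/* stand-ins for the tick control helpers used by cpuidle_idle_call() */
static void tick_nohz_idle_stop_tick(void)
{
	tick_already_stopped = true;
	puts("tick stopped");
}

static void tick_nohz_idle_retain_tick(void)
{
	puts("tick retained");
}

/* toy governor: asks for the tick to be kept running */
static int cpuidle_select(bool *stop_tick)
{
	*stop_tick = false;
	return 0;
}

int main(void)
{
	bool stop_tick;

	tick_already_stopped = true;	/* a previous idle period stopped it */
	cpuidle_select(&stop_tick);

	/* the 4.19 fix: never retain a tick that is already stopped */
	if (stop_tick || tick_nohz_tick_stopped())
		tick_nohz_idle_stop_tick();
	else
		tick_nohz_idle_retain_tick();

	return 0;
}

With the tick already stopped, the model always takes the stop-tick branch, which is exactly what the one-line kernel/sched/idle.c change in the diff below enforces.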
-rw-r--r-- | arch/x86/kernel/acpi/cstate.c      |  2
-rw-r--r-- | drivers/cpufreq/cpufreq_governor.c | 12
-rw-r--r-- | drivers/cpuidle/governors/menu.c   | 45
-rw-r--r-- | drivers/pci/pci-acpi.c             |  6
-rw-r--r-- | kernel/power/Kconfig               |  1
-rw-r--r-- | kernel/sched/idle.c                |  2
6 files changed, 43 insertions, 25 deletions
diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c
index dde437f5d14f..158ad1483c43 100644
--- a/arch/x86/kernel/acpi/cstate.c
+++ b/arch/x86/kernel/acpi/cstate.c
@@ -108,7 +108,7 @@ static long acpi_processor_ffh_cstate_probe_cpu(void *_cx)
 			cx->type);
 	}
 	snprintf(cx->desc,
-		 ACPI_CX_DESC_LEN, "ACPI FFH INTEL MWAIT 0x%x",
+		 ACPI_CX_DESC_LEN, "ACPI FFH MWAIT 0x%x",
 		 cx->address);
 out:
 	return retval;
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 1d50e97d49f1..6d53f7d9fc7a 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -555,12 +555,20 @@ EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_stop);
 
 void cpufreq_dbs_governor_limits(struct cpufreq_policy *policy)
 {
-	struct policy_dbs_info *policy_dbs = policy->governor_data;
+	struct policy_dbs_info *policy_dbs;
+
+	/* Protect gov->gdbs_data against cpufreq_dbs_governor_exit() */
+	mutex_lock(&gov_dbs_data_mutex);
+	policy_dbs = policy->governor_data;
+	if (!policy_dbs)
+		goto out;
 
 	mutex_lock(&policy_dbs->update_mutex);
 	cpufreq_policy_apply_limits(policy);
 	gov_update_sample_delay(policy_dbs, 0);
-
 	mutex_unlock(&policy_dbs->update_mutex);
+
+out:
+	mutex_unlock(&gov_dbs_data_mutex);
 }
 EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_limits);
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index 1aef60d160eb..110483f0e3fb 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -328,9 +328,8 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
 		unsigned int polling_threshold;
 
 		/*
-		 * We want to default to C1 (hlt), not to busy polling
-		 * unless the timer is happening really really soon, or
-		 * C1's exit latency exceeds the user configured limit.
+		 * Default to a physical idle state, not to busy polling, unless
+		 * a timer is going to trigger really really soon.
 		 */
 		polling_threshold = max_t(unsigned int, 20, s->target_residency);
 		if (data->next_timer_us > polling_threshold &&
@@ -349,14 +348,12 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
 		 * If the tick is already stopped, the cost of possible short
 		 * idle duration misprediction is much higher, because the CPU
 		 * may be stuck in a shallow idle state for a long time as a
-		 * result of it. In that case say we might mispredict and try
-		 * to force the CPU into a state for which we would have stopped
-		 * the tick, unless a timer is going to expire really soon
-		 * anyway.
+		 * result of it. In that case say we might mispredict and use
+		 * the known time till the closest timer event for the idle
+		 * state selection.
 		 */
 		if (data->predicted_us < TICK_USEC)
-			data->predicted_us = min_t(unsigned int, TICK_USEC,
-						   ktime_to_us(delta_next));
+			data->predicted_us = ktime_to_us(delta_next);
 	} else {
 		/*
 		 * Use the performance multiplier and the user-configurable
@@ -381,8 +378,22 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
 			continue;
 		if (idx == -1)
 			idx = i; /* first enabled state */
-		if (s->target_residency > data->predicted_us)
-			break;
+		if (s->target_residency > data->predicted_us) {
+			if (!tick_nohz_tick_stopped())
+				break;
+
+			/*
+			 * If the state selected so far is shallow and this
+			 * state's target residency matches the time till the
+			 * closest timer event, select this one to avoid getting
+			 * stuck in the shallow one for too long.
+			 */
+			if (drv->states[idx].target_residency < TICK_USEC &&
+			    s->target_residency <= ktime_to_us(delta_next))
+				idx = i;
+
+			goto out;
+		}
 		if (s->exit_latency > latency_req) {
 			/*
 			 * If we break out of the loop for latency reasons, use
@@ -403,14 +414,13 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
 	 * Don't stop the tick if the selected state is a polling one or if the
 	 * expected idle duration is shorter than the tick period length.
 	 */
-	if ((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) ||
-	    expected_interval < TICK_USEC) {
+	if (((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) ||
+	     expected_interval < TICK_USEC) && !tick_nohz_tick_stopped()) {
 		unsigned int delta_next_us = ktime_to_us(delta_next);
 
 		*stop_tick = false;
 
-		if (!tick_nohz_tick_stopped() && idx > 0 &&
-		    drv->states[idx].target_residency > delta_next_us) {
+		if (idx > 0 && drv->states[idx].target_residency > delta_next_us) {
 			/*
 			 * The tick is not going to be stopped and the target
 			 * residency of the state to be returned is not within
@@ -418,8 +428,8 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
 			 * tick, so try to correct that.
 			 */
 			for (i = idx - 1; i >= 0; i--) {
-				if (drv->states[i].disabled ||
-					dev->states_usage[i].disable)
+				if (drv->states[i].disabled ||
+				    dev->states_usage[i].disable)
 					continue;
 
 				idx = i;
@@ -429,6 +439,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
 		}
 	}
 
+out:
 	data->last_state_idx = idx;
 
 	return data->last_state_idx;
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 738e3546abb1..c2ab57705043 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -615,13 +615,11 @@ static bool acpi_pci_need_resume(struct pci_dev *dev)
 	/*
 	 * In some cases (eg. Samsung 305V4A) leaving a bridge in suspend over
 	 * system-wide suspend/resume confuses the platform firmware, so avoid
-	 * doing that, unless the bridge has a driver that should take care of
-	 * the PM handling. According to Section 16.1.6 of ACPI 6.2, endpoint
+	 * doing that. According to Section 16.1.6 of ACPI 6.2, endpoint
 	 * devices are expected to be in D3 before invoking the S3 entry path
 	 * from the firmware, so they should not be affected by this issue.
 	 */
-	if (pci_is_bridge(dev) && !dev->driver &&
-	    acpi_target_system_state() != ACPI_STATE_S0)
+	if (pci_is_bridge(dev) && acpi_target_system_state() != ACPI_STATE_S0)
 		return true;
 
 	if (!adev || !acpi_device_power_manageable(adev))
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index e880ca22c5a5..3a6c2f87699e 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -105,6 +105,7 @@ config PM_SLEEP
 	def_bool y
 	depends on SUSPEND || HIBERNATE_CALLBACKS
 	select PM
+	select SRCU
 
 config PM_SLEEP_SMP
 	def_bool y
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index 1a3e9bddd17b..16f84142f2f4 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -190,7 +190,7 @@ static void cpuidle_idle_call(void)
 		 */
 		next_state = cpuidle_select(drv, dev, &stop_tick);
 
-		if (stop_tick)
+		if (stop_tick || tick_nohz_tick_stopped())
 			tick_nohz_idle_stop_tick();
 		else
 			tick_nohz_idle_retain_tick();
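For the cpufreq_governor.c hunk, the crash mode was cpufreq_dbs_governor_limits() dereferencing governor_data after the ondemand or conservative module had cleared it on exit. A rough userspace analogue of the fixed pattern, assuming a plain pointer guarded by a pthread mutex (the thread names and data here are invented; only the check-under-the-lock idea carries over from the kernel change), might look like this:

/* Illustration only: a userspace analogue, not kernel code. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t data_mutex = PTHREAD_MUTEX_INITIALIZER;
static int *governor_data;		/* set on init, cleared on module exit */

/* analogue of cpufreq_dbs_governor_limits(): bail out if the data is gone */
static void *limits_update(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&data_mutex);
	if (governor_data)
		printf("applying limits, data = %d\n", *governor_data);
	else
		puts("governor already exited, nothing to do");
	pthread_mutex_unlock(&data_mutex);
	return NULL;
}

/* analogue of cpufreq_dbs_governor_exit(): free the data under the lock */
static void *governor_exit(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&data_mutex);
	free(governor_data);
	governor_data = NULL;
	pthread_mutex_unlock(&data_mutex);
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;

	governor_data = malloc(sizeof(*governor_data));
	*governor_data = 42;

	/* without the shared mutex the update could dereference freed memory */
	pthread_create(&t1, NULL, governor_exit, NULL);
	pthread_create(&t2, NULL, limits_update, NULL);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	return 0;
}

Built with -pthread, whichever thread wins the lock, the update either sees valid data or the NULL left behind by the exit path, never freed memory; that is the role played by gov_dbs_data_mutex in the patch above.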