author     Rafael J. Wysocki <rafael.j.wysocki@intel.com>  2025-03-24 15:19:56 +0100
committer  Rafael J. Wysocki <rafael.j.wysocki@intel.com>  2025-03-24 15:19:56 +0100
commit     5a98796468e655048a3b37bf94cbe9a36ce58e32 (patch)
tree       654be161d17530ae569256434e1bc2d4cca56a9a
parent     871302441256b3d34a0625d33014c882036258cc (diff)
parent     3860cbe239639503e56bd4365c6bf4cb957ef04e (diff)
Merge branch 'pm-sleep'
Merge updates related to system sleep for 6.15-rc1 including fixes,
cleanups and a rework of the "smart suspend" driver flag handling to
avoid issues that may occur when drivers using it depend on some other
drivers:
- Rework the handling of the "smart suspend" driver flag in the PM core
to avoid issues that may occur when drivers using it depend on some
other drivers, and clean up the related PM core code (Rafael Wysocki,
Colin Ian King); a sketch of the resulting subsystem-side check follows
this list.
- Fix the handling of devices with the power.direct_complete flag set
if device_suspend() returns an error for at least one device to avoid
situations in which some of them may not be resumed (Rafael Wysocki).
- Use mutex_trylock() in hibernate_compressor_param_set() to avoid a
possible deadlock that may occur if the "compressor" hibernation
module parameter is accessed during the registration of a new
ieee80211 device (Lizhi Xu); a reduced sketch of this pattern also
follows this list.
- Suppress the sleeping parent warning in device_pm_add() in the case when
new children are added under a device with the power.direct_complete
flag set after it has been processed by device_resume() (Xu Yang).
- Remove needless return in three void functions related to system
wakeup (Zijun Hu).
- Replace deprecated kmap_atomic() with kmap_local_page() in the
hibernation core code (David Reaver); a minimal sketch of the
conversion follows the patch list below.
- Remove unused helper functions related to system sleep (David Alan
Gilbert).
- Clean up s2idle_enter() so it does not needlessly acquire and release
the CPU hotplug lock, and update the comments in it (Ulf Hansson).
- Clean up broken white space in dpm_wait_for_children() (Geert
Uytterhoeven).
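As noted in the first item above, subsystems no longer test the DPM_FLAG_SMART_SUSPEND driver flag directly; the PM core now records whether "smart suspend" is actually in effect for the current cycle (taking the parent's and suppliers' runtime PM state into account) and exposes it via dev_pm_smart_suspend(). A minimal sketch of the subsystem-side check, with a hypothetical callback name standing in for the ACPI, PCI and intel-lpss users touched by this merge:

```c
#include <linux/device.h>
#include <linux/pm_runtime.h>

/* Hypothetical subsystem suspend callback; only the check is the point. */
static int example_subsys_suspend(struct device *dev)
{
	/*
	 * Old pattern: dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND).
	 * New pattern: ask the PM core whether "smart suspend" applies in
	 * this suspend cycle, and resume the device via runtime PM if not.
	 */
	if (!dev_pm_smart_suspend(dev))
		pm_runtime_resume(dev);

	return 0;
}
```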
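The hibernate_compressor_param_set() fix mentioned above replaces lock_system_sleep() with a non-blocking attempt on system_transition_mutex, so a writer of the "compressor" parameter gets -EBUSY instead of sleeping while the lock is held elsewhere. A reduced sketch of the resulting function (not the verbatim mainline code; the step that records the selected algorithm is elided):

```c
static int hibernate_compressor_param_set(const char *compressor,
					  const struct kernel_param *kp)
{
	int index, ret;

	/* Do not sleep on the lock: its holder may be what deadlocks us. */
	if (!mutex_trylock(&system_transition_mutex))
		return -EBUSY;

	index = sysfs_match_string(comp_alg_enabled, compressor);
	if (index >= 0)
		ret = param_set_copystring(compressor, kp); /* then record index */
	else
		ret = index;

	mutex_unlock(&system_transition_mutex);

	if (ret)
		pr_debug("Cannot set specified compressor %s\n", compressor);

	return ret;
}
```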
* pm-sleep:
PM: sleep: Fix bit masking operation
PM: sleep: Fix handling devices with direct_complete set on errors
PM: sleep: core: Fix indentation in dpm_wait_for_children()
PM: s2idle: Extend comment in s2idle_enter()
PM: s2idle: Drop redundant locks when entering s2idle
PM: sleep: Remove unused pm_generic_ wrappers
PM: sleep: Rearrange dpm_async_fn() and async state clearing
PM: sleep: Rename power.async_in_progress to power.work_in_progress
PM: core: Tweak pm_runtime_block_if_disabled() return value
PM: runtime: Convert pm_runtime_blocked() to static inline
PM: sleep: Update power.smart_suspend under PM spinlock
PM: sleep: Adjust check before setting power.must_resume
PM: wakeup: Remove needless return in three void APIs
PM: sleep: Suppress sleeping parent warning in special case
PM: hibernate: Avoid deadlock in hibernate_compressor_param_set()
PM: sleep: Avoid unnecessary checks in device_prepare_smart_suspend()
PM: sleep: Use DPM_FLAG_SMART_SUSPEND conditionally
PM: runtime: Introduce pm_runtime_blocked()
PM: Block enabling of runtime PM during system suspend
PM: hibernate: Replace deprecated kmap_atomic() with kmap_local_page()
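The kmap_atomic() removals listed above follow the standard conversion to kmap_local_page()/kunmap_local(). A minimal sketch of the pattern applied in kernel/power/snapshot.c, using a hypothetical helper:

```c
#include <linux/highmem.h>
#include <linux/mm.h>

/* Hypothetical helper; the same pattern is applied in snapshot.c. */
static void example_copy_from_page(void *buffer, struct page *page)
{
	void *kaddr;

	/* Old: kaddr = kmap_atomic(page); ... kunmap_atomic(kaddr); */

	/*
	 * kmap_local_page() provides a CPU-local, nestable mapping and,
	 * unlike kmap_atomic(), does not disable preemption or pagefaults.
	 */
	kaddr = kmap_local_page(page);
	copy_page(buffer, kaddr);
	kunmap_local(kaddr);
}
```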
 drivers/acpi/device_pm.c         |   4
 drivers/base/power/generic_ops.c |  24
 drivers/base/power/main.c        | 161
 drivers/base/power/runtime.c     |  31
 drivers/mfd/intel-lpss.c         |   2
 drivers/pci/pci-driver.c         |   6
 include/linux/device.h           |   9
 include/linux/pm.h               |   9
 include/linux/pm_runtime.h       |  18
 include/linux/pm_wakeup.h        |   6
 kernel/power/hibernate.c         |   6
 kernel/power/snapshot.c          |  16
 kernel/power/suspend.c           |  14
 13 files changed, 205 insertions(+), 101 deletions(-)
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c index 3b4d048c4941..dbd4446025ec 100644 --- a/drivers/acpi/device_pm.c +++ b/drivers/acpi/device_pm.c @@ -1161,7 +1161,7 @@ EXPORT_SYMBOL_GPL(acpi_subsys_complete); */ int acpi_subsys_suspend(struct device *dev) { - if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) || + if (!dev_pm_smart_suspend(dev) || acpi_dev_needs_resume(dev, ACPI_COMPANION(dev))) pm_runtime_resume(dev); @@ -1320,7 +1320,7 @@ EXPORT_SYMBOL_GPL(acpi_subsys_restore_early); */ int acpi_subsys_poweroff(struct device *dev) { - if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) || + if (!dev_pm_smart_suspend(dev) || acpi_dev_needs_resume(dev, ACPI_COMPANION(dev))) pm_runtime_resume(dev); diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c index 4fa525668cb7..6502720bb564 100644 --- a/drivers/base/power/generic_ops.c +++ b/drivers/base/power/generic_ops.c @@ -115,18 +115,6 @@ int pm_generic_freeze_noirq(struct device *dev) EXPORT_SYMBOL_GPL(pm_generic_freeze_noirq); /** - * pm_generic_freeze_late - Generic freeze_late callback for subsystems. - * @dev: Device to freeze. - */ -int pm_generic_freeze_late(struct device *dev) -{ - const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; - - return pm && pm->freeze_late ? pm->freeze_late(dev) : 0; -} -EXPORT_SYMBOL_GPL(pm_generic_freeze_late); - -/** * pm_generic_freeze - Generic freeze callback for subsystems. * @dev: Device to freeze. */ @@ -187,18 +175,6 @@ int pm_generic_thaw_noirq(struct device *dev) EXPORT_SYMBOL_GPL(pm_generic_thaw_noirq); /** - * pm_generic_thaw_early - Generic thaw_early callback for subsystems. - * @dev: Device to thaw. - */ -int pm_generic_thaw_early(struct device *dev) -{ - const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; - - return pm && pm->thaw_early ? pm->thaw_early(dev) : 0; -} -EXPORT_SYMBOL_GPL(pm_generic_thaw_early); - -/** * pm_generic_thaw - Generic thaw callback for subsystems. * @dev: Device to thaw. */ diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index dffa2aa1ba7d..ac2a197c1234 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -249,7 +249,7 @@ static int dpm_wait_fn(struct device *dev, void *async_ptr) static void dpm_wait_for_children(struct device *dev, bool async) { - device_for_each_child(dev, &async, dpm_wait_fn); + device_for_each_child(dev, &async, dpm_wait_fn); } static void dpm_wait_for_suppliers(struct device *dev, bool async) @@ -599,27 +599,34 @@ static bool is_async(struct device *dev) static bool dpm_async_fn(struct device *dev, async_func_t func) { - reinit_completion(&dev->power.completion); + if (!is_async(dev)) + return false; - if (is_async(dev)) { - dev->power.async_in_progress = true; + dev->power.work_in_progress = true; - get_device(dev); + get_device(dev); + + if (async_schedule_dev_nocall(func, dev)) + return true; - if (async_schedule_dev_nocall(func, dev)) - return true; + put_device(dev); - put_device(dev); - } /* - * Because async_schedule_dev_nocall() above has returned false or it - * has not been called at all, func() is not running and it is safe to - * update the async_in_progress flag without extra synchronization. + * async_schedule_dev_nocall() above has returned false, so func() is + * not running and it is safe to update power.work_in_progress without + * extra synchronization. 
*/ - dev->power.async_in_progress = false; + dev->power.work_in_progress = false; + return false; } +static void dpm_clear_async_state(struct device *dev) +{ + reinit_completion(&dev->power.completion); + dev->power.work_in_progress = false; +} + /** * device_resume_noirq - Execute a "noirq resume" callback for given device. * @dev: Device to handle. @@ -656,15 +663,13 @@ static void device_resume_noirq(struct device *dev, pm_message_t state, bool asy * so change its status accordingly. * * Otherwise, the device is going to be resumed, so set its PM-runtime - * status to "active" unless its power.set_active flag is clear, in + * status to "active" unless its power.smart_suspend flag is clear, in * which case it is not necessary to update its PM-runtime status. */ - if (skip_resume) { + if (skip_resume) pm_runtime_set_suspended(dev); - } else if (dev->power.set_active) { + else if (dev_pm_smart_suspend(dev)) pm_runtime_set_active(dev); - dev->power.set_active = false; - } if (dev->pm_domain) { info = "noirq power domain "; @@ -731,14 +736,16 @@ static void dpm_noirq_resume_devices(pm_message_t state) * Trigger the resume of "async" devices upfront so they don't have to * wait for the "non-async" ones they don't depend on. */ - list_for_each_entry(dev, &dpm_noirq_list, power.entry) + list_for_each_entry(dev, &dpm_noirq_list, power.entry) { + dpm_clear_async_state(dev); dpm_async_fn(dev, async_resume_noirq); + } while (!list_empty(&dpm_noirq_list)) { dev = to_device(dpm_noirq_list.next); list_move_tail(&dev->power.entry, &dpm_late_early_list); - if (!dev->power.async_in_progress) { + if (!dev->power.work_in_progress) { get_device(dev); mutex_unlock(&dpm_list_mtx); @@ -871,14 +878,16 @@ void dpm_resume_early(pm_message_t state) * Trigger the resume of "async" devices upfront so they don't have to * wait for the "non-async" ones they don't depend on. */ - list_for_each_entry(dev, &dpm_late_early_list, power.entry) + list_for_each_entry(dev, &dpm_late_early_list, power.entry) { + dpm_clear_async_state(dev); dpm_async_fn(dev, async_resume_early); + } while (!list_empty(&dpm_late_early_list)) { dev = to_device(dpm_late_early_list.next); list_move_tail(&dev->power.entry, &dpm_suspended_list); - if (!dev->power.async_in_progress) { + if (!dev->power.work_in_progress) { get_device(dev); mutex_unlock(&dpm_list_mtx); @@ -929,7 +938,17 @@ static void device_resume(struct device *dev, pm_message_t state, bool async) if (dev->power.syscore) goto Complete; + if (!dev->power.is_suspended) + goto Complete; + if (dev->power.direct_complete) { + /* + * Allow new children to be added under the device after this + * point if it has no PM callbacks. + */ + if (dev->power.no_pm_callbacks) + dev->power.is_prepared = false; + /* Match the pm_runtime_disable() in device_suspend(). */ pm_runtime_enable(dev); goto Complete; @@ -947,9 +966,6 @@ static void device_resume(struct device *dev, pm_message_t state, bool async) */ dev->power.is_prepared = false; - if (!dev->power.is_suspended) - goto Unlock; - if (dev->pm_domain) { info = "power domain "; callback = pm_op(&dev->pm_domain->ops, state); @@ -989,7 +1005,6 @@ static void device_resume(struct device *dev, pm_message_t state, bool async) error = dpm_run_callback(callback, dev, state, info); dev->power.is_suspended = false; - Unlock: device_unlock(dev); dpm_watchdog_clear(&wd); @@ -1037,14 +1052,16 @@ void dpm_resume(pm_message_t state) * Trigger the resume of "async" devices upfront so they don't have to * wait for the "non-async" ones they don't depend on. 
*/ - list_for_each_entry(dev, &dpm_suspended_list, power.entry) + list_for_each_entry(dev, &dpm_suspended_list, power.entry) { + dpm_clear_async_state(dev); dpm_async_fn(dev, async_resume); + } while (!list_empty(&dpm_suspended_list)) { dev = to_device(dpm_suspended_list.next); list_move_tail(&dev->power.entry, &dpm_prepared_list); - if (!dev->power.async_in_progress) { + if (!dev->power.work_in_progress) { get_device(dev); mutex_unlock(&dpm_list_mtx); @@ -1109,6 +1126,8 @@ static void device_complete(struct device *dev, pm_message_t state) device_unlock(dev); out: + /* If enabling runtime PM for the device is blocked, unblock it. */ + pm_runtime_unblock(dev); pm_runtime_put(dev); } @@ -1270,24 +1289,17 @@ Skip: dev->power.is_noirq_suspended = true; /* - * Skipping the resume of devices that were in use right before the - * system suspend (as indicated by their PM-runtime usage counters) - * would be suboptimal. Also resume them if doing that is not allowed - * to be skipped. + * Devices must be resumed unless they are explicitly allowed to be left + * in suspend, but even in that case skipping the resume of devices that + * were in use right before the system suspend (as indicated by their + * runtime PM usage counters and child counters) would be suboptimal. */ - if (atomic_read(&dev->power.usage_count) > 1 || - !(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) && - dev->power.may_skip_resume)) + if (!(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) && + dev->power.may_skip_resume) || !pm_runtime_need_not_resume(dev)) dev->power.must_resume = true; - if (dev->power.must_resume) { - if (dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND)) { - dev->power.set_active = true; - if (dev->parent && !dev->parent->power.ignore_children) - dev->parent->power.set_active = true; - } + if (dev->power.must_resume) dpm_superior_set_must_resume(dev); - } Complete: complete_all(&dev->power.completion); @@ -1320,6 +1332,7 @@ static int dpm_noirq_suspend_devices(pm_message_t state) list_move(&dev->power.entry, &dpm_noirq_list); + dpm_clear_async_state(dev); if (dpm_async_fn(dev, async_suspend_noirq)) continue; @@ -1497,6 +1510,7 @@ int dpm_suspend_late(pm_message_t state) list_move(&dev->power.entry, &dpm_late_early_list); + dpm_clear_async_state(dev); if (dpm_async_fn(dev, async_suspend_late)) continue; @@ -1654,6 +1668,7 @@ static int device_suspend(struct device *dev, pm_message_t state, bool async) pm_runtime_disable(dev); if (pm_runtime_status_suspended(dev)) { pm_dev_dbg(dev, state, "direct-complete "); + dev->power.is_suspended = true; goto Complete; } @@ -1764,6 +1779,7 @@ int dpm_suspend(pm_message_t state) list_move(&dev->power.entry, &dpm_suspended_list); + dpm_clear_async_state(dev); if (dpm_async_fn(dev, async_suspend)) continue; @@ -1795,6 +1811,46 @@ int dpm_suspend(pm_message_t state) return error; } +static bool device_prepare_smart_suspend(struct device *dev) +{ + struct device_link *link; + bool ret = true; + int idx; + + /* + * The "smart suspend" feature is enabled for devices whose drivers ask + * for it and for devices without PM callbacks. + * + * However, if "smart suspend" is not enabled for the device's parent + * or any of its suppliers that take runtime PM into account, it cannot + * be enabled for the device either. 
+ */ + if (!dev->power.no_pm_callbacks && + !dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND)) + return false; + + if (dev->parent && !dev_pm_smart_suspend(dev->parent) && + !dev->parent->power.ignore_children && !pm_runtime_blocked(dev->parent)) + return false; + + idx = device_links_read_lock(); + + list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) { + if (!(link->flags & DL_FLAG_PM_RUNTIME)) + continue; + + if (!dev_pm_smart_suspend(link->supplier) && + !pm_runtime_blocked(link->supplier)) { + ret = false; + break; + } + } + + device_links_read_unlock(idx); + + return ret; +} + /** * device_prepare - Prepare a device for system power transition. * @dev: Device to handle. @@ -1806,6 +1862,7 @@ int dpm_suspend(pm_message_t state) static int device_prepare(struct device *dev, pm_message_t state) { int (*callback)(struct device *) = NULL; + bool smart_suspend; int ret = 0; /* @@ -1815,6 +1872,13 @@ static int device_prepare(struct device *dev, pm_message_t state) * it again during the complete phase. */ pm_runtime_get_noresume(dev); + /* + * If runtime PM is disabled for the device at this point and it has + * never been enabled so far, it should not be enabled until this system + * suspend-resume cycle is complete, so prepare to trigger a warning on + * subsequent attempts to enable it. + */ + smart_suspend = !pm_runtime_block_if_disabled(dev); if (dev->power.syscore) return 0; @@ -1849,6 +1913,13 @@ unlock: pm_runtime_put(dev); return ret; } + /* Do not enable "smart suspend" for devices with disabled runtime PM. */ + if (smart_suspend) + smart_suspend = device_prepare_smart_suspend(dev); + + spin_lock_irq(&dev->power.lock); + + dev->power.smart_suspend = smart_suspend; /* * A positive return value from ->prepare() means "this device appears * to be runtime-suspended and its state is fine, so if it really is @@ -1856,11 +1927,12 @@ unlock: * will do the same thing with all of its descendants". This only * applies to suspend transitions, however. 
*/ - spin_lock_irq(&dev->power.lock); dev->power.direct_complete = state.event == PM_EVENT_SUSPEND && (ret > 0 || dev->power.no_pm_callbacks) && !dev_pm_test_driver_flags(dev, DPM_FLAG_NO_DIRECT_COMPLETE); + spin_unlock_irq(&dev->power.lock); + return 0; } @@ -2024,6 +2096,5 @@ void device_pm_check_callbacks(struct device *dev) bool dev_pm_skip_suspend(struct device *dev) { - return dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) && - pm_runtime_status_suspended(dev); + return dev_pm_smart_suspend(dev) && pm_runtime_status_suspended(dev); } diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index da74e1c69f7a..9589ccb0fda2 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -1468,6 +1468,31 @@ int pm_runtime_barrier(struct device *dev) } EXPORT_SYMBOL_GPL(pm_runtime_barrier); +bool pm_runtime_block_if_disabled(struct device *dev) +{ + bool ret; + + spin_lock_irq(&dev->power.lock); + + ret = !pm_runtime_enabled(dev); + if (ret && dev->power.last_status == RPM_INVALID) + dev->power.last_status = RPM_BLOCKED; + + spin_unlock_irq(&dev->power.lock); + + return ret; +} + +void pm_runtime_unblock(struct device *dev) +{ + spin_lock_irq(&dev->power.lock); + + if (dev->power.last_status == RPM_BLOCKED) + dev->power.last_status = RPM_INVALID; + + spin_unlock_irq(&dev->power.lock); +} + void __pm_runtime_disable(struct device *dev, bool check_resume) { spin_lock_irq(&dev->power.lock); @@ -1526,6 +1551,10 @@ void pm_runtime_enable(struct device *dev) if (--dev->power.disable_depth > 0) goto out; + if (dev->power.last_status == RPM_BLOCKED) { + dev_warn(dev, "Attempt to enable runtime PM when it is blocked\n"); + dump_stack(); + } dev->power.last_status = RPM_INVALID; dev->power.accounting_timestamp = ktime_get_mono_fast_ns(); @@ -1868,7 +1897,7 @@ void pm_runtime_drop_link(struct device_link *link) pm_request_idle(link->supplier); } -static bool pm_runtime_need_not_resume(struct device *dev) +bool pm_runtime_need_not_resume(struct device *dev) { return atomic_read(&dev->power.usage_count) <= 1 && (atomic_read(&dev->power.child_count) == 0 || diff --git a/drivers/mfd/intel-lpss.c b/drivers/mfd/intel-lpss.c index 3ba05ebb9035..63d6694f7145 100644 --- a/drivers/mfd/intel-lpss.c +++ b/drivers/mfd/intel-lpss.c @@ -480,7 +480,7 @@ EXPORT_SYMBOL_NS_GPL(intel_lpss_remove, "INTEL_LPSS"); static int resume_lpss_device(struct device *dev, void *data) { - if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND)) + if (!dev_pm_smart_suspend(dev)) pm_runtime_resume(dev); return 0; diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index f57ea36d125d..02726f36beb5 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c @@ -812,8 +812,7 @@ static int pci_pm_suspend(struct device *dev) * suspend callbacks can cope with runtime-suspended devices, it is * better to resume the device from runtime suspend here. */ - if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) || - pci_dev_need_resume(pci_dev)) { + if (!dev_pm_smart_suspend(dev) || pci_dev_need_resume(pci_dev)) { pm_runtime_resume(dev); pci_dev->state_saved = false; } else { @@ -1151,8 +1150,7 @@ static int pci_pm_poweroff(struct device *dev) } /* The reason to do that is the same as in pci_pm_suspend(). 
*/ - if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) || - pci_dev_need_resume(pci_dev)) { + if (!dev_pm_smart_suspend(dev) || pci_dev_need_resume(pci_dev)) { pm_runtime_resume(dev); pci_dev->state_saved = false; } else { diff --git a/include/linux/device.h b/include/linux/device.h index 80a5b3268986..615282365052 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -1025,6 +1025,15 @@ static inline bool dev_pm_test_driver_flags(struct device *dev, u32 flags) return !!(dev->power.driver_flags & flags); } +static inline bool dev_pm_smart_suspend(struct device *dev) +{ +#ifdef CONFIG_PM_SLEEP + return dev->power.smart_suspend; +#else + return false; +#endif +} + static inline void device_lock(struct device *dev) { mutex_lock(&dev->mutex); diff --git a/include/linux/pm.h b/include/linux/pm.h index 78855d794342..f0bd8fbae4f2 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h @@ -597,6 +597,7 @@ enum rpm_status { RPM_RESUMING, RPM_SUSPENDED, RPM_SUSPENDING, + RPM_BLOCKED, }; /* @@ -678,9 +679,9 @@ struct dev_pm_info { bool wakeup_path:1; bool syscore:1; bool no_pm_callbacks:1; /* Owned by the PM core */ - bool async_in_progress:1; /* Owned by the PM core */ + bool work_in_progress:1; /* Owned by the PM core */ + bool smart_suspend:1; /* Owned by the PM core */ bool must_resume:1; /* Owned by the PM core */ - bool set_active:1; /* Owned by the PM core */ bool may_skip_resume:1; /* Set by subsystems */ #else bool should_wakeup:1; @@ -838,10 +839,8 @@ extern int pm_generic_resume_early(struct device *dev); extern int pm_generic_resume_noirq(struct device *dev); extern int pm_generic_resume(struct device *dev); extern int pm_generic_freeze_noirq(struct device *dev); -extern int pm_generic_freeze_late(struct device *dev); extern int pm_generic_freeze(struct device *dev); extern int pm_generic_thaw_noirq(struct device *dev); -extern int pm_generic_thaw_early(struct device *dev); extern int pm_generic_thaw(struct device *dev); extern int pm_generic_restore_noirq(struct device *dev); extern int pm_generic_restore_early(struct device *dev); @@ -883,10 +882,8 @@ static inline void dpm_for_each_dev(void *data, void (*fn)(struct device *, void #define pm_generic_resume_noirq NULL #define pm_generic_resume NULL #define pm_generic_freeze_noirq NULL -#define pm_generic_freeze_late NULL #define pm_generic_freeze NULL #define pm_generic_thaw_noirq NULL -#define pm_generic_thaw_early NULL #define pm_generic_thaw NULL #define pm_generic_restore_noirq NULL #define pm_generic_restore_early NULL diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h index 72c62e1171ca..7fb5a459847e 100644 --- a/include/linux/pm_runtime.h +++ b/include/linux/pm_runtime.h @@ -66,6 +66,7 @@ static inline bool queue_pm_work(struct work_struct *work) extern int pm_generic_runtime_suspend(struct device *dev); extern int pm_generic_runtime_resume(struct device *dev); +extern bool pm_runtime_need_not_resume(struct device *dev); extern int pm_runtime_force_suspend(struct device *dev); extern int pm_runtime_force_resume(struct device *dev); @@ -77,6 +78,8 @@ extern int pm_runtime_get_if_in_use(struct device *dev); extern int pm_schedule_suspend(struct device *dev, unsigned int delay); extern int __pm_runtime_set_status(struct device *dev, unsigned int status); extern int pm_runtime_barrier(struct device *dev); +extern bool pm_runtime_block_if_disabled(struct device *dev); +extern void pm_runtime_unblock(struct device *dev); extern void pm_runtime_enable(struct device *dev); extern void 
__pm_runtime_disable(struct device *dev, bool check_resume); extern void pm_runtime_allow(struct device *dev); @@ -197,6 +200,17 @@ static inline bool pm_runtime_enabled(struct device *dev) } /** + * pm_runtime_blocked - Check if runtime PM enabling is blocked. + * @dev: Target device. + * + * Do not call this function outside system suspend/resume code paths. + */ +static inline bool pm_runtime_blocked(struct device *dev) +{ + return dev->power.last_status == RPM_BLOCKED; +} + +/** * pm_runtime_has_no_callbacks - Check if runtime PM callbacks may be present. * @dev: Target device. * @@ -241,6 +255,7 @@ static inline bool queue_pm_work(struct work_struct *work) { return false; } static inline int pm_generic_runtime_suspend(struct device *dev) { return 0; } static inline int pm_generic_runtime_resume(struct device *dev) { return 0; } +static inline bool pm_runtime_need_not_resume(struct device *dev) {return true; } static inline int pm_runtime_force_suspend(struct device *dev) { return 0; } static inline int pm_runtime_force_resume(struct device *dev) { return 0; } @@ -271,8 +286,11 @@ static inline int pm_runtime_get_if_active(struct device *dev) static inline int __pm_runtime_set_status(struct device *dev, unsigned int status) { return 0; } static inline int pm_runtime_barrier(struct device *dev) { return 0; } +static inline bool pm_runtime_block_if_disabled(struct device *dev) { return true; } +static inline void pm_runtime_unblock(struct device *dev) {} static inline void pm_runtime_enable(struct device *dev) {} static inline void __pm_runtime_disable(struct device *dev, bool c) {} +static inline bool pm_runtime_blocked(struct device *dev) { return true; } static inline void pm_runtime_allow(struct device *dev) {} static inline void pm_runtime_forbid(struct device *dev) {} diff --git a/include/linux/pm_wakeup.h b/include/linux/pm_wakeup.h index d501c09c60cd..51e0e8dd5f9e 100644 --- a/include/linux/pm_wakeup.h +++ b/include/linux/pm_wakeup.h @@ -205,17 +205,17 @@ static inline void device_set_awake_path(struct device *dev) static inline void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec) { - return pm_wakeup_ws_event(ws, msec, false); + pm_wakeup_ws_event(ws, msec, false); } static inline void pm_wakeup_event(struct device *dev, unsigned int msec) { - return pm_wakeup_dev_event(dev, msec, false); + pm_wakeup_dev_event(dev, msec, false); } static inline void pm_wakeup_hard_event(struct device *dev) { - return pm_wakeup_dev_event(dev, 0, true); + pm_wakeup_dev_event(dev, 0, true); } /** diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c index 10a01af63a80..b129ed1d25a8 100644 --- a/kernel/power/hibernate.c +++ b/kernel/power/hibernate.c @@ -1446,10 +1446,10 @@ static const char * const comp_alg_enabled[] = { static int hibernate_compressor_param_set(const char *compressor, const struct kernel_param *kp) { - unsigned int sleep_flags; int index, ret; - sleep_flags = lock_system_sleep(); + if (!mutex_trylock(&system_transition_mutex)) + return -EBUSY; index = sysfs_match_string(comp_alg_enabled, compressor); if (index >= 0) { @@ -1461,7 +1461,7 @@ static int hibernate_compressor_param_set(const char *compressor, ret = index; } - unlock_system_sleep(sleep_flags); + mutex_unlock(&system_transition_mutex); if (ret) pr_debug("Cannot set specified compressor %s\n", diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c index c9fb559a6399..4e6e24e8b854 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c @@ -2270,9 +2270,9 @@ int 
snapshot_read_next(struct snapshot_handle *handle) */ void *kaddr; - kaddr = kmap_atomic(page); + kaddr = kmap_local_page(page); copy_page(buffer, kaddr); - kunmap_atomic(kaddr); + kunmap_local(kaddr); handle->buffer = buffer; } else { handle->buffer = page_address(page); @@ -2561,9 +2561,9 @@ static void copy_last_highmem_page(void) if (last_highmem_page) { void *dst; - dst = kmap_atomic(last_highmem_page); + dst = kmap_local_page(last_highmem_page); copy_page(dst, buffer); - kunmap_atomic(dst); + kunmap_local(dst); last_highmem_page = NULL; } } @@ -2881,13 +2881,13 @@ static inline void swap_two_pages_data(struct page *p1, struct page *p2, { void *kaddr1, *kaddr2; - kaddr1 = kmap_atomic(p1); - kaddr2 = kmap_atomic(p2); + kaddr1 = kmap_local_page(p1); + kaddr2 = kmap_local_page(p2); copy_page(buf, kaddr1); copy_page(kaddr1, kaddr2); copy_page(kaddr2, buf); - kunmap_atomic(kaddr2); - kunmap_atomic(kaddr1); + kunmap_local(kaddr2); + kunmap_local(kaddr1); } /** diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c index 09f8397bae15..6fae1e0a331c 100644 --- a/kernel/power/suspend.c +++ b/kernel/power/suspend.c @@ -91,6 +91,16 @@ static void s2idle_enter(void) { trace_suspend_resume(TPS("machine_suspend"), PM_SUSPEND_TO_IDLE, true); + /* + * The correctness of the code below depends on the number of online + * CPUs being stable, but CPUs cannot be taken offline or put online + * while it is running. + * + * The s2idle_lock must be acquired before the pending wakeup check to + * prevent pm_system_wakeup() from running as a whole between that check + * and the subsequent s2idle_state update in which case a wakeup event + * would get lost. + */ raw_spin_lock_irq(&s2idle_lock); if (pm_wakeup_pending()) goto out; @@ -98,8 +108,6 @@ static void s2idle_enter(void) s2idle_state = S2IDLE_STATE_ENTER; raw_spin_unlock_irq(&s2idle_lock); - cpus_read_lock(); - /* Push all the CPUs into the idle loop. */ wake_up_all_idle_cpus(); /* Make the current CPU wait so it can enter the idle loop too. */ @@ -112,8 +120,6 @@ static void s2idle_enter(void) */ wake_up_all_idle_cpus(); - cpus_read_unlock(); - raw_spin_lock_irq(&s2idle_lock); out: |