Diffstat (limited to 'drivers/base/power')
 drivers/base/power/clock_ops.c   |  73
 drivers/base/power/generic_ops.c |  24
 drivers/base/power/main.c        | 167
 drivers/base/power/runtime.c     |  91
 drivers/base/power/wakeup.c      |   6
 5 files changed, 181 insertions(+), 180 deletions(-)
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index e18ba676cdf6..b69bcb37c830 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -259,39 +259,6 @@ int pm_clk_add_clk(struct device *dev, struct clk *clk)
}
EXPORT_SYMBOL_GPL(pm_clk_add_clk);
-
-/**
- * of_pm_clk_add_clk - Start using a device clock for power management.
- * @dev: Device whose clock is going to be used for power management.
- * @name: Name of clock that is going to be used for power management.
- *
- * Add the clock described in the 'clocks' device-tree node that matches
- * with the 'name' provided, to the list of clocks used for the power
- * management of @dev. On success, returns 0. Returns a negative error
- * code if the clock is not found or cannot be added.
- */
-int of_pm_clk_add_clk(struct device *dev, const char *name)
-{
- struct clk *clk;
- int ret;
-
- if (!dev || !dev->of_node || !name)
- return -EINVAL;
-
- clk = of_clk_get_by_name(dev->of_node, name);
- if (IS_ERR(clk))
- return PTR_ERR(clk);
-
- ret = pm_clk_add_clk(dev, clk);
- if (ret) {
- clk_put(clk);
- return ret;
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(of_pm_clk_add_clk);
-
/**
* of_pm_clk_add_clks - Start using device clock(s) for power management.
* @dev: Device whose clock(s) is going to be used for power management.
@@ -377,46 +344,6 @@ static void __pm_clk_remove(struct pm_clock_entry *ce)
}
/**
- * pm_clk_remove - Stop using a device clock for power management.
- * @dev: Device whose clock should not be used for PM any more.
- * @con_id: Connection ID of the clock.
- *
- * Remove the clock represented by @con_id from the list of clocks used for
- * the power management of @dev.
- */
-void pm_clk_remove(struct device *dev, const char *con_id)
-{
- struct pm_subsys_data *psd = dev_to_psd(dev);
- struct pm_clock_entry *ce;
-
- if (!psd)
- return;
-
- pm_clk_list_lock(psd);
-
- list_for_each_entry(ce, &psd->clock_list, node) {
- if (!con_id && !ce->con_id)
- goto remove;
- else if (!con_id || !ce->con_id)
- continue;
- else if (!strcmp(con_id, ce->con_id))
- goto remove;
- }
-
- pm_clk_list_unlock(psd);
- return;
-
- remove:
- list_del(&ce->node);
- if (ce->enabled_when_prepared)
- psd->clock_op_might_sleep--;
- pm_clk_list_unlock(psd);
-
- __pm_clk_remove(ce);
-}
-EXPORT_SYMBOL_GPL(pm_clk_remove);
-
-/**
* pm_clk_remove_clk - Stop using a device clock for power management.
* @dev: Device whose clock should not be used for PM any more.
* @clk: Clock pointer
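
Note: of_pm_clk_add_clk() and pm_clk_remove() are removed above because they had no remaining callers. For reference, a driver managing a single named clock can get the same effect with the surviving API; a minimal sketch (hypothetical foo_probe(), and the "bus" clock name is an assumption):

        #include <linux/clk.h>
        #include <linux/err.h>
        #include <linux/of.h>
        #include <linux/platform_device.h>
        #include <linux/pm_clock.h>

        static int foo_probe(struct platform_device *pdev)
        {
                struct clk *clk;
                int ret;

                ret = pm_clk_create(&pdev->dev);
                if (ret)
                        return ret;

                /* Look the clock up by name, as of_pm_clk_add_clk() did. */
                clk = of_clk_get_by_name(pdev->dev.of_node, "bus");
                if (IS_ERR(clk)) {
                        pm_clk_destroy(&pdev->dev);
                        return PTR_ERR(clk);
                }

                /* pm_clk_add_clk() takes over the clock reference on success. */
                ret = pm_clk_add_clk(&pdev->dev, clk);
                if (ret) {
                        clk_put(clk);
                        pm_clk_destroy(&pdev->dev);
                        return ret;
                }

                return 0;
        }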
diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c
index 4fa525668cb7..6502720bb564 100644
--- a/drivers/base/power/generic_ops.c
+++ b/drivers/base/power/generic_ops.c
@@ -115,18 +115,6 @@ int pm_generic_freeze_noirq(struct device *dev)
EXPORT_SYMBOL_GPL(pm_generic_freeze_noirq);
/**
- * pm_generic_freeze_late - Generic freeze_late callback for subsystems.
- * @dev: Device to freeze.
- */
-int pm_generic_freeze_late(struct device *dev)
-{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- return pm && pm->freeze_late ? pm->freeze_late(dev) : 0;
-}
-EXPORT_SYMBOL_GPL(pm_generic_freeze_late);
-
-/**
* pm_generic_freeze - Generic freeze callback for subsystems.
* @dev: Device to freeze.
*/
@@ -187,18 +175,6 @@ int pm_generic_thaw_noirq(struct device *dev)
EXPORT_SYMBOL_GPL(pm_generic_thaw_noirq);
/**
- * pm_generic_thaw_early - Generic thaw_early callback for subsystems.
- * @dev: Device to thaw.
- */
-int pm_generic_thaw_early(struct device *dev)
-{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- return pm && pm->thaw_early ? pm->thaw_early(dev) : 0;
-}
-EXPORT_SYMBOL_GPL(pm_generic_thaw_early);
-
-/**
* pm_generic_thaw - Generic thaw callback for subsystems.
* @dev: Device to thaw.
*/
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 40e1d8d8a589..c8b0a9e29ed8 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -249,7 +249,7 @@ static int dpm_wait_fn(struct device *dev, void *async_ptr)
static void dpm_wait_for_children(struct device *dev, bool async)
{
- device_for_each_child(dev, &async, dpm_wait_fn);
+ device_for_each_child(dev, &async, dpm_wait_fn);
}
static void dpm_wait_for_suppliers(struct device *dev, bool async)
@@ -559,7 +559,7 @@ static void dpm_watchdog_clear(struct dpm_watchdog *wd)
{
struct timer_list *timer = &wd->timer;
- del_timer_sync(timer);
+ timer_delete_sync(timer);
destroy_timer_on_stack(timer);
}
#else
@@ -599,27 +599,34 @@ static bool is_async(struct device *dev)
static bool dpm_async_fn(struct device *dev, async_func_t func)
{
- reinit_completion(&dev->power.completion);
+ if (!is_async(dev))
+ return false;
- if (is_async(dev)) {
- dev->power.async_in_progress = true;
+ dev->power.work_in_progress = true;
- get_device(dev);
+ get_device(dev);
+
+ if (async_schedule_dev_nocall(func, dev))
+ return true;
- if (async_schedule_dev_nocall(func, dev))
- return true;
+ put_device(dev);
- put_device(dev);
- }
/*
- * Because async_schedule_dev_nocall() above has returned false or it
- * has not been called at all, func() is not running and it is safe to
- * update the async_in_progress flag without extra synchronization.
+ * async_schedule_dev_nocall() above has returned false, so func() is
+ * not running and it is safe to update power.work_in_progress without
+ * extra synchronization.
*/
- dev->power.async_in_progress = false;
+ dev->power.work_in_progress = false;
+
return false;
}
+static void dpm_clear_async_state(struct device *dev)
+{
+ reinit_completion(&dev->power.completion);
+ dev->power.work_in_progress = false;
+}
+
/**
* device_resume_noirq - Execute a "noirq resume" callback for given device.
* @dev: Device to handle.
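
Note: after this rework dpm_async_fn() no longer reinitializes the completion itself, so each dispatch loop must call the new dpm_clear_async_state() first. The resulting caller pattern, condensed from the loops changed below:

        list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
                dpm_clear_async_state(dev);      /* reinit completion, clear flag */
                dpm_async_fn(dev, async_resume); /* may set work_in_progress */
        }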
@@ -656,15 +663,13 @@ static void device_resume_noirq(struct device *dev, pm_message_t state, bool async)
* so change its status accordingly.
*
* Otherwise, the device is going to be resumed, so set its PM-runtime
- * status to "active" unless its power.set_active flag is clear, in
+ * status to "active" unless its power.smart_suspend flag is clear, in
* which case it is not necessary to update its PM-runtime status.
*/
- if (skip_resume) {
+ if (skip_resume)
pm_runtime_set_suspended(dev);
- } else if (dev->power.set_active) {
+ else if (dev_pm_smart_suspend(dev))
pm_runtime_set_active(dev);
- dev->power.set_active = false;
- }
if (dev->pm_domain) {
info = "noirq power domain ";
@@ -731,14 +736,16 @@ static void dpm_noirq_resume_devices(pm_message_t state)
* Trigger the resume of "async" devices upfront so they don't have to
* wait for the "non-async" ones they don't depend on.
*/
- list_for_each_entry(dev, &dpm_noirq_list, power.entry)
+ list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
+ dpm_clear_async_state(dev);
dpm_async_fn(dev, async_resume_noirq);
+ }
while (!list_empty(&dpm_noirq_list)) {
dev = to_device(dpm_noirq_list.next);
list_move_tail(&dev->power.entry, &dpm_late_early_list);
- if (!dev->power.async_in_progress) {
+ if (!dev->power.work_in_progress) {
get_device(dev);
mutex_unlock(&dpm_list_mtx);
@@ -871,14 +878,16 @@ void dpm_resume_early(pm_message_t state)
* Trigger the resume of "async" devices upfront so they don't have to
* wait for the "non-async" ones they don't depend on.
*/
- list_for_each_entry(dev, &dpm_late_early_list, power.entry)
+ list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
+ dpm_clear_async_state(dev);
dpm_async_fn(dev, async_resume_early);
+ }
while (!list_empty(&dpm_late_early_list)) {
dev = to_device(dpm_late_early_list.next);
list_move_tail(&dev->power.entry, &dpm_suspended_list);
- if (!dev->power.async_in_progress) {
+ if (!dev->power.work_in_progress) {
get_device(dev);
mutex_unlock(&dpm_list_mtx);
@@ -929,7 +938,17 @@ static void device_resume(struct device *dev, pm_message_t state, bool async)
if (dev->power.syscore)
goto Complete;
+ if (!dev->power.is_suspended)
+ goto Complete;
+
if (dev->power.direct_complete) {
+ /*
+ * Allow new children to be added under the device after this
+ * point if it has no PM callbacks.
+ */
+ if (dev->power.no_pm_callbacks)
+ dev->power.is_prepared = false;
+
/* Match the pm_runtime_disable() in device_suspend(). */
pm_runtime_enable(dev);
goto Complete;
@@ -947,9 +966,6 @@ static void device_resume(struct device *dev, pm_message_t state, bool async)
*/
dev->power.is_prepared = false;
- if (!dev->power.is_suspended)
- goto Unlock;
-
if (dev->pm_domain) {
info = "power domain ";
callback = pm_op(&dev->pm_domain->ops, state);
@@ -989,7 +1005,6 @@ static void device_resume(struct device *dev, pm_message_t state, bool async)
error = dpm_run_callback(callback, dev, state, info);
dev->power.is_suspended = false;
- Unlock:
device_unlock(dev);
dpm_watchdog_clear(&wd);
@@ -1037,14 +1052,16 @@ void dpm_resume(pm_message_t state)
* Trigger the resume of "async" devices upfront so they don't have to
* wait for the "non-async" ones they don't depend on.
*/
- list_for_each_entry(dev, &dpm_suspended_list, power.entry)
+ list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
+ dpm_clear_async_state(dev);
dpm_async_fn(dev, async_resume);
+ }
while (!list_empty(&dpm_suspended_list)) {
dev = to_device(dpm_suspended_list.next);
list_move_tail(&dev->power.entry, &dpm_prepared_list);
- if (!dev->power.async_in_progress) {
+ if (!dev->power.work_in_progress) {
get_device(dev);
mutex_unlock(&dpm_list_mtx);
@@ -1109,6 +1126,8 @@ static void device_complete(struct device *dev, pm_message_t state)
device_unlock(dev);
out:
+ /* If enabling runtime PM for the device is blocked, unblock it. */
+ pm_runtime_unblock(dev);
pm_runtime_put(dev);
}
@@ -1270,24 +1289,17 @@ Skip:
dev->power.is_noirq_suspended = true;
/*
- * Skipping the resume of devices that were in use right before the
- * system suspend (as indicated by their PM-runtime usage counters)
- * would be suboptimal. Also resume them if doing that is not allowed
- * to be skipped.
+ * Devices must be resumed unless they are explicitly allowed to be left
+ * in suspend, but even in that case skipping the resume of devices that
+ * were in use right before the system suspend (as indicated by their
+ * runtime PM usage counters and child counters) would be suboptimal.
*/
- if (atomic_read(&dev->power.usage_count) > 1 ||
- !(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) &&
- dev->power.may_skip_resume))
+ if (!(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) &&
+ dev->power.may_skip_resume) || !pm_runtime_need_not_resume(dev))
dev->power.must_resume = true;
- if (dev->power.must_resume) {
- if (dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND)) {
- dev->power.set_active = true;
- if (dev->parent && !dev->parent->power.ignore_children)
- dev->parent->power.set_active = true;
- }
+ if (dev->power.must_resume)
dpm_superior_set_must_resume(dev);
- }
Complete:
complete_all(&dev->power.completion);
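
Note: both flags checked above are opt-in. A driver that wants its device left suspended across system resume sets them once at probe time; a hypothetical sketch using the standard helper from linux/pm.h:

        static int foo_probe(struct platform_device *pdev)
        {
                /* Opt in to "smart suspend" and to skipping resume. */
                dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_SMART_SUSPEND |
                                        DPM_FLAG_MAY_SKIP_RESUME);
                /* ... */
                return 0;
        }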
@@ -1320,6 +1332,7 @@ static int dpm_noirq_suspend_devices(pm_message_t state)
list_move(&dev->power.entry, &dpm_noirq_list);
+ dpm_clear_async_state(dev);
if (dpm_async_fn(dev, async_suspend_noirq))
continue;
@@ -1404,6 +1417,10 @@ static int device_suspend_late(struct device *dev, pm_message_t state, bool async)
TRACE_DEVICE(dev);
TRACE_SUSPEND(0);
+ /*
+ * Disable runtime PM for the device without checking if there is a
+ * pending resume request for it.
+ */
__pm_runtime_disable(dev, false);
dpm_wait_for_subordinate(dev, async);
@@ -1493,6 +1510,7 @@ int dpm_suspend_late(pm_message_t state)
list_move(&dev->power.entry, &dpm_late_early_list);
+ dpm_clear_async_state(dev);
if (dpm_async_fn(dev, async_suspend_late))
continue;
@@ -1650,6 +1668,7 @@ static int device_suspend(struct device *dev, pm_message_t state, bool async)
pm_runtime_disable(dev);
if (pm_runtime_status_suspended(dev)) {
pm_dev_dbg(dev, state, "direct-complete ");
+ dev->power.is_suspended = true;
goto Complete;
}
@@ -1760,6 +1779,7 @@ int dpm_suspend(pm_message_t state)
list_move(&dev->power.entry, &dpm_suspended_list);
+ dpm_clear_async_state(dev);
if (dpm_async_fn(dev, async_suspend))
continue;
@@ -1791,6 +1811,46 @@ int dpm_suspend(pm_message_t state)
return error;
}
+static bool device_prepare_smart_suspend(struct device *dev)
+{
+ struct device_link *link;
+ bool ret = true;
+ int idx;
+
+ /*
+ * The "smart suspend" feature is enabled for devices whose drivers ask
+ * for it and for devices without PM callbacks.
+ *
+ * However, if "smart suspend" is not enabled for the device's parent
+ * or any of its suppliers that take runtime PM into account, it cannot
+ * be enabled for the device either.
+ */
+ if (!dev->power.no_pm_callbacks &&
+ !dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND))
+ return false;
+
+ if (dev->parent && !dev_pm_smart_suspend(dev->parent) &&
+ !dev->parent->power.ignore_children && !pm_runtime_blocked(dev->parent))
+ return false;
+
+ idx = device_links_read_lock();
+
+ list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
+ if (!(link->flags & DL_FLAG_PM_RUNTIME))
+ continue;
+
+ if (!dev_pm_smart_suspend(link->supplier) &&
+ !pm_runtime_blocked(link->supplier)) {
+ ret = false;
+ break;
+ }
+ }
+
+ device_links_read_unlock(idx);
+
+ return ret;
+}
+
/**
* device_prepare - Prepare a device for system power transition.
* @dev: Device to handle.
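
Note: the supplier walk in device_prepare_smart_suspend() only sees links created with DL_FLAG_PM_RUNTIME. A consumer that wants its supplier taken into account there creates such a link, e.g. (sketch; consumer_dev and supplier_dev are hypothetical pointers):

        struct device_link *link;

        link = device_link_add(consumer_dev, supplier_dev,
                               DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_CONSUMER);
        if (!link)
                return -ENODEV;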
@@ -1802,6 +1862,7 @@ int dpm_suspend(pm_message_t state)
static int device_prepare(struct device *dev, pm_message_t state)
{
int (*callback)(struct device *) = NULL;
+ bool smart_suspend;
int ret = 0;
/*
@@ -1811,6 +1872,13 @@ static int device_prepare(struct device *dev, pm_message_t state)
* it again during the complete phase.
*/
pm_runtime_get_noresume(dev);
+ /*
+ * If runtime PM is disabled for the device at this point and it has
+ * never been enabled so far, it should not be enabled until this system
+ * suspend-resume cycle is complete, so prepare to trigger a warning on
+ * subsequent attempts to enable it.
+ */
+ smart_suspend = !pm_runtime_block_if_disabled(dev);
if (dev->power.syscore)
return 0;
@@ -1845,6 +1913,13 @@ unlock:
pm_runtime_put(dev);
return ret;
}
+ /* Do not enable "smart suspend" for devices with disabled runtime PM. */
+ if (smart_suspend)
+ smart_suspend = device_prepare_smart_suspend(dev);
+
+ spin_lock_irq(&dev->power.lock);
+
+ dev->power.smart_suspend = smart_suspend;
/*
* A positive return value from ->prepare() means "this device appears
* to be runtime-suspended and its state is fine, so if it really is
@@ -1852,11 +1927,12 @@ unlock:
* will do the same thing with all of its descendants". This only
* applies to suspend transitions, however.
*/
- spin_lock_irq(&dev->power.lock);
dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
(ret > 0 || dev->power.no_pm_callbacks) &&
!dev_pm_test_driver_flags(dev, DPM_FLAG_NO_DIRECT_COMPLETE);
+
spin_unlock_irq(&dev->power.lock);
+
return 0;
}
@@ -2020,6 +2096,5 @@ void device_pm_check_callbacks(struct device *dev)
bool dev_pm_skip_suspend(struct device *dev)
{
- return dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
- pm_runtime_status_suspended(dev);
+ return dev_pm_smart_suspend(dev) && pm_runtime_status_suspended(dev);
}
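
Note: with dev_pm_skip_suspend() now keyed to the per-cycle power.smart_suspend bit via dev_pm_smart_suspend(), subsystem late-suspend callbacks keep using it unchanged; the typical pattern (sketch modeled on existing users such as PCI):

        static int foo_suspend_late(struct device *dev)
        {
                /* Trust the runtime-PM state and skip the device entirely. */
                if (dev_pm_skip_suspend(dev))
                        return 0;

                /* ...otherwise put the device into a low-power state... */
                return 0;
        }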
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 2ee45841486b..0e127b0329c0 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -448,8 +448,19 @@ static int rpm_callback(int (*cb)(struct device *), struct device *dev)
retval = __rpm_callback(cb, dev);
}
- dev->power.runtime_error = retval;
- return retval != -EACCES ? retval : -EIO;
+ /*
+ * Since -EACCES means that runtime PM is disabled for the given device,
+ * it should not be returned by runtime PM callbacks. If it is returned
+ * nevertheless, assume it to be a transient error and convert it to
+ * -EAGAIN.
+ */
+ if (retval == -EACCES)
+ retval = -EAGAIN;
+
+ if (retval != -EAGAIN && retval != -EBUSY)
+ dev->power.runtime_error = retval;
+
+ return retval;
}
/**
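
Note: after this change only persistent errors latch power.runtime_error: -EAGAIN and -EBUSY are treated as transient, and -EACCES ("runtime PM disabled") is converted to -EAGAIN. A runtime-suspend callback reporting a transient condition might look like this (foo_dma_active() and foo_power_down() are hypothetical helpers):

        static int foo_runtime_suspend(struct device *dev)
        {
                struct foo *priv = dev_get_drvdata(dev);

                /* Transient: no longer latches power.runtime_error. */
                if (foo_dma_active(priv))       /* hypothetical helper */
                        return -EBUSY;

                foo_power_down(priv);           /* hypothetical helper */
                return 0;
        }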
@@ -725,21 +736,18 @@ static int rpm_suspend(struct device *dev, int rpmflags)
dev->power.deferred_resume = false;
wake_up_all(&dev->power.wait_queue);
- if (retval == -EAGAIN || retval == -EBUSY) {
- dev->power.runtime_error = 0;
+ /*
+ * On transient errors, if the callback routine failed an autosuspend,
+ * and if the last_busy time has been updated so that there is a new
+ * autosuspend expiration time, automatically reschedule another
+ * autosuspend.
+ */
+ if (!dev->power.runtime_error && (rpmflags & RPM_AUTO) &&
+ pm_runtime_autosuspend_expiration(dev) != 0)
+ goto repeat;
+
+ pm_runtime_cancel_pending(dev);
- /*
- * If the callback routine failed an autosuspend, and
- * if the last_busy time has been updated so that there
- * is a new autosuspend expiration time, automatically
- * reschedule another autosuspend.
- */
- if ((rpmflags & RPM_AUTO) &&
- pm_runtime_autosuspend_expiration(dev) != 0)
- goto repeat;
- } else {
- pm_runtime_cancel_pending(dev);
- }
goto out;
}
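
Note: the rescheduling above keys off pm_runtime_autosuspend_expiration(), which is non-zero whenever last_busy has been pushed forward. The standard driver idiom that feeds it (sketch):

        static void foo_io_done(struct device *dev)
        {
                /* Push last_busy forward, then drop the usage count. */
                pm_runtime_mark_last_busy(dev);
                pm_runtime_put_autosuspend(dev);
        }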
@@ -1460,20 +1468,31 @@ int pm_runtime_barrier(struct device *dev)
}
EXPORT_SYMBOL_GPL(pm_runtime_barrier);
-/**
- * __pm_runtime_disable - Disable runtime PM of a device.
- * @dev: Device to handle.
- * @check_resume: If set, check if there's a resume request for the device.
- *
- * Increment power.disable_depth for the device and if it was zero previously,
- * cancel all pending runtime PM requests for the device and wait for all
- * operations in progress to complete. The device can be either active or
- * suspended after its runtime PM has been disabled.
- *
- * If @check_resume is set and there's a resume request pending when
- * __pm_runtime_disable() is called and power.disable_depth is zero, the
- * function will wake up the device before disabling its runtime PM.
- */
+bool pm_runtime_block_if_disabled(struct device *dev)
+{
+ bool ret;
+
+ spin_lock_irq(&dev->power.lock);
+
+ ret = !pm_runtime_enabled(dev);
+ if (ret && dev->power.last_status == RPM_INVALID)
+ dev->power.last_status = RPM_BLOCKED;
+
+ spin_unlock_irq(&dev->power.lock);
+
+ return ret;
+}
+
+void pm_runtime_unblock(struct device *dev)
+{
+ spin_lock_irq(&dev->power.lock);
+
+ if (dev->power.last_status == RPM_BLOCKED)
+ dev->power.last_status = RPM_INVALID;
+
+ spin_unlock_irq(&dev->power.lock);
+}
+
void __pm_runtime_disable(struct device *dev, bool check_resume)
{
spin_lock_irq(&dev->power.lock);
@@ -1532,6 +1551,10 @@ void pm_runtime_enable(struct device *dev)
if (--dev->power.disable_depth > 0)
goto out;
+ if (dev->power.last_status == RPM_BLOCKED) {
+ dev_warn(dev, "Attempt to enable runtime PM when it is blocked\n");
+ dump_stack();
+ }
dev->power.last_status = RPM_INVALID;
dev->power.accounting_timestamp = ktime_get_mono_fast_ns();
@@ -1764,8 +1787,8 @@ void pm_runtime_init(struct device *dev)
INIT_WORK(&dev->power.work, pm_runtime_work);
dev->power.timer_expires = 0;
- hrtimer_init(&dev->power.suspend_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
- dev->power.suspend_timer.function = pm_suspend_timer_fn;
+ hrtimer_setup(&dev->power.suspend_timer, pm_suspend_timer_fn, CLOCK_MONOTONIC,
+ HRTIMER_MODE_ABS);
init_waitqueue_head(&dev->power.wait_queue);
}
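
Note: this hunk is part of the tree-wide conversion to hrtimer_setup(), which wires up the handler in a single call. The general before/after shape:

        /* Before (pattern removed tree-wide): */
        hrtimer_init(&t, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
        t.function = handler;

        /* After, as in the hunk above: */
        hrtimer_setup(&t, handler, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);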
@@ -1874,7 +1897,7 @@ void pm_runtime_drop_link(struct device_link *link)
pm_request_idle(link->supplier);
}
-static bool pm_runtime_need_not_resume(struct device *dev)
+bool pm_runtime_need_not_resume(struct device *dev)
{
return atomic_read(&dev->power.usage_count) <= 1 &&
(atomic_read(&dev->power.child_count) == 0 ||
@@ -1959,7 +1982,7 @@ int pm_runtime_force_resume(struct device *dev)
int (*callback)(struct device *);
int ret = 0;
- if (!pm_runtime_status_suspended(dev) || !dev->power.needs_force_resume)
+ if (!dev->power.needs_force_resume)
goto out;
/*
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 752b417e8129..63bf914a4d44 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -197,7 +197,7 @@ void wakeup_source_remove(struct wakeup_source *ws)
raw_spin_unlock_irqrestore(&events_lock, flags);
synchronize_srcu(&wakeup_srcu);
- del_timer_sync(&ws->timer);
+ timer_delete_sync(&ws->timer);
/*
* Clear timer.function to make wakeup_source_not_registered() treat
* this wakeup source as not registered.
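
Note: these wakeup.c hunks (and the dpm_watchdog_clear() change in main.c above) are part of the tree-wide timer API rename, with no behavioral change:

        del_timer(&t)       ->  timer_delete(&t)
        del_timer_sync(&t)  ->  timer_delete_sync(&t)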
@@ -613,7 +613,7 @@ void __pm_stay_awake(struct wakeup_source *ws)
spin_lock_irqsave(&ws->lock, flags);
wakeup_source_report_event(ws, false);
- del_timer(&ws->timer);
+ timer_delete(&ws->timer);
ws->timer_expires = 0;
spin_unlock_irqrestore(&ws->lock, flags);
@@ -693,7 +693,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
ws->max_time = duration;
ws->last_time = now;
- del_timer(&ws->timer);
+ timer_delete(&ws->timer);
ws->timer_expires = 0;
if (ws->autosleep_enabled)