author     Linus Torvalds <torvalds@linux-foundation.org>  2012-01-08 13:10:57 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-01-08 13:10:57 -0800
commit     eb59c505f8a5906ad2e053d14fab50eb8574fd6f (patch)
tree       c6e875adc12b481b916e847e8f80b8881a0fb02c
parent     1619ed8f60959829d070d8f39cd2f8ca0e7135ce (diff)
parent     c233523b3d392e530033a7587d7970dc62a02361 (diff)
download   linux-eb59c505f8a5906ad2e053d14fab50eb8574fd6f.tar.gz
           linux-eb59c505f8a5906ad2e053d14fab50eb8574fd6f.tar.bz2
           linux-eb59c505f8a5906ad2e053d14fab50eb8574fd6f.zip
Merge branch 'pm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
* 'pm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm: (76 commits)
  PM / Hibernate: Implement compat_ioctl for /dev/snapshot
  PM / Freezer: fix return value of freezable_schedule_timeout_killable()
  PM / shmobile: Allow the A4R domain to be turned off at run time
  PM / input / touchscreen: Make st1232 use device PM QoS constraints
  PM / QoS: Introduce dev_pm_qos_add_ancestor_request()
  PM / shmobile: Remove the stay_on flag from SH7372's PM domains
  PM / shmobile: Don't include SH7372's INTCS in syscore suspend/resume
  PM / shmobile: Add support for the sh7372 A4S power domain / sleep mode
  PM: Drop generic_subsys_pm_ops
  PM / Sleep: Remove forward-only callbacks from AMBA bus type
  PM / Sleep: Remove forward-only callbacks from platform bus type
  PM: Run the driver callback directly if the subsystem one is not there
  PM / Sleep: Make pm_op() and pm_noirq_op() return callback pointers
  PM/Devfreq: Add Exynos4-bus device DVFS driver for Exynos4210/4212/4412.
  PM / Sleep: Merge internal functions in generic_ops.c
  PM / Sleep: Simplify generic system suspend callbacks
  PM / Hibernate: Remove deprecated hibernation snapshot ioctls
  PM / Sleep: Fix freezer failures due to racy usermodehelper_is_disabled()
  ARM: S3C64XX: Implement basic power domain support
  PM / shmobile: Use common always on power domain governor
  ...

Fix up trivial conflict in fs/xfs/xfs_buf.c due to removal of unused
XBT_FORCE_SLEEP bit
-rw-r--r--  Documentation/feature-removal-schedule.txt | 11
-rw-r--r--  Documentation/power/devices.txt | 37
-rw-r--r--  Documentation/power/freezing-of-tasks.txt | 39
-rw-r--r--  Documentation/power/runtime_pm.txt | 130
-rw-r--r--  arch/alpha/include/asm/thread_info.h | 2
-rw-r--r--  arch/arm/include/asm/thread_info.h | 2
-rw-r--r--  arch/arm/mach-s3c64xx/Kconfig | 1
-rw-r--r--  arch/arm/mach-s3c64xx/mach-crag6410.c | 2
-rw-r--r--  arch/arm/mach-s3c64xx/pm.c | 176
-rw-r--r--  arch/arm/mach-shmobile/include/mach/common.h | 4
-rw-r--r--  arch/arm/mach-shmobile/include/mach/sh7372.h | 6
-rw-r--r--  arch/arm/mach-shmobile/intc-sh7372.c | 50
-rw-r--r--  arch/arm/mach-shmobile/pm-sh7372.c | 196
-rw-r--r--  arch/arm/mach-shmobile/setup-sh7372.c | 6
-rw-r--r--  arch/arm/mach-shmobile/sleep-sh7372.S | 21
-rw-r--r--  arch/arm/plat-samsung/include/plat/pm.h | 6
-rw-r--r--  arch/avr32/include/asm/thread_info.h | 2
-rw-r--r--  arch/blackfin/include/asm/thread_info.h | 2
-rw-r--r--  arch/cris/include/asm/thread_info.h | 2
-rw-r--r--  arch/frv/include/asm/thread_info.h | 2
-rw-r--r--  arch/h8300/include/asm/thread_info.h | 2
-rw-r--r--  arch/ia64/include/asm/thread_info.h | 2
-rw-r--r--  arch/m32r/include/asm/thread_info.h | 2
-rw-r--r--  arch/m68k/include/asm/thread_info.h | 1
-rw-r--r--  arch/microblaze/include/asm/thread_info.h | 2
-rw-r--r--  arch/mips/include/asm/thread_info.h | 2
-rw-r--r--  arch/mn10300/include/asm/thread_info.h | 2
-rw-r--r--  arch/parisc/include/asm/thread_info.h | 2
-rw-r--r--  arch/powerpc/include/asm/thread_info.h | 2
-rw-r--r--  arch/powerpc/kernel/vio.c | 1
-rw-r--r--  arch/s390/include/asm/thread_info.h | 2
-rw-r--r--  arch/sh/include/asm/thread_info.h | 2
-rw-r--r--  arch/sparc/include/asm/thread_info_32.h | 2
-rw-r--r--  arch/sparc/include/asm/thread_info_64.h | 2
-rw-r--r--  arch/um/include/asm/thread_info.h | 2
-rw-r--r--  arch/unicore32/include/asm/thread_info.h | 2
-rw-r--r--  arch/x86/include/asm/thread_info.h | 2
-rw-r--r--  arch/xtensa/include/asm/thread_info.h | 2
-rw-r--r--  drivers/acpi/sleep.c | 16
-rw-r--r--  drivers/amba/bus.c | 136
-rw-r--r--  drivers/base/firmware_class.c | 4
-rw-r--r--  drivers/base/platform.c | 115
-rw-r--r--  drivers/base/power/Makefile | 2
-rw-r--r--  drivers/base/power/domain.c | 539
-rw-r--r--  drivers/base/power/domain_governor.c | 156
-rw-r--r--  drivers/base/power/generic_ops.c | 91
-rw-r--r--  drivers/base/power/main.c | 375
-rw-r--r--  drivers/base/power/qos.c | 49
-rw-r--r--  drivers/base/power/runtime.c | 157
-rw-r--r--  drivers/bluetooth/btmrvl_main.c | 2
-rw-r--r--  drivers/devfreq/Kconfig | 13
-rw-r--r--  drivers/devfreq/Makefile | 3
-rw-r--r--  drivers/devfreq/devfreq.c | 15
-rw-r--r--  drivers/devfreq/exynos4_bus.c | 1135
-rw-r--r--  drivers/dma/dmatest.c | 46
-rw-r--r--  drivers/input/touchscreen/st1232.c | 13
-rw-r--r--  drivers/mfd/twl6030-irq.c | 2
-rw-r--r--  drivers/net/irda/stir4200.c | 2
-rw-r--r--  drivers/platform/x86/thinkpad_acpi.c | 15
-rw-r--r--  drivers/sh/intc/core.c | 8
-rw-r--r--  drivers/sh/intc/internals.h | 1
-rw-r--r--  drivers/staging/rts_pstor/rtsx.c | 2
-rw-r--r--  drivers/usb/storage/usb.c | 13
-rw-r--r--  fs/btrfs/async-thread.c | 2
-rw-r--r--  fs/btrfs/disk-io.c | 8
-rw-r--r--  fs/ext4/super.c | 3
-rw-r--r--  fs/fs-writeback.c | 4
-rw-r--r--  fs/gfs2/log.c | 4
-rw-r--r--  fs/gfs2/quota.c | 4
-rw-r--r--  fs/jbd/journal.c | 2
-rw-r--r--  fs/jbd2/journal.c | 2
-rw-r--r--  fs/jfs/jfs_logmgr.c | 2
-rw-r--r--  fs/jfs/jfs_txnmgr.c | 4
-rw-r--r--  fs/nfs/inode.c | 3
-rw-r--r--  fs/nfs/nfs3proc.c | 3
-rw-r--r--  fs/nfs/nfs4proc.c | 5
-rw-r--r--  fs/nfs/proc.c | 3
-rw-r--r--  fs/nilfs2/segment.c | 2
-rw-r--r--  fs/xfs/xfs_buf.c | 2
-rw-r--r--  include/linux/freezer.h | 159
-rw-r--r--  include/linux/kmod.h | 2
-rw-r--r--  include/linux/kthread.h | 1
-rw-r--r--  include/linux/platform_device.h | 30
-rw-r--r--  include/linux/pm.h | 15
-rw-r--r--  include/linux/pm_domain.h | 103
-rw-r--r--  include/linux/pm_qos.h | 8
-rw-r--r--  include/linux/pm_runtime.h | 5
-rw-r--r--  include/linux/sched.h | 4
-rw-r--r--  include/linux/sh_intc.h | 1
-rw-r--r--  include/linux/suspend.h | 35
-rw-r--r--  kernel/cgroup_freezer.c | 63
-rw-r--r--  kernel/cpu.c | 4
-rw-r--r--  kernel/exit.c | 3
-rw-r--r--  kernel/fork.c | 1
-rw-r--r--  kernel/freezer.c | 203
-rw-r--r--  kernel/kexec.c | 4
-rw-r--r--  kernel/kmod.c | 27
-rw-r--r--  kernel/kthread.c | 27
-rw-r--r--  kernel/power/hibernate.c | 92
-rw-r--r--  kernel/power/main.c | 10
-rw-r--r--  kernel/power/power.h | 2
-rw-r--r--  kernel/power/process.c | 77
-rw-r--r--  kernel/power/suspend.c | 12
-rw-r--r--  kernel/power/user.c | 184
-rw-r--r--  mm/backing-dev.c | 8
-rw-r--r--  mm/oom_kill.c | 2
-rw-r--r--  net/sunrpc/sched.c | 3
107 files changed, 3249 insertions, 1530 deletions
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 33f7327d0451..a1e7f3eec98f 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -85,17 +85,6 @@ Who: Robin Getz <rgetz@blackfin.uclinux.org> & Matt Mackall <mpm@selenic.com>
---------------------------
-What: Deprecated snapshot ioctls
-When: 2.6.36
-
-Why: The ioctls in kernel/power/user.c were marked as deprecated long time
- ago. Now they notify users about that so that they need to replace
- their userspace. After some more time, remove them completely.
-
-Who: Jiri Slaby <jirislaby@gmail.com>
-
----------------------------
-
What: The ieee80211_regdom module parameter
When: March 2010 / desktop catchup
diff --git a/Documentation/power/devices.txt b/Documentation/power/devices.txt
index 3139fb505dce..20af7def23c8 100644
--- a/Documentation/power/devices.txt
+++ b/Documentation/power/devices.txt
@@ -126,7 +126,9 @@ The core methods to suspend and resume devices reside in struct dev_pm_ops
pointed to by the ops member of struct dev_pm_domain, or by the pm member of
struct bus_type, struct device_type and struct class. They are mostly of
interest to the people writing infrastructure for platforms and buses, like PCI
-or USB, or device type and device class drivers.
+or USB, or device type and device class drivers. They also are relevant to the
+writers of device drivers whose subsystems (PM domains, device types, device
+classes and bus types) don't provide all power management methods.
Bus drivers implement these methods as appropriate for the hardware and the
drivers using it; PCI works differently from USB, and so on. Not many people
@@ -268,32 +270,35 @@ various phases always run after tasks have been frozen and before they are
unfrozen. Furthermore, the *_noirq phases run at a time when IRQ handlers have
been disabled (except for those marked with the IRQF_NO_SUSPEND flag).
-All phases use PM domain, bus, type, or class callbacks (that is, methods
-defined in dev->pm_domain->ops, dev->bus->pm, dev->type->pm, or dev->class->pm).
-These callbacks are regarded by the PM core as mutually exclusive. Moreover,
-PM domain callbacks always take precedence over bus, type and class callbacks,
-while type callbacks take precedence over bus and class callbacks, and class
-callbacks take precedence over bus callbacks. To be precise, the following
-rules are used to determine which callback to execute in the given phase:
+All phases use PM domain, bus, type, class or driver callbacks (that is, methods
+defined in dev->pm_domain->ops, dev->bus->pm, dev->type->pm, dev->class->pm or
+dev->driver->pm). These callbacks are regarded by the PM core as mutually
+exclusive. Moreover, PM domain callbacks always take precedence over all of the
+other callbacks and, for example, type callbacks take precedence over bus, class
+and driver callbacks. To be precise, the following rules are used to determine
+which callback to execute in the given phase:
- 1. If dev->pm_domain is present, the PM core will attempt to execute the
- callback included in dev->pm_domain->ops. If that callback is not
- present, no action will be carried out for the given device.
+ 1. If dev->pm_domain is present, the PM core will choose the callback
+ included in dev->pm_domain->ops for execution.
2. Otherwise, if both dev->type and dev->type->pm are present, the callback
- included in dev->type->pm will be executed.
+ included in dev->type->pm will be chosen for execution.
3. Otherwise, if both dev->class and dev->class->pm are present, the
- callback included in dev->class->pm will be executed.
+ callback included in dev->class->pm will be chosen for execution.
4. Otherwise, if both dev->bus and dev->bus->pm are present, the callback
- included in dev->bus->pm will be executed.
+ included in dev->bus->pm will be chosen for execution.
This allows PM domains and device types to override callbacks provided by bus
types or device classes if necessary.
-These callbacks may in turn invoke device- or driver-specific methods stored in
-dev->driver->pm, but they don't have to.
+The PM domain, type, class and bus callbacks may in turn invoke device- or
+driver-specific methods stored in dev->driver->pm, but they don't have to do
+that.
+
+If the subsystem callback chosen for execution is not present, the PM core will
+execute the corresponding method from dev->driver->pm instead if there is one.
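As an illustration of the precedence rules above, the callback selection can be
sketched in C as follows. This is a simplified sketch of the behavior described
in this document, not the kernel's actual pm_op() code, and the helper names
(subsystem_pm_ops(), pm_suspend_one()) are made up for the example:

#include <linux/device.h>
#include <linux/pm.h>

static const struct dev_pm_ops *subsystem_pm_ops(struct device *dev)
{
	if (dev->pm_domain)
		return &dev->pm_domain->ops;	/* rule 1: PM domain */
	if (dev->type && dev->type->pm)
		return dev->type->pm;		/* rule 2: device type */
	if (dev->class && dev->class->pm)
		return dev->class->pm;		/* rule 3: device class */
	if (dev->bus && dev->bus->pm)
		return dev->bus->pm;		/* rule 4: bus type */
	return NULL;
}

static int pm_suspend_one(struct device *dev)
{
	const struct dev_pm_ops *ops = subsystem_pm_ops(dev);

	if (ops && ops->suspend)
		return ops->suspend(dev);

	/* No subsystem callback: run the driver callback directly, if any. */
	if (dev->driver && dev->driver->pm && dev->driver->pm->suspend)
		return dev->driver->pm->suspend(dev);

	return 0;
}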
Entering System Suspend
diff --git a/Documentation/power/freezing-of-tasks.txt b/Documentation/power/freezing-of-tasks.txt
index 316c2ba187f4..6ccb68f68da6 100644
--- a/Documentation/power/freezing-of-tasks.txt
+++ b/Documentation/power/freezing-of-tasks.txt
@@ -21,7 +21,7 @@ freeze_processes() (defined in kernel/power/process.c) is called. It executes
try_to_freeze_tasks() that sets TIF_FREEZE for all of the freezable tasks and
either wakes them up, if they are kernel threads, or sends fake signals to them,
if they are user space processes. A task that has TIF_FREEZE set, should react
-to it by calling the function called refrigerator() (defined in
+to it by calling the function called __refrigerator() (defined in
kernel/freezer.c), which sets the task's PF_FROZEN flag, changes its state
to TASK_UNINTERRUPTIBLE and makes it loop until PF_FROZEN is cleared for it.
Then, we say that the task is 'frozen' and therefore the set of functions
@@ -29,10 +29,10 @@ handling this mechanism is referred to as 'the freezer' (these functions are
defined in kernel/power/process.c, kernel/freezer.c & include/linux/freezer.h).
User space processes are generally frozen before kernel threads.
-It is not recommended to call refrigerator() directly. Instead, it is
-recommended to use the try_to_freeze() function (defined in
-include/linux/freezer.h), that checks the task's TIF_FREEZE flag and makes the
-task enter refrigerator() if the flag is set.
+__refrigerator() must not be called directly. Instead, use the
+try_to_freeze() function (defined in include/linux/freezer.h), that checks
+the task's TIF_FREEZE flag and makes the task enter __refrigerator() if the
+flag is set.
For user space processes try_to_freeze() is called automatically from the
signal-handling code, but the freezable kernel threads need to call it
@@ -61,13 +61,13 @@ wait_event_freezable() and wait_event_freezable_timeout() macros.
After the system memory state has been restored from a hibernation image and
devices have been reinitialized, the function thaw_processes() is called in
order to clear the PF_FROZEN flag for each frozen task. Then, the tasks that
-have been frozen leave refrigerator() and continue running.
+have been frozen leave __refrigerator() and continue running.
III. Which kernel threads are freezable?
Kernel threads are not freezable by default. However, a kernel thread may clear
PF_NOFREEZE for itself by calling set_freezable() (the resetting of PF_NOFREEZE
-directly is strongly discouraged). From this point it is regarded as freezable
+directly is not allowed). From this point it is regarded as freezable
and must call try_to_freeze() in a suitable place.
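For example, a freezable kernel thread might be structured like the sketch
below; set_freezable(), try_to_freeze() and wait_event_freezable() are the real
helpers, while the thread body and the work_is_pending() condition are
hypothetical:

#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(example_wait);

static int example_thread_fn(void *data)
{
	set_freezable();		/* clear PF_NOFREEZE for this thread */

	while (!kthread_should_stop()) {
		try_to_freeze();	/* enters __refrigerator() when asked */

		/* ... do the thread's actual work here ... */

		wait_event_freezable(example_wait,
				     work_is_pending() ||
				     kthread_should_stop());
	}
	return 0;
}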
IV. Why do we do that?
@@ -176,3 +176,28 @@ tasks, since it generally exists anyway.
A driver must have all firmwares it may need in RAM before suspend() is called.
If keeping them is not practical, for example due to their size, they must be
requested early enough using the suspend notifier API described in notifiers.txt.
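For instance, a driver could use that notifier API roughly as sketched below to
cache its firmware before tasks are frozen. The PM_* events and
register_pm_notifier() are the real interface from include/linux/suspend.h; the
example_*() helpers are hypothetical:

#include <linux/suspend.h>

static int example_pm_notifier(struct notifier_block *nb,
			       unsigned long event, void *unused)
{
	switch (event) {
	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		/* request_firmware() may still be used at this point */
		example_cache_firmware();
		break;
	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
		example_release_firmware();
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block example_pm_nb = {
	.notifier_call = example_pm_notifier,
};

/* at probe/init time: register_pm_notifier(&example_pm_nb); */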
+
+VI. Are there any precautions to be taken to prevent freezing failures?
+
+Yes, there are.
+
+First of all, grabbing the 'pm_mutex' lock to mutually exclude a piece of code
+from system-wide sleep such as suspend/hibernation is not encouraged.
+If possible, that piece of code must instead hook onto the suspend/hibernation
+notifiers to achieve mutual exclusion. Look at the CPU-Hotplug code
+(kernel/cpu.c) for an example.
+
+However, if that is not feasible and grabbing 'pm_mutex' is deemed necessary,
+calling mutex_[un]lock(&pm_mutex) directly is strongly discouraged, because it
+can lead to freezing failures: if the suspend/hibernate code has already
+acquired 'pm_mutex', the task trying to grab it will block in
+TASK_UNINTERRUPTIBLE state. The freezer cannot freeze a task in that state,
+so the freezing of tasks will fail.
+
+The [un]lock_system_sleep() APIs, however, are safe to use in this scenario,
+because they ask the freezer to skip freezing this task: it is anyway
+"frozen enough", being blocked on 'pm_mutex', which will be released
+only after the entire suspend/hibernation sequence is complete.
+So, to summarize, use [un]lock_system_sleep() instead of calling
+mutex_[un]lock(&pm_mutex) directly. That will prevent freezing failures.
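A sketch of the safe pattern; lock_system_sleep() and unlock_system_sleep() are
the real helpers from include/linux/suspend.h, the critical section is
hypothetical:

	lock_system_sleep();
	/*
	 * This code is now mutually exclusive with system sleep transitions,
	 * and the freezer will regard this task as "frozen enough" if it
	 * happens to be blocked on 'pm_mutex' during a freezing attempt.
	 */
	example_do_work_excluded_from_suspend();
	unlock_system_sleep();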
diff --git a/Documentation/power/runtime_pm.txt b/Documentation/power/runtime_pm.txt
index c2ae8bf77d46..4abe83e1045a 100644
--- a/Documentation/power/runtime_pm.txt
+++ b/Documentation/power/runtime_pm.txt
@@ -57,6 +57,10 @@ the following:
4. Bus type of the device, if both dev->bus and dev->bus->pm are present.
+If the subsystem chosen by applying the above rules doesn't provide the relevant
+callback, the PM core will invoke the corresponding driver callback stored in
+dev->driver->pm directly (if present).
+
The PM core always checks which callback to use in the order given above, so the
priority order of callbacks from high to low is: PM domain, device type, class
and bus type. Moreover, the high-priority one will always take precedence over
@@ -64,86 +68,88 @@ a low-priority one. The PM domain, bus type, device type and class callbacks
are referred to as subsystem-level callbacks in what follows.
By default, the callbacks are always invoked in process context with interrupts
-enabled. However, subsystems can use the pm_runtime_irq_safe() helper function
-to tell the PM core that their ->runtime_suspend(), ->runtime_resume() and
-->runtime_idle() callbacks may be invoked in atomic context with interrupts
-disabled for a given device. This implies that the callback routines in
-question must not block or sleep, but it also means that the synchronous helper
-functions listed at the end of Section 4 may be used for that device within an
-interrupt handler or generally in an atomic context.
-
-The subsystem-level suspend callback is _entirely_ _responsible_ for handling
-the suspend of the device as appropriate, which may, but need not include
-executing the device driver's own ->runtime_suspend() callback (from the
+enabled. However, the pm_runtime_irq_safe() helper function can be used to tell
+the PM core that it is safe to run the ->runtime_suspend(), ->runtime_resume()
+and ->runtime_idle() callbacks for the given device in atomic context with
+interrupts disabled. This implies that the callback routines in question must
+not block or sleep, but it also means that the synchronous helper functions
+listed at the end of Section 4 may be used for that device within an interrupt
+handler or generally in an atomic context.
+
+The subsystem-level suspend callback, if present, is _entirely_ _responsible_
+for handling the suspend of the device as appropriate, which may, but need not
+include executing the device driver's own ->runtime_suspend() callback (from the
PM core's point of view it is not necessary to implement a ->runtime_suspend()
callback in a device driver as long as the subsystem-level suspend callback
knows what to do to handle the device).
- * Once the subsystem-level suspend callback has completed successfully
- for given device, the PM core regards the device as suspended, which need
- not mean that the device has been put into a low power state. It is
- supposed to mean, however, that the device will not process data and will
- not communicate with the CPU(s) and RAM until the subsystem-level resume
- callback is executed for it. The runtime PM status of a device after
- successful execution of the subsystem-level suspend callback is 'suspended'.
-
- * If the subsystem-level suspend callback returns -EBUSY or -EAGAIN,
- the device's runtime PM status is 'active', which means that the device
- _must_ be fully operational afterwards.
-
- * If the subsystem-level suspend callback returns an error code different
- from -EBUSY or -EAGAIN, the PM core regards this as a fatal error and will
- refuse to run the helper functions described in Section 4 for the device,
- until the status of it is directly set either to 'active', or to 'suspended'
- (the PM core provides special helper functions for this purpose).
-
-In particular, if the driver requires remote wake-up capability (i.e. hardware
+ * Once the subsystem-level suspend callback (or the driver suspend callback,
+ if invoked directly) has completed successfully for the given device, the PM
+ core regards the device as suspended, which need not mean that it has been
+ put into a low power state. It is supposed to mean, however, that the
+ device will not process data and will not communicate with the CPU(s) and
+ RAM until the appropriate resume callback is executed for it. The runtime
+ PM status of a device after successful execution of the suspend callback is
+ 'suspended'.
+
+ * If the suspend callback returns -EBUSY or -EAGAIN, the device's runtime PM
+ status remains 'active', which means that the device _must_ be fully
+ operational afterwards.
+
+ * If the suspend callback returns an error code different from -EBUSY and
+ -EAGAIN, the PM core regards this as a fatal error and will refuse to run
+ the helper functions described in Section 4 for the device until its status
+ is directly set to either 'active' or 'suspended' (the PM core provides
+ special helper functions for this purpose).
+
+In particular, if the driver requires remote wakeup capability (i.e. hardware
mechanism allowing the device to request a change of its power state, such as
PCI PME) for proper functioning and device_run_wake() returns 'false' for the
device, then ->runtime_suspend() should return -EBUSY. On the other hand, if
-device_run_wake() returns 'true' for the device and the device is put into a low
-power state during the execution of the subsystem-level suspend callback, it is
-expected that remote wake-up will be enabled for the device. Generally, remote
-wake-up should be enabled for all input devices put into a low power state at
-run time.
-
-The subsystem-level resume callback is _entirely_ _responsible_ for handling the
-resume of the device as appropriate, which may, but need not include executing
-the device driver's own ->runtime_resume() callback (from the PM core's point of
-view it is not necessary to implement a ->runtime_resume() callback in a device
-driver as long as the subsystem-level resume callback knows what to do to handle
-the device).
-
- * Once the subsystem-level resume callback has completed successfully, the PM
- core regards the device as fully operational, which means that the device
- _must_ be able to complete I/O operations as needed. The runtime PM status
- of the device is then 'active'.
-
- * If the subsystem-level resume callback returns an error code, the PM core
- regards this as a fatal error and will refuse to run the helper functions
- described in Section 4 for the device, until its status is directly set
- either to 'active' or to 'suspended' (the PM core provides special helper
- functions for this purpose).
-
-The subsystem-level idle callback is executed by the PM core whenever the device
-appears to be idle, which is indicated to the PM core by two counters, the
-device's usage counter and the counter of 'active' children of the device.
+device_run_wake() returns 'true' for the device and the device is put into a
+low-power state during the execution of the suspend callback, it is expected
+that remote wakeup will be enabled for the device. Generally, remote wakeup
+should be enabled for all input devices put into low-power states at run time.
+
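As a concrete illustration of the -EBUSY convention, a runtime suspend callback
for such a device might look like this sketch (hypothetical driver;
device_run_wake() is the real helper referenced above, the other example_*()
helpers are made up):

static int example_runtime_suspend(struct device *dev)
{
	/*
	 * The device relies on remote wakeup while suspended, so refuse
	 * to suspend when run-time wakeup is unavailable; the PM core
	 * will then leave the runtime PM status 'active'.
	 */
	if (!device_run_wake(dev))
		return -EBUSY;

	example_enable_remote_wakeup(dev);	/* hypothetical helper */
	example_put_device_to_sleep(dev);	/* hypothetical helper */
	return 0;
}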
+The subsystem-level resume callback, if present, is _entirely_ _responsible_ for
+handling the resume of the device as appropriate, which may, but need not
+include executing the device driver's own ->runtime_resume() callback (from the
+PM core's point of view it is not necessary to implement a ->runtime_resume()
+callback in a device driver as long as the subsystem-level resume callback knows
+what to do to handle the device).
+
+ * Once the subsystem-level resume callback (or the driver resume callback, if
+ invoked directly) has completed successfully, the PM core regards the device
+ as fully operational, which means that the device _must_ be able to complete
+ I/O operations as needed. The runtime PM status of the device is then
+ 'active'.
+
+ * If the resume callback returns an error code, the PM core regards this as a
+ fatal error and will refuse to run the helper functions described in Section
+ 4 for the device, until its status is directly set to either 'active', or
+ 'suspended' (by means of special helper functions provided by the PM core
+ for this purpose).
+
+The idle callback (a subsystem-level one, if present, or the driver one) is
+executed by the PM core whenever the device appears to be idle, which is
+indicated to the PM core by two counters, the device's usage counter and the
+counter of 'active' children of the device.
* If any of these counters is decreased using a helper function provided by
the PM core and it turns out to be equal to zero, the other counter is
checked. If that counter also is equal to zero, the PM core executes the
- subsystem-level idle callback with the device as an argument.
+ idle callback with the device as its argument.
-The action performed by a subsystem-level idle callback is totally dependent on
-the subsystem in question, but the expected and recommended action is to check
+The action performed by the idle callback is totally dependent on the subsystem
+(or driver) in question, but the expected and recommended action is to check
if the device can be suspended (i.e. if all of the conditions necessary for
suspending the device are satisfied) and to queue up a suspend request for the
device in that case. The value returned by this callback is ignored by the PM
core.
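In practice, such an idle callback often amounts to little more than the
following sketch (hypothetical driver; pm_schedule_suspend() is one of the real
helper functions described in Section 4):

static int example_runtime_idle(struct device *dev)
{
	/* ... device-specific checks whether a suspend makes sense ... */

	/* Queue up an asynchronous suspend request, with no extra delay. */
	pm_schedule_suspend(dev, 0);

	/* The PM core ignores this return value. */
	return 0;
}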
The helper functions provided by the PM core, described in Section 4, guarantee
-that the following constraints are met with respect to the bus type's runtime
-PM callbacks:
+that the following constraints are met with respect to runtime PM callbacks for
+one device:
(1) The callbacks are mutually exclusive (e.g. it is forbidden to execute
->runtime_suspend() in parallel with ->runtime_resume() or with another
diff --git a/arch/alpha/include/asm/thread_info.h b/arch/alpha/include/asm/thread_info.h
index ff73db022342..28335bd40e40 100644
--- a/arch/alpha/include/asm/thread_info.h
+++ b/arch/alpha/include/asm/thread_info.h
@@ -79,7 +79,6 @@ register struct thread_info *__current_thread_info __asm__("$8");
#define TIF_UAC_SIGBUS 12 /* ! userspace part of 'osf_sysinfo' */
#define TIF_MEMDIE 13 /* is terminating due to OOM killer */
#define TIF_RESTORE_SIGMASK 14 /* restore signal mask in do_signal */
-#define TIF_FREEZE 16 /* is freezing for suspend */
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
@@ -87,7 +86,6 @@ register struct thread_info *__current_thread_info __asm__("$8");
#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
-#define _TIF_FREEZE (1<<TIF_FREEZE)
/* Work to do on interrupt/exception return. */
#define _TIF_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index 7b5cc8dae06e..0f30c3a78fc1 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -142,7 +142,6 @@ extern void vfp_flush_hwstate(struct thread_info *);
#define TIF_POLLING_NRFLAG 16
#define TIF_USING_IWMMXT 17
#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
-#define TIF_FREEZE 19
#define TIF_RESTORE_SIGMASK 20
#define TIF_SECCOMP 21
@@ -152,7 +151,6 @@ extern void vfp_flush_hwstate(struct thread_info *);
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
#define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
-#define _TIF_FREEZE (1 << TIF_FREEZE)
#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
#define _TIF_SECCOMP (1 << TIF_SECCOMP)
diff --git a/arch/arm/mach-s3c64xx/Kconfig b/arch/arm/mach-s3c64xx/Kconfig
index 5552e048c2be..381586c7b1b2 100644
--- a/arch/arm/mach-s3c64xx/Kconfig
+++ b/arch/arm/mach-s3c64xx/Kconfig
@@ -8,6 +8,7 @@ config PLAT_S3C64XX
bool
depends on ARCH_S3C64XX
select SAMSUNG_WAKEMASK
+ select PM_GENERIC_DOMAINS
default y
help
Base platform code for any Samsung S3C64XX device
diff --git a/arch/arm/mach-s3c64xx/mach-crag6410.c b/arch/arm/mach-s3c64xx/mach-crag6410.c
index f1c848aa4a1e..fb786b6a2eae 100644
--- a/arch/arm/mach-s3c64xx/mach-crag6410.c
+++ b/arch/arm/mach-s3c64xx/mach-crag6410.c
@@ -706,7 +706,7 @@ static void __init crag6410_machine_init(void)
regulator_has_full_constraints();
- s3c_pm_init();
+ s3c64xx_pm_init();
}
MACHINE_START(WLF_CRAGG_6410, "Wolfson Cragganmore 6410")
diff --git a/arch/arm/mach-s3c64xx/pm.c b/arch/arm/mach-s3c64xx/pm.c
index b375cd5c47cb..7d3e81b9dd06 100644
--- a/arch/arm/mach-s3c64xx/pm.c
+++ b/arch/arm/mach-s3c64xx/pm.c
@@ -17,10 +17,12 @@
#include <linux/serial_core.h>
#include <linux/io.h>
#include <linux/gpio.h>
+#include <linux/pm_domain.h>
#include <mach/map.h>
#include <mach/irqs.h>
+#include <plat/devs.h>
#include <plat/pm.h>
#include <plat/wakeup-mask.h>
@@ -31,6 +33,148 @@
#include <mach/regs-gpio-memport.h>
#include <mach/regs-modem.h>
+struct s3c64xx_pm_domain {
+ char *const name;
+ u32 ena;
+ u32 pwr_stat;
+ struct generic_pm_domain pd;
+};
+
+static int s3c64xx_pd_off(struct generic_pm_domain *domain)
+{
+ struct s3c64xx_pm_domain *pd;
+ u32 val;
+
+ pd = container_of(domain, struct s3c64xx_pm_domain, pd);
+
+ val = __raw_readl(S3C64XX_NORMAL_CFG);
+ val &= ~(pd->ena);
+ __raw_writel(val, S3C64XX_NORMAL_CFG);
+
+ return 0;
+}
+
+static int s3c64xx_pd_on(struct generic_pm_domain *domain)
+{
+ struct s3c64xx_pm_domain *pd;
+ u32 val;
+ long retry = 1000000L;
+
+ pd = container_of(domain, struct s3c64xx_pm_domain, pd);
+
+ val = __raw_readl(S3C64XX_NORMAL_CFG);
+ val |= pd->ena;
+ __raw_writel(val, S3C64XX_NORMAL_CFG);
+
+ /* Not all domains provide power status readback */
+ if (pd->pwr_stat) {
+ do {
+ cpu_relax();
+ if (__raw_readl(S3C64XX_BLK_PWR_STAT) & pd->pwr_stat)
+ break;
+ } while (retry--);
+
+ if (!retry) {
+ pr_err("Failed to start domain %s\n", pd->name);
+ return -EBUSY;
+ }
+ }
+
+ return 0;
+}
+
+static struct s3c64xx_pm_domain s3c64xx_pm_irom = {
+ .name = "IROM",
+ .ena = S3C64XX_NORMALCFG_IROM_ON,
+ .pd = {
+ .power_off = s3c64xx_pd_off,
+ .power_on = s3c64xx_pd_on,
+ },
+};
+
+static struct s3c64xx_pm_domain s3c64xx_pm_etm = {
+ .name = "ETM",
+ .ena = S3C64XX_NORMALCFG_DOMAIN_ETM_ON,
+ .pwr_stat = S3C64XX_BLKPWRSTAT_ETM,
+ .pd = {
+ .power_off = s3c64xx_pd_off,
+ .power_on = s3c64xx_pd_on,
+ },
+};
+
+static struct s3c64xx_pm_domain s3c64xx_pm_s = {
+ .name = "S",
+ .ena = S3C64XX_NORMALCFG_DOMAIN_S_ON,
+ .pwr_stat = S3C64XX_BLKPWRSTAT_S,
+ .pd = {
+ .power_off = s3c64xx_pd_off,
+ .power_on = s3c64xx_pd_on,
+ },
+};
+
+static struct s3c64xx_pm_domain s3c64xx_pm_f = {
+ .name = "F",
+ .ena = S3C64XX_NORMALCFG_DOMAIN_F_ON,
+ .pwr_stat = S3C64XX_BLKPWRSTAT_F,
+ .pd = {
+ .power_off = s3c64xx_pd_off,
+ .power_on = s3c64xx_pd_on,
+ },
+};
+
+static struct s3c64xx_pm_domain s3c64xx_pm_p = {
+ .name = "P",
+ .ena = S3C64XX_NORMALCFG_DOMAIN_P_ON,
+ .pwr_stat = S3C64XX_BLKPWRSTAT_P,
+ .pd = {
+ .power_off = s3c64xx_pd_off,
+ .power_on = s3c64xx_pd_on,
+ },
+};
+
+static struct s3c64xx_pm_domain s3c64xx_pm_i = {
+ .name = "I",
+ .ena = S3C64XX_NORMALCFG_DOMAIN_I_ON,
+ .pwr_stat = S3C64XX_BLKPWRSTAT_I,
+ .pd = {
+ .power_off = s3c64xx_pd_off,
+ .power_on = s3c64xx_pd_on,
+ },
+};
+
+static struct s3c64xx_pm_domain s3c64xx_pm_g = {
+ .name = "G",
+ .ena = S3C64XX_NORMALCFG_DOMAIN_G_ON,
+ .pd = {
+ .power_off = s3c64xx_pd_off,
+ .power_on = s3c64xx_pd_on,
+ },
+};
+
+static struct s3c64xx_pm_domain s3c64xx_pm_v = {
+ .name = "V",
+ .ena = S3C64XX_NORMALCFG_DOMAIN_V_ON,
+ .pwr_stat = S3C64XX_BLKPWRSTAT_V,
+ .pd = {
+ .power_off = s3c64xx_pd_off,
+ .power_on = s3c64xx_pd_on,
+ },
+};
+
+static struct s3c64xx_pm_domain *s3c64xx_always_on_pm_domains[] = {
+ &s3c64xx_pm_irom,
+};
+
+static struct s3c64xx_pm_domain *s3c64xx_pm_domains[] = {
+ &s3c64xx_pm_etm,
+ &s3c64xx_pm_g,
+ &s3c64xx_pm_v,
+ &s3c64xx_pm_i,
+ &s3c64xx_pm_p,
+ &s3c64xx_pm_s,
+ &s3c64xx_pm_f,
+};
+
#ifdef CONFIG_S3C_PM_DEBUG_LED_SMDK
void s3c_pm_debug_smdkled(u32 set, u32 clear)
{
@@ -89,6 +233,8 @@ static struct sleep_save misc_save[] = {
SAVE_ITEM(S3C64XX_SDMA_SEL),
SAVE_ITEM(S3C64XX_MODEM_MIFPCON),
+
+ SAVE_ITEM(S3C64XX_NORMAL_CFG),
};
void s3c_pm_configure_extint(void)
@@ -179,7 +325,26 @@ static void s3c64xx_pm_prepare(void)
__raw_writel(__raw_readl(S3C64XX_WAKEUP_STAT), S3C64XX_WAKEUP_STAT);
}
-static int s3c64xx_pm_init(void)
+int __init s3c64xx_pm_init(void)
+{
+ int i;
+
+ s3c_pm_init();
+
+ for (i = 0; i < ARRAY_SIZE(s3c64xx_always_on_pm_domains); i++)
+ pm_genpd_init(&s3c64xx_always_on_pm_domains[i]->pd,
+ &pm_domain_always_on_gov, false);
+
+ for (i = 0; i < ARRAY_SIZE(s3c64xx_pm_domains); i++)
+ pm_genpd_init(&s3c64xx_pm_domains[i]->pd, NULL, false);
+
+ if (dev_get_platdata(&s3c_device_fb.dev))
+ pm_genpd_add_device(&s3c64xx_pm_f.pd, &s3c_device_fb.dev);
+
+ return 0;
+}
+
+static __init int s3c64xx_pm_initcall(void)
{
pm_cpu_prep = s3c64xx_pm_prepare;
pm_cpu_sleep = s3c64xx_cpu_suspend;
@@ -198,5 +363,12 @@ static int s3c64xx_pm_init(void)
return 0;
}
+arch_initcall(s3c64xx_pm_initcall);
+
+static __init int s3c64xx_pm_late_initcall(void)
+{
+ pm_genpd_poweroff_unused();
-arch_initcall(s3c64xx_pm_init);
+ return 0;
+}
+late_initcall(s3c64xx_pm_late_initcall);
diff --git a/arch/arm/mach-shmobile/include/mach/common.h b/arch/arm/mach-shmobile/include/mach/common.h
index 4bf82c156771..be78a2c73db4 100644
--- a/arch/arm/mach-shmobile/include/mach/common.h
+++ b/arch/arm/mach-shmobile/include/mach/common.h
@@ -34,8 +34,8 @@ extern void sh7372_add_standard_devices(void);
extern void sh7372_clock_init(void);
extern void sh7372_pinmux_init(void);
extern void sh7372_pm_init(void);
-extern void sh7372_resume_core_standby_a3sm(void);
-extern int sh7372_do_idle_a3sm(unsigned long unused);
+extern void sh7372_resume_core_standby_sysc(void);
+extern int sh7372_do_idle_sysc(unsigned long sleep_mode);
extern struct clk sh7372_extal1_clk;
extern struct clk sh7372_extal2_clk;
diff --git a/arch/arm/mach-shmobile/include/mach/sh7372.h b/arch/arm/mach-shmobile/include/mach/sh7372.h
index 84532f9629b2..8254ab86f6cd 100644
--- a/arch/arm/mach-shmobile/include/mach/sh7372.h
+++ b/arch/arm/mach-shmobile/include/mach/sh7372.h
@@ -480,11 +480,10 @@ struct platform_device;
struct sh7372_pm_domain {
struct generic_pm_domain genpd;
struct dev_power_governor *gov;
- void (*suspend)(void);
+ int (*suspend)(void);
void (*resume)(void);
unsigned int bit_shift;
bool no_debug;
- bool stay_on;
};
static inline struct sh7372_pm_domain *to_sh7372_pd(struct generic_pm_domain *d)
@@ -499,6 +498,7 @@ extern struct sh7372_pm_domain sh7372_d4;
extern struct sh7372_pm_domain sh7372_a4r;
extern struct sh7372_pm_domain sh7372_a3rv;
extern struct sh7372_pm_domain sh7372_a3ri;
+extern struct sh7372_pm_domain sh7372_a4s;
extern struct sh7372_pm_domain sh7372_a3sp;
extern struct sh7372_pm_domain sh7372_a3sg;
@@ -515,5 +515,7 @@ extern void sh7372_pm_add_subdomain(struct sh7372_pm_domain *sh7372_pd,
extern void sh7372_intcs_suspend(void);
extern void sh7372_intcs_resume(void);
+extern void sh7372_intca_suspend(void);
+extern void sh7372_intca_resume(void);
#endif /* __ASM_SH7372_H__ */
diff --git a/arch/arm/mach-shmobile/intc-sh7372.c b/arch/arm/mach-shmobile/intc-sh7372.c
index 2d8856df80e2..89afcaba99a1 100644
--- a/arch/arm/mach-shmobile/intc-sh7372.c
+++ b/arch/arm/mach-shmobile/intc-sh7372.c
@@ -535,6 +535,7 @@ static struct resource intcs_resources[] __initdata = {
static struct intc_desc intcs_desc __initdata = {
.name = "sh7372-intcs",
.force_enable = ENABLED_INTCS,
+ .skip_syscore_suspend = true,
.resource = intcs_resources,
.num_resources = ARRAY_SIZE(intcs_resources),
.hw = INTC_HW_DESC(intcs_vectors, intcs_groups, intcs_mask_registers,
@@ -611,3 +612,52 @@ void sh7372_intcs_resume(void)
for (k = 0x80; k <= 0x9c; k += 4)
__raw_writeb(ffd5[k], intcs_ffd5 + k);
}
+
+static unsigned short e694[0x200];
+static unsigned short e695[0x200];
+
+void sh7372_intca_suspend(void)
+{
+ int k;
+
+ for (k = 0x00; k <= 0x38; k += 4)
+ e694[k] = __raw_readw(0xe6940000 + k);
+
+ for (k = 0x80; k <= 0xb4; k += 4)
+ e694[k] = __raw_readb(0xe6940000 + k);
+
+ for (k = 0x180; k <= 0x1b4; k += 4)
+ e694[k] = __raw_readb(0xe6940000 + k);
+
+ for (k = 0x00; k <= 0x50; k += 4)
+ e695[k] = __raw_readw(0xe6950000 + k);
+
+ for (k = 0x80; k <= 0xa8; k += 4)
+ e695[k] = __raw_readb(0xe6950000 + k);
+
+ for (k = 0x180; k <= 0x1a8; k += 4)
+ e695[k] = __raw_readb(0xe6950000 + k);
+}
+
+void sh7372_intca_resume(void)
+{
+ int k;
+
+ for (k = 0x00; k <= 0x38; k += 4)
+ __raw_writew(e694[k], 0xe6940000 + k);
+
+ for (k = 0x80; k <= 0xb4; k += 4)
+ __raw_writeb(e694[k], 0xe6940000 + k);
+
+ for (k = 0x180; k <= 0x1b4; k += 4)
+ __raw_writeb(e694[k], 0xe6940000 + k);
+
+ for (k = 0x00; k <= 0x50; k += 4)
+ __raw_writew(e695[k], 0xe6950000 + k);
+
+ for (k = 0x80; k <= 0xa8; k += 4)
+ __raw_writeb(e695[k], 0xe6950000 + k);
+
+ for (k = 0x180; k <= 0x1a8; k += 4)
+ __raw_writeb(e695[k], 0xe6950000 + k);
+}
diff --git a/arch/arm/mach-shmobile/pm-sh7372.c b/arch/arm/mach-shmobile/pm-sh7372.c
index 34bbcbfb1706..77b8fc12fc2f 100644
--- a/arch/arm/mach-shmobile/pm-sh7372.c
+++ b/arch/arm/mach-shmobile/pm-sh7372.c
@@ -82,11 +82,12 @@ static int pd_power_down(struct generic_pm_domain *genpd)
struct sh7372_pm_domain *sh7372_pd = to_sh7372_pd(genpd);
unsigned int mask = 1 << sh7372_pd->bit_shift;
- if (sh7372_pd->suspend)
- sh7372_pd->suspend();
+ if (sh7372_pd->suspend) {
+ int ret = sh7372_pd->suspend();
- if (sh7372_pd->stay_on)
- return 0;
+ if (ret)
+ return ret;
+ }
if (__raw_readl(PSTR) & mask) {
unsigned int retry_count;
@@ -101,8 +102,8 @@ static int pd_power_down(struct generic_pm_domain *genpd)
}
if (!sh7372_pd->no_debug)
- pr_debug("sh7372 power domain down 0x%08x -> PSTR = 0x%08x\n",
- mask, __raw_readl(PSTR));
+ pr_debug("%s: Power off, 0x%08x -> PSTR = 0x%08x\n",
+ genpd->name, mask, __raw_readl(PSTR));
return 0;
}
@@ -113,9 +114,6 @@ static int __pd_power_up(struct sh7372_pm_domain *sh7372_pd, bool do_resume)
unsigned int retry_count;
int ret = 0;
- if (sh7372_pd->stay_on)
- goto out;
-
if (__raw_readl(PSTR) & mask)
goto out;
@@ -133,8 +131,8 @@ static int __pd_power_up(struct sh7372_pm_domain *sh7372_pd, bool do_resume)
ret = -EIO;
if (!sh7372_pd->no_debug)
- pr_debug("sh7372 power domain up 0x%08x -> PSTR = 0x%08x\n",
- mask, __raw_readl(PSTR));
+ pr_debug("%s: Power on, 0x%08x -> PSTR = 0x%08x\n",
+ sh7372_pd->genpd.name, mask, __raw_readl(PSTR));
out:
if (ret == 0 && sh7372_pd->resume && do_resume)
@@ -148,35 +146,60 @@ static int pd_power_up(struct generic_pm_domain *genpd)
return __pd_power_up(to_sh7372_pd(genpd), true);
}
-static void sh7372_a4r_suspend(void)
+static int sh7372_a4r_suspend(void)
{
sh7372_intcs_suspend();
__raw_writel(0x300fffff, WUPRMSK); /* avoid wakeup */
+ return 0;
}
static bool pd_active_wakeup(struct device *dev)
{
- return true;
+ bool (*active_wakeup)(struct device *dev);
+
+ active_wakeup = dev_gpd_data(dev)->ops.active_wakeup;
+ return active_wakeup ? active_wakeup(dev) : true;
}
-static bool sh7372_power_down_forbidden(struct dev_pm_domain *domain)
+static int sh7372_stop_dev(struct device *dev)
{
- return false;
+ int (*stop)(struct device *dev);
+
+ stop = dev_gpd_data(dev)->ops.stop;
+ if (stop) {
+ int ret = stop(dev);
+ if (ret)
+ return ret;
+ }
+ return pm_clk_suspend(dev);
}
-struct dev_power_governor sh7372_always_on_gov = {
- .power_down_ok = sh7372_power_down_forbidden,
-};
+static int sh7372_start_dev(struct device *dev)
+{
+ int (*start)(struct device *dev);
+ int ret;
+
+ ret = pm_clk_resume(dev);
+ if (ret)
+ return ret;
+
+ start = dev_gpd_data(dev)->ops.start;
+ if (start)
+ ret = start(dev);
+
+ return ret;
+}
void sh7372_init_pm_domain(struct sh7372_pm_domain *sh7372_pd)
{
struct generic_pm_domain *genpd = &sh7372_pd->genpd;
+ struct dev_power_governor *gov = sh7372_pd->gov;
- pm_genpd_init(genpd, sh7372_pd->gov, false);
- genpd->stop_device = pm_clk_suspend;
- genpd->start_device = pm_clk_resume;
+ pm_genpd_init(genpd, gov ? : &simple_qos_governor, false);
+ genpd->dev_ops.stop = sh7372_stop_dev;
+ genpd->dev_ops.start = sh7372_start_dev;
+ genpd->dev_ops.active_wakeup = pd_active_wakeup;
genpd->dev_irq_safe = true;
- genpd->active_wakeup = pd_active_wakeup;
genpd->power_off = pd_power_down;
genpd->power_on = pd_power_up;
__pd_power_up(sh7372_pd, false);
@@ -199,48 +222,73 @@ void sh7372_pm_add_subdomain(struct sh7372_pm_domain *sh7372_pd,
}
struct sh7372_pm_domain sh7372_a4lc = {
+ .genpd.name = "A4LC",
.bit_shift = 1,
};
struct sh7372_pm_domain sh7372_a4mp = {
+ .genpd.name = "A4MP",
.bit_shift = 2,
};
struct sh7372_pm_domain sh7372_d4 = {
+ .genpd.name = "D4",
.bit_shift = 3,
};
struct sh7372_pm_domain sh7372_a4r = {
+ .genpd.name = "A4R",
.bit_shift = 5,
- .gov = &sh7372_always_on_gov,
.suspend = sh7372_a4r_suspend,
.resume = sh7372_intcs_resume,
- .stay_on = true,
};
struct sh7372_pm_domain sh7372_a3rv = {
+ .genpd.name = "A3RV",
.bit_shift = 6,
};
struct sh7372_pm_domain sh7372_a3ri = {
+ .genpd.name = "A3RI",
.bit_shift = 8,
};
-struct sh7372_pm_domain sh7372_a3sp = {
- .bit_shift = 11,
- .gov = &sh7372_always_on_gov,
+static int sh7372_a4s_suspend(void)
+{
+ /*
+ * The A4S domain contains the CPU core and therefore it should
+ * only be turned off if the CPU is not in use.
+ */
+ return -EBUSY;
+}
+
+struct sh7372_pm_domain sh7372_a4s = {
+ .genpd.name = "A4S",
+ .bit_shift = 10,
+ .gov = &pm_domain_always_on_gov,
.no_debug = true,
+ .suspend = sh7372_a4s_suspend,
};
-static void sh7372_a3sp_init(void)
+static int sh7372_a3sp_suspend(void)
{
- /* serial consoles make use of SCIF hardware located in A3SP,
+ /*
+ * Serial consoles make use of SCIF hardware located in A3SP,
* keep such power domain on if "no_console_suspend" is set.
*/
- sh7372_a3sp.stay_on = !console_suspend_enabled;
+ return console_suspend_enabled ? -EBUSY : 0;
}
+struct sh7372_pm_domain sh7372_a3sp = {
+ .genpd.name = "A3SP",
+ .bit_shift = 11,
+ .gov = &pm_domain_always_on_gov,
+ .no_debug = true,
+ .suspend = sh7372_a3sp_suspend,
+};
+
struct sh7372_pm_domain sh7372_a3sg = {
+ .genpd.name = "A3SG",
.bit_shift = 13,
};
@@ -257,11 +305,16 @@ static int sh7372_do_idle_core_standby(unsigned long unused)
return 0;
}
-static void sh7372_enter_core_standby(void)
+static void sh7372_set_reset_vector(unsigned long address)
{
/* set reset vector, translate 4k */
- __raw_writel(__pa(sh7372_resume_core_standby_a3sm), SBAR);
+ __raw_writel(address, SBAR);
__raw_writel(0, APARMBAREA);
+}
+
+static void sh7372_enter_core_standby(void)
+{
+ sh7372_set_reset_vector(__pa(sh7372_resume_core_standby_sysc));
/* enter sleep mode with SYSTBCR to 0x10 */
__raw_writel(0x10, SYSTBCR);
@@ -274,27 +327,22 @@ static void sh7372_enter_core_standby(void)
#endif
#ifdef CONFIG_SUSPEND
-static void sh7372_enter_a3sm_common(int pllc0_on)
+static void sh7372_enter_sysc(int pllc0_on, unsigned long sleep_mode)
{
- /* set reset vector, translate 4k */
- __raw_writel(__pa(sh7372_resume_core_standby_a3sm), SBAR);
- __raw_writel(0, APARMBAREA);
-
if (pllc0_on)
__raw_writel(0, PLLC01STPCR);
else
__raw_writel(1 << 28, PLLC01STPCR);
- __raw_writel(0, PDNSEL); /* power-down A3SM only, not A4S */
__raw_readl(WUPSFAC); /* read wakeup int. factor before sleep */
- cpu_suspend(0, sh7372_do_idle_a3sm);
+ cpu_suspend(sleep_mode, sh7372_do_idle_sysc);
__raw_readl(WUPSFAC); /* read wakeup int. factor after wakeup */
/* disable reset vector translation */
__raw_writel(0, SBAR);
}
-static int sh7372_a3sm_valid(unsigned long *mskp, unsigned long *msk2p)
+static int sh7372_sysc_valid(unsigned long *mskp, unsigned long *msk2p)
{
unsigned long mstpsr0, mstpsr1, mstpsr2, mstpsr3, mstpsr4;
unsigned long msk, msk2;
@@ -382,7 +430,7 @@ static void sh7372_icr_to_irqcr(unsigned long icr, u16 *irqcr1p, u16 *irqcr2p)
*irqcr2p = irqcr2;
}
-static void sh7372_setup_a3sm(unsigned long msk, unsigned long msk2)
+static void sh7372_setup_sysc(unsigned long msk, unsigned long msk2)
{
u16 irqcrx_low, irqcrx_high, irqcry_low, irqcry_high;
unsigned long tmp;
@@ -415,6 +463,22 @@ static void sh7372_setup_a3sm(unsigned long msk, unsigned long msk2)
__raw_writel((irqcrx_high << 16) | irqcrx_low, IRQCR3);
__raw_writel((irqcry_high << 16) | irqcry_low, IRQCR4);
}
+
+static void sh7372_enter_a3sm_common(int pllc0_on)
+{
+ sh7372_set_reset_vector(__pa(sh7372_resume_core_standby_sysc));
+ sh7372_enter_sysc(pllc0_on, 1 << 12);
+}
+
+static void sh7372_enter_a4s_common(int pllc0_on)
+{
+ sh7372_intca_suspend();
+ memcpy((void *)SMFRAM, sh7372_resume_core_standby_sysc, 0x100);
+ sh7372_set_reset_vector(SMFRAM);
+ sh7372_enter_sysc(pllc0_on, 1 << 10);
+ sh7372_intca_resume();
+}
+
#endif
#ifdef CONFIG_CPU_IDLE
@@ -448,14 +512,20 @@ static int sh7372_enter_suspend(suspend_state_t suspend_state)
unsigned long msk, msk2;
/* check active clocks to determine potential wakeup sources */
- if (sh7372_a3sm_valid(&msk, &msk2)) {
-
+ if (sh7372_sysc_valid(&msk, &msk2)) {
/* convert INTC mask and sense to SYSC mask and sense */
- sh7372_setup_a3sm(msk, msk2);
-
- /* enter A3SM sleep with PLLC0 off */
- pr_debug("entering A3SM\n");
- sh7372_enter_a3sm_common(0);
+ sh7372_setup_sysc(msk, msk2);
+
+ if (!console_suspend_enabled &&
+ sh7372_a4s.genpd.status == GPD_STATE_POWER_OFF) {
+ /* enter A4S sleep with PLLC0 off */
+ pr_debug("entering A4S\n");
+ sh7372_enter_a4s_common(0);
+ } else {
+ /* enter A3SM sleep with PLLC0 off */
+ pr_debug("entering A3SM\n");
+ sh7372_enter_a3sm_common(0);
+ }
} else {
/* default to Core Standby that supports all wakeup sources */
pr_debug("entering Core Standby\n");
@@ -464,9 +534,37 @@ static int sh7372_enter_suspend(suspend_state_t suspend_state)
return 0;
}
+/**
+ * sh7372_pm_notifier_fn - SH7372 PM notifier routine.
+ * @notifier: Unused.
+ * @pm_event: Event being handled.
+ * @unused: Unused.
+ */
+static int sh7372_pm_notifier_fn(struct notifier_block *notifier,
+ unsigned long pm_event, void *unused)
+{
+ switch (pm_event) {
+ case PM_SUSPEND_PREPARE:
+ /*
+ * This is necessary, because the A4R domain has to be "on"
+ * when suspend_device_irqs() and resume_device_irqs() are
+ * executed during system suspend and resume, respectively, so
+ * that those functions don't crash while accessing the INTCS.
+ */
+ pm_genpd_poweron(&sh7372_a4r.genpd);
+ break;
+ case PM_POST_SUSPEND:
+ pm_genpd_poweroff_unused();
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
static void sh7372_suspend_init(void)
{
shmobile_suspend_ops.enter = sh7372_enter_suspend;
+ pm_notifier(sh7372_pm_notifier_fn, 0);
}
#else
static void sh7372_suspend_init(void) {}
@@ -482,8 +580,6 @@ void __init sh7372_pm_init(void)
/* do not convert A3SM, A3SP, A3SG, A4R power down into A4S */
__raw_writel(0, PDNSEL);
- sh7372_a3sp_init();
-
sh7372_suspend_init();
sh7372_cpuidle_init();
}
diff --git a/arch/arm/mach-shmobile/setup-sh7372.c b/arch/arm/mach-shmobile/setup-sh7372.c
index 2380389e6ac5..c197f9d29d04 100644
--- a/arch/arm/mach-shmobile/setup-sh7372.c
+++ b/arch/arm/mach-shmobile/setup-sh7372.c
@@ -994,12 +994,16 @@ void __init sh7372_add_standard_devices(void)
sh7372_init_pm_domain(&sh7372_a4r);
sh7372_init_pm_domain(&sh7372_a3rv);
sh7372_init_pm_domain(&sh7372_a3ri);
- sh7372_init_pm_domain(&sh7372_a3sg);
+ sh7372_init_pm_domain(&sh7372_a4s);
sh7372_init_pm_domain(&sh7372_a3sp);
+ sh7372_init_pm_domain(&sh7372_a3sg);
sh7372_pm_add_subdomain(&sh7372_a4lc, &sh7372_a3rv);
sh7372_pm_add_subdomain(&sh7372_a4r, &sh7372_a4lc);
+ sh7372_pm_add_subdomain(&sh7372_a4s, &sh7372_a3sg);
+ sh7372_pm_add_subdomain(&sh7372_a4s, &sh7372_a3sp);
+
platform_add_devices(sh7372_early_devices,
ARRAY_SIZE(sh7372_early_devices));
diff --git a/arch/arm/mach-shmobile/sleep-sh7372.S b/arch/arm/mach-shmobile/sleep-sh7372.S
index f3ab3c5810ea..1d564674451d 100644
--- a/arch/arm/mach-shmobile/sleep-sh7372.S
+++ b/arch/arm/mach-shmobile/sleep-sh7372.S
@@ -37,13 +37,18 @@
#if defined(CONFIG_SUSPEND) || defined(CONFIG_CPU_IDLE)
.align 12
.text
- .global sh7372_resume_core_standby_a3sm
-sh7372_resume_core_standby_a3sm:
+ .global sh7372_resume_core_standby_sysc
+sh7372_resume_core_standby_sysc:
ldr pc, 1f
1: .long cpu_resume - PAGE_OFFSET + PLAT_PHYS_OFFSET
- .global sh7372_do_idle_a3sm
-sh7372_do_idle_a3sm:
+#define SPDCR 0xe6180008
+
+ /* A3SM & A4S power down */
+ .global sh7372_do_idle_sysc
+sh7372_do_idle_sysc:
+ mov r8, r0 /* sleep mode passed in r0 */
+
/*
* Clear the SCTLR.C bit to prevent further data cache
* allocation. Clearing SCTLR.C would make all the data accesses
@@ -80,13 +85,9 @@ sh7372_do_idle_a3sm:
dsb
dmb
-#define SPDCR 0xe6180008
-#define A3SM (1 << 12)
-
- /* A3SM power down */
+ /* SYSC power down */
ldr r0, =SPDCR
- ldr r1, =A3SM
- str r1, [r0]
+ str r8, [r0]
1:
b 1b
diff --git a/arch/arm/plat-samsung/include/plat/pm.h b/arch/arm/plat-samsung/include/plat/pm.h
index 78014e53eb3c..61fc53740fbd 100644
--- a/arch/arm/plat-samsung/include/plat/pm.h
+++ b/arch/arm/plat-samsung/include/plat/pm.h
@@ -22,6 +22,7 @@ struct device;
#ifdef CONFIG_PM
extern __init int s3c_pm_init(void);
+extern __init int s3c64xx_pm_init(void);
#else
@@ -29,6 +30,11 @@ static inline int s3c_pm_init(void)
{
return 0;
}
+
+static inline int s3c64xx_pm_init(void)
+{
+ return 0;
+}
#endif
/* configuration for the IRQ mask over sleep */
diff --git a/arch/avr32/include/asm/thread_info.h b/arch/avr32/include/asm/thread_info.h
index 7a9c03dcb0b6..e5deda4691db 100644
--- a/arch/avr32/include/asm/thread_info.h
+++ b/arch/avr32/include/asm/thread_info.h
@@ -85,7 +85,6 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_RESTORE_SIGMASK 7 /* restore signal mask in do_signal */
#define TIF_CPU_GOING_TO_SLEEP 8 /* CPU is entering sleep 0 mode */
#define TIF_NOTIFY_RESUME 9 /* callback before returning to user */
-#define TIF_FREEZE 29
#define TIF_DEBUG 30 /* debugging enabled */
#define TIF_USERSPACE 31 /* true if FS sets userspace */
@@ -98,7 +97,6 @@ static inline struct thread_info *current_thread_info(void)
#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
#define _TIF_CPU_GOING_TO_SLEEP (1 << TIF_CPU_GOING_TO_SLEEP)
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
-#define _TIF_FREEZE (1 << TIF_FREEZE)
/* Note: The masks below must never span more than 16 bits! */
diff --git a/arch/blackfin/include/asm/thread_info.h b/arch/blackfin/include/asm/thread_info.h
index 02560fd8a121..53ad10005ae3 100644
--- a/arch/blackfin/include/asm/thread_info.h
+++ b/arch/blackfin/include/asm/thread_info.h
@@ -100,7 +100,6 @@ static inline struct thread_info *current_thread_info(void)
TIF_NEED_RESCHED */
#define TIF_MEMDIE 4 /* is terminating due to OOM killer */
#define TIF_RESTORE_SIGMASK 5 /* restore signal mask in do_signal() */
-#define TIF_FREEZE 6 /* is freezing for suspend */
#define TIF_IRQ_SYNC 7 /* sync pipeline stage */
#define TIF_NOTIFY_RESUME 8 /* callback before returning to user */
#define TIF_SINGLESTEP 9
@@ -111,7 +110,6 @@ static inline struct thread_info *current_thread_info(void)
#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
-#define _TIF_FREEZE (1<<TIF_FREEZE)
#define _TIF_IRQ_SYNC (1<<TIF_IRQ_SYNC)
#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
#define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP)
diff --git a/arch/cris/include/asm/thread_info.h b/arch/cris/include/asm/thread_info.h
index 332f19c54557..29b92884d793 100644
--- a/arch/cris/include/asm/thread_info.h
+++ b/arch/cris/include/asm/thread_info.h
@@ -86,7 +86,6 @@ struct thread_info {
#define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal() */
#define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */
#define TIF_MEMDIE 17 /* is terminating due to OOM killer */
-#define TIF_FREEZE 18 /* is freezing for suspend */
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
@@ -94,7 +93,6 @@ struct thread_info {
#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
-#define _TIF_FREEZE (1<<TIF_FREEZE)
#define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */
#define _TIF_ALLWORK_MASK 0x0000FFFF /* work to do on any return to u-space */
diff --git a/arch/frv/include/asm/thread_info.h b/arch/frv/include/asm/thread_info.h
index cefbe73dc119..92d83ea99ae5 100644
--- a/arch/frv/include/asm/thread_info.h
+++ b/arch/frv/include/asm/thread_info.h
@@ -111,7 +111,6 @@ register struct thread_info *__current_thread_info asm("gr15");
#define TIF_RESTORE_SIGMASK 5 /* restore signal mask in do_signal() */
#define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */
#define TIF_MEMDIE 17 /* is terminating due to OOM killer */
-#define TIF_FREEZE 18 /* freezing for suspend */
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
@@ -120,7 +119,6 @@ register struct thread_info *__current_thread_info asm("gr15");
#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
-#define _TIF_FREEZE (1 << TIF_FREEZE)
#define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */
#define _TIF_ALLWORK_MASK 0x0000FFFF /* work to do on any return to u-space */
diff --git a/arch/h8300/include/asm/thread_info.h b/arch/h8300/include/asm/thread_info.h
index d6f1784bfdee..9c126e0c09aa 100644
--- a/arch/h8300/include/asm/thread_info.h
+++ b/arch/h8300/include/asm/thread_info.h
@@ -90,7 +90,6 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_MEMDIE 4 /* is terminating due to OOM killer */
#define TIF_RESTORE_SIGMASK 5 /* restore signal mask in do_signal() */
#define TIF_NOTIFY_RESUME 6 /* callback before returning to user */
-#define TIF_FREEZE 16 /* is freezing for suspend */
/* as above, but as bit values */
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
@@ -99,7 +98,6 @@ static inline struct thread_info *current_thread_info(void)
#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
-#define _TIF_FREEZE (1<<TIF_FREEZE)
#define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */
diff --git a/arch/ia64/include/asm/thread_info.h b/arch/ia64/include/asm/thread_info.h
index ff0cc84e7bcc..e054bcc4273c 100644
--- a/arch/ia64/include/asm/thread_info.h
+++ b/arch/ia64/include/asm/thread_info.h
@@ -113,7 +113,6 @@ struct thread_info {
#define TIF_MEMDIE 17 /* is terminating due to OOM killer */
#define TIF_MCA_INIT 18 /* this task is processing MCA or INIT */
#define TIF_DB_DISABLED 19 /* debug trap disabled for fsyscall */
-#define TIF_FREEZE 20 /* is freezing for suspend */
#define TIF_RESTORE_RSE 21 /* user RBS is newer than kernel RBS */
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
@@ -126,7 +125,6 @@ struct thread_info {
#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
#define _TIF_MCA_INIT (1 << TIF_MCA_INIT)
#define _TIF_DB_DISABLED (1 << TIF_DB_DISABLED)
-#define _TIF_FREEZE (1 << TIF_FREEZE)
#define _TIF_RESTORE_RSE (1 << TIF_RESTORE_RSE)
/* "work to do on user-return" bits */
diff --git a/arch/m32r/include/asm/thread_info.h b/arch/m32r/include/asm/thread_info.h
index 0227dba44068..bf8fa3c06f4e 100644
--- a/arch/m32r/include/asm/thread_info.h
+++ b/arch/m32r/include/asm/thread_info.h
@@ -138,7 +138,6 @@ static inline unsigned int get_thread_fault_code(void)
#define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */
#define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling TIF_NEED_RESCHED */
#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
-#define TIF_FREEZE 19 /* is freezing for suspend */
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
@@ -149,7 +148,6 @@ static inline unsigned int get_thread_fault_code(void)
#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
#define _TIF_USEDFPU (1<<TIF_USEDFPU)
#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
-#define _TIF_FREEZE (1<<TIF_FREEZE)
#define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */
#define _TIF_ALLWORK_MASK 0x0000FFFF /* work to do on any return to u-space */
diff --git a/arch/m68k/include/asm/thread_info.h b/arch/m68k/include/asm/thread_info.h
index 29fa6da4f17c..e8665e6f9464 100644
--- a/arch/m68k/include/asm/thread_info.h
+++ b/arch/m68k/include/asm/thread_info.h
@@ -76,7 +76,6 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_DELAYED_TRACE 14 /* single step a syscall */
#define TIF_SYSCALL_TRACE 15 /* syscall trace active */
#define TIF_MEMDIE 16 /* is terminating due to OOM killer */
-#define TIF_FREEZE 17 /* thread is freezing for suspend */
#define TIF_RESTORE_SIGMASK 18 /* restore signal mask in do_signal */
#endif /* _ASM_M68K_THREAD_INFO_H */
diff --git a/arch/microblaze/include/asm/thread_info.h b/arch/microblaze/include/asm/thread_info.h
index b73da2ac21b3..1a8ab6a5c03f 100644
--- a/arch/microblaze/include/asm/thread_info.h
+++ b/arch/microblaze/include/asm/thread_info.h
@@ -125,7 +125,6 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_MEMDIE 6 /* is terminating due to OOM killer */
#define TIF_SYSCALL_AUDIT 9 /* syscall auditing active */
#define TIF_SECCOMP 10 /* secure computing */
-#define TIF_FREEZE 14 /* Freezing for suspend */
/* true if poll_idle() is polling TIF_NEED_RESCHED */
#define TIF_POLLING_NRFLAG 16
@@ -137,7 +136,6 @@ static inline struct thread_info *current_thread_info(void)
#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
#define _TIF_IRET (1 << TIF_IRET)
#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
-#define _TIF_FREEZE (1 << TIF_FREEZE)
#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
#define _TIF_SECCOMP (1 << TIF_SECCOMP)
diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
index 97f8bf6639e7..0d85d8e440c5 100644
--- a/arch/mips/include/asm/thread_info.h
+++ b/arch/mips/include/asm/thread_info.h
@@ -117,7 +117,6 @@ register struct thread_info *__current_thread_info __asm__("$28");
#define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */
#define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling TIF_NEED_RESCHED */
#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
-#define TIF_FREEZE 19
#define TIF_FIXADE 20 /* Fix address errors in software */
#define TIF_LOGADE 21 /* Log address errors to syslog */
#define TIF_32BIT_REGS 22 /* also implies 16/32 fprs */
@@ -141,7 +140,6 @@ register struct thread_info *__current_thread_info __asm__("$28");
#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
#define _TIF_USEDFPU (1<<TIF_USEDFPU)
#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
-#define _TIF_FREEZE (1<<TIF_FREEZE)
#define _TIF_FIXADE (1<<TIF_FIXADE)
#define _TIF_LOGADE (1<<TIF_LOGADE)
#define _TIF_32BIT_REGS (1<<TIF_32BIT_REGS)
diff --git a/arch/mn10300/include/asm/thread_info.h b/arch/mn10300/include/asm/thread_info.h
index 87c213002d4c..28cf52100baa 100644
--- a/arch/mn10300/include/asm/thread_info.h
+++ b/arch/mn10300/include/asm/thread_info.h
@@ -165,7 +165,6 @@ extern void free_thread_info(struct thread_info *);
#define TIF_RESTORE_SIGMASK 5 /* restore signal mask in do_signal() */
#define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */
#define TIF_MEMDIE 17 /* is terminating due to OOM killer */
-#define TIF_FREEZE 18 /* freezing for suspend */
#define _TIF_SYSCALL_TRACE +(1 << TIF_SYSCALL_TRACE)
#define _TIF_NOTIFY_RESUME +(1 << TIF_NOTIFY_RESUME)
@@ -174,7 +173,6 @@ extern void free_thread_info(struct thread_info *);
#define _TIF_SINGLESTEP +(1 << TIF_SINGLESTEP)
#define _TIF_RESTORE_SIGMASK +(1 << TIF_RESTORE_SIGMASK)
#define _TIF_POLLING_NRFLAG +(1 << TIF_POLLING_NRFLAG)
-#define _TIF_FREEZE +(1 << TIF_FREEZE)
#define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */
#define _TIF_ALLWORK_MASK 0x0000FFFF /* work to do on any return to u-space */
diff --git a/arch/parisc/include/asm/thread_info.h b/arch/parisc/include/asm/thread_info.h
index aa8de727e90b..6d9c7c7973d0 100644
--- a/arch/parisc/include/asm/thread_info.h
+++ b/arch/parisc/include/asm/thread_info.h
@@ -58,7 +58,6 @@ struct thread_info {
#define TIF_32BIT 4 /* 32 bit binary */
#define TIF_MEMDIE 5 /* is terminating due to OOM killer */
#define TIF_RESTORE_SIGMASK 6 /* restore saved signal mask */
-#define TIF_FREEZE 7 /* is freezing for suspend */
#define TIF_NOTIFY_RESUME 8 /* callback before returning to user */
#define TIF_SINGLESTEP 9 /* single stepping? */
#define TIF_BLOCKSTEP 10 /* branch stepping? */
@@ -69,7 +68,6 @@ struct thread_info {
#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
#define _TIF_32BIT (1 << TIF_32BIT)
#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
-#define _TIF_FREEZE (1 << TIF_FREEZE)
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
#define _TIF_BLOCKSTEP (1 << TIF_BLOCKSTEP)
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index 836f231ec1f0..964714940961 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -109,7 +109,6 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_RESTOREALL 11 /* Restore all regs (implies NOERROR) */
#define TIF_NOERROR 12 /* Force successful syscall return */
#define TIF_NOTIFY_RESUME 13 /* callback before returning to user */
-#define TIF_FREEZE 14 /* Freezing for suspend */
#define TIF_SYSCALL_TRACEPOINT 15 /* syscall tracepoint instrumentation */
#define TIF_RUNLATCH 16 /* Is the runlatch enabled? */
@@ -127,7 +126,6 @@ static inline struct thread_info *current_thread_info(void)
#define _TIF_RESTOREALL (1<<TIF_RESTOREALL)
#define _TIF_NOERROR (1<<TIF_NOERROR)
#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
-#define _TIF_FREEZE (1<<TIF_FREEZE)
#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
#define _TIF_RUNLATCH (1<<TIF_RUNLATCH)
#define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index f65af61996bd..8b086299ba25 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -1406,7 +1406,6 @@ static struct bus_type vio_bus_type = {
.match = vio_bus_match,
.probe = vio_bus_probe,
.remove = vio_bus_remove,
- .pm = GENERIC_SUBSYS_PM_OPS,
};
/**
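With generic_subsys_pm_ops dropped (see drivers/base/power/generic_ops.c further down), the vio bus no longer needs a forwarding .pm table: the PM core now falls back to the driver's own dev_pm_ops when the subsystem supplies none. A minimal, hedged sketch of what a driver on such a bus relies on — my_suspend/my_resume are hypothetical names:

/* Sketch only: the driver publishes its own dev_pm_ops; the PM core
 * invokes these directly when the bus defines no .pm callbacks. */
static int my_suspend(struct device *dev) { /* save state */ return 0; }
static int my_resume(struct device *dev) { /* restore state */ return 0; }

static const struct dev_pm_ops my_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(my_suspend, my_resume)
};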
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index a23183423b14..a73038155e0d 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -102,7 +102,6 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
#define TIF_RESTORE_SIGMASK 19 /* restore signal mask in do_signal() */
#define TIF_SINGLE_STEP 20 /* This task is single stepped */
-#define TIF_FREEZE 21 /* thread is freezing for suspend */
#define _TIF_SYSCALL (1<<TIF_SYSCALL)
#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
@@ -119,7 +118,6 @@ static inline struct thread_info *current_thread_info(void)
#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
#define _TIF_31BIT (1<<TIF_31BIT)
#define _TIF_SINGLE_STEP (1<<TIF_SINGLE_STEP)
-#define _TIF_FREEZE (1<<TIF_FREEZE)
#ifdef CONFIG_64BIT
#define is_32bit_task() (test_thread_flag(TIF_31BIT))
diff --git a/arch/sh/include/asm/thread_info.h b/arch/sh/include/asm/thread_info.h
index ea2d5089de1e..20ee40af16e9 100644
--- a/arch/sh/include/asm/thread_info.h
+++ b/arch/sh/include/asm/thread_info.h
@@ -122,7 +122,6 @@ extern void init_thread_xstate(void);
#define TIF_SYSCALL_TRACEPOINT 8 /* for ftrace syscall instrumentation */
#define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling TIF_NEED_RESCHED */
#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
-#define TIF_FREEZE 19 /* Freezing for suspend */
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
@@ -133,7 +132,6 @@ extern void init_thread_xstate(void);
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
-#define _TIF_FREEZE (1 << TIF_FREEZE)
/*
* _TIF_ALLWORK_MASK and _TIF_WORK_MASK need to fit within 2 bytes, or we
diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
index fa5753233410..5cc5888ad5a3 100644
--- a/arch/sparc/include/asm/thread_info_32.h
+++ b/arch/sparc/include/asm/thread_info_32.h
@@ -133,7 +133,6 @@ BTFIXUPDEF_CALL(void, free_thread_info, struct thread_info *)
#define TIF_POLLING_NRFLAG 9 /* true if poll_idle() is polling
* TIF_NEED_RESCHED */
#define TIF_MEMDIE 10 /* is terminating due to OOM killer */
-#define TIF_FREEZE 11 /* is freezing for suspend */
/* as above, but as bit values */
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
@@ -147,7 +146,6 @@ BTFIXUPDEF_CALL(void, free_thread_info, struct thread_info *)
#define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | \
_TIF_SIGPENDING | \
_TIF_RESTORE_SIGMASK)
-#define _TIF_FREEZE (1<<TIF_FREEZE)
#endif /* __KERNEL__ */
diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
index 60d86be1a533..01d057fe6a3f 100644
--- a/arch/sparc/include/asm/thread_info_64.h
+++ b/arch/sparc/include/asm/thread_info_64.h
@@ -225,7 +225,6 @@ register struct thread_info *current_thread_info_reg asm("g6");
/* flag bit 12 is available */
#define TIF_MEMDIE 13 /* is terminating due to OOM killer */
#define TIF_POLLING_NRFLAG 14
-#define TIF_FREEZE 15 /* is freezing for suspend */
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
@@ -237,7 +236,6 @@ register struct thread_info *current_thread_info_reg asm("g6");
#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
-#define _TIF_FREEZE (1<<TIF_FREEZE)
#define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \
_TIF_DO_NOTIFY_RESUME_MASK | \
diff --git a/arch/um/include/asm/thread_info.h b/arch/um/include/asm/thread_info.h
index 5bd1bad33fab..200c4ab1240c 100644
--- a/arch/um/include/asm/thread_info.h
+++ b/arch/um/include/asm/thread_info.h
@@ -71,7 +71,6 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_MEMDIE 5 /* is terminating due to OOM killer */
#define TIF_SYSCALL_AUDIT 6
#define TIF_RESTORE_SIGMASK 7
-#define TIF_FREEZE 16 /* is freezing for suspend */
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
@@ -80,6 +79,5 @@ static inline struct thread_info *current_thread_info(void)
#define _TIF_MEMDIE (1 << TIF_MEMDIE)
#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
-#define _TIF_FREEZE (1 << TIF_FREEZE)
#endif
diff --git a/arch/unicore32/include/asm/thread_info.h b/arch/unicore32/include/asm/thread_info.h
index c270e9e04861..89f7557583b8 100644
--- a/arch/unicore32/include/asm/thread_info.h
+++ b/arch/unicore32/include/asm/thread_info.h
@@ -135,14 +135,12 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
#define TIF_SYSCALL_TRACE 8
#define TIF_MEMDIE 18
-#define TIF_FREEZE 19
#define TIF_RESTORE_SIGMASK 20
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
-#define _TIF_FREEZE (1 << TIF_FREEZE)
#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
/*
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index 185b719ec61a..74047159d0ab 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -91,7 +91,6 @@ struct thread_info {
#define TIF_MEMDIE 20 /* is terminating due to OOM killer */
#define TIF_DEBUG 21 /* uses debug registers */
#define TIF_IO_BITMAP 22 /* uses I/O bitmap */
-#define TIF_FREEZE 23 /* is freezing for suspend */
#define TIF_FORCED_TF 24 /* true if TF in eflags artificially */
#define TIF_BLOCKSTEP 25 /* set when we want DEBUGCTLMSR_BTF */
#define TIF_LAZY_MMU_UPDATES 27 /* task is updating the mmu lazily */
@@ -113,7 +112,6 @@ struct thread_info {
#define _TIF_FORK (1 << TIF_FORK)
#define _TIF_DEBUG (1 << TIF_DEBUG)
#define _TIF_IO_BITMAP (1 << TIF_IO_BITMAP)
-#define _TIF_FREEZE (1 << TIF_FREEZE)
#define _TIF_FORCED_TF (1 << TIF_FORCED_TF)
#define _TIF_BLOCKSTEP (1 << TIF_BLOCKSTEP)
#define _TIF_LAZY_MMU_UPDATES (1 << TIF_LAZY_MMU_UPDATES)
diff --git a/arch/xtensa/include/asm/thread_info.h b/arch/xtensa/include/asm/thread_info.h
index 7be8accb0b0c..6abbedd09d85 100644
--- a/arch/xtensa/include/asm/thread_info.h
+++ b/arch/xtensa/include/asm/thread_info.h
@@ -132,7 +132,6 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_MEMDIE 5 /* is terminating due to OOM killer */
#define TIF_RESTORE_SIGMASK 6 /* restore signal mask in do_signal() */
#define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */
-#define TIF_FREEZE 17 /* is freezing for suspend */
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
@@ -141,7 +140,6 @@ static inline struct thread_info *current_thread_info(void)
#define _TIF_IRET (1<<TIF_IRET)
#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
-#define _TIF_FREEZE (1<<TIF_FREEZE)
#define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */
#define _TIF_ALLWORK_MASK 0x0000FFFF /* work to do on any return to u-space */
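That was the last architecture; TIF_FREEZE is gone everywhere. The freezer now keys off shared state queried via freezing(p) rather than a per-task thread flag. A hedged sketch of the pattern a freezable kernel thread uses (the work itself is hypothetical):

#include <linux/freezer.h>
#include <linux/kthread.h>

static int my_thread_fn(void *unused)
{
	set_freezable();		/* opt in to the freezer */
	while (!kthread_should_stop()) {
		try_to_freeze();	/* parks here while tasks are frozen */
		/* ... hypothetical work ... */
	}
	return 0;
}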
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 6d9a3ab58db2..0a7ed69546ba 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -476,6 +476,22 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW520F"),
},
},
+ {
+ .callback = init_nvs_nosave,
+ .ident = "Asus K54C",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "K54C"),
+ },
+ },
+ {
+ .callback = init_nvs_nosave,
+ .ident = "Asus K54HR",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "K54HR"),
+ },
+ },
{},
};
#endif /* CONFIG_SUSPEND */
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index 936c98cb2475..54eaf96ab217 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -113,31 +113,7 @@ static int amba_legacy_resume(struct device *dev)
return ret;
}
-static int amba_pm_prepare(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (drv && drv->pm && drv->pm->prepare)
- ret = drv->pm->prepare(dev);
-
- return ret;
-}
-
-static void amba_pm_complete(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
-
- if (drv && drv->pm && drv->pm->complete)
- drv->pm->complete(dev);
-}
-
-#else /* !CONFIG_PM_SLEEP */
-
-#define amba_pm_prepare NULL
-#define amba_pm_complete NULL
-
-#endif /* !CONFIG_PM_SLEEP */
+#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_SUSPEND
@@ -159,22 +135,6 @@ static int amba_pm_suspend(struct device *dev)
return ret;
}
-static int amba_pm_suspend_noirq(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (!drv)
- return 0;
-
- if (drv->pm) {
- if (drv->pm->suspend_noirq)
- ret = drv->pm->suspend_noirq(dev);
- }
-
- return ret;
-}
-
static int amba_pm_resume(struct device *dev)
{
struct device_driver *drv = dev->driver;
@@ -193,28 +153,10 @@ static int amba_pm_resume(struct device *dev)
return ret;
}
-static int amba_pm_resume_noirq(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (!drv)
- return 0;
-
- if (drv->pm) {
- if (drv->pm->resume_noirq)
- ret = drv->pm->resume_noirq(dev);
- }
-
- return ret;
-}
-
#else /* !CONFIG_SUSPEND */
#define amba_pm_suspend NULL
#define amba_pm_resume NULL
-#define amba_pm_suspend_noirq NULL
-#define amba_pm_resume_noirq NULL
#endif /* !CONFIG_SUSPEND */
@@ -238,22 +180,6 @@ static int amba_pm_freeze(struct device *dev)
return ret;
}
-static int amba_pm_freeze_noirq(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (!drv)
- return 0;
-
- if (drv->pm) {
- if (drv->pm->freeze_noirq)
- ret = drv->pm->freeze_noirq(dev);
- }
-
- return ret;
-}
-
static int amba_pm_thaw(struct device *dev)
{
struct device_driver *drv = dev->driver;
@@ -272,22 +198,6 @@ static int amba_pm_thaw(struct device *dev)
return ret;
}
-static int amba_pm_thaw_noirq(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (!drv)
- return 0;
-
- if (drv->pm) {
- if (drv->pm->thaw_noirq)
- ret = drv->pm->thaw_noirq(dev);
- }
-
- return ret;
-}
-
static int amba_pm_poweroff(struct device *dev)
{
struct device_driver *drv = dev->driver;
@@ -306,22 +216,6 @@ static int amba_pm_poweroff(struct device *dev)
return ret;
}
-static int amba_pm_poweroff_noirq(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (!drv)
- return 0;
-
- if (drv->pm) {
- if (drv->pm->poweroff_noirq)
- ret = drv->pm->poweroff_noirq(dev);
- }
-
- return ret;
-}
-
static int amba_pm_restore(struct device *dev)
{
struct device_driver *drv = dev->driver;
@@ -340,32 +234,12 @@ static int amba_pm_restore(struct device *dev)
return ret;
}
-static int amba_pm_restore_noirq(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (!drv)
- return 0;
-
- if (drv->pm) {
- if (drv->pm->restore_noirq)
- ret = drv->pm->restore_noirq(dev);
- }
-
- return ret;
-}
-
#else /* !CONFIG_HIBERNATE_CALLBACKS */
#define amba_pm_freeze NULL
#define amba_pm_thaw NULL
#define amba_pm_poweroff NULL
#define amba_pm_restore NULL
-#define amba_pm_freeze_noirq NULL
-#define amba_pm_thaw_noirq NULL
-#define amba_pm_poweroff_noirq NULL
-#define amba_pm_restore_noirq NULL
#endif /* !CONFIG_HIBERNATE_CALLBACKS */
@@ -406,20 +280,12 @@ static int amba_pm_runtime_resume(struct device *dev)
#ifdef CONFIG_PM
static const struct dev_pm_ops amba_pm = {
- .prepare = amba_pm_prepare,
- .complete = amba_pm_complete,
.suspend = amba_pm_suspend,
.resume = amba_pm_resume,
.freeze = amba_pm_freeze,
.thaw = amba_pm_thaw,
.poweroff = amba_pm_poweroff,
.restore = amba_pm_restore,
- .suspend_noirq = amba_pm_suspend_noirq,
- .resume_noirq = amba_pm_resume_noirq,
- .freeze_noirq = amba_pm_freeze_noirq,
- .thaw_noirq = amba_pm_thaw_noirq,
- .poweroff_noirq = amba_pm_poweroff_noirq,
- .restore_noirq = amba_pm_restore_noirq,
SET_RUNTIME_PM_OPS(
amba_pm_runtime_suspend,
amba_pm_runtime_resume,
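Every helper removed above merely forwarded to the driver's dev_pm_ops; they are redundant now that the PM core performs that fallback itself. The core-side logic, quoted from the drivers/base/power/main.c hunks later in this diff:

	if (!callback && dev->driver && dev->driver->pm) {
		info = "EARLY driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}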
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 3719c94be19c..26ab358dac62 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -534,6 +534,8 @@ static int _request_firmware(const struct firmware **firmware_p,
return 0;
}
+ read_lock_usermodehelper();
+
if (WARN_ON(usermodehelper_is_disabled())) {
dev_err(device, "firmware: %s will not be loaded\n", name);
retval = -EBUSY;
@@ -572,6 +574,8 @@ static int _request_firmware(const struct firmware **firmware_p,
fw_destroy_instance(fw_priv);
out:
+ read_unlock_usermodehelper();
+
if (retval) {
release_firmware(firmware);
*firmware_p = NULL;
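The new read_lock_usermodehelper()/read_unlock_usermodehelper() pair closes the race in which usermodehelper_is_disabled() could change between the check and the actual helper use while tasks are being frozen. The resulting pattern, sketched:

	read_lock_usermodehelper();
	if (usermodehelper_is_disabled()) {
		retval = -EBUSY;
		goto out;
	}
	/* ... work that needs the usermode helper to stay enabled ... */
out:
	read_unlock_usermodehelper();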
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index a7c06374062e..f0c605e99ade 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -700,25 +700,6 @@ static int platform_legacy_resume(struct device *dev)
return ret;
}
-int platform_pm_prepare(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (drv && drv->pm && drv->pm->prepare)
- ret = drv->pm->prepare(dev);
-
- return ret;
-}
-
-void platform_pm_complete(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
-
- if (drv && drv->pm && drv->pm->complete)
- drv->pm->complete(dev);
-}
-
#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_SUSPEND
@@ -741,22 +722,6 @@ int platform_pm_suspend(struct device *dev)
return ret;
}
-int platform_pm_suspend_noirq(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (!drv)
- return 0;
-
- if (drv->pm) {
- if (drv->pm->suspend_noirq)
- ret = drv->pm->suspend_noirq(dev);
- }
-
- return ret;
-}
-
int platform_pm_resume(struct device *dev)
{
struct device_driver *drv = dev->driver;
@@ -775,22 +740,6 @@ int platform_pm_resume(struct device *dev)
return ret;
}
-int platform_pm_resume_noirq(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (!drv)
- return 0;
-
- if (drv->pm) {
- if (drv->pm->resume_noirq)
- ret = drv->pm->resume_noirq(dev);
- }
-
- return ret;
-}
-
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
@@ -813,22 +762,6 @@ int platform_pm_freeze(struct device *dev)
return ret;
}
-int platform_pm_freeze_noirq(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (!drv)
- return 0;
-
- if (drv->pm) {
- if (drv->pm->freeze_noirq)
- ret = drv->pm->freeze_noirq(dev);
- }
-
- return ret;
-}
-
int platform_pm_thaw(struct device *dev)
{
struct device_driver *drv = dev->driver;
@@ -847,22 +780,6 @@ int platform_pm_thaw(struct device *dev)
return ret;
}
-int platform_pm_thaw_noirq(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (!drv)
- return 0;
-
- if (drv->pm) {
- if (drv->pm->thaw_noirq)
- ret = drv->pm->thaw_noirq(dev);
- }
-
- return ret;
-}
-
int platform_pm_poweroff(struct device *dev)
{
struct device_driver *drv = dev->driver;
@@ -881,22 +798,6 @@ int platform_pm_poweroff(struct device *dev)
return ret;
}
-int platform_pm_poweroff_noirq(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (!drv)
- return 0;
-
- if (drv->pm) {
- if (drv->pm->poweroff_noirq)
- ret = drv->pm->poweroff_noirq(dev);
- }
-
- return ret;
-}
-
int platform_pm_restore(struct device *dev)
{
struct device_driver *drv = dev->driver;
@@ -915,22 +816,6 @@ int platform_pm_restore(struct device *dev)
return ret;
}
-int platform_pm_restore_noirq(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (!drv)
- return 0;
-
- if (drv->pm) {
- if (drv->pm->restore_noirq)
- ret = drv->pm->restore_noirq(dev);
- }
-
- return ret;
-}
-
#endif /* CONFIG_HIBERNATE_CALLBACKS */
static const struct dev_pm_ops platform_dev_pm_ops = {
diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile
index 81676dd17900..2e58ebb1f6c0 100644
--- a/drivers/base/power/Makefile
+++ b/drivers/base/power/Makefile
@@ -3,7 +3,7 @@ obj-$(CONFIG_PM_SLEEP) += main.o wakeup.o
obj-$(CONFIG_PM_RUNTIME) += runtime.o
obj-$(CONFIG_PM_TRACE_RTC) += trace.o
obj-$(CONFIG_PM_OPP) += opp.o
-obj-$(CONFIG_PM_GENERIC_DOMAINS) += domain.o
+obj-$(CONFIG_PM_GENERIC_DOMAINS) += domain.o domain_governor.o
obj-$(CONFIG_HAVE_CLK) += clock_ops.o
ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 6790cf7eba5a..92e6a9048065 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -15,13 +15,44 @@
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/suspend.h>
+#include <linux/export.h>
+
+#define GENPD_DEV_CALLBACK(genpd, type, callback, dev) \
+({ \
+ type (*__routine)(struct device *__d); \
+ type __ret = (type)0; \
+ \
+ __routine = genpd->dev_ops.callback; \
+ if (__routine) { \
+ __ret = __routine(dev); \
+ } else { \
+ __routine = dev_gpd_data(dev)->ops.callback; \
+ if (__routine) \
+ __ret = __routine(dev); \
+ } \
+ __ret; \
+})
+
+#define GENPD_DEV_TIMED_CALLBACK(genpd, type, callback, dev, field, name) \
+({ \
+ ktime_t __start = ktime_get(); \
+ type __retval = GENPD_DEV_CALLBACK(genpd, type, callback, dev); \
+ s64 __elapsed = ktime_to_ns(ktime_sub(ktime_get(), __start)); \
+ struct generic_pm_domain_data *__gpd_data = dev_gpd_data(dev); \
+ if (__elapsed > __gpd_data->td.field) { \
+ __gpd_data->td.field = __elapsed; \
+ dev_warn(dev, name " latency exceeded, new value %lld ns\n", \
+ __elapsed); \
+ } \
+ __retval; \
+})
static LIST_HEAD(gpd_list);
static DEFINE_MUTEX(gpd_list_lock);
#ifdef CONFIG_PM
-static struct generic_pm_domain *dev_to_genpd(struct device *dev)
+struct generic_pm_domain *dev_to_genpd(struct device *dev)
{
if (IS_ERR_OR_NULL(dev->pm_domain))
return ERR_PTR(-EINVAL);
@@ -29,6 +60,31 @@ static struct generic_pm_domain *dev_to_genpd(struct device *dev)
return pd_to_genpd(dev->pm_domain);
}
+static int genpd_stop_dev(struct generic_pm_domain *genpd, struct device *dev)
+{
+ return GENPD_DEV_TIMED_CALLBACK(genpd, int, stop, dev,
+ stop_latency_ns, "stop");
+}
+
+static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev)
+{
+ return GENPD_DEV_TIMED_CALLBACK(genpd, int, start, dev,
+ start_latency_ns, "start");
+}
+
+static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)
+{
+ return GENPD_DEV_TIMED_CALLBACK(genpd, int, save_state, dev,
+ save_state_latency_ns, "state save");
+}
+
+static int genpd_restore_dev(struct generic_pm_domain *genpd, struct device *dev)
+{
+ return GENPD_DEV_TIMED_CALLBACK(genpd, int, restore_state, dev,
+ restore_state_latency_ns,
+ "state restore");
+}
+
static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
{
bool ret = false;
@@ -145,9 +201,21 @@ int __pm_genpd_poweron(struct generic_pm_domain *genpd)
}
if (genpd->power_on) {
+ ktime_t time_start = ktime_get();
+ s64 elapsed_ns;
+
ret = genpd->power_on(genpd);
if (ret)
goto err;
+
+ elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
+ if (elapsed_ns > genpd->power_on_latency_ns) {
+ genpd->power_on_latency_ns = elapsed_ns;
+ if (genpd->name)
+ pr_warning("%s: Power-on latency exceeded, "
+ "new value %lld ns\n", genpd->name,
+ elapsed_ns);
+ }
}
genpd_set_active(genpd);
@@ -190,7 +258,6 @@ static int __pm_genpd_save_device(struct pm_domain_data *pdd,
{
struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
struct device *dev = pdd->dev;
- struct device_driver *drv = dev->driver;
int ret = 0;
if (gpd_data->need_restore)
@@ -198,15 +265,9 @@ static int __pm_genpd_save_device(struct pm_domain_data *pdd,
mutex_unlock(&genpd->lock);
- if (drv && drv->pm && drv->pm->runtime_suspend) {
- if (genpd->start_device)
- genpd->start_device(dev);
-
- ret = drv->pm->runtime_suspend(dev);
-
- if (genpd->stop_device)
- genpd->stop_device(dev);
- }
+ genpd_start_dev(genpd, dev);
+ ret = genpd_save_dev(genpd, dev);
+ genpd_stop_dev(genpd, dev);
mutex_lock(&genpd->lock);
@@ -227,22 +288,15 @@ static void __pm_genpd_restore_device(struct pm_domain_data *pdd,
{
struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
struct device *dev = pdd->dev;
- struct device_driver *drv = dev->driver;
if (!gpd_data->need_restore)
return;
mutex_unlock(&genpd->lock);
- if (drv && drv->pm && drv->pm->runtime_resume) {
- if (genpd->start_device)
- genpd->start_device(dev);
-
- drv->pm->runtime_resume(dev);
-
- if (genpd->stop_device)
- genpd->stop_device(dev);
- }
+ genpd_start_dev(genpd, dev);
+ genpd_restore_dev(genpd, dev);
+ genpd_stop_dev(genpd, dev);
mutex_lock(&genpd->lock);
@@ -354,11 +408,16 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
}
if (genpd->power_off) {
+ ktime_t time_start;
+ s64 elapsed_ns;
+
if (atomic_read(&genpd->sd_count) > 0) {
ret = -EBUSY;
goto out;
}
+ time_start = ktime_get();
+
/*
* If sd_count > 0 at this point, one of the subdomains hasn't
* managed to call pm_genpd_poweron() for the master yet after
@@ -372,9 +431,29 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
genpd_set_active(genpd);
goto out;
}
+
+ elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
+ if (elapsed_ns > genpd->power_off_latency_ns) {
+ genpd->power_off_latency_ns = elapsed_ns;
+ if (genpd->name)
+ pr_warning("%s: Power-off latency exceeded, "
+ "new value %lld ns\n", genpd->name,
+ elapsed_ns);
+ }
}
genpd->status = GPD_STATE_POWER_OFF;
+ genpd->power_off_time = ktime_get();
+
+ /* Update PM QoS information for devices in the domain. */
+ list_for_each_entry_reverse(pdd, &genpd->dev_list, list_node) {
+ struct gpd_timing_data *td = &to_gpd_data(pdd)->td;
+
+ pm_runtime_update_max_time_suspended(pdd->dev,
+ td->start_latency_ns +
+ td->restore_state_latency_ns +
+ genpd->power_on_latency_ns);
+ }
list_for_each_entry(link, &genpd->slave_links, slave_node) {
genpd_sd_counter_dec(link->master);
@@ -413,6 +492,8 @@ static void genpd_power_off_work_fn(struct work_struct *work)
static int pm_genpd_runtime_suspend(struct device *dev)
{
struct generic_pm_domain *genpd;
+ bool (*stop_ok)(struct device *__dev);
+ int ret;
dev_dbg(dev, "%s()\n", __func__);
@@ -422,11 +503,16 @@ static int pm_genpd_runtime_suspend(struct device *dev)
might_sleep_if(!genpd->dev_irq_safe);
- if (genpd->stop_device) {
- int ret = genpd->stop_device(dev);
- if (ret)
- return ret;
- }
+ stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL;
+ if (stop_ok && !stop_ok(dev))
+ return -EBUSY;
+
+ ret = genpd_stop_dev(genpd, dev);
+ if (ret)
+ return ret;
+
+ pm_runtime_update_max_time_suspended(dev,
+ dev_gpd_data(dev)->td.start_latency_ns);
/*
* If power.irq_safe is set, this routine will be run with interrupts
@@ -502,8 +588,7 @@ static int pm_genpd_runtime_resume(struct device *dev)
mutex_unlock(&genpd->lock);
out:
- if (genpd->start_device)
- genpd->start_device(dev);
+ genpd_start_dev(genpd, dev);
return 0;
}
@@ -534,6 +619,52 @@ static inline void genpd_power_off_work_fn(struct work_struct *work) {}
#ifdef CONFIG_PM_SLEEP
+static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd,
+ struct device *dev)
+{
+ return GENPD_DEV_CALLBACK(genpd, bool, active_wakeup, dev);
+}
+
+static int genpd_suspend_dev(struct generic_pm_domain *genpd, struct device *dev)
+{
+ return GENPD_DEV_CALLBACK(genpd, int, suspend, dev);
+}
+
+static int genpd_suspend_late(struct generic_pm_domain *genpd, struct device *dev)
+{
+ return GENPD_DEV_CALLBACK(genpd, int, suspend_late, dev);
+}
+
+static int genpd_resume_early(struct generic_pm_domain *genpd, struct device *dev)
+{
+ return GENPD_DEV_CALLBACK(genpd, int, resume_early, dev);
+}
+
+static int genpd_resume_dev(struct generic_pm_domain *genpd, struct device *dev)
+{
+ return GENPD_DEV_CALLBACK(genpd, int, resume, dev);
+}
+
+static int genpd_freeze_dev(struct generic_pm_domain *genpd, struct device *dev)
+{
+ return GENPD_DEV_CALLBACK(genpd, int, freeze, dev);
+}
+
+static int genpd_freeze_late(struct generic_pm_domain *genpd, struct device *dev)
+{
+ return GENPD_DEV_CALLBACK(genpd, int, freeze_late, dev);
+}
+
+static int genpd_thaw_early(struct generic_pm_domain *genpd, struct device *dev)
+{
+ return GENPD_DEV_CALLBACK(genpd, int, thaw_early, dev);
+}
+
+static int genpd_thaw_dev(struct generic_pm_domain *genpd, struct device *dev)
+{
+ return GENPD_DEV_CALLBACK(genpd, int, thaw, dev);
+}
+
/**
* pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters.
* @genpd: PM domain to power off, if possible.
@@ -590,7 +721,7 @@ static bool resume_needed(struct device *dev, struct generic_pm_domain *genpd)
if (!device_can_wakeup(dev))
return false;
- active_wakeup = genpd->active_wakeup && genpd->active_wakeup(dev);
+ active_wakeup = genpd_dev_active_wakeup(genpd, dev);
return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
}
@@ -646,7 +777,7 @@ static int pm_genpd_prepare(struct device *dev)
/*
* The PM domain must be in the GPD_STATE_ACTIVE state at this point,
* so pm_genpd_poweron() will return immediately, but if the device
- * is suspended (e.g. it's been stopped by .stop_device()), we need
+ * is suspended (e.g. it's been stopped by genpd_stop_dev()), we need
* to make it operational.
*/
pm_runtime_resume(dev);
@@ -685,7 +816,7 @@ static int pm_genpd_suspend(struct device *dev)
if (IS_ERR(genpd))
return -EINVAL;
- return genpd->suspend_power_off ? 0 : pm_generic_suspend(dev);
+ return genpd->suspend_power_off ? 0 : genpd_suspend_dev(genpd, dev);
}
/**
@@ -710,16 +841,14 @@ static int pm_genpd_suspend_noirq(struct device *dev)
if (genpd->suspend_power_off)
return 0;
- ret = pm_generic_suspend_noirq(dev);
+ ret = genpd_suspend_late(genpd, dev);
if (ret)
return ret;
- if (dev->power.wakeup_path
- && genpd->active_wakeup && genpd->active_wakeup(dev))
+ if (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))
return 0;
- if (genpd->stop_device)
- genpd->stop_device(dev);
+ genpd_stop_dev(genpd, dev);
/*
* Since all of the "noirq" callbacks are executed sequentially, it is
@@ -761,10 +890,9 @@ static int pm_genpd_resume_noirq(struct device *dev)
*/
pm_genpd_poweron(genpd);
genpd->suspended_count--;
- if (genpd->start_device)
- genpd->start_device(dev);
+ genpd_start_dev(genpd, dev);
- return pm_generic_resume_noirq(dev);
+ return genpd_resume_early(genpd, dev);
}
/**
@@ -785,7 +913,7 @@ static int pm_genpd_resume(struct device *dev)
if (IS_ERR(genpd))
return -EINVAL;
- return genpd->suspend_power_off ? 0 : pm_generic_resume(dev);
+ return genpd->suspend_power_off ? 0 : genpd_resume_dev(genpd, dev);
}
/**
@@ -806,7 +934,7 @@ static int pm_genpd_freeze(struct device *dev)
if (IS_ERR(genpd))
return -EINVAL;
- return genpd->suspend_power_off ? 0 : pm_generic_freeze(dev);
+ return genpd->suspend_power_off ? 0 : genpd_freeze_dev(genpd, dev);
}
/**
@@ -832,12 +960,11 @@ static int pm_genpd_freeze_noirq(struct device *dev)
if (genpd->suspend_power_off)
return 0;
- ret = pm_generic_freeze_noirq(dev);
+ ret = genpd_freeze_late(genpd, dev);
if (ret)
return ret;
- if (genpd->stop_device)
- genpd->stop_device(dev);
+ genpd_stop_dev(genpd, dev);
return 0;
}
@@ -864,10 +991,9 @@ static int pm_genpd_thaw_noirq(struct device *dev)
if (genpd->suspend_power_off)
return 0;
- if (genpd->start_device)
- genpd->start_device(dev);
+ genpd_start_dev(genpd, dev);
- return pm_generic_thaw_noirq(dev);
+ return genpd_thaw_early(genpd, dev);
}
/**
@@ -888,72 +1014,7 @@ static int pm_genpd_thaw(struct device *dev)
if (IS_ERR(genpd))
return -EINVAL;
- return genpd->suspend_power_off ? 0 : pm_generic_thaw(dev);
-}
-
-/**
- * pm_genpd_dev_poweroff - Power off a device belonging to an I/O PM domain.
- * @dev: Device to suspend.
- *
- * Power off a device under the assumption that its pm_domain field points to
- * the domain member of an object of type struct generic_pm_domain representing
- * a PM domain consisting of I/O devices.
- */
-static int pm_genpd_dev_poweroff(struct device *dev)
-{
- struct generic_pm_domain *genpd;
-
- dev_dbg(dev, "%s()\n", __func__);
-
- genpd = dev_to_genpd(dev);
- if (IS_ERR(genpd))
- return -EINVAL;
-
- return genpd->suspend_power_off ? 0 : pm_generic_poweroff(dev);
-}
-
-/**
- * pm_genpd_dev_poweroff_noirq - Late power off of a device from a PM domain.
- * @dev: Device to suspend.
- *
- * Carry out a late powering off of a device under the assumption that its
- * pm_domain field points to the domain member of an object of type
- * struct generic_pm_domain representing a PM domain consisting of I/O devices.
- */
-static int pm_genpd_dev_poweroff_noirq(struct device *dev)
-{
- struct generic_pm_domain *genpd;
- int ret;
-
- dev_dbg(dev, "%s()\n", __func__);
-
- genpd = dev_to_genpd(dev);
- if (IS_ERR(genpd))
- return -EINVAL;
-
- if (genpd->suspend_power_off)
- return 0;
-
- ret = pm_generic_poweroff_noirq(dev);
- if (ret)
- return ret;
-
- if (dev->power.wakeup_path
- && genpd->active_wakeup && genpd->active_wakeup(dev))
- return 0;
-
- if (genpd->stop_device)
- genpd->stop_device(dev);
-
- /*
- * Since all of the "noirq" callbacks are executed sequentially, it is
- * guaranteed that this function will never run twice in parallel for
- * the same PM domain, so it is not necessary to use locking here.
- */
- genpd->suspended_count++;
- pm_genpd_sync_poweroff(genpd);
-
- return 0;
+ return genpd->suspend_power_off ? 0 : genpd_thaw_dev(genpd, dev);
}
/**
@@ -993,31 +1054,9 @@ static int pm_genpd_restore_noirq(struct device *dev)
pm_genpd_poweron(genpd);
genpd->suspended_count--;
- if (genpd->start_device)
- genpd->start_device(dev);
-
- return pm_generic_restore_noirq(dev);
-}
-
-/**
- * pm_genpd_restore - Restore a device belonging to an I/O power domain.
- * @dev: Device to resume.
- *
- * Restore a device under the assumption that its pm_domain field points to the
- * domain member of an object of type struct generic_pm_domain representing
- * a power domain consisting of I/O devices.
- */
-static int pm_genpd_restore(struct device *dev)
-{
- struct generic_pm_domain *genpd;
-
- dev_dbg(dev, "%s()\n", __func__);
-
- genpd = dev_to_genpd(dev);
- if (IS_ERR(genpd))
- return -EINVAL;
+ genpd_start_dev(genpd, dev);
- return genpd->suspend_power_off ? 0 : pm_generic_restore(dev);
+ return genpd_resume_early(genpd, dev);
}
/**
@@ -1067,20 +1106,19 @@ static void pm_genpd_complete(struct device *dev)
#define pm_genpd_freeze_noirq NULL
#define pm_genpd_thaw_noirq NULL
#define pm_genpd_thaw NULL
-#define pm_genpd_dev_poweroff_noirq NULL
-#define pm_genpd_dev_poweroff NULL
#define pm_genpd_restore_noirq NULL
-#define pm_genpd_restore NULL
#define pm_genpd_complete NULL
#endif /* CONFIG_PM_SLEEP */
/**
- * pm_genpd_add_device - Add a device to an I/O PM domain.
+ * __pm_genpd_add_device - Add a device to an I/O PM domain.
* @genpd: PM domain to add the device to.
* @dev: Device to be added.
+ * @td: Set of PM QoS timing parameters to attach to the device.
*/
-int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
+int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
+ struct gpd_timing_data *td)
{
struct generic_pm_domain_data *gpd_data;
struct pm_domain_data *pdd;
@@ -1123,6 +1161,8 @@ int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
gpd_data->base.dev = dev;
gpd_data->need_restore = false;
list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
+ if (td)
+ gpd_data->td = *td;
out:
genpd_release_lock(genpd);
@@ -1280,6 +1320,204 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
}
/**
+ * pm_genpd_add_callbacks - Add PM domain callbacks to a given device.
+ * @dev: Device to add the callbacks to.
+ * @ops: Set of callbacks to add.
+ * @td: Timing data to add to the device along with the callbacks (optional).
+ */
+int pm_genpd_add_callbacks(struct device *dev, struct gpd_dev_ops *ops,
+ struct gpd_timing_data *td)
+{
+ struct pm_domain_data *pdd;
+ int ret = 0;
+
+ if (!(dev && dev->power.subsys_data && ops))
+ return -EINVAL;
+
+ pm_runtime_disable(dev);
+ device_pm_lock();
+
+ pdd = dev->power.subsys_data->domain_data;
+ if (pdd) {
+ struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
+
+ gpd_data->ops = *ops;
+ if (td)
+ gpd_data->td = *td;
+ } else {
+ ret = -EINVAL;
+ }
+
+ device_pm_unlock();
+ pm_runtime_enable(dev);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pm_genpd_add_callbacks);
+
+/**
+ * __pm_genpd_remove_callbacks - Remove PM domain callbacks from a given device.
+ * @dev: Device to remove the callbacks from.
+ * @clear_td: If set, clear the device's timing data too.
+ */
+int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td)
+{
+ struct pm_domain_data *pdd;
+ int ret = 0;
+
+ if (!(dev && dev->power.subsys_data))
+ return -EINVAL;
+
+ pm_runtime_disable(dev);
+ device_pm_lock();
+
+ pdd = dev->power.subsys_data->domain_data;
+ if (pdd) {
+ struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
+
+ gpd_data->ops = (struct gpd_dev_ops){ 0 };
+ if (clear_td)
+ gpd_data->td = (struct gpd_timing_data){ 0 };
+ } else {
+ ret = -EINVAL;
+ }
+
+ device_pm_unlock();
+ pm_runtime_enable(dev);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(__pm_genpd_remove_callbacks);
+
+/* Default device callbacks for generic PM domains. */
+
+/**
+ * pm_genpd_default_save_state - Default "save device state" for PM domains.
+ * @dev: Device to handle.
+ */
+static int pm_genpd_default_save_state(struct device *dev)
+{
+ int (*cb)(struct device *__dev);
+ struct device_driver *drv = dev->driver;
+
+ cb = dev_gpd_data(dev)->ops.save_state;
+ if (cb)
+ return cb(dev);
+
+ if (drv && drv->pm && drv->pm->runtime_suspend)
+ return drv->pm->runtime_suspend(dev);
+
+ return 0;
+}
+
+/**
+ * pm_genpd_default_restore_state - Default "restore device state" for PM domains.
+ * @dev: Device to handle.
+ */
+static int pm_genpd_default_restore_state(struct device *dev)
+{
+ int (*cb)(struct device *__dev);
+ struct device_driver *drv = dev->driver;
+
+ cb = dev_gpd_data(dev)->ops.restore_state;
+ if (cb)
+ return cb(dev);
+
+ if (drv && drv->pm && drv->pm->runtime_resume)
+ return drv->pm->runtime_resume(dev);
+
+ return 0;
+}
+
+/**
+ * pm_genpd_default_suspend - Default "device suspend" for PM domains.
+ * @dev: Device to handle.
+ */
+static int pm_genpd_default_suspend(struct device *dev)
+{
+ int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend;
+
+ return cb ? cb(dev) : pm_generic_suspend(dev);
+}
+
+/**
+ * pm_genpd_default_suspend_late - Default "late device suspend" for PM domains.
+ * @dev: Device to handle.
+ */
+static int pm_genpd_default_suspend_late(struct device *dev)
+{
+ int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend_late;
+
+ return cb ? cb(dev) : pm_generic_suspend_noirq(dev);
+}
+
+/**
+ * pm_genpd_default_resume_early - Default "early device resume" for PM domains.
+ * @dev: Device to handle.
+ */
+static int pm_genpd_default_resume_early(struct device *dev)
+{
+ int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume_early;
+
+ return cb ? cb(dev) : pm_generic_resume_noirq(dev);
+}
+
+/**
+ * pm_genpd_default_resume - Default "device resume" for PM domains.
+ * @dev: Device to handle.
+ */
+static int pm_genpd_default_resume(struct device *dev)
+{
+ int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume;
+
+ return cb ? cb(dev) : pm_generic_resume(dev);
+}
+
+/**
+ * pm_genpd_default_freeze - Default "device freeze" for PM domains.
+ * @dev: Device to handle.
+ */
+static int pm_genpd_default_freeze(struct device *dev)
+{
+ int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze;
+
+ return cb ? cb(dev) : pm_generic_freeze(dev);
+}
+
+/**
+ * pm_genpd_default_freeze_late - Default "late device freeze" for PM domains.
+ * @dev: Device to handle.
+ */
+static int pm_genpd_default_freeze_late(struct device *dev)
+{
+ int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze_late;
+
+ return cb ? cb(dev) : pm_generic_freeze_noirq(dev);
+}
+
+/**
+ * pm_genpd_default_thaw_early - Default "early device thaw" for PM domains.
+ * @dev: Device to handle.
+ */
+static int pm_genpd_default_thaw_early(struct device *dev)
+{
+ int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw_early;
+
+ return cb ? cb(dev) : pm_generic_thaw_noirq(dev);
+}
+
+/**
+ * pm_genpd_default_thaw - Default "device thaw" for PM domains.
+ * @dev: Device to handle.
+ */
+static int pm_genpd_default_thaw(struct device *dev)
+{
+ int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw;
+
+ return cb ? cb(dev) : pm_generic_thaw(dev);
+}
+
+/**
* pm_genpd_init - Initialize a generic I/O PM domain object.
* @genpd: PM domain object to initialize.
* @gov: PM domain governor to associate with the domain (may be NULL).
@@ -1305,6 +1543,7 @@ void pm_genpd_init(struct generic_pm_domain *genpd,
genpd->resume_count = 0;
genpd->device_count = 0;
genpd->suspended_count = 0;
+ genpd->max_off_time_ns = -1;
genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;
genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume;
genpd->domain.ops.runtime_idle = pm_generic_runtime_idle;
@@ -1317,11 +1556,21 @@ void pm_genpd_init(struct generic_pm_domain *genpd,
genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
genpd->domain.ops.thaw = pm_genpd_thaw;
- genpd->domain.ops.poweroff = pm_genpd_dev_poweroff;
- genpd->domain.ops.poweroff_noirq = pm_genpd_dev_poweroff_noirq;
+ genpd->domain.ops.poweroff = pm_genpd_suspend;
+ genpd->domain.ops.poweroff_noirq = pm_genpd_suspend_noirq;
genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
- genpd->domain.ops.restore = pm_genpd_restore;
+ genpd->domain.ops.restore = pm_genpd_resume;
genpd->domain.ops.complete = pm_genpd_complete;
+ genpd->dev_ops.save_state = pm_genpd_default_save_state;
+ genpd->dev_ops.restore_state = pm_genpd_default_restore_state;
+ genpd->dev_ops.suspend = pm_genpd_default_suspend;
+ genpd->dev_ops.suspend_late = pm_genpd_default_suspend_late;
+ genpd->dev_ops.resume_early = pm_genpd_default_resume_early;
+ genpd->dev_ops.resume = pm_genpd_default_resume;
+ genpd->dev_ops.freeze = pm_genpd_default_freeze;
+ genpd->dev_ops.freeze_late = pm_genpd_default_freeze_late;
+ genpd->dev_ops.thaw_early = pm_genpd_default_thaw_early;
+ genpd->dev_ops.thaw = pm_genpd_default_thaw;
mutex_lock(&gpd_list_lock);
list_add(&genpd->gpd_list_node, &gpd_list);
mutex_unlock(&gpd_list_lock);
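Putting the reworked genpd API together: a platform initializes a domain with a governor and attaches devices together with their measured latencies. A hedged usage sketch — my_domain, my_power_on/my_power_off and all latency values are hypothetical:

static int my_power_on(struct generic_pm_domain *domain) { return 0; }
static int my_power_off(struct generic_pm_domain *domain) { return 0; }

static struct generic_pm_domain my_domain = {
	.name = "my_domain",
	.power_on = my_power_on,
	.power_off = my_power_off,
};

static struct gpd_timing_data my_td = {
	.stop_latency_ns = 20000,
	.start_latency_ns = 50000,
	.save_state_latency_ns = 100000,
	.restore_state_latency_ns = 100000,
};

static int my_platform_setup(struct platform_device *pdev)
{
	pm_genpd_init(&my_domain, &simple_qos_governor, false);
	return __pm_genpd_add_device(&my_domain, &pdev->dev, &my_td);
}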
diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c
new file mode 100644
index 000000000000..51527ee92d10
--- /dev/null
+++ b/drivers/base/power/domain_governor.c
@@ -0,0 +1,156 @@
+/*
+ * drivers/base/power/domain_governor.c - Governors for device PM domains.
+ *
+ * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
+ *
+ * This file is released under the GPLv2.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_qos.h>
+#include <linux/hrtimer.h>
+
+/**
+ * default_stop_ok - Default PM domain governor routine for stopping devices.
+ * @dev: Device to check.
+ */
+bool default_stop_ok(struct device *dev)
+{
+ struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ if (dev->power.max_time_suspended_ns < 0 || td->break_even_ns == 0)
+ return true;
+
+ return td->stop_latency_ns + td->start_latency_ns < td->break_even_ns
+ && td->break_even_ns < dev->power.max_time_suspended_ns;
+}
+
+/**
+ * default_power_down_ok - Default generic PM domain power off governor routine.
+ * @pd: PM domain to check.
+ *
+ * This routine must be executed under the PM domain's lock.
+ */
+static bool default_power_down_ok(struct dev_pm_domain *pd)
+{
+ struct generic_pm_domain *genpd = pd_to_genpd(pd);
+ struct gpd_link *link;
+ struct pm_domain_data *pdd;
+ s64 min_dev_off_time_ns;
+ s64 off_on_time_ns;
+ ktime_t time_now = ktime_get();
+
+ off_on_time_ns = genpd->power_off_latency_ns +
+ genpd->power_on_latency_ns;
+ /*
+ * It doesn't make sense to remove power from the domain if saving
+ * the state of all devices in it and the power off/power on operations
+ * take too much time.
+ *
+ * All devices in this domain have been stopped already at this point.
+ */
+ list_for_each_entry(pdd, &genpd->dev_list, list_node) {
+ if (pdd->dev->driver)
+ off_on_time_ns +=
+ to_gpd_data(pdd)->td.save_state_latency_ns;
+ }
+
+ /*
+ * Check if the subdomains can stay off long enough.
+ *
+ * All subdomains have been powered off already at this point.
+ */
+ list_for_each_entry(link, &genpd->master_links, master_node) {
+ struct generic_pm_domain *sd = link->slave;
+ s64 sd_max_off_ns = sd->max_off_time_ns;
+
+ if (sd_max_off_ns < 0)
+ continue;
+
+ sd_max_off_ns -= ktime_to_ns(ktime_sub(time_now,
+ sd->power_off_time));
+ /*
+ * Check if the subdomain is allowed to be off long enough for
+ * the current domain to turn off and on (that's how much time
+ * it will have to wait worst case).
+ */
+ if (sd_max_off_ns <= off_on_time_ns)
+ return false;
+ }
+
+ /*
+ * Check if the devices in the domain can stay off long enough.
+ */
+ min_dev_off_time_ns = -1;
+ list_for_each_entry(pdd, &genpd->dev_list, list_node) {
+ struct gpd_timing_data *td;
+ struct device *dev = pdd->dev;
+ s64 dev_off_time_ns;
+
+ if (!dev->driver || dev->power.max_time_suspended_ns < 0)
+ continue;
+
+ td = &to_gpd_data(pdd)->td;
+ dev_off_time_ns = dev->power.max_time_suspended_ns -
+ (td->start_latency_ns + td->restore_state_latency_ns +
+ ktime_to_ns(ktime_sub(time_now,
+ dev->power.suspend_time)));
+ if (dev_off_time_ns <= off_on_time_ns)
+ return false;
+
+ if (min_dev_off_time_ns > dev_off_time_ns
+ || min_dev_off_time_ns < 0)
+ min_dev_off_time_ns = dev_off_time_ns;
+ }
+
+ if (min_dev_off_time_ns < 0) {
+ /*
+ * There are no latency constraints, so the domain can spend
+ * arbitrary time in the "off" state.
+ */
+ genpd->max_off_time_ns = -1;
+ return true;
+ }
+
+ /*
+ * The difference between the computed minimum device off time and the
+ * time needed to turn the domain on is the maximum theoretical time
+ * this domain can spend in the "off" state.
+ */
+ min_dev_off_time_ns -= genpd->power_on_latency_ns;
+
+ /*
+ * If the difference between the computed minimum off time and the time
+ * needed to turn the domain off and back on is smaller than the
+ * domain's break-even time, removing power from the domain is not
+ * worth it.
+ */
+ if (genpd->break_even_ns >
+ min_dev_off_time_ns - genpd->power_off_latency_ns)
+ return false;
+
+ genpd->max_off_time_ns = min_dev_off_time_ns;
+ return true;
+}
+
+struct dev_power_governor simple_qos_governor = {
+ .stop_ok = default_stop_ok,
+ .power_down_ok = default_power_down_ok,
+};
+
+static bool always_on_power_down_ok(struct dev_pm_domain *domain)
+{
+ return false;
+}
+
+/**
+ * pm_domain_always_on_gov - A governor implementing an always-on policy
+ */
+struct dev_power_governor pm_domain_always_on_gov = {
+ .power_down_ok = always_on_power_down_ok,
+ .stop_ok = default_stop_ok,
+};
diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c
index 265a0ee3b49e..10bdd793f0bd 100644
--- a/drivers/base/power/generic_ops.c
+++ b/drivers/base/power/generic_ops.c
@@ -97,16 +97,16 @@ int pm_generic_prepare(struct device *dev)
* @event: PM transition of the system under way.
* @bool: Whether or not this is the "noirq" stage.
*
- * If the device has not been suspended at run time, execute the
- * suspend/freeze/poweroff/thaw callback provided by its driver, if defined, and
- * return its error code. Otherwise, return zero.
+ * Execute the PM callback corresponding to @event provided by the driver of
+ * @dev, if defined, and return its error code. Return 0 if the callback is
+ * not present.
*/
static int __pm_generic_call(struct device *dev, int event, bool noirq)
{
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
int (*callback)(struct device *);
- if (!pm || pm_runtime_suspended(dev))
+ if (!pm)
return 0;
switch (event) {
@@ -119,9 +119,15 @@ static int __pm_generic_call(struct device *dev, int event, bool noirq)
case PM_EVENT_HIBERNATE:
callback = noirq ? pm->poweroff_noirq : pm->poweroff;
break;
+ case PM_EVENT_RESUME:
+ callback = noirq ? pm->resume_noirq : pm->resume;
+ break;
case PM_EVENT_THAW:
callback = noirq ? pm->thaw_noirq : pm->thaw;
break;
+ case PM_EVENT_RESTORE:
+ callback = noirq ? pm->restore_noirq : pm->restore;
+ break;
default:
callback = NULL;
break;
@@ -211,56 +217,12 @@ int pm_generic_thaw(struct device *dev)
EXPORT_SYMBOL_GPL(pm_generic_thaw);
/**
- * __pm_generic_resume - Generic resume/restore callback for subsystems.
- * @dev: Device to handle.
- * @event: PM transition of the system under way.
- * @bool: Whether or not this is the "noirq" stage.
- *
- * Execute the resume/resotre callback provided by the @dev's driver, if
- * defined. If it returns 0, change the device's runtime PM status to 'active'.
- * Return the callback's error code.
- */
-static int __pm_generic_resume(struct device *dev, int event, bool noirq)
-{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
- int (*callback)(struct device *);
- int ret;
-
- if (!pm)
- return 0;
-
- switch (event) {
- case PM_EVENT_RESUME:
- callback = noirq ? pm->resume_noirq : pm->resume;
- break;
- case PM_EVENT_RESTORE:
- callback = noirq ? pm->restore_noirq : pm->restore;
- break;
- default:
- callback = NULL;
- break;
- }
-
- if (!callback)
- return 0;
-
- ret = callback(dev);
- if (!ret && !noirq && pm_runtime_enabled(dev)) {
- pm_runtime_disable(dev);
- pm_runtime_set_active(dev);
- pm_runtime_enable(dev);
- }
-
- return ret;
-}
-
-/**
* pm_generic_resume_noirq - Generic resume_noirq callback for subsystems.
* @dev: Device to resume.
*/
int pm_generic_resume_noirq(struct device *dev)
{
- return __pm_generic_resume(dev, PM_EVENT_RESUME, true);
+ return __pm_generic_call(dev, PM_EVENT_RESUME, true);
}
EXPORT_SYMBOL_GPL(pm_generic_resume_noirq);
@@ -270,7 +232,7 @@ EXPORT_SYMBOL_GPL(pm_generic_resume_noirq);
*/
int pm_generic_resume(struct device *dev)
{
- return __pm_generic_resume(dev, PM_EVENT_RESUME, false);
+ return __pm_generic_call(dev, PM_EVENT_RESUME, false);
}
EXPORT_SYMBOL_GPL(pm_generic_resume);
@@ -280,7 +242,7 @@ EXPORT_SYMBOL_GPL(pm_generic_resume);
*/
int pm_generic_restore_noirq(struct device *dev)
{
- return __pm_generic_resume(dev, PM_EVENT_RESTORE, true);
+ return __pm_generic_call(dev, PM_EVENT_RESTORE, true);
}
EXPORT_SYMBOL_GPL(pm_generic_restore_noirq);
@@ -290,7 +252,7 @@ EXPORT_SYMBOL_GPL(pm_generic_restore_noirq);
*/
int pm_generic_restore(struct device *dev)
{
- return __pm_generic_resume(dev, PM_EVENT_RESTORE, false);
+ return __pm_generic_call(dev, PM_EVENT_RESTORE, false);
}
EXPORT_SYMBOL_GPL(pm_generic_restore);
@@ -314,28 +276,3 @@ void pm_generic_complete(struct device *dev)
pm_runtime_idle(dev);
}
#endif /* CONFIG_PM_SLEEP */
-
-struct dev_pm_ops generic_subsys_pm_ops = {
-#ifdef CONFIG_PM_SLEEP
- .prepare = pm_generic_prepare,
- .suspend = pm_generic_suspend,
- .suspend_noirq = pm_generic_suspend_noirq,
- .resume = pm_generic_resume,
- .resume_noirq = pm_generic_resume_noirq,
- .freeze = pm_generic_freeze,
- .freeze_noirq = pm_generic_freeze_noirq,
- .thaw = pm_generic_thaw,
- .thaw_noirq = pm_generic_thaw_noirq,
- .poweroff = pm_generic_poweroff,
- .poweroff_noirq = pm_generic_poweroff_noirq,
- .restore = pm_generic_restore,
- .restore_noirq = pm_generic_restore_noirq,
- .complete = pm_generic_complete,
-#endif
-#ifdef CONFIG_PM_RUNTIME
- .runtime_suspend = pm_generic_runtime_suspend,
- .runtime_resume = pm_generic_runtime_resume,
- .runtime_idle = pm_generic_runtime_idle,
-#endif
-};
-EXPORT_SYMBOL_GPL(generic_subsys_pm_ops);
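Note the behavioral change folded into __pm_generic_call(): the old pm_runtime_suspended() short-circuit is gone, so driver system-sleep callbacks now run even for runtime-suspended devices. A driver that still wants the old behavior must check on its own; hedged sketch with a hypothetical my_suspend():

static int my_suspend(struct device *dev)
{
	if (pm_runtime_suspended(dev))
		return 0;	/* already in low power, nothing to save */
	/* ... save state ... */
	return 0;
}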
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index c3d2dfcf438d..e2cc3d2e0ecc 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -32,6 +32,8 @@
#include "../base.h"
#include "power.h"
+typedef int (*pm_callback_t)(struct device *);
+
/*
* The entries in the dpm_list list are in a depth first order, simply
* because children are guaranteed to be discovered after parents, and
@@ -164,8 +166,9 @@ static ktime_t initcall_debug_start(struct device *dev)
ktime_t calltime = ktime_set(0, 0);
if (initcall_debug) {
- pr_info("calling %s+ @ %i\n",
- dev_name(dev), task_pid_nr(current));
+ pr_info("calling %s+ @ %i, parent: %s\n",
+ dev_name(dev), task_pid_nr(current),
+ dev->parent ? dev_name(dev->parent) : "none");
calltime = ktime_get();
}
@@ -211,151 +214,69 @@ static void dpm_wait_for_children(struct device *dev, bool async)
}
/**
- * pm_op - Execute the PM operation appropriate for given PM event.
- * @dev: Device to handle.
+ * pm_op - Return the PM operation appropriate for given PM event.
* @ops: PM operations to choose from.
* @state: PM transition of the system being carried out.
*/
-static int pm_op(struct device *dev,
- const struct dev_pm_ops *ops,
- pm_message_t state)
+static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
{
- int error = 0;
- ktime_t calltime;
-
- calltime = initcall_debug_start(dev);
-
switch (state.event) {
#ifdef CONFIG_SUSPEND
case PM_EVENT_SUSPEND:
- if (ops->suspend) {
- error = ops->suspend(dev);
- suspend_report_result(ops->suspend, error);
- }
- break;
+ return ops->suspend;
case PM_EVENT_RESUME:
- if (ops->resume) {
- error = ops->resume(dev);
- suspend_report_result(ops->resume, error);
- }
- break;
+ return ops->resume;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
case PM_EVENT_FREEZE:
case PM_EVENT_QUIESCE:
- if (ops->freeze) {
- error = ops->freeze(dev);
- suspend_report_result(ops->freeze, error);
- }
- break;
+ return ops->freeze;
case PM_EVENT_HIBERNATE:
- if (ops->poweroff) {
- error = ops->poweroff(dev);
- suspend_report_result(ops->poweroff, error);
- }
- break;
+ return ops->poweroff;
case PM_EVENT_THAW:
case PM_EVENT_RECOVER:
- if (ops->thaw) {
- error = ops->thaw(dev);
- suspend_report_result(ops->thaw, error);
- }
+ return ops->thaw;
break;
case PM_EVENT_RESTORE:
- if (ops->restore) {
- error = ops->restore(dev);
- suspend_report_result(ops->restore, error);
- }
- break;
+ return ops->restore;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
- default:
- error = -EINVAL;
}
- initcall_debug_report(dev, calltime, error);
-
- return error;
+ return NULL;
}
/**
- * pm_noirq_op - Execute the PM operation appropriate for given PM event.
- * @dev: Device to handle.
+ * pm_noirq_op - Return the PM operation appropriate for given PM event.
* @ops: PM operations to choose from.
* @state: PM transition of the system being carried out.
*
* The driver of @dev will not receive interrupts while this function is being
* executed.
*/
-static int pm_noirq_op(struct device *dev,
- const struct dev_pm_ops *ops,
- pm_message_t state)
+static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
{
- int error = 0;
- ktime_t calltime = ktime_set(0, 0), delta, rettime;
-
- if (initcall_debug) {
- pr_info("calling %s+ @ %i, parent: %s\n",
- dev_name(dev), task_pid_nr(current),
- dev->parent ? dev_name(dev->parent) : "none");
- calltime = ktime_get();
- }
-
switch (state.event) {
#ifdef CONFIG_SUSPEND
case PM_EVENT_SUSPEND:
- if (ops->suspend_noirq) {
- error = ops->suspend_noirq(dev);
- suspend_report_result(ops->suspend_noirq, error);
- }
- break;
+ return ops->suspend_noirq;
case PM_EVENT_RESUME:
- if (ops->resume_noirq) {
- error = ops->resume_noirq(dev);
- suspend_report_result(ops->resume_noirq, error);
- }
- break;
+ return ops->resume_noirq;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
case PM_EVENT_FREEZE:
case PM_EVENT_QUIESCE:
- if (ops->freeze_noirq) {
- error = ops->freeze_noirq(dev);
- suspend_report_result(ops->freeze_noirq, error);
- }
- break;
+ return ops->freeze_noirq;
case PM_EVENT_HIBERNATE:
- if (ops->poweroff_noirq) {
- error = ops->poweroff_noirq(dev);
- suspend_report_result(ops->poweroff_noirq, error);
- }
- break;
+ return ops->poweroff_noirq;
case PM_EVENT_THAW:
case PM_EVENT_RECOVER:
- if (ops->thaw_noirq) {
- error = ops->thaw_noirq(dev);
- suspend_report_result(ops->thaw_noirq, error);
- }
- break;
+ return ops->thaw_noirq;
case PM_EVENT_RESTORE:
- if (ops->restore_noirq) {
- error = ops->restore_noirq(dev);
- suspend_report_result(ops->restore_noirq, error);
- }
- break;
+ return ops->restore_noirq;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
- default:
- error = -EINVAL;
- }
-
- if (initcall_debug) {
- rettime = ktime_get();
- delta = ktime_sub(rettime, calltime);
- printk("initcall %s_i+ returned %d after %Ld usecs\n",
- dev_name(dev), error,
- (unsigned long long)ktime_to_ns(delta) >> 10);
}
- return error;
+ return NULL;
}
static char *pm_verb(int event)
@@ -413,6 +334,26 @@ static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}
+static int dpm_run_callback(pm_callback_t cb, struct device *dev,
+ pm_message_t state, char *info)
+{
+ ktime_t calltime;
+ int error;
+
+ if (!cb)
+ return 0;
+
+ calltime = initcall_debug_start(dev);
+
+ pm_dev_dbg(dev, state, info);
+ error = cb(dev);
+ suspend_report_result(cb, error);
+
+ initcall_debug_report(dev, calltime, error);
+
+ return error;
+}
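
dpm_run_callback() relies on the initcall_debug_start()/initcall_debug_report() helper pair; only the start half appears in this diff. By symmetry with the open-coded timing block deleted from pm_noirq_op() above, the report half should look roughly like this (a reconstruction, not necessarily the exact helper):

    static void initcall_debug_report(struct device *dev, ktime_t calltime,
                                      int error)
    {
            ktime_t delta, rettime;

            if (initcall_debug) {
                    rettime = ktime_get();
                    delta = ktime_sub(rettime, calltime);
                    pr_info("call %s+ returned %d after %Ld usecs\n",
                            dev_name(dev), error,
                            (unsigned long long)ktime_to_ns(delta) >> 10);
            }
    }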
+
/*------------------------- Resume routines -------------------------*/
/**
@@ -425,25 +366,34 @@ static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
*/
static int device_resume_noirq(struct device *dev, pm_message_t state)
{
+ pm_callback_t callback = NULL;
+ char *info = NULL;
int error = 0;
TRACE_DEVICE(dev);
TRACE_RESUME(0);
if (dev->pm_domain) {
- pm_dev_dbg(dev, state, "EARLY power domain ");
- error = pm_noirq_op(dev, &dev->pm_domain->ops, state);
+ info = "EARLY power domain ";
+ callback = pm_noirq_op(&dev->pm_domain->ops, state);
} else if (dev->type && dev->type->pm) {
- pm_dev_dbg(dev, state, "EARLY type ");
- error = pm_noirq_op(dev, dev->type->pm, state);
+ info = "EARLY type ";
+ callback = pm_noirq_op(dev->type->pm, state);
} else if (dev->class && dev->class->pm) {
- pm_dev_dbg(dev, state, "EARLY class ");
- error = pm_noirq_op(dev, dev->class->pm, state);
+ info = "EARLY class ";
+ callback = pm_noirq_op(dev->class->pm, state);
} else if (dev->bus && dev->bus->pm) {
- pm_dev_dbg(dev, state, "EARLY ");
- error = pm_noirq_op(dev, dev->bus->pm, state);
+ info = "EARLY bus ";
+ callback = pm_noirq_op(dev->bus->pm, state);
}
+ if (!callback && dev->driver && dev->driver->pm) {
+ info = "EARLY driver ";
+ callback = pm_noirq_op(dev->driver->pm, state);
+ }
+
+ error = dpm_run_callback(callback, dev, state, info);
+
TRACE_RESUME(error);
return error;
}
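
The new trailing check is the user-visible change from the merge log ("Run the driver callback directly if the subsystem one is not there"): a driver no longer needs its bus, class or type to forward PM callbacks on its behalf. A hypothetical driver showing what now gets picked up through dev->driver->pm (names invented for the example):

    static int foo_resume_noirq(struct device *dev)
    {
            /* runs before device interrupts are re-enabled */
            return 0;
    }

    static const struct dev_pm_ops foo_pm_ops = {
            .resume_noirq = foo_resume_noirq,
    };

    static struct platform_driver foo_driver = {
            .driver = {
                    .name = "foo",
                    .pm = &foo_pm_ops, /* found by the new fallback */
            },
    };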
@@ -486,26 +436,6 @@ void dpm_resume_noirq(pm_message_t state)
EXPORT_SYMBOL_GPL(dpm_resume_noirq);
/**
- * legacy_resume - Execute a legacy (bus or class) resume callback for device.
- * @dev: Device to resume.
- * @cb: Resume callback to execute.
- */
-static int legacy_resume(struct device *dev, int (*cb)(struct device *dev))
-{
- int error;
- ktime_t calltime;
-
- calltime = initcall_debug_start(dev);
-
- error = cb(dev);
- suspend_report_result(cb, error);
-
- initcall_debug_report(dev, calltime, error);
-
- return error;
-}
-
-/**
* device_resume - Execute "resume" callbacks for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
@@ -513,6 +443,8 @@ static int legacy_resume(struct device *dev, int (*cb)(struct device *dev))
*/
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
+ pm_callback_t callback = NULL;
+ char *info = NULL;
int error = 0;
bool put = false;
@@ -535,40 +467,48 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
put = true;
if (dev->pm_domain) {
- pm_dev_dbg(dev, state, "power domain ");
- error = pm_op(dev, &dev->pm_domain->ops, state);
- goto End;
+ info = "power domain ";
+ callback = pm_op(&dev->pm_domain->ops, state);
+ goto Driver;
}
if (dev->type && dev->type->pm) {
- pm_dev_dbg(dev, state, "type ");
- error = pm_op(dev, dev->type->pm, state);
- goto End;
+ info = "type ";
+ callback = pm_op(dev->type->pm, state);
+ goto Driver;
}
if (dev->class) {
if (dev->class->pm) {
- pm_dev_dbg(dev, state, "class ");
- error = pm_op(dev, dev->class->pm, state);
- goto End;
+ info = "class ";
+ callback = pm_op(dev->class->pm, state);
+ goto Driver;
} else if (dev->class->resume) {
- pm_dev_dbg(dev, state, "legacy class ");
- error = legacy_resume(dev, dev->class->resume);
+ info = "legacy class ";
+ callback = dev->class->resume;
goto End;
}
}
if (dev->bus) {
if (dev->bus->pm) {
- pm_dev_dbg(dev, state, "");
- error = pm_op(dev, dev->bus->pm, state);
+ info = "bus ";
+ callback = pm_op(dev->bus->pm, state);
} else if (dev->bus->resume) {
- pm_dev_dbg(dev, state, "legacy ");
- error = legacy_resume(dev, dev->bus->resume);
+ info = "legacy bus ";
+ callback = dev->bus->resume;
+ goto End;
}
}
+ Driver:
+ if (!callback && dev->driver && dev->driver->pm) {
+ info = "driver ";
+ callback = pm_op(dev->driver->pm, state);
+ }
+
End:
+ error = dpm_run_callback(callback, dev, state, info);
dev->power.is_suspended = false;
Unlock:
@@ -660,24 +600,33 @@ void dpm_resume(pm_message_t state)
*/
static void device_complete(struct device *dev, pm_message_t state)
{
+ void (*callback)(struct device *) = NULL;
+ char *info = NULL;
+
device_lock(dev);
if (dev->pm_domain) {
- pm_dev_dbg(dev, state, "completing power domain ");
- if (dev->pm_domain->ops.complete)
- dev->pm_domain->ops.complete(dev);
+ info = "completing power domain ";
+ callback = dev->pm_domain->ops.complete;
} else if (dev->type && dev->type->pm) {
- pm_dev_dbg(dev, state, "completing type ");
- if (dev->type->pm->complete)
- dev->type->pm->complete(dev);
+ info = "completing type ";
+ callback = dev->type->pm->complete;
} else if (dev->class && dev->class->pm) {
- pm_dev_dbg(dev, state, "completing class ");
- if (dev->class->pm->complete)
- dev->class->pm->complete(dev);
+ info = "completing class ";
+ callback = dev->class->pm->complete;
} else if (dev->bus && dev->bus->pm) {
- pm_dev_dbg(dev, state, "completing ");
- if (dev->bus->pm->complete)
- dev->bus->pm->complete(dev);
+ info = "completing bus ";
+ callback = dev->bus->pm->complete;
+ }
+
+ if (!callback && dev->driver && dev->driver->pm) {
+ info = "completing driver ";
+ callback = dev->driver->pm->complete;
+ }
+
+ if (callback) {
+ pm_dev_dbg(dev, state, info);
+ callback(dev);
}
device_unlock(dev);
@@ -763,31 +712,29 @@ static pm_message_t resume_event(pm_message_t sleep_state)
*/
static int device_suspend_noirq(struct device *dev, pm_message_t state)
{
- int error;
+ pm_callback_t callback = NULL;
+ char *info = NULL;
if (dev->pm_domain) {
- pm_dev_dbg(dev, state, "LATE power domain ");
- error = pm_noirq_op(dev, &dev->pm_domain->ops, state);
- if (error)
- return error;
+ info = "LATE power domain ";
+ callback = pm_noirq_op(&dev->pm_domain->ops, state);
} else if (dev->type && dev->type->pm) {
- pm_dev_dbg(dev, state, "LATE type ");
- error = pm_noirq_op(dev, dev->type->pm, state);
- if (error)
- return error;
+ info = "LATE type ";
+ callback = pm_noirq_op(dev->type->pm, state);
} else if (dev->class && dev->class->pm) {
- pm_dev_dbg(dev, state, "LATE class ");
- error = pm_noirq_op(dev, dev->class->pm, state);
- if (error)
- return error;
+ info = "LATE class ";
+ callback = pm_noirq_op(dev->class->pm, state);
} else if (dev->bus && dev->bus->pm) {
- pm_dev_dbg(dev, state, "LATE ");
- error = pm_noirq_op(dev, dev->bus->pm, state);
- if (error)
- return error;
+ info = "LATE bus ";
+ callback = pm_noirq_op(dev->bus->pm, state);
}
- return 0;
+ if (!callback && dev->driver && dev->driver->pm) {
+ info = "LATE driver ";
+ callback = pm_noirq_op(dev->driver->pm, state);
+ }
+
+ return dpm_run_callback(callback, dev, state, info);
}
/**
@@ -864,6 +811,8 @@ static int legacy_suspend(struct device *dev, pm_message_t state,
*/
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
+ pm_callback_t callback = NULL;
+ char *info = NULL;
int error = 0;
dpm_wait_for_children(dev, async);
@@ -884,22 +833,22 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
device_lock(dev);
if (dev->pm_domain) {
- pm_dev_dbg(dev, state, "power domain ");
- error = pm_op(dev, &dev->pm_domain->ops, state);
- goto End;
+ info = "power domain ";
+ callback = pm_op(&dev->pm_domain->ops, state);
+ goto Run;
}
if (dev->type && dev->type->pm) {
- pm_dev_dbg(dev, state, "type ");
- error = pm_op(dev, dev->type->pm, state);
- goto End;
+ info = "type ";
+ callback = pm_op(dev->type->pm, state);
+ goto Run;
}
if (dev->class) {
if (dev->class->pm) {
- pm_dev_dbg(dev, state, "class ");
- error = pm_op(dev, dev->class->pm, state);
- goto End;
+ info = "class ";
+ callback = pm_op(dev->class->pm, state);
+ goto Run;
} else if (dev->class->suspend) {
pm_dev_dbg(dev, state, "legacy class ");
error = legacy_suspend(dev, state, dev->class->suspend);
@@ -909,14 +858,23 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
if (dev->bus) {
if (dev->bus->pm) {
- pm_dev_dbg(dev, state, "");
- error = pm_op(dev, dev->bus->pm, state);
+ info = "bus ";
+ callback = pm_op(dev->bus->pm, state);
} else if (dev->bus->suspend) {
- pm_dev_dbg(dev, state, "legacy ");
+ pm_dev_dbg(dev, state, "legacy bus ");
error = legacy_suspend(dev, state, dev->bus->suspend);
+ goto End;
}
}
+ Run:
+ if (!callback && dev->driver && dev->driver->pm) {
+ info = "driver ";
+ callback = pm_op(dev->driver->pm, state);
+ }
+
+ error = dpm_run_callback(callback, dev, state, info);
+
End:
if (!error) {
dev->power.is_suspended = true;
@@ -1022,6 +980,8 @@ int dpm_suspend(pm_message_t state)
*/
static int device_prepare(struct device *dev, pm_message_t state)
{
+ int (*callback)(struct device *) = NULL;
+ char *info = NULL;
int error = 0;
device_lock(dev);
@@ -1029,34 +989,29 @@ static int device_prepare(struct device *dev, pm_message_t state)
dev->power.wakeup_path = device_may_wakeup(dev);
if (dev->pm_domain) {
- pm_dev_dbg(dev, state, "preparing power domain ");
- if (dev->pm_domain->ops.prepare)
- error = dev->pm_domain->ops.prepare(dev);
- suspend_report_result(dev->pm_domain->ops.prepare, error);
- if (error)
- goto End;
+ info = "preparing power domain ";
+ callback = dev->pm_domain->ops.prepare;
} else if (dev->type && dev->type->pm) {
- pm_dev_dbg(dev, state, "preparing type ");
- if (dev->type->pm->prepare)
- error = dev->type->pm->prepare(dev);
- suspend_report_result(dev->type->pm->prepare, error);
- if (error)
- goto End;
+ info = "preparing type ";
+ callback = dev->type->pm->prepare;
} else if (dev->class && dev->class->pm) {
- pm_dev_dbg(dev, state, "preparing class ");
- if (dev->class->pm->prepare)
- error = dev->class->pm->prepare(dev);
- suspend_report_result(dev->class->pm->prepare, error);
- if (error)
- goto End;
+ info = "preparing class ";
+ callback = dev->class->pm->prepare;
} else if (dev->bus && dev->bus->pm) {
- pm_dev_dbg(dev, state, "preparing ");
- if (dev->bus->pm->prepare)
- error = dev->bus->pm->prepare(dev);
- suspend_report_result(dev->bus->pm->prepare, error);
+ info = "preparing bus ";
+ callback = dev->bus->pm->prepare;
+ }
+
+ if (!callback && dev->driver && dev->driver->pm) {
+ info = "preparing driver ";
+ callback = dev->driver->pm->prepare;
+ }
+
+ if (callback) {
+ error = callback(dev);
+ suspend_report_result(callback, error);
}
- End:
device_unlock(dev);
return error;
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index 86de6c50fc41..c5d358837461 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -47,21 +47,29 @@ static DEFINE_MUTEX(dev_pm_qos_mtx);
static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers);
/**
- * dev_pm_qos_read_value - Get PM QoS constraint for a given device.
+ * __dev_pm_qos_read_value - Get PM QoS constraint for a given device.
+ * @dev: Device to get the PM QoS constraint value for.
+ *
+ * This routine must be called with dev->power.lock held.
+ */
+s32 __dev_pm_qos_read_value(struct device *dev)
+{
+ struct pm_qos_constraints *c = dev->power.constraints;
+
+ return c ? pm_qos_read_value(c) : 0;
+}
+
+/**
+ * dev_pm_qos_read_value - Get PM QoS constraint for a given device (locked).
* @dev: Device to get the PM QoS constraint value for.
*/
s32 dev_pm_qos_read_value(struct device *dev)
{
- struct pm_qos_constraints *c;
unsigned long flags;
- s32 ret = 0;
+ s32 ret;
spin_lock_irqsave(&dev->power.lock, flags);
-
- c = dev->power.constraints;
- if (c)
- ret = pm_qos_read_value(c);
-
+ ret = __dev_pm_qos_read_value(dev);
spin_unlock_irqrestore(&dev->power.lock, flags);
return ret;
@@ -412,3 +420,28 @@ int dev_pm_qos_remove_global_notifier(struct notifier_block *notifier)
return blocking_notifier_chain_unregister(&dev_pm_notifiers, notifier);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_remove_global_notifier);
+
+/**
+ * dev_pm_qos_add_ancestor_request - Add PM QoS request for device's ancestor.
+ * @dev: Device whose ancestor to add the request for.
+ * @req: Pointer to the preallocated handle.
+ * @value: Constraint latency value.
+ */
+int dev_pm_qos_add_ancestor_request(struct device *dev,
+ struct dev_pm_qos_request *req, s32 value)
+{
+ struct device *ancestor = dev->parent;
+ int error = -ENODEV;
+
+ while (ancestor && !ancestor->power.ignore_children)
+ ancestor = ancestor->parent;
+
+ if (ancestor)
+ error = dev_pm_qos_add_request(ancestor, req, value);
+
+ if (error)
+ req->dev = NULL;
+
+ return error;
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request);
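
The caller preallocates the request handle and can use req->dev to tell whether a request is currently installed, since the helper clears it on failure. In outline (a sketch mirroring the st1232 touchscreen usage further down in this merge):

    struct dev_pm_qos_request req = { };

    /* Ask the nearest ancestor that owns its children's PM, e.g. an
     * I2C controller, for a 100 us resume-latency constraint. */
    dev_pm_qos_add_ancestor_request(&client->dev, &req, 100);

    /* ... latency-sensitive window ... */

    if (req.dev) { /* non-NULL only while a request is installed */
            dev_pm_qos_remove_request(&req);
            req.dev = NULL;
    }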
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 8c78443bca8f..541f821d4ea6 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -250,6 +250,9 @@ static int rpm_idle(struct device *dev, int rpmflags)
else
callback = NULL;
+ if (!callback && dev->driver && dev->driver->pm)
+ callback = dev->driver->pm->runtime_idle;
+
if (callback)
__rpm_callback(callback, dev);
@@ -279,6 +282,47 @@ static int rpm_callback(int (*cb)(struct device *), struct device *dev)
return retval != -EACCES ? retval : -EIO;
}
+struct rpm_qos_data {
+ ktime_t time_now;
+ s64 constraint_ns;
+};
+
+/**
+ * rpm_update_qos_constraint - Update a given PM QoS constraint data.
+ * @dev: Device whose timing data to use.
+ * @data: PM QoS constraint data to update.
+ *
+ * Use the suspend timing data of @dev to update PM QoS constraint data pointed
+ * to by @data.
+ */
+static int rpm_update_qos_constraint(struct device *dev, void *data)
+{
+ struct rpm_qos_data *qos = data;
+ unsigned long flags;
+ s64 delta_ns;
+ int ret = 0;
+
+ spin_lock_irqsave(&dev->power.lock, flags);
+
+ if (dev->power.max_time_suspended_ns < 0)
+ goto out;
+
+ delta_ns = dev->power.max_time_suspended_ns -
+ ktime_to_ns(ktime_sub(qos->time_now, dev->power.suspend_time));
+ if (delta_ns <= 0) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ if (qos->constraint_ns > delta_ns || qos->constraint_ns == 0)
+ qos->constraint_ns = delta_ns;
+
+ out:
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+
+ return ret;
+}
+
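
Worked through with concrete numbers, the aggregation keeps the tightest remaining budget among the suspended children (illustrative values, not from the patch):

    child A: max_time_suspended_ns = 5,000,000; suspended 2 ms ago -> delta = 3,000,000 ns
    child B: max_time_suspended_ns = 4,000,000; suspended 3 ms ago -> delta = 1,000,000 ns

constraint_ns starts from the parent's own QoS value (0 meaning no limit) and ends up min(3,000,000, 1,000,000) = 1,000,000 ns, while a child whose budget has already run out (delta <= 0) aborts the parent's suspend with -EBUSY.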
/**
* rpm_suspend - Carry out runtime suspend of given device.
* @dev: Device to suspend.
@@ -305,6 +349,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
{
int (*callback)(struct device *);
struct device *parent = NULL;
+ struct rpm_qos_data qos;
int retval;
trace_rpm_suspend(dev, rpmflags);
@@ -400,8 +445,38 @@ static int rpm_suspend(struct device *dev, int rpmflags)
goto out;
}
+ qos.constraint_ns = __dev_pm_qos_read_value(dev);
+ if (qos.constraint_ns < 0) {
+ /* Negative constraint means "never suspend". */
+ retval = -EPERM;
+ goto out;
+ }
+ qos.constraint_ns *= NSEC_PER_USEC;
+ qos.time_now = ktime_get();
+
__update_runtime_status(dev, RPM_SUSPENDING);
+ if (!dev->power.ignore_children) {
+ if (dev->power.irq_safe)
+ spin_unlock(&dev->power.lock);
+ else
+ spin_unlock_irq(&dev->power.lock);
+
+ retval = device_for_each_child(dev, &qos,
+ rpm_update_qos_constraint);
+
+ if (dev->power.irq_safe)
+ spin_lock(&dev->power.lock);
+ else
+ spin_lock_irq(&dev->power.lock);
+
+ if (retval)
+ goto fail;
+ }
+
+ dev->power.suspend_time = qos.time_now;
+ dev->power.max_time_suspended_ns = qos.constraint_ns ? : -1;
+
if (dev->pm_domain)
callback = dev->pm_domain->ops.runtime_suspend;
else if (dev->type && dev->type->pm)
@@ -413,28 +488,13 @@ static int rpm_suspend(struct device *dev, int rpmflags)
else
callback = NULL;
+ if (!callback && dev->driver && dev->driver->pm)
+ callback = dev->driver->pm->runtime_suspend;
+
retval = rpm_callback(callback, dev);
- if (retval) {
- __update_runtime_status(dev, RPM_ACTIVE);
- dev->power.deferred_resume = false;
- if (retval == -EAGAIN || retval == -EBUSY) {
- dev->power.runtime_error = 0;
+ if (retval)
+ goto fail;
- /*
- * If the callback routine failed an autosuspend, and
- * if the last_busy time has been updated so that there
- * is a new autosuspend expiration time, automatically
- * reschedule another autosuspend.
- */
- if ((rpmflags & RPM_AUTO) &&
- pm_runtime_autosuspend_expiration(dev) != 0)
- goto repeat;
- } else {
- pm_runtime_cancel_pending(dev);
- }
- wake_up_all(&dev->power.wait_queue);
- goto out;
- }
no_callback:
__update_runtime_status(dev, RPM_SUSPENDED);
pm_runtime_deactivate_timer(dev);
@@ -466,6 +526,29 @@ static int rpm_suspend(struct device *dev, int rpmflags)
trace_rpm_return_int(dev, _THIS_IP_, retval);
return retval;
+
+ fail:
+ __update_runtime_status(dev, RPM_ACTIVE);
+ dev->power.suspend_time = ktime_set(0, 0);
+ dev->power.max_time_suspended_ns = -1;
+ dev->power.deferred_resume = false;
+ if (retval == -EAGAIN || retval == -EBUSY) {
+ dev->power.runtime_error = 0;
+
+ /*
+ * If the callback routine failed an autosuspend, and
+ * if the last_busy time has been updated so that there
+ * is a new autosuspend expiration time, automatically
+ * reschedule another autosuspend.
+ */
+ if ((rpmflags & RPM_AUTO) &&
+ pm_runtime_autosuspend_expiration(dev) != 0)
+ goto repeat;
+ } else {
+ pm_runtime_cancel_pending(dev);
+ }
+ wake_up_all(&dev->power.wait_queue);
+ goto out;
}
/**
@@ -620,6 +703,9 @@ static int rpm_resume(struct device *dev, int rpmflags)
if (dev->power.no_callbacks)
goto no_callback; /* Assume success. */
+ dev->power.suspend_time = ktime_set(0, 0);
+ dev->power.max_time_suspended_ns = -1;
+
__update_runtime_status(dev, RPM_RESUMING);
if (dev->pm_domain)
@@ -633,6 +719,9 @@ static int rpm_resume(struct device *dev, int rpmflags)
else
callback = NULL;
+ if (!callback && dev->driver && dev->driver->pm)
+ callback = dev->driver->pm->runtime_resume;
+
retval = rpm_callback(callback, dev);
if (retval) {
__update_runtime_status(dev, RPM_SUSPENDED);
@@ -1279,6 +1368,9 @@ void pm_runtime_init(struct device *dev)
setup_timer(&dev->power.suspend_timer, pm_suspend_timer_fn,
(unsigned long)dev);
+ dev->power.suspend_time = ktime_set(0, 0);
+ dev->power.max_time_suspended_ns = -1;
+
init_waitqueue_head(&dev->power.wait_queue);
}
@@ -1296,3 +1388,28 @@ void pm_runtime_remove(struct device *dev)
if (dev->power.irq_safe && dev->parent)
pm_runtime_put_sync(dev->parent);
}
+
+/**
+ * pm_runtime_update_max_time_suspended - Update device's suspend time data.
+ * @dev: Device to handle.
+ * @delta_ns: Value to subtract from the device's max_time_suspended_ns field.
+ *
+ * Update the device's power.max_time_suspended_ns field by subtracting
+ * @delta_ns from it. The resulting value of power.max_time_suspended_ns is
+ * never negative.
+ */
+void pm_runtime_update_max_time_suspended(struct device *dev, s64 delta_ns)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->power.lock, flags);
+
+ if (delta_ns > 0 && dev->power.max_time_suspended_ns > 0) {
+ if (dev->power.max_time_suspended_ns > delta_ns)
+ dev->power.max_time_suspended_ns -= delta_ns;
+ else
+ dev->power.max_time_suspended_ns = 0;
+ }
+
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+}
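
For example, with a remaining budget of 4,000,000 ns, charging a delta of 1,500,000 ns leaves 2,500,000 ns; any delta at or above the budget clamps the field to 0 rather than letting it go negative, which preserves the sentinel meanings used elsewhere in this patch (0: budget exhausted, negative: no constraint).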
diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c
index a88a78c86162..6c3defa50845 100644
--- a/drivers/bluetooth/btmrvl_main.c
+++ b/drivers/bluetooth/btmrvl_main.c
@@ -475,8 +475,6 @@ static int btmrvl_service_main_thread(void *data)
init_waitqueue_entry(&wait, current);
- current->flags |= PF_NOFREEZE;
-
for (;;) {
add_wait_queue(&thread->wait_q, &wait);
diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig
index 8f0491037080..464fa2147dfb 100644
--- a/drivers/devfreq/Kconfig
+++ b/drivers/devfreq/Kconfig
@@ -65,4 +65,17 @@ config DEVFREQ_GOV_USERSPACE
comment "DEVFREQ Drivers"
+config ARM_EXYNOS4_BUS_DEVFREQ
+ bool "ARM Exynos4210/4212/4412 Memory Bus DEVFREQ Driver"
+ depends on CPU_EXYNOS4210 || CPU_EXYNOS4212 || CPU_EXYNOS4412
+ select ARCH_HAS_OPP
+ select DEVFREQ_GOV_SIMPLE_ONDEMAND
+ help
+ This adds the DEVFREQ driver for Exynos4210 memory bus (vdd_int)
+ and Exynos4212/4412 memory interface and bus (vdd_mif + vdd_int).
+ It reads PPMU counters of memory controllers and adjusts
+ the operating frequencies and voltages with OPP support.
+ To operate with optimal voltages, ASV support is required
+ (CONFIG_EXYNOS_ASV).
+
endif # PM_DEVFREQ
diff --git a/drivers/devfreq/Makefile b/drivers/devfreq/Makefile
index 4564a89e970a..8c464234f7e7 100644
--- a/drivers/devfreq/Makefile
+++ b/drivers/devfreq/Makefile
@@ -3,3 +3,6 @@ obj-$(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND) += governor_simpleondemand.o
obj-$(CONFIG_DEVFREQ_GOV_PERFORMANCE) += governor_performance.o
obj-$(CONFIG_DEVFREQ_GOV_POWERSAVE) += governor_powersave.o
obj-$(CONFIG_DEVFREQ_GOV_USERSPACE) += governor_userspace.o
+
+# DEVFREQ Drivers
+obj-$(CONFIG_ARM_EXYNOS4_BUS_DEVFREQ) += exynos4_bus.o
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 59d24e9cb8c5..c189b82f5ece 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -347,7 +347,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
if (!IS_ERR(devfreq)) {
dev_err(dev, "%s: Unable to create devfreq for the device. It already has one.\n", __func__);
err = -EINVAL;
- goto out;
+ goto err_out;
}
}
@@ -356,7 +356,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
dev_err(dev, "%s: Unable to create devfreq for the device\n",
__func__);
err = -ENOMEM;
- goto out;
+ goto err_out;
}
mutex_init(&devfreq->lock);
@@ -399,17 +399,16 @@ struct devfreq *devfreq_add_device(struct device *dev,
devfreq->next_polling);
}
mutex_unlock(&devfreq_list_lock);
- goto out;
+out:
+ return devfreq;
+
err_init:
device_unregister(&devfreq->dev);
err_dev:
mutex_unlock(&devfreq->lock);
kfree(devfreq);
-out:
- if (err)
- return ERR_PTR(err);
- else
- return devfreq;
+err_out:
+ return ERR_PTR(err);
}
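
The reshuffle turns the old mixed "out" label into the standard kernel goto ladder: success falls through to a plain return, and each failure jumps to the label that unwinds exactly what was set up so far. The generic shape (a sketch with hypothetical helpers, not devfreq code):

    struct foo *foo_create(void)
    {
            struct foo *f;
            int err;

            f = kzalloc(sizeof(*f), GFP_KERNEL);
            if (!f) {
                    err = -ENOMEM;
                    goto err_out;      /* nothing to undo yet */
            }

            err = foo_register(f);     /* hypothetical setup step */
            if (err)
                    goto err_free;     /* undo the allocation only */

            return f;

    err_free:
            kfree(f);
    err_out:
            return ERR_PTR(err);
    }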
/**
diff --git a/drivers/devfreq/exynos4_bus.c b/drivers/devfreq/exynos4_bus.c
new file mode 100644
index 000000000000..6460577d6701
--- /dev/null
+++ b/drivers/devfreq/exynos4_bus.c
@@ -0,0 +1,1135 @@
+/* drivers/devfreq/exynos4_bus.c
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ * MyungJoo Ham <myungjoo.ham@samsung.com>
+ *
+ * EXYNOS4 - Memory/Bus clock frequency scaling support in DEVFREQ framework
+ * This version supports EXYNOS4210 only. This changes bus frequencies
+ * and vddint voltages. Exynos4412/4212 should be able to be supported
+ * with minor modifications.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/suspend.h>
+#include <linux/opp.h>
+#include <linux/devfreq.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/module.h>
+
+/* Exynos4 ASV has been in the mailing list, but not upstreamed, yet. */
+#ifdef CONFIG_EXYNOS_ASV
+extern unsigned int exynos_result_of_asv;
+#endif
+
+#include <mach/regs-clock.h>
+
+#include <plat/map-s5p.h>
+
+#define MAX_SAFEVOLT 1200000 /* 1.2V */
+
+enum exynos4_busf_type {
+ TYPE_BUSF_EXYNOS4210,
+ TYPE_BUSF_EXYNOS4x12,
+};
+
+/* Assume that the bus is saturated if the utilization is 40% */
+#define BUS_SATURATION_RATIO 40
+
+enum ppmu_counter {
+ PPMU_PMNCNT0 = 0,
+ PPMU_PMCCNT1,
+ PPMU_PMNCNT2,
+ PPMU_PMNCNT3,
+ PPMU_PMNCNT_MAX,
+};
+struct exynos4_ppmu {
+ void __iomem *hw_base;
+ unsigned int ccnt;
+ unsigned int event;
+ unsigned int count[PPMU_PMNCNT_MAX];
+ bool ccnt_overflow;
+ bool count_overflow[PPMU_PMNCNT_MAX];
+};
+
+enum busclk_level_idx {
+ LV_0 = 0,
+ LV_1,
+ LV_2,
+ LV_3,
+ LV_4,
+ _LV_END
+};
+#define EX4210_LV_MAX LV_2
+#define EX4x12_LV_MAX LV_4
+#define EX4210_LV_NUM (LV_2 + 1)
+#define EX4x12_LV_NUM (LV_4 + 1)
+
+struct busfreq_data {
+ enum exynos4_busf_type type;
+ struct device *dev;
+ struct devfreq *devfreq;
+ bool disabled;
+ struct regulator *vdd_int;
+ struct regulator *vdd_mif; /* Exynos4412/4212 only */
+ struct opp *curr_opp;
+ struct exynos4_ppmu dmc[2];
+
+ struct notifier_block pm_notifier;
+ struct mutex lock;
+
+ /* Dividers calculated at boot/probe-time */
+ unsigned int dmc_divtable[_LV_END]; /* DMC0 */
+ unsigned int top_divtable[_LV_END];
+};
+
+struct bus_opp_table {
+ unsigned int idx;
+ unsigned long clk;
+ unsigned long volt;
+};
+
+/* 4210 controls clock of mif and voltage of int */
+static struct bus_opp_table exynos4210_busclk_table[] = {
+ {LV_0, 400000, 1150000},
+ {LV_1, 267000, 1050000},
+ {LV_2, 133000, 1025000},
+ {0, 0, 0},
+};
+
+/*
+ * MIF is the main control knob for Exynos4x12: the clock and
+ * voltage of both mif/int are controlled through it.
+ */
+static struct bus_opp_table exynos4x12_mifclk_table[] = {
+ {LV_0, 400000, 1100000},
+ {LV_1, 267000, 1000000},
+ {LV_2, 160000, 950000},
+ {LV_3, 133000, 950000},
+ {LV_4, 100000, 950000},
+ {0, 0, 0},
+};
+
+/*
+ * INT is not the control knob of 4x12; its LV_x entries do not
+ * represent the current performance level (the MIF table does).
+ */
+static struct bus_opp_table exynos4x12_intclk_table[] = {
+ {LV_0, 200000, 1000000},
+ {LV_1, 160000, 950000},
+ {LV_2, 133000, 925000},
+ {LV_3, 100000, 900000},
+ {0, 0, 0},
+};
+
+/* TODO: asv volt definitions are "__initdata"? */
+/* Some chips have different operating voltages */
+static unsigned int exynos4210_asv_volt[][EX4210_LV_NUM] = {
+ {1150000, 1050000, 1050000},
+ {1125000, 1025000, 1025000},
+ {1100000, 1000000, 1000000},
+ {1075000, 975000, 975000},
+ {1050000, 950000, 950000},
+};
+
+static unsigned int exynos4x12_mif_step_50[][EX4x12_LV_NUM] = {
+ /* 400 267 160 133 100 */
+ {1050000, 950000, 900000, 900000, 900000}, /* ASV0 */
+ {1050000, 950000, 900000, 900000, 900000}, /* ASV1 */
+ {1050000, 950000, 900000, 900000, 900000}, /* ASV2 */
+ {1050000, 900000, 900000, 900000, 900000}, /* ASV3 */
+ {1050000, 900000, 900000, 900000, 850000}, /* ASV4 */
+ {1050000, 900000, 900000, 850000, 850000}, /* ASV5 */
+ {1050000, 900000, 850000, 850000, 850000}, /* ASV6 */
+ {1050000, 900000, 850000, 850000, 850000}, /* ASV7 */
+ {1050000, 900000, 850000, 850000, 850000}, /* ASV8 */
+};
+
+static unsigned int exynos4x12_int_volt[][EX4x12_LV_NUM] = {
+ /* 200 160 133 100 */
+ {1000000, 950000, 925000, 900000}, /* ASV0 */
+ {975000, 925000, 925000, 900000}, /* ASV1 */
+ {950000, 925000, 900000, 875000}, /* ASV2 */
+ {950000, 900000, 900000, 875000}, /* ASV3 */
+ {925000, 875000, 875000, 875000}, /* ASV4 */
+ {900000, 850000, 850000, 850000}, /* ASV5 */
+ {900000, 850000, 850000, 850000}, /* ASV6 */
+ {900000, 850000, 850000, 850000}, /* ASV7 */
+ {900000, 850000, 850000, 850000}, /* ASV8 */
+};
+
+/*** Clock Divider Data for Exynos4210 ***/
+static unsigned int exynos4210_clkdiv_dmc0[][8] = {
+ /*
+ * Clock divider value for following
+ * { DIVACP, DIVACP_PCLK, DIVDPHY, DIVDMC, DIVDMCD
+ * DIVDMCP, DIVCOPY2, DIVCORE_TIMERS }
+ */
+
+ /* DMC L0: 400MHz */
+ { 3, 1, 1, 1, 1, 1, 3, 1 },
+ /* DMC L1: 266.7MHz */
+ { 4, 1, 1, 2, 1, 1, 3, 1 },
+ /* DMC L2: 133MHz */
+ { 5, 1, 1, 5, 1, 1, 3, 1 },
+};
+static unsigned int exynos4210_clkdiv_top[][5] = {
+ /*
+ * Clock divider value for following
+ * { DIVACLK200, DIVACLK100, DIVACLK160, DIVACLK133, DIVONENAND }
+ */
+ /* ACLK200 L0: 200MHz */
+ { 3, 7, 4, 5, 1 },
+ /* ACLK200 L1: 160MHz */
+ { 4, 7, 5, 6, 1 },
+ /* ACLK200 L2: 133MHz */
+ { 5, 7, 7, 7, 1 },
+};
+static unsigned int exynos4210_clkdiv_lr_bus[][2] = {
+ /*
+ * Clock divider value for following
+ * { DIVGDL/R, DIVGPL/R }
+ */
+ /* ACLK_GDL/R L1: 200MHz */
+ { 3, 1 },
+ /* ACLK_GDL/R L2: 160MHz */
+ { 4, 1 },
+ /* ACLK_GDL/R L3: 133MHz */
+ { 5, 1 },
+};
+
+/*** Clock Divider Data for Exynos4212/4412 ***/
+static unsigned int exynos4x12_clkdiv_dmc0[][6] = {
+ /*
+ * Clock divider value for following
+ * { DIVACP, DIVACP_PCLK, DIVDPHY, DIVDMC, DIVDMCD
+ * DIVDMCP}
+ */
+
+ /* DMC L0: 400MHz */
+ {3, 1, 1, 1, 1, 1},
+ /* DMC L1: 266.7MHz */
+ {4, 1, 1, 2, 1, 1},
+ /* DMC L2: 160MHz */
+ {5, 1, 1, 4, 1, 1},
+ /* DMC L3: 133MHz */
+ {5, 1, 1, 5, 1, 1},
+ /* DMC L4: 100MHz */
+ {7, 1, 1, 7, 1, 1},
+};
+static unsigned int exynos4x12_clkdiv_dmc1[][6] = {
+ /*
+ * Clock divider value for following
+ * { G2DACP, DIVC2C, DIVC2C_ACLK }
+ */
+
+ /* DMC L0: 400MHz */
+ {3, 1, 1},
+ /* DMC L1: 266.7MHz */
+ {4, 2, 1},
+ /* DMC L2: 160MHz */
+ {5, 4, 1},
+ /* DMC L3: 133MHz */
+ {5, 5, 1},
+ /* DMC L4: 100MHz */
+ {7, 7, 1},
+};
+static unsigned int exynos4x12_clkdiv_top[][5] = {
+ /*
+ * Clock divider value for following
+ * { DIVACLK266_GPS, DIVACLK100, DIVACLK160,
+ DIVACLK133, DIVONENAND }
+ */
+
+ /* ACLK_GDL/R L0: 200MHz */
+ {2, 7, 4, 5, 1},
+ /* ACLK_GDL/R L1: 200MHz */
+ {2, 7, 4, 5, 1},
+ /* ACLK_GDL/R L2: 160MHz */
+ {4, 7, 5, 7, 1},
+ /* ACLK_GDL/R L3: 133MHz */
+ {4, 7, 5, 7, 1},
+ /* ACLK_GDL/R L4: 100MHz */
+ {7, 7, 7, 7, 1},
+};
+static unsigned int exynos4x12_clkdiv_lr_bus[][2] = {
+ /*
+ * Clock divider value for following
+ * { DIVGDL/R, DIVGPL/R }
+ */
+
+ /* ACLK_GDL/R L0: 200MHz */
+ {3, 1},
+ /* ACLK_GDL/R L1: 200MHz */
+ {3, 1},
+ /* ACLK_GDL/R L2: 160MHz */
+ {4, 1},
+ /* ACLK_GDL/R L3: 133MHz */
+ {5, 1},
+ /* ACLK_GDL/R L4: 100MHz */
+ {7, 1},
+};
+static unsigned int exynos4x12_clkdiv_sclkip[][3] = {
+ /*
+ * Clock divider value for following
+ * { DIVMFC, DIVJPEG, DIVFIMC0~3}
+ */
+
+ /* SCLK_MFC: 200MHz */
+ {3, 3, 4},
+ /* SCLK_MFC: 200MHz */
+ {3, 3, 4},
+ /* SCLK_MFC: 160MHz */
+ {4, 4, 5},
+ /* SCLK_MFC: 133MHz */
+ {5, 5, 5},
+ /* SCLK_MFC: 100MHz */
+ {7, 7, 7},
+};
+
+
+static int exynos4210_set_busclk(struct busfreq_data *data, struct opp *opp)
+{
+ unsigned int index;
+ unsigned int tmp;
+
+ for (index = LV_0; index < EX4210_LV_NUM; index++)
+ if (opp_get_freq(opp) == exynos4210_busclk_table[index].clk)
+ break;
+
+ if (index == EX4210_LV_NUM)
+ return -EINVAL;
+
+ /* Change Divider - DMC0 */
+ tmp = data->dmc_divtable[index];
+
+ __raw_writel(tmp, S5P_CLKDIV_DMC0);
+
+ do {
+ tmp = __raw_readl(S5P_CLKDIV_STAT_DMC0);
+ } while (tmp & 0x11111111);
+
+ /* Change Divider - TOP */
+ tmp = data->top_divtable[index];
+
+ __raw_writel(tmp, S5P_CLKDIV_TOP);
+
+ do {
+ tmp = __raw_readl(S5P_CLKDIV_STAT_TOP);
+ } while (tmp & 0x11111);
+
+ /* Change Divider - LEFTBUS */
+ tmp = __raw_readl(S5P_CLKDIV_LEFTBUS);
+
+ tmp &= ~(S5P_CLKDIV_BUS_GDLR_MASK | S5P_CLKDIV_BUS_GPLR_MASK);
+
+ tmp |= ((exynos4210_clkdiv_lr_bus[index][0] <<
+ S5P_CLKDIV_BUS_GDLR_SHIFT) |
+ (exynos4210_clkdiv_lr_bus[index][1] <<
+ S5P_CLKDIV_BUS_GPLR_SHIFT));
+
+ __raw_writel(tmp, S5P_CLKDIV_LEFTBUS);
+
+ do {
+ tmp = __raw_readl(S5P_CLKDIV_STAT_LEFTBUS);
+ } while (tmp & 0x11);
+
+ /* Change Divider - RIGHTBUS */
+ tmp = __raw_readl(S5P_CLKDIV_RIGHTBUS);
+
+ tmp &= ~(S5P_CLKDIV_BUS_GDLR_MASK | S5P_CLKDIV_BUS_GPLR_MASK);
+
+ tmp |= ((exynos4210_clkdiv_lr_bus[index][0] <<
+ S5P_CLKDIV_BUS_GDLR_SHIFT) |
+ (exynos4210_clkdiv_lr_bus[index][1] <<
+ S5P_CLKDIV_BUS_GPLR_SHIFT));
+
+ __raw_writel(tmp, S5P_CLKDIV_RIGHTBUS);
+
+ do {
+ tmp = __raw_readl(S5P_CLKDIV_STAT_RIGHTBUS);
+ } while (tmp & 0x11);
+
+ return 0;
+}
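
Every divider update in these helpers follows the same two beats: write the new divider word, then spin on the matching *_STAT register until the hardware signals that the change has latched. The masks (0x11111111, 0x11111, 0x11, ...) OR together what are presumably per-divider busy bits, one per divider field:

    __raw_writel(new_div, S5P_CLKDIV_DMC0);         /* request new ratios */
    do {
            tmp = __raw_readl(S5P_CLKDIV_STAT_DMC0);
    } while (tmp & 0x11111111);                     /* wait for all fields */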
+
+static int exynos4x12_set_busclk(struct busfreq_data *data, struct opp *opp)
+{
+ unsigned int index;
+ unsigned int tmp;
+
+ for (index = LV_0; index < EX4x12_LV_NUM; index++)
+ if (opp_get_freq(opp) == exynos4x12_mifclk_table[index].clk)
+ break;
+
+ if (index == EX4x12_LV_NUM)
+ return -EINVAL;
+
+ /* Change Divider - DMC0 */
+ tmp = data->dmc_divtable[index];
+
+ __raw_writel(tmp, S5P_CLKDIV_DMC0);
+
+ do {
+ tmp = __raw_readl(S5P_CLKDIV_STAT_DMC0);
+ } while (tmp & 0x11111111);
+
+ /* Change Divider - DMC1 */
+ tmp = __raw_readl(S5P_CLKDIV_DMC1);
+
+ tmp &= ~(S5P_CLKDIV_DMC1_G2D_ACP_MASK |
+ S5P_CLKDIV_DMC1_C2C_MASK |
+ S5P_CLKDIV_DMC1_C2CACLK_MASK);
+
+ tmp |= ((exynos4x12_clkdiv_dmc1[index][0] <<
+ S5P_CLKDIV_DMC1_G2D_ACP_SHIFT) |
+ (exynos4x12_clkdiv_dmc1[index][1] <<
+ S5P_CLKDIV_DMC1_C2C_SHIFT) |
+ (exynos4x12_clkdiv_dmc1[index][2] <<
+ S5P_CLKDIV_DMC1_C2CACLK_SHIFT));
+
+ __raw_writel(tmp, S5P_CLKDIV_DMC1);
+
+ do {
+ tmp = __raw_readl(S5P_CLKDIV_STAT_DMC1);
+ } while (tmp & 0x111111);
+
+ /* Change Divider - TOP */
+ tmp = __raw_readl(S5P_CLKDIV_TOP);
+
+ tmp &= ~(S5P_CLKDIV_TOP_ACLK266_GPS_MASK |
+ S5P_CLKDIV_TOP_ACLK100_MASK |
+ S5P_CLKDIV_TOP_ACLK160_MASK |
+ S5P_CLKDIV_TOP_ACLK133_MASK |
+ S5P_CLKDIV_TOP_ONENAND_MASK);
+
+ tmp |= ((exynos4x12_clkdiv_top[index][0] <<
+ S5P_CLKDIV_TOP_ACLK266_GPS_SHIFT) |
+ (exynos4x12_clkdiv_top[index][1] <<
+ S5P_CLKDIV_TOP_ACLK100_SHIFT) |
+ (exynos4x12_clkdiv_top[index][2] <<
+ S5P_CLKDIV_TOP_ACLK160_SHIFT) |
+ (exynos4x12_clkdiv_top[index][3] <<
+ S5P_CLKDIV_TOP_ACLK133_SHIFT) |
+ (exynos4x12_clkdiv_top[index][4] <<
+ S5P_CLKDIV_TOP_ONENAND_SHIFT));
+
+ __raw_writel(tmp, S5P_CLKDIV_TOP);
+
+ do {
+ tmp = __raw_readl(S5P_CLKDIV_STAT_TOP);
+ } while (tmp & 0x11111);
+
+ /* Change Divider - LEFTBUS */
+ tmp = __raw_readl(S5P_CLKDIV_LEFTBUS);
+
+ tmp &= ~(S5P_CLKDIV_BUS_GDLR_MASK | S5P_CLKDIV_BUS_GPLR_MASK);
+
+ tmp |= ((exynos4x12_clkdiv_lr_bus[index][0] <<
+ S5P_CLKDIV_BUS_GDLR_SHIFT) |
+ (exynos4x12_clkdiv_lr_bus[index][1] <<
+ S5P_CLKDIV_BUS_GPLR_SHIFT));
+
+ __raw_writel(tmp, S5P_CLKDIV_LEFTBUS);
+
+ do {
+ tmp = __raw_readl(S5P_CLKDIV_STAT_LEFTBUS);
+ } while (tmp & 0x11);
+
+ /* Change Divider - RIGHTBUS */
+ tmp = __raw_readl(S5P_CLKDIV_RIGHTBUS);
+
+ tmp &= ~(S5P_CLKDIV_BUS_GDLR_MASK | S5P_CLKDIV_BUS_GPLR_MASK);
+
+ tmp |= ((exynos4x12_clkdiv_lr_bus[index][0] <<
+ S5P_CLKDIV_BUS_GDLR_SHIFT) |
+ (exynos4x12_clkdiv_lr_bus[index][1] <<
+ S5P_CLKDIV_BUS_GPLR_SHIFT));
+
+ __raw_writel(tmp, S5P_CLKDIV_RIGHTBUS);
+
+ do {
+ tmp = __raw_readl(S5P_CLKDIV_STAT_RIGHTBUS);
+ } while (tmp & 0x11);
+
+ /* Change Divider - MFC */
+ tmp = __raw_readl(S5P_CLKDIV_MFC);
+
+ tmp &= ~(S5P_CLKDIV_MFC_MASK);
+
+ tmp |= ((exynos4x12_clkdiv_sclkip[index][0] <<
+ S5P_CLKDIV_MFC_SHIFT));
+
+ __raw_writel(tmp, S5P_CLKDIV_MFC);
+
+ do {
+ tmp = __raw_readl(S5P_CLKDIV_STAT_MFC);
+ } while (tmp & 0x1);
+
+ /* Change Divider - JPEG */
+ tmp = __raw_readl(S5P_CLKDIV_CAM1);
+
+ tmp &= ~(S5P_CLKDIV_CAM1_JPEG_MASK);
+
+ tmp |= ((exynos4x12_clkdiv_sclkip[index][1] <<
+ S5P_CLKDIV_CAM1_JPEG_SHIFT));
+
+ __raw_writel(tmp, S5P_CLKDIV_CAM1);
+
+ do {
+ tmp = __raw_readl(S5P_CLKDIV_STAT_CAM1);
+ } while (tmp & 0x1);
+
+ /* Change Divider - FIMC0~3 */
+ tmp = __raw_readl(S5P_CLKDIV_CAM);
+
+ tmp &= ~(S5P_CLKDIV_CAM_FIMC0_MASK | S5P_CLKDIV_CAM_FIMC1_MASK |
+ S5P_CLKDIV_CAM_FIMC2_MASK | S5P_CLKDIV_CAM_FIMC3_MASK);
+
+ tmp |= ((exynos4x12_clkdiv_sclkip[index][2] <<
+ S5P_CLKDIV_CAM_FIMC0_SHIFT) |
+ (exynos4x12_clkdiv_sclkip[index][2] <<
+ S5P_CLKDIV_CAM_FIMC1_SHIFT) |
+ (exynos4x12_clkdiv_sclkip[index][2] <<
+ S5P_CLKDIV_CAM_FIMC2_SHIFT) |
+ (exynos4x12_clkdiv_sclkip[index][2] <<
+ S5P_CLKDIV_CAM_FIMC3_SHIFT));
+
+ __raw_writel(tmp, S5P_CLKDIV_CAM);
+
+ do {
+ tmp = __raw_readl(S5P_CLKDIV_STAT_CAM1);
+ } while (tmp & 0x1111);
+
+ return 0;
+}
+
+
+static void busfreq_mon_reset(struct busfreq_data *data)
+{
+ unsigned int i;
+
+ for (i = 0; i < 2; i++) {
+ void __iomem *ppmu_base = data->dmc[i].hw_base;
+
+ /* Reset PPMU */
+ __raw_writel(0x8000000f, ppmu_base + 0xf010);
+ __raw_writel(0x8000000f, ppmu_base + 0xf050);
+ __raw_writel(0x6, ppmu_base + 0xf000);
+ __raw_writel(0x0, ppmu_base + 0xf100);
+
+ /* Set PPMU Event */
+ data->dmc[i].event = 0x6;
+ __raw_writel(((data->dmc[i].event << 12) | 0x1),
+ ppmu_base + 0xfc);
+
+ /* Start PPMU */
+ __raw_writel(0x1, ppmu_base + 0xf000);
+ }
+}
+
+static void exynos4_read_ppmu(struct busfreq_data *data)
+{
+ int i, j;
+
+ for (i = 0; i < 2; i++) {
+ void __iomem *ppmu_base = data->dmc[i].hw_base;
+ u32 overflow;
+
+ /* Stop PPMU */
+ __raw_writel(0x0, ppmu_base + 0xf000);
+
+ /* Update local data from PPMU */
+ overflow = __raw_readl(ppmu_base + 0xf050);
+
+ data->dmc[i].ccnt = __raw_readl(ppmu_base + 0xf100);
+ data->dmc[i].ccnt_overflow = overflow & (1 << 31);
+
+ for (j = 0; j < PPMU_PMNCNT_MAX; j++) {
+ data->dmc[i].count[j] = __raw_readl(
+ ppmu_base + (0xf110 + (0x10 * j)));
+ data->dmc[i].count_overflow[j] = overflow & (1 << j);
+ }
+ }
+
+ busfreq_mon_reset(data);
+}
+
+static int exynos4x12_get_intspec(unsigned long mifclk)
+{
+ int i = 0;
+
+ while (exynos4x12_intclk_table[i].clk) {
+ if (exynos4x12_intclk_table[i].clk <= mifclk)
+ return i;
+ i++;
+ }
+
+ return -EINVAL;
+}
+
+static int exynos4_bus_setvolt(struct busfreq_data *data, struct opp *opp,
+ struct opp *oldopp)
+{
+ int err = 0, tmp;
+ unsigned long volt = opp_get_voltage(opp);
+
+ switch (data->type) {
+ case TYPE_BUSF_EXYNOS4210:
+ /* OPP represents DMC clock + INT voltage */
+ err = regulator_set_voltage(data->vdd_int, volt,
+ MAX_SAFEVOLT);
+ break;
+ case TYPE_BUSF_EXYNOS4x12:
+ /* OPP represents MIF clock + MIF voltage */
+ err = regulator_set_voltage(data->vdd_mif, volt,
+ MAX_SAFEVOLT);
+ if (err)
+ break;
+
+ tmp = exynos4x12_get_intspec(opp_get_freq(opp));
+ if (tmp < 0) {
+ err = tmp;
+ regulator_set_voltage(data->vdd_mif,
+ opp_get_voltage(oldopp),
+ MAX_SAFEVOLT);
+ break;
+ }
+ err = regulator_set_voltage(data->vdd_int,
+ exynos4x12_intclk_table[tmp].volt,
+ MAX_SAFEVOLT);
+ /* Try to recover */
+ if (err)
+ regulator_set_voltage(data->vdd_mif,
+ opp_get_voltage(oldopp),
+ MAX_SAFEVOLT);
+ break;
+ default:
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+static int exynos4_bus_target(struct device *dev, unsigned long *_freq)
+{
+ int err = 0;
+ struct platform_device *pdev = container_of(dev, struct platform_device,
+ dev);
+ struct busfreq_data *data = platform_get_drvdata(pdev);
+ struct opp *opp = devfreq_recommended_opp(dev, _freq);
+ unsigned long old_freq = opp_get_freq(data->curr_opp);
+ unsigned long freq = opp_get_freq(opp);
+
+ if (old_freq == freq)
+ return 0;
+
+ dev_dbg(dev, "targetting %lukHz %luuV\n", freq, opp_get_voltage(opp));
+
+ mutex_lock(&data->lock);
+
+ if (data->disabled)
+ goto out;
+
+ if (old_freq < freq)
+ err = exynos4_bus_setvolt(data, opp, data->curr_opp);
+ if (err)
+ goto out;
+
+ if (old_freq != freq) {
+ switch (data->type) {
+ case TYPE_BUSF_EXYNOS4210:
+ err = exynos4210_set_busclk(data, opp);
+ break;
+ case TYPE_BUSF_EXYNOS4x12:
+ err = exynos4x12_set_busclk(data, opp);
+ break;
+ default:
+ err = -EINVAL;
+ }
+ }
+ if (err)
+ goto out;
+
+ if (old_freq > freq)
+ err = exynos4_bus_setvolt(data, opp, data->curr_opp);
+ if (err)
+ goto out;
+
+ data->curr_opp = opp;
+out:
+ mutex_unlock(&data->lock);
+ return err;
+}
+
+static int exynos4_get_busier_dmc(struct busfreq_data *data)
+{
+ u64 p0 = data->dmc[0].count[0];
+ u64 p1 = data->dmc[1].count[0];
+
+ p0 *= data->dmc[1].ccnt;
+ p1 *= data->dmc[0].ccnt;
+
+ if (data->dmc[1].ccnt == 0)
+ return 0;
+
+ if (p0 > p1)
+ return 0;
+ return 1;
+}
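
exynos4_get_busier_dmc() compares the two controllers' utilization ratios count0/ccnt0 and count1/ccnt1 without dividing: widening to u64 and cross-multiplying gives

    count0 / ccnt0 > count1 / ccnt1  <=>  count0 * ccnt1 > count1 * ccnt0   (ccnt0, ccnt1 > 0)

which is exact in integer arithmetic, and two 32-bit counters multiplied together always fit in the 64-bit products.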
+
+static int exynos4_bus_get_dev_status(struct device *dev,
+ struct devfreq_dev_status *stat)
+{
+ struct platform_device *pdev = container_of(dev, struct platform_device,
+ dev);
+ struct busfreq_data *data = platform_get_drvdata(pdev);
+ int busier_dmc;
+ int cycles_x2 = 2; /* 2 x cycles */
+ void __iomem *addr;
+ u32 timing;
+ u32 memctrl;
+
+ exynos4_read_ppmu(data);
+ busier_dmc = exynos4_get_busier_dmc(data);
+ stat->current_frequency = opp_get_freq(data->curr_opp);
+
+ if (busier_dmc)
+ addr = S5P_VA_DMC1;
+ else
+ addr = S5P_VA_DMC0;
+
+ memctrl = __raw_readl(addr + 0x04); /* one of DDR2/3/LPDDR2 */
+ timing = __raw_readl(addr + 0x38); /* CL or WL/RL values */
+
+ switch ((memctrl >> 8) & 0xf) {
+ case 0x4: /* DDR2 */
+ cycles_x2 = ((timing >> 16) & 0xf) * 2;
+ break;
+ case 0x5: /* LPDDR2 */
+ case 0x6: /* DDR3 */
+ cycles_x2 = ((timing >> 8) & 0xf) + ((timing >> 0) & 0xf);
+ break;
+ default:
+ pr_err("%s: Unknown Memory Type(%d).\n", __func__,
+ (memctrl >> 8) & 0xf);
+ return -EINVAL;
+ }
+
+ /* Number of cycles spent on memory access */
+ stat->busy_time = data->dmc[busier_dmc].count[0] / 2 * (cycles_x2 + 2);
+ stat->busy_time *= 100 / BUS_SATURATION_RATIO;
+ stat->total_time = data->dmc[busier_dmc].ccnt;
+
+ /* If the counters have overflown, retry */
+ if (data->dmc[busier_dmc].ccnt_overflow ||
+ data->dmc[busier_dmc].count_overflow[0])
+ return -EAGAIN;
+
+ return 0;
+}
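
busy_time approximates DRAM-busy cycles: each counted access is charged (cycles_x2 + 2) / 2 cycles, where cycles_x2 carries CL (DDR2) or WL + RL (LPDDR2/DDR3) from the timing register, and the result is scaled so that BUS_SATURATION_RATIO percent utilization reads as fully busy. Note that 100 / BUS_SATURATION_RATIO is integer division, so with the ratio at 40 the multiplier is 2, not 2.5. Roughly (illustrative numbers):

    count[0] = 1,000 accesses, DDR3 with WL + RL = 9  ->  cycles_x2 = 9
    busy cycles ~ 1,000 / 2 * (9 + 2) = 5,500
    busy_time  = 5,500 * (100 / 40) = 5,500 * 2 = 11,000
    total_time = ccnt, the total cycles the PPMU observed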
+
+static void exynos4_bus_exit(struct device *dev)
+{
+ struct platform_device *pdev = container_of(dev, struct platform_device,
+ dev);
+ struct busfreq_data *data = platform_get_drvdata(pdev);
+
+ devfreq_unregister_opp_notifier(dev, data->devfreq);
+}
+
+static struct devfreq_dev_profile exynos4_devfreq_profile = {
+ .initial_freq = 400000,
+ .polling_ms = 50,
+ .target = exynos4_bus_target,
+ .get_dev_status = exynos4_bus_get_dev_status,
+ .exit = exynos4_bus_exit,
+};
+
+static int exynos4210_init_tables(struct busfreq_data *data)
+{
+ u32 tmp;
+ int mgrp;
+ int i, err = 0;
+
+ tmp = __raw_readl(S5P_CLKDIV_DMC0);
+ for (i = LV_0; i < EX4210_LV_NUM; i++) {
+ tmp &= ~(S5P_CLKDIV_DMC0_ACP_MASK |
+ S5P_CLKDIV_DMC0_ACPPCLK_MASK |
+ S5P_CLKDIV_DMC0_DPHY_MASK |
+ S5P_CLKDIV_DMC0_DMC_MASK |
+ S5P_CLKDIV_DMC0_DMCD_MASK |
+ S5P_CLKDIV_DMC0_DMCP_MASK |
+ S5P_CLKDIV_DMC0_COPY2_MASK |
+ S5P_CLKDIV_DMC0_CORETI_MASK);
+
+ tmp |= ((exynos4210_clkdiv_dmc0[i][0] <<
+ S5P_CLKDIV_DMC0_ACP_SHIFT) |
+ (exynos4210_clkdiv_dmc0[i][1] <<
+ S5P_CLKDIV_DMC0_ACPPCLK_SHIFT) |
+ (exynos4210_clkdiv_dmc0[i][2] <<
+ S5P_CLKDIV_DMC0_DPHY_SHIFT) |
+ (exynos4210_clkdiv_dmc0[i][3] <<
+ S5P_CLKDIV_DMC0_DMC_SHIFT) |
+ (exynos4210_clkdiv_dmc0[i][4] <<
+ S5P_CLKDIV_DMC0_DMCD_SHIFT) |
+ (exynos4210_clkdiv_dmc0[i][5] <<
+ S5P_CLKDIV_DMC0_DMCP_SHIFT) |
+ (exynos4210_clkdiv_dmc0[i][6] <<
+ S5P_CLKDIV_DMC0_COPY2_SHIFT) |
+ (exynos4210_clkdiv_dmc0[i][7] <<
+ S5P_CLKDIV_DMC0_CORETI_SHIFT));
+
+ data->dmc_divtable[i] = tmp;
+ }
+
+ tmp = __raw_readl(S5P_CLKDIV_TOP);
+ for (i = LV_0; i < EX4210_LV_NUM; i++) {
+ tmp &= ~(S5P_CLKDIV_TOP_ACLK200_MASK |
+ S5P_CLKDIV_TOP_ACLK100_MASK |
+ S5P_CLKDIV_TOP_ACLK160_MASK |
+ S5P_CLKDIV_TOP_ACLK133_MASK |
+ S5P_CLKDIV_TOP_ONENAND_MASK);
+
+ tmp |= ((exynos4210_clkdiv_top[i][0] <<
+ S5P_CLKDIV_TOP_ACLK200_SHIFT) |
+ (exynos4210_clkdiv_top[i][1] <<
+ S5P_CLKDIV_TOP_ACLK100_SHIFT) |
+ (exynos4210_clkdiv_top[i][2] <<
+ S5P_CLKDIV_TOP_ACLK160_SHIFT) |
+ (exynos4210_clkdiv_top[i][3] <<
+ S5P_CLKDIV_TOP_ACLK133_SHIFT) |
+ (exynos4210_clkdiv_top[i][4] <<
+ S5P_CLKDIV_TOP_ONENAND_SHIFT));
+
+ data->top_divtable[i] = tmp;
+ }
+
+#ifdef CONFIG_EXYNOS_ASV
+ tmp = exynos_result_of_asv;
+#else
+ tmp = 0; /* Unknown ASV group: use max voltages for reliability */
+#endif
+
+ pr_debug("ASV Group of Exynos4 is %d\n", tmp);
+ /* Use merged grouping for voltage */
+ switch (tmp) {
+ case 0:
+ mgrp = 0;
+ break;
+ case 1:
+ case 2:
+ mgrp = 1;
+ break;
+ case 3:
+ case 4:
+ mgrp = 2;
+ break;
+ case 5:
+ case 6:
+ mgrp = 3;
+ break;
+ case 7:
+ mgrp = 4;
+ break;
+ default:
+ pr_warn("Unknown ASV Group. Use max voltage.\n");
+ mgrp = 0;
+ }
+
+ for (i = LV_0; i < EX4210_LV_NUM; i++)
+ exynos4210_busclk_table[i].volt = exynos4210_asv_volt[mgrp][i];
+
+ for (i = LV_0; i < EX4210_LV_NUM; i++) {
+ err = opp_add(data->dev, exynos4210_busclk_table[i].clk,
+ exynos4210_busclk_table[i].volt);
+ if (err) {
+ dev_err(data->dev, "Cannot add opp entries.\n");
+ return err;
+ }
+ }
+
+
+ return 0;
+}
+
+static int exynos4x12_init_tables(struct busfreq_data *data)
+{
+ unsigned int i;
+ unsigned int tmp;
+ int ret;
+
+ /* Enable pause function for DREX2 DVFS */
+ tmp = __raw_readl(S5P_DMC_PAUSE_CTRL);
+ tmp |= DMC_PAUSE_ENABLE;
+ __raw_writel(tmp, S5P_DMC_PAUSE_CTRL);
+
+ tmp = __raw_readl(S5P_CLKDIV_DMC0);
+
+ for (i = 0; i < EX4x12_LV_NUM; i++) {
+ tmp &= ~(S5P_CLKDIV_DMC0_ACP_MASK |
+ S5P_CLKDIV_DMC0_ACPPCLK_MASK |
+ S5P_CLKDIV_DMC0_DPHY_MASK |
+ S5P_CLKDIV_DMC0_DMC_MASK |
+ S5P_CLKDIV_DMC0_DMCD_MASK |
+ S5P_CLKDIV_DMC0_DMCP_MASK);
+
+ tmp |= ((exynos4x12_clkdiv_dmc0[i][0] <<
+ S5P_CLKDIV_DMC0_ACP_SHIFT) |
+ (exynos4x12_clkdiv_dmc0[i][1] <<
+ S5P_CLKDIV_DMC0_ACPPCLK_SHIFT) |
+ (exynos4x12_clkdiv_dmc0[i][2] <<
+ S5P_CLKDIV_DMC0_DPHY_SHIFT) |
+ (exynos4x12_clkdiv_dmc0[i][3] <<
+ S5P_CLKDIV_DMC0_DMC_SHIFT) |
+ (exynos4x12_clkdiv_dmc0[i][4] <<
+ S5P_CLKDIV_DMC0_DMCD_SHIFT) |
+ (exynos4x12_clkdiv_dmc0[i][5] <<
+ S5P_CLKDIV_DMC0_DMCP_SHIFT));
+
+ data->dmc_divtable[i] = tmp;
+ }
+
+#ifdef CONFIG_EXYNOS_ASV
+ tmp = exynos_result_of_asv;
+#else
+ tmp = 0; /* Unknown ASV group: use max voltages for reliability */
+#endif
+
+ if (tmp > 8)
+ tmp = 0;
+ pr_debug("ASV Group of Exynos4x12 is %d\n", tmp);
+
+ for (i = 0; i < EX4x12_LV_NUM; i++) {
+ exynos4x12_mifclk_table[i].volt =
+ exynos4x12_mif_step_50[tmp][i];
+ exynos4x12_intclk_table[i].volt =
+ exynos4x12_int_volt[tmp][i];
+ }
+
+ for (i = 0; i < EX4x12_LV_NUM; i++) {
+ ret = opp_add(data->dev, exynos4x12_mifclk_table[i].clk,
+ exynos4x12_mifclk_table[i].volt);
+ if (ret) {
+ dev_err(data->dev, "Fail to add opp entries.\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int exynos4_busfreq_pm_notifier_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct busfreq_data *data = container_of(this, struct busfreq_data,
+ pm_notifier);
+ struct opp *opp;
+ unsigned long maxfreq = ULONG_MAX;
+ int err = 0;
+
+ switch (event) {
+ case PM_SUSPEND_PREPARE:
+ /* Set Fastest and Deactivate DVFS */
+ mutex_lock(&data->lock);
+
+ data->disabled = true;
+
+ opp = opp_find_freq_floor(data->dev, &maxfreq);
+
+ err = exynos4_bus_setvolt(data, opp, data->curr_opp);
+ if (err)
+ goto unlock;
+
+ switch (data->type) {
+ case TYPE_BUSF_EXYNOS4210:
+ err = exynos4210_set_busclk(data, opp);
+ break;
+ case TYPE_BUSF_EXYNOS4x12:
+ err = exynos4x12_set_busclk(data, opp);
+ break;
+ default:
+ err = -EINVAL;
+ }
+ if (err)
+ goto unlock;
+
+ data->curr_opp = opp;
+unlock:
+ mutex_unlock(&data->lock);
+ if (err)
+ return err;
+ return NOTIFY_OK;
+ case PM_POST_RESTORE:
+ case PM_POST_SUSPEND:
+ /* Reactivate */
+ mutex_lock(&data->lock);
+ data->disabled = false;
+ mutex_unlock(&data->lock);
+ return NOTIFY_OK;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static __devinit int exynos4_busfreq_probe(struct platform_device *pdev)
+{
+ struct busfreq_data *data;
+ struct opp *opp;
+ struct device *dev = &pdev->dev;
+ int err = 0;
+
+ data = kzalloc(sizeof(struct busfreq_data), GFP_KERNEL);
+ if (data == NULL) {
+ dev_err(dev, "Cannot allocate memory.\n");
+ return -ENOMEM;
+ }
+
+ data->type = pdev->id_entry->driver_data;
+ data->dmc[0].hw_base = S5P_VA_DMC0;
+ data->dmc[1].hw_base = S5P_VA_DMC1;
+ data->pm_notifier.notifier_call = exynos4_busfreq_pm_notifier_event;
+ data->dev = dev;
+ mutex_init(&data->lock);
+
+ switch (data->type) {
+ case TYPE_BUSF_EXYNOS4210:
+ err = exynos4210_init_tables(data);
+ break;
+ case TYPE_BUSF_EXYNOS4x12:
+ err = exynos4x12_init_tables(data);
+ break;
+ default:
+ dev_err(dev, "Cannot determine the device id %d\n", data->type);
+ err = -EINVAL;
+ }
+ if (err)
+ goto err_regulator;
+
+ data->vdd_int = regulator_get(dev, "vdd_int");
+ if (IS_ERR(data->vdd_int)) {
+ dev_err(dev, "Cannot get the regulator \"vdd_int\"\n");
+ err = PTR_ERR(data->vdd_int);
+ goto err_regulator;
+ }
+ if (data->type == TYPE_BUSF_EXYNOS4x12) {
+ data->vdd_mif = regulator_get(dev, "vdd_mif");
+ if (IS_ERR(data->vdd_mif)) {
+ dev_err(dev, "Cannot get the regulator \"vdd_mif\"\n");
+ err = PTR_ERR(data->vdd_mif);
+ regulator_put(data->vdd_int);
+ goto err_regulator;
+
+ }
+ }
+
+ opp = opp_find_freq_floor(dev, &exynos4_devfreq_profile.initial_freq);
+ if (IS_ERR(opp)) {
+ dev_err(dev, "Invalid initial frequency %lu kHz.\n",
+ exynos4_devfreq_profile.initial_freq);
+ err = PTR_ERR(opp);
+ goto err_opp_add;
+ }
+ data->curr_opp = opp;
+
+ platform_set_drvdata(pdev, data);
+
+ busfreq_mon_reset(data);
+
+ data->devfreq = devfreq_add_device(dev, &exynos4_devfreq_profile,
+ &devfreq_simple_ondemand, NULL);
+ if (IS_ERR(data->devfreq)) {
+ err = PTR_ERR(data->devfreq);
+ goto err_opp_add;
+ }
+
+ devfreq_register_opp_notifier(dev, data->devfreq);
+
+ err = register_pm_notifier(&data->pm_notifier);
+ if (err) {
+ dev_err(dev, "Failed to setup pm notifier\n");
+ goto err_devfreq_add;
+ }
+
+ return 0;
+err_devfreq_add:
+ devfreq_remove_device(data->devfreq);
+err_opp_add:
+ if (data->vdd_mif)
+ regulator_put(data->vdd_mif);
+ regulator_put(data->vdd_int);
+err_regulator:
+ kfree(data);
+ return err;
+}
+
+static __devexit int exynos4_busfreq_remove(struct platform_device *pdev)
+{
+ struct busfreq_data *data = platform_get_drvdata(pdev);
+
+ unregister_pm_notifier(&data->pm_notifier);
+ devfreq_remove_device(data->devfreq);
+ regulator_put(data->vdd_int);
+ if (data->vdd_mif)
+ regulator_put(data->vdd_mif);
+ kfree(data);
+
+ return 0;
+}
+
+static int exynos4_busfreq_resume(struct device *dev)
+{
+ struct platform_device *pdev = container_of(dev, struct platform_device,
+ dev);
+ struct busfreq_data *data = platform_get_drvdata(pdev);
+
+ busfreq_mon_reset(data);
+ return 0;
+}
+
+static const struct dev_pm_ops exynos4_busfreq_pm = {
+ .resume = exynos4_busfreq_resume,
+};
+
+static const struct platform_device_id exynos4_busfreq_id[] = {
+ { "exynos4210-busfreq", TYPE_BUSF_EXYNOS4210 },
+ { "exynos4412-busfreq", TYPE_BUSF_EXYNOS4x12 },
+ { "exynos4212-busfreq", TYPE_BUSF_EXYNOS4x12 },
+ { },
+};
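
Nothing in this file instantiates the device; a board file is expected to register a platform device whose name matches one of the id_table entries above, for instance (hypothetical board code, not part of this patch):

    static struct platform_device exynos4_busfreq_device = {
            .name = "exynos4210-busfreq", /* selects TYPE_BUSF_EXYNOS4210 */
            .id = -1,
    };

    static int __init board_busfreq_init(void)
    {
            return platform_device_register(&exynos4_busfreq_device);
    }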
+
+static struct platform_driver exynos4_busfreq_driver = {
+ .probe = exynos4_busfreq_probe,
+ .remove = __devexit_p(exynos4_busfreq_remove),
+ .id_table = exynos4_busfreq_id,
+ .driver = {
+ .name = "exynos4-busfreq",
+ .owner = THIS_MODULE,
+ .pm = &exynos4_busfreq_pm,
+ },
+};
+
+static int __init exynos4_busfreq_init(void)
+{
+ return platform_driver_register(&exynos4_busfreq_driver);
+}
+late_initcall(exynos4_busfreq_init);
+
+static void __exit exynos4_busfreq_exit(void)
+{
+ platform_driver_unregister(&exynos4_busfreq_driver);
+}
+module_exit(exynos4_busfreq_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("EXYNOS4 busfreq driver with devfreq framework");
+MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
+MODULE_ALIAS("exynos4-busfreq");
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index eb1d8641cf5c..2b8661b54eaf 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -214,9 +214,18 @@ static unsigned int dmatest_verify(u8 **bufs, unsigned int start,
return error_count;
}
-static void dmatest_callback(void *completion)
+/* poor man's completion - we want to use wait_event_freezable() on it */
+struct dmatest_done {
+ bool done;
+ wait_queue_head_t *wait;
+};
+
+static void dmatest_callback(void *arg)
{
- complete(completion);
+ struct dmatest_done *done = arg;
+
+ done->done = true;
+ wake_up_all(done->wait);
}
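
A struct completion cannot be waited on with the freezable helpers, so the patch open-codes one: a boolean flag plus an on-stack wait queue that wait_event_freezable_timeout() can sleep on, letting the freezer take the dmatest thread mid-wait. The generic shape (a sketch):

    DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
    bool done = false;

    /* completion side, e.g. a DMA callback: */
    done = true;
    wake_up_all(&wq);

    /* waiting side: wakes on the flag, returns 0 on timeout, and may
     * transparently freeze and thaw while sleeping: */
    wait_event_freezable_timeout(wq, done, msecs_to_jiffies(3000));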
/*
@@ -235,7 +244,9 @@ static void dmatest_callback(void *completion)
*/
static int dmatest_func(void *data)
{
+ DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_wait);
struct dmatest_thread *thread = data;
+ struct dmatest_done done = { .wait = &done_wait };
struct dma_chan *chan;
const char *thread_name;
unsigned int src_off, dst_off, len;
@@ -252,7 +263,7 @@ static int dmatest_func(void *data)
int i;
thread_name = current->comm;
- set_freezable_with_signal();
+ set_freezable();
ret = -ENOMEM;
@@ -306,9 +317,6 @@ static int dmatest_func(void *data)
struct dma_async_tx_descriptor *tx = NULL;
dma_addr_t dma_srcs[src_cnt];
dma_addr_t dma_dsts[dst_cnt];
- struct completion cmp;
- unsigned long start, tmo, end = 0 /* compiler... */;
- bool reload = true;
u8 align = 0;
total_tests++;
@@ -391,9 +399,9 @@ static int dmatest_func(void *data)
continue;
}
- init_completion(&cmp);
+ done.done = false;
tx->callback = dmatest_callback;
- tx->callback_param = &cmp;
+ tx->callback_param = &done;
cookie = tx->tx_submit(tx);
if (dma_submit_error(cookie)) {
@@ -407,20 +415,20 @@ static int dmatest_func(void *data)
}
dma_async_issue_pending(chan);
- do {
- start = jiffies;
- if (reload)
- end = start + msecs_to_jiffies(timeout);
- else if (end <= start)
- end = start + 1;
- tmo = wait_for_completion_interruptible_timeout(&cmp,
- end - start);
- reload = try_to_freeze();
- } while (tmo == -ERESTARTSYS);
+ wait_event_freezable_timeout(done_wait, done.done,
+ msecs_to_jiffies(timeout));
status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
- if (tmo == 0) {
+ if (!done.done) {
+ /*
+ * We're leaving the timed out dma operation with
+ * dangling pointer to done_wait. To make this
+ * correct, we'll need to allocate wait_done for
+ * each test iteration and perform "who's gonna
+ * free it this time?" dancing. For now, just
+ * leave it dangling.
+ */
pr_warning("%s: #%u: test timed out\n",
thread_name, total_tests - 1);
failed_tests++;
diff --git a/drivers/input/touchscreen/st1232.c b/drivers/input/touchscreen/st1232.c
index 4ab371358b33..8825fe37d433 100644
--- a/drivers/input/touchscreen/st1232.c
+++ b/drivers/input/touchscreen/st1232.c
@@ -23,6 +23,7 @@
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/module.h>
+#include <linux/pm_qos.h>
#include <linux/slab.h>
#include <linux/types.h>
@@ -46,6 +47,7 @@ struct st1232_ts_data {
struct i2c_client *client;
struct input_dev *input_dev;
struct st1232_ts_finger finger[MAX_FINGERS];
+ struct dev_pm_qos_request low_latency_req;
};
static int st1232_ts_read_data(struct st1232_ts_data *ts)
@@ -118,8 +120,17 @@ static irqreturn_t st1232_ts_irq_handler(int irq, void *dev_id)
}
/* SYN_MT_REPORT only if no contact */
- if (!count)
+ if (!count) {
input_mt_sync(input_dev);
+ if (ts->low_latency_req.dev) {
+ dev_pm_qos_remove_request(&ts->low_latency_req);
+ ts->low_latency_req.dev = NULL;
+ }
+ } else if (!ts->low_latency_req.dev) {
+ /* First contact, request 100 us latency. */
+ dev_pm_qos_add_ancestor_request(&ts->client->dev,
+ &ts->low_latency_req, 100);
+ }
/* SYN_REPORT */
input_sync(input_dev);
diff --git a/drivers/mfd/twl6030-irq.c b/drivers/mfd/twl6030-irq.c
index 3eee45ffb096..c6b456ad7342 100644
--- a/drivers/mfd/twl6030-irq.c
+++ b/drivers/mfd/twl6030-irq.c
@@ -138,8 +138,6 @@ static int twl6030_irq_thread(void *data)
static const unsigned max_i2c_errors = 100;
int ret;
- current->flags |= PF_NOFREEZE;
-
while (!kthread_should_stop()) {
int i;
union {
diff --git a/drivers/net/irda/stir4200.c b/drivers/net/irda/stir4200.c
index 212868eb6f5f..e6e59a078ef4 100644
--- a/drivers/net/irda/stir4200.c
+++ b/drivers/net/irda/stir4200.c
@@ -750,7 +750,7 @@ static int stir_transmit_thread(void *arg)
write_reg(stir, REG_CTRL1, CTRL1_TXPWD|CTRL1_RXPWD);
- refrigerator();
+ try_to_freeze();
if (change_speed(stir, stir->speed))
break;
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 455e1522253e..62533c105da4 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -2456,8 +2456,9 @@ static int hotkey_kthread(void *data)
u32 poll_mask, event_mask;
unsigned int si, so;
unsigned long t;
- unsigned int change_detector, must_reset;
+ unsigned int change_detector;
unsigned int poll_freq;
+ bool was_frozen;
mutex_lock(&hotkey_thread_mutex);
@@ -2488,14 +2489,14 @@ static int hotkey_kthread(void *data)
t = 100; /* should never happen... */
}
t = msleep_interruptible(t);
- if (unlikely(kthread_should_stop()))
+ if (unlikely(kthread_freezable_should_stop(&was_frozen)))
break;
- must_reset = try_to_freeze();
- if (t > 0 && !must_reset)
+
+ if (t > 0 && !was_frozen)
continue;
mutex_lock(&hotkey_thread_data_mutex);
- if (must_reset || hotkey_config_change != change_detector) {
+ if (was_frozen || hotkey_config_change != change_detector) {
/* forget old state on thaw or config change */
si = so;
t = 0;
@@ -2528,10 +2529,6 @@ exit:
static void hotkey_poll_stop_sync(void)
{
if (tpacpi_hotkey_task) {
- if (frozen(tpacpi_hotkey_task) ||
- freezing(tpacpi_hotkey_task))
- thaw_process(tpacpi_hotkey_task);
-
kthread_stop(tpacpi_hotkey_task);
tpacpi_hotkey_task = NULL;
mutex_lock(&hotkey_thread_mutex);
diff --git a/drivers/sh/intc/core.c b/drivers/sh/intc/core.c
index e85512dd9c72..e53e449b4eca 100644
--- a/drivers/sh/intc/core.c
+++ b/drivers/sh/intc/core.c
@@ -354,6 +354,8 @@ int __init register_intc_controller(struct intc_desc *desc)
if (desc->force_enable)
intc_enable_disable_enum(desc, d, desc->force_enable, 1);
+ d->skip_suspend = desc->skip_syscore_suspend;
+
nr_intc_controllers++;
return 0;
@@ -386,6 +388,9 @@ static int intc_suspend(void)
list_for_each_entry(d, &intc_list, list) {
int irq;
+ if (d->skip_suspend)
+ continue;
+
/* enable wakeup irqs belonging to this intc controller */
for_each_active_irq(irq) {
struct irq_data *data;
@@ -409,6 +414,9 @@ static void intc_resume(void)
list_for_each_entry(d, &intc_list, list) {
int irq;
+ if (d->skip_suspend)
+ continue;
+
for_each_active_irq(irq) {
struct irq_data *data;
struct irq_chip *chip;
diff --git a/drivers/sh/intc/internals.h b/drivers/sh/intc/internals.h
index 1c2722e5af3f..b0e9155ff739 100644
--- a/drivers/sh/intc/internals.h
+++ b/drivers/sh/intc/internals.h
@@ -67,6 +67,7 @@ struct intc_desc_int {
struct intc_window *window;
unsigned int nr_windows;
struct irq_chip chip;
+ bool skip_suspend;
};
diff --git a/drivers/staging/rts_pstor/rtsx.c b/drivers/staging/rts_pstor/rtsx.c
index 115635f95024..a7feb3e328a0 100644
--- a/drivers/staging/rts_pstor/rtsx.c
+++ b/drivers/staging/rts_pstor/rtsx.c
@@ -466,8 +466,6 @@ static int rtsx_control_thread(void *__dev)
struct rtsx_chip *chip = dev->chip;
struct Scsi_Host *host = rtsx_to_host(dev);
- current->flags |= PF_NOFREEZE;
-
for (;;) {
if (wait_for_completion_interruptible(&dev->cmnd_ready))
break;
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index c325e69415a1..aa84b3d77274 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -831,7 +831,8 @@ static int usb_stor_scan_thread(void * __us)
dev_dbg(dev, "device found\n");
- set_freezable_with_signal();
+ set_freezable();
+
/*
* Wait for the timeout to expire or for a disconnect
*
@@ -839,16 +840,16 @@ static int usb_stor_scan_thread(void * __us)
* fail to freeze, but we can't be non-freezable either. Nor can
* khubd freeze while waiting for scanning to complete as it may
* hold the device lock, causing a hang when suspending devices.
- * So we request a fake signal when freezing and use
- * interruptible sleep to kick us out of our wait early when
- * freezing happens.
+ * So instead of using wait_event_freezable(), explicitly test
+ * for (DONT_SCAN || freezing) in interruptible wait and proceed
+ * if any of DONT_SCAN, freezing or timeout has happened.
*/
if (delay_use > 0) {
dev_dbg(dev, "waiting for device to settle "
"before scanning\n");
wait_event_interruptible_timeout(us->delay_wait,
- test_bit(US_FLIDX_DONT_SCAN, &us->dflags),
- delay_use * HZ);
+ test_bit(US_FLIDX_DONT_SCAN, &us->dflags) ||
+ freezing(current), delay_use * HZ);
}
/* If the device is still connected, perform the scanning */
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index 0b394580d860..0cc20b35c1c4 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -334,7 +334,7 @@ again:
if (freezing(current)) {
worker->working = 0;
spin_unlock_irq(&worker->lock);
- refrigerator();
+ try_to_freeze();
} else {
spin_unlock_irq(&worker->lock);
if (!kthread_should_stop()) {
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index f44b3928dc2d..f99a099a7747 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1579,9 +1579,7 @@ static int cleaner_kthread(void *arg)
btrfs_run_defrag_inodes(root->fs_info);
}
- if (freezing(current)) {
- refrigerator();
- } else {
+ if (!try_to_freeze()) {
set_current_state(TASK_INTERRUPTIBLE);
if (!kthread_should_stop())
schedule();
@@ -1635,9 +1633,7 @@ sleep:
wake_up_process(root->fs_info->cleaner_kthread);
mutex_unlock(&root->fs_info->transaction_kthread_mutex);
- if (freezing(current)) {
- refrigerator();
- } else {
+ if (!try_to_freeze()) {
set_current_state(TASK_INTERRUPTIBLE);
if (!kthread_should_stop() &&
!btrfs_transaction_blocked(root->fs_info))
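All of these filesystem threads move from the open-coded freezing(current)/refrigerator() pair to try_to_freeze(), which performs the check itself and returns true only if the task actually froze. The resulting loop shape, sketched with hypothetical work:

#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/sched.h>

static int fs_maintenance_thread(void *arg)
{
	do {
		/* ... do one round of periodic work ... */

		/* Sleep only if no freeze happened in try_to_freeze(). */
		if (!try_to_freeze()) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (!kthread_should_stop())
				schedule();
			__set_current_state(TASK_RUNNING);
		}
	} while (!kthread_should_stop());

	return 0;
}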
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 6733b3736b3b..64e2529ae9bb 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -2882,8 +2882,7 @@ cont_thread:
}
mutex_unlock(&eli->li_list_mtx);
- if (freezing(current))
- refrigerator();
+ try_to_freeze();
cur = jiffies;
if ((time_after_eq(cur, next_wakeup)) ||
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 80a4574028f1..e2951506434d 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -936,7 +936,7 @@ int bdi_writeback_thread(void *data)
trace_writeback_thread_start(bdi);
- while (!kthread_should_stop()) {
+ while (!kthread_freezable_should_stop(NULL)) {
/*
* Remove own delayed wake-up timer, since we are already awake
* and we'll take care of the periodic write-back.
@@ -966,8 +966,6 @@ int bdi_writeback_thread(void *data)
*/
schedule();
}
-
- try_to_freeze();
}
/* Flush any work that raced with us exiting */
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index 2731e657cf7f..756fae9eaf8f 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -951,8 +951,8 @@ int gfs2_logd(void *data)
wake_up(&sdp->sd_log_waitq);
t = gfs2_tune_get(sdp, gt_logd_secs) * HZ;
- if (freezing(current))
- refrigerator();
+
+ try_to_freeze();
do {
prepare_to_wait(&sdp->sd_logd_waitq, &wait,
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index 98a01db1f6dc..a45b21b03915 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -1417,8 +1417,8 @@ int gfs2_quotad(void *data)
/* Check for & recover partially truncated inodes */
quotad_check_trunc_list(sdp);
- if (freezing(current))
- refrigerator();
+ try_to_freeze();
+
t = min(quotad_timeo, statfs_timeo);
prepare_to_wait(&sdp->sd_quota_wait, &wait, TASK_INTERRUPTIBLE);
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c
index fea8dd661d2b..a96cff0c5f1d 100644
--- a/fs/jbd/journal.c
+++ b/fs/jbd/journal.c
@@ -166,7 +166,7 @@ loop:
*/
jbd_debug(1, "Now suspending kjournald\n");
spin_unlock(&journal->j_state_lock);
- refrigerator();
+ try_to_freeze();
spin_lock(&journal->j_state_lock);
} else {
/*
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index 0fa0123151d3..c0a5f9f1b127 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -173,7 +173,7 @@ loop:
*/
jbd_debug(1, "Now suspending kjournald2\n");
write_unlock(&journal->j_state_lock);
- refrigerator();
+ try_to_freeze();
write_lock(&journal->j_state_lock);
} else {
/*
diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c
index cc5f811ed383..2eb952c41a69 100644
--- a/fs/jfs/jfs_logmgr.c
+++ b/fs/jfs/jfs_logmgr.c
@@ -2349,7 +2349,7 @@ int jfsIOWait(void *arg)
if (freezing(current)) {
spin_unlock_irq(&log_redrive_lock);
- refrigerator();
+ try_to_freeze();
} else {
set_current_state(TASK_INTERRUPTIBLE);
spin_unlock_irq(&log_redrive_lock);
diff --git a/fs/jfs/jfs_txnmgr.c b/fs/jfs/jfs_txnmgr.c
index af9606057dde..bb8b661bcc50 100644
--- a/fs/jfs/jfs_txnmgr.c
+++ b/fs/jfs/jfs_txnmgr.c
@@ -2800,7 +2800,7 @@ int jfs_lazycommit(void *arg)
if (freezing(current)) {
LAZY_UNLOCK(flags);
- refrigerator();
+ try_to_freeze();
} else {
DECLARE_WAITQUEUE(wq, current);
@@ -2994,7 +2994,7 @@ int jfs_sync(void *arg)
if (freezing(current)) {
TXN_UNLOCK();
- refrigerator();
+ try_to_freeze();
} else {
set_current_state(TASK_INTERRUPTIBLE);
TXN_UNLOCK();
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 6f00086e340f..81db25e92e10 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -38,6 +38,7 @@
#include <linux/nfs_xdr.h>
#include <linux/slab.h>
#include <linux/compat.h>
+#include <linux/freezer.h>
#include <asm/system.h>
#include <asm/uaccess.h>
@@ -77,7 +78,7 @@ int nfs_wait_bit_killable(void *word)
{
if (fatal_signal_pending(current))
return -ERESTARTSYS;
- schedule();
+ freezable_schedule();
return 0;
}
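freezable_schedule() wraps schedule() in freezer_do_not_count()/freezer_count(), so a task sleeping killably on RPC completion no longer blocks the freezer. A sketch of a wait_on_bit() action built the same way as the hunk above; the name is hypothetical:

#include <linux/freezer.h>
#include <linux/sched.h>

/* wait_on_bit() action: sleep killably without holding up the freezer */
static int my_wait_bit_killable(void *word)
{
	if (fatal_signal_pending(current))
		return -ERESTARTSYS;
	freezable_schedule();	/* freezer skips us while we sleep */
	return 0;
}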
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index d4bc9ed91748..91943953a370 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -17,6 +17,7 @@
#include <linux/nfs_page.h>
#include <linux/lockd/bind.h>
#include <linux/nfs_mount.h>
+#include <linux/freezer.h>
#include "iostat.h"
#include "internal.h"
@@ -32,7 +33,7 @@ nfs3_rpc_wrapper(struct rpc_clnt *clnt, struct rpc_message *msg, int flags)
res = rpc_call_sync(clnt, msg, flags);
if (res != -EJUKEBOX && res != -EKEYEXPIRED)
break;
- schedule_timeout_killable(NFS_JUKEBOX_RETRY_TIME);
+ freezable_schedule_timeout_killable(NFS_JUKEBOX_RETRY_TIME);
res = -ERESTARTSYS;
} while (!fatal_signal_pending(current));
return res;
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index d9f4d78c3413..dcda0ba7af60 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -55,6 +55,7 @@
#include <linux/sunrpc/bc_xprt.h>
#include <linux/xattr.h>
#include <linux/utsname.h>
+#include <linux/freezer.h>
#include "nfs4_fs.h"
#include "delegation.h"
@@ -243,7 +244,7 @@ static int nfs4_delay(struct rpc_clnt *clnt, long *timeout)
*timeout = NFS4_POLL_RETRY_MIN;
if (*timeout > NFS4_POLL_RETRY_MAX)
*timeout = NFS4_POLL_RETRY_MAX;
- schedule_timeout_killable(*timeout);
+ freezable_schedule_timeout_killable(*timeout);
if (fatal_signal_pending(current))
res = -ERESTARTSYS;
*timeout <<= 1;
@@ -3958,7 +3959,7 @@ int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4
static unsigned long
nfs4_set_lock_task_retry(unsigned long timeout)
{
- schedule_timeout_killable(timeout);
+ freezable_schedule_timeout_killable(timeout);
timeout <<= 1;
if (timeout > NFS4_LOCK_MAXTIMEOUT)
return NFS4_LOCK_MAXTIMEOUT;
diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c
index f48125da198a..0c672588fe5a 100644
--- a/fs/nfs/proc.c
+++ b/fs/nfs/proc.c
@@ -41,6 +41,7 @@
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/lockd/bind.h>
+#include <linux/freezer.h>
#include "internal.h"
#define NFSDBG_FACILITY NFSDBG_PROC
@@ -59,7 +60,7 @@ nfs_rpc_wrapper(struct rpc_clnt *clnt, struct rpc_message *msg, int flags)
res = rpc_call_sync(clnt, msg, flags);
if (res != -EKEYEXPIRED)
break;
- schedule_timeout_killable(NFS_JUKEBOX_RETRY_TIME);
+ freezable_schedule_timeout_killable(NFS_JUKEBOX_RETRY_TIME);
res = -ERESTARTSYS;
} while (!fatal_signal_pending(current));
return res;
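The NFS retry loops now back off with freezable_schedule_timeout_killable(), so a jukebox'd or expired-key server can no longer keep the machine from suspending. The shape of the wrapper, with hypothetical names and backoff period:

#include <linux/freezer.h>
#include <linux/sched.h>

#define MY_RETRY_TIME	(5 * HZ)	/* hypothetical backoff period */

static int my_rpc_wrapper(int (*call_once)(void *), void *arg)
{
	int res;

	do {
		res = call_once(arg);
		if (res != -EAGAIN)
			break;
		/* Killable sleep; the freezer counts us out meanwhile. */
		freezable_schedule_timeout_killable(MY_RETRY_TIME);
		res = -ERESTARTSYS;
	} while (!fatal_signal_pending(current));

	return res;
}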
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index bb24ab6c282f..0e72ad6f22aa 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -2470,7 +2470,7 @@ static int nilfs_segctor_thread(void *arg)
if (freezing(current)) {
spin_unlock(&sci->sc_state_lock);
- refrigerator();
+ try_to_freeze();
spin_lock(&sci->sc_state_lock);
} else {
DEFINE_WAIT(wait);
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 2277bcae395f..ce6249dae90c 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -1702,7 +1702,7 @@ xfsbufd(
struct blk_plug plug;
if (unlikely(freezing(current)))
- refrigerator();
+ try_to_freeze();
/* sleep for a long time if there is nothing to do. */
if (list_empty(&target->bt_delwri_queue))
diff --git a/include/linux/freezer.h b/include/linux/freezer.h
index a5386e3ee756..0ab54e16a91f 100644
--- a/include/linux/freezer.h
+++ b/include/linux/freezer.h
@@ -5,71 +5,58 @@
#include <linux/sched.h>
#include <linux/wait.h>
+#include <linux/atomic.h>
#ifdef CONFIG_FREEZER
+extern atomic_t system_freezing_cnt; /* nr of freezing conds in effect */
+extern bool pm_freezing; /* PM freezing in effect */
+extern bool pm_nosig_freezing; /* PM nosig freezing in effect */
+
/*
* Check if a process has been frozen
*/
-static inline int frozen(struct task_struct *p)
+static inline bool frozen(struct task_struct *p)
{
return p->flags & PF_FROZEN;
}
-/*
- * Check if there is a request to freeze a process
- */
-static inline int freezing(struct task_struct *p)
-{
- return test_tsk_thread_flag(p, TIF_FREEZE);
-}
-
-/*
- * Request that a process be frozen
- */
-static inline void set_freeze_flag(struct task_struct *p)
-{
- set_tsk_thread_flag(p, TIF_FREEZE);
-}
+extern bool freezing_slow_path(struct task_struct *p);
/*
- * Sometimes we may need to cancel the previous 'freeze' request
+ * Check if there is a request to freeze a process
*/
-static inline void clear_freeze_flag(struct task_struct *p)
-{
- clear_tsk_thread_flag(p, TIF_FREEZE);
-}
-
-static inline bool should_send_signal(struct task_struct *p)
+static inline bool freezing(struct task_struct *p)
{
- return !(p->flags & PF_FREEZER_NOSIG);
+ if (likely(!atomic_read(&system_freezing_cnt)))
+ return false;
+ return freezing_slow_path(p);
}
/* Takes and releases task alloc lock using task_lock() */
-extern int thaw_process(struct task_struct *p);
+extern void __thaw_task(struct task_struct *t);
-extern void refrigerator(void);
+extern bool __refrigerator(bool check_kthr_stop);
extern int freeze_processes(void);
extern int freeze_kernel_threads(void);
extern void thaw_processes(void);
-static inline int try_to_freeze(void)
+static inline bool try_to_freeze(void)
{
- if (freezing(current)) {
- refrigerator();
- return 1;
- } else
- return 0;
+ might_sleep();
+ if (likely(!freezing(current)))
+ return false;
+ return __refrigerator(false);
}
-extern bool freeze_task(struct task_struct *p, bool sig_only);
-extern void cancel_freezing(struct task_struct *p);
+extern bool freeze_task(struct task_struct *p);
+extern bool set_freezable(void);
#ifdef CONFIG_CGROUP_FREEZER
-extern int cgroup_freezing_or_frozen(struct task_struct *task);
+extern bool cgroup_freezing(struct task_struct *task);
#else /* !CONFIG_CGROUP_FREEZER */
-static inline int cgroup_freezing_or_frozen(struct task_struct *task)
+static inline bool cgroup_freezing(struct task_struct *task)
{
- return 0;
+ return false;
}
#endif /* !CONFIG_CGROUP_FREEZER */
@@ -80,33 +67,27 @@ static inline int cgroup_freezing_or_frozen(struct task_struct *task)
* appropriately in case the child has exited before the freezing of tasks is
* complete. However, we don't want kernel threads to be frozen in unexpected
* places, so we allow them to block freeze_processes() instead or to set
- * PF_NOFREEZE if needed and PF_FREEZER_SKIP is only set for userland vfork
- * parents. Fortunately, in the ____call_usermodehelper() case the parent won't
- * really block freeze_processes(), since ____call_usermodehelper() (the child)
- * does a little before exec/exit and it can't be frozen before waking up the
- * parent.
+ * PF_NOFREEZE if needed. Fortunately, in the ____call_usermodehelper() case the
+ * parent won't really block freeze_processes(), since ____call_usermodehelper()
+ * (the child) does a little before exec/exit and it can't be frozen before
+ * waking up the parent.
*/
-/*
- * If the current task is a user space one, tell the freezer not to count it as
- * freezable.
- */
+
+/* Tell the freezer not to count the current task as freezable. */
static inline void freezer_do_not_count(void)
{
- if (current->mm)
- current->flags |= PF_FREEZER_SKIP;
+ current->flags |= PF_FREEZER_SKIP;
}
/*
- * If the current task is a user space one, tell the freezer to count it as
- * freezable again and try to freeze it.
+ * Tell the freezer to count the current task as freezable again and try to
+ * freeze it.
*/
static inline void freezer_count(void)
{
- if (current->mm) {
- current->flags &= ~PF_FREEZER_SKIP;
- try_to_freeze();
- }
+ current->flags &= ~PF_FREEZER_SKIP;
+ try_to_freeze();
}
/*
@@ -118,21 +99,29 @@ static inline int freezer_should_skip(struct task_struct *p)
}
/*
- * Tell the freezer that the current task should be frozen by it
+ * These macros are intended to be used whenever you want to allow a task
+ * that's sleeping in TASK_UNINTERRUPTIBLE or TASK_KILLABLE state to be
+ * frozen. Note that neither returns any clear indication of whether a
+ * freeze event happened while in this function.
*/
-static inline void set_freezable(void)
-{
- current->flags &= ~PF_NOFREEZE;
-}
-/*
- * Tell the freezer that the current task should be frozen by it and that it
- * should send a fake signal to the task to freeze it.
- */
-static inline void set_freezable_with_signal(void)
-{
- current->flags &= ~(PF_NOFREEZE | PF_FREEZER_NOSIG);
-}
+/* Like schedule(), but should not block the freezer. */
+#define freezable_schedule() \
+({ \
+ freezer_do_not_count(); \
+ schedule(); \
+ freezer_count(); \
+})
+
+/* Like schedule_timeout_killable(), but should not block the freezer. */
+#define freezable_schedule_timeout_killable(timeout) \
+({ \
+ long __retval; \
+ freezer_do_not_count(); \
+ __retval = schedule_timeout_killable(timeout); \
+ freezer_count(); \
+ __retval; \
+})
/*
* Freezer-friendly wrappers around wait_event_interruptible(),
@@ -152,47 +141,51 @@ static inline void set_freezable_with_signal(void)
#define wait_event_freezable(wq, condition) \
({ \
int __retval; \
- do { \
+ for (;;) { \
__retval = wait_event_interruptible(wq, \
(condition) || freezing(current)); \
- if (__retval && !freezing(current)) \
+ if (__retval || (condition)) \
break; \
- else if (!(condition)) \
- __retval = -ERESTARTSYS; \
- } while (try_to_freeze()); \
+ try_to_freeze(); \
+ } \
__retval; \
})
-
#define wait_event_freezable_timeout(wq, condition, timeout) \
({ \
long __retval = timeout; \
- do { \
+ for (;;) { \
__retval = wait_event_interruptible_timeout(wq, \
(condition) || freezing(current), \
__retval); \
- } while (try_to_freeze()); \
+ if (__retval <= 0 || (condition)) \
+ break; \
+ try_to_freeze(); \
+ } \
__retval; \
})
+
#else /* !CONFIG_FREEZER */
-static inline int frozen(struct task_struct *p) { return 0; }
-static inline int freezing(struct task_struct *p) { return 0; }
-static inline void set_freeze_flag(struct task_struct *p) {}
-static inline void clear_freeze_flag(struct task_struct *p) {}
-static inline int thaw_process(struct task_struct *p) { return 1; }
+static inline bool frozen(struct task_struct *p) { return false; }
+static inline bool freezing(struct task_struct *p) { return false; }
+static inline void __thaw_task(struct task_struct *t) {}
-static inline void refrigerator(void) {}
+static inline bool __refrigerator(bool check_kthr_stop) { return false; }
static inline int freeze_processes(void) { return -ENOSYS; }
static inline int freeze_kernel_threads(void) { return -ENOSYS; }
static inline void thaw_processes(void) {}
-static inline int try_to_freeze(void) { return 0; }
+static inline bool try_to_freeze(void) { return false; }
static inline void freezer_do_not_count(void) {}
static inline void freezer_count(void) {}
static inline int freezer_should_skip(struct task_struct *p) { return 0; }
static inline void set_freezable(void) {}
-static inline void set_freezable_with_signal(void) {}
+
+#define freezable_schedule() schedule()
+
+#define freezable_schedule_timeout_killable(timeout) \
+ schedule_timeout_killable(timeout)
#define wait_event_freezable(wq, condition) \
wait_event_interruptible(wq, condition)
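With the reworked wait_event_freezable(), a freezable kthread sleeps on its condition and is frozen transparently; the macro retries after each thaw and only returns on a real wakeup or a signal. Minimal usage sketch, names hypothetical and locking omitted:

#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(my_wq);
static bool have_work;

static int my_worker(void *unused)
{
	set_freezable();	/* clear PF_NOFREEZE inherited from kthreadd */

	while (!kthread_should_stop()) {
		/* May freeze in here; returns -ERESTARTSYS on signal. */
		if (wait_event_freezable(my_wq,
					 have_work || kthread_should_stop()))
			continue;
		have_work = false;
		/* ... process the queued work ... */
	}

	return 0;
}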
diff --git a/include/linux/kmod.h b/include/linux/kmod.h
index b16f65390734..722f477c4ef7 100644
--- a/include/linux/kmod.h
+++ b/include/linux/kmod.h
@@ -117,5 +117,7 @@ extern void usermodehelper_init(void);
extern int usermodehelper_disable(void);
extern void usermodehelper_enable(void);
extern bool usermodehelper_is_disabled(void);
+extern void read_lock_usermodehelper(void);
+extern void read_unlock_usermodehelper(void);
#endif /* __LINUX_KMOD_H__ */
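read_lock_usermodehelper()/read_unlock_usermodehelper() let a caller test usermodehelper_is_disabled() and start a helper without racing against the freezer flipping usermodehelper_disabled in between (the write side now takes umhelper_sem, as the kmod.c hunks below show). A hedged sketch; path and arguments are illustrative:

#include <linux/kmod.h>

static int run_helper_checked(char *path, char **argv, char **envp)
{
	int ret = -EBUSY;

	read_lock_usermodehelper();
	if (!usermodehelper_is_disabled())
		ret = call_usermodehelper(path, argv, envp, UMH_WAIT_PROC);
	read_unlock_usermodehelper();

	return ret;
}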
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index 5cac19b3a266..0714b24c0e45 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -35,6 +35,7 @@ struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
void kthread_bind(struct task_struct *k, unsigned int cpu);
int kthread_stop(struct task_struct *k);
int kthread_should_stop(void);
+bool kthread_freezable_should_stop(bool *was_frozen);
void *kthread_data(struct task_struct *k);
int kthreadd(void *unused);
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h
index 5622fa24e97b..60e9994ef405 100644
--- a/include/linux/platform_device.h
+++ b/include/linux/platform_device.h
@@ -256,62 +256,34 @@ static inline char *early_platform_driver_setup_func(void) \
}
#endif /* MODULE */
-#ifdef CONFIG_PM_SLEEP
-extern int platform_pm_prepare(struct device *dev);
-extern void platform_pm_complete(struct device *dev);
-#else
-#define platform_pm_prepare NULL
-#define platform_pm_complete NULL
-#endif
-
#ifdef CONFIG_SUSPEND
extern int platform_pm_suspend(struct device *dev);
-extern int platform_pm_suspend_noirq(struct device *dev);
extern int platform_pm_resume(struct device *dev);
-extern int platform_pm_resume_noirq(struct device *dev);
#else
#define platform_pm_suspend NULL
#define platform_pm_resume NULL
-#define platform_pm_suspend_noirq NULL
-#define platform_pm_resume_noirq NULL
#endif
#ifdef CONFIG_HIBERNATE_CALLBACKS
extern int platform_pm_freeze(struct device *dev);
-extern int platform_pm_freeze_noirq(struct device *dev);
extern int platform_pm_thaw(struct device *dev);
-extern int platform_pm_thaw_noirq(struct device *dev);
extern int platform_pm_poweroff(struct device *dev);
-extern int platform_pm_poweroff_noirq(struct device *dev);
extern int platform_pm_restore(struct device *dev);
-extern int platform_pm_restore_noirq(struct device *dev);
#else
#define platform_pm_freeze NULL
#define platform_pm_thaw NULL
#define platform_pm_poweroff NULL
#define platform_pm_restore NULL
-#define platform_pm_freeze_noirq NULL
-#define platform_pm_thaw_noirq NULL
-#define platform_pm_poweroff_noirq NULL
-#define platform_pm_restore_noirq NULL
#endif
#ifdef CONFIG_PM_SLEEP
#define USE_PLATFORM_PM_SLEEP_OPS \
- .prepare = platform_pm_prepare, \
- .complete = platform_pm_complete, \
.suspend = platform_pm_suspend, \
.resume = platform_pm_resume, \
.freeze = platform_pm_freeze, \
.thaw = platform_pm_thaw, \
.poweroff = platform_pm_poweroff, \
- .restore = platform_pm_restore, \
- .suspend_noirq = platform_pm_suspend_noirq, \
- .resume_noirq = platform_pm_resume_noirq, \
- .freeze_noirq = platform_pm_freeze_noirq, \
- .thaw_noirq = platform_pm_thaw_noirq, \
- .poweroff_noirq = platform_pm_poweroff_noirq, \
- .restore_noirq = platform_pm_restore_noirq,
+ .restore = platform_pm_restore,
#else
#define USE_PLATFORM_PM_SLEEP_OPS
#endif
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 3f3ed83a9aa5..e4982ac3fbbc 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -300,19 +300,6 @@ const struct dev_pm_ops name = { \
SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
}
-/*
- * Use this for subsystems (bus types, device types, device classes) that don't
- * need any special suspend/resume handling in addition to invoking the PM
- * callbacks provided by device drivers supporting both the system sleep PM and
- * runtime PM, make the pm member point to generic_subsys_pm_ops.
- */
-#ifdef CONFIG_PM
-extern struct dev_pm_ops generic_subsys_pm_ops;
-#define GENERIC_SUBSYS_PM_OPS (&generic_subsys_pm_ops)
-#else
-#define GENERIC_SUBSYS_PM_OPS NULL
-#endif
-
/**
* PM_EVENT_ messages
*
@@ -521,6 +508,8 @@ struct dev_pm_info {
unsigned long active_jiffies;
unsigned long suspended_jiffies;
unsigned long accounting_timestamp;
+ ktime_t suspend_time;
+ s64 max_time_suspended_ns;
#endif
struct pm_subsys_data *subsys_data; /* Owned by the subsystem. */
struct pm_qos_constraints *constraints;
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index 65633e5a2bc0..a03a0ad998b8 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -10,6 +10,7 @@
#define _LINUX_PM_DOMAIN_H
#include <linux/device.h>
+#include <linux/err.h>
enum gpd_status {
GPD_STATE_ACTIVE = 0, /* PM domain is active */
@@ -21,6 +22,23 @@ enum gpd_status {
struct dev_power_governor {
bool (*power_down_ok)(struct dev_pm_domain *domain);
+ bool (*stop_ok)(struct device *dev);
+};
+
+struct gpd_dev_ops {
+ int (*start)(struct device *dev);
+ int (*stop)(struct device *dev);
+ int (*save_state)(struct device *dev);
+ int (*restore_state)(struct device *dev);
+ int (*suspend)(struct device *dev);
+ int (*suspend_late)(struct device *dev);
+ int (*resume_early)(struct device *dev);
+ int (*resume)(struct device *dev);
+ int (*freeze)(struct device *dev);
+ int (*freeze_late)(struct device *dev);
+ int (*thaw_early)(struct device *dev);
+ int (*thaw)(struct device *dev);
+ bool (*active_wakeup)(struct device *dev);
};
struct generic_pm_domain {
@@ -32,6 +50,7 @@ struct generic_pm_domain {
struct mutex lock;
struct dev_power_governor *gov;
struct work_struct power_off_work;
+ char *name;
unsigned int in_progress; /* Number of devices being suspended now */
atomic_t sd_count; /* Number of subdomains with power "on" */
enum gpd_status status; /* Current state of the domain */
@@ -44,10 +63,13 @@ struct generic_pm_domain {
bool suspend_power_off; /* Power status before system suspend */
bool dev_irq_safe; /* Device callbacks are IRQ-safe */
int (*power_off)(struct generic_pm_domain *domain);
+ s64 power_off_latency_ns;
int (*power_on)(struct generic_pm_domain *domain);
- int (*start_device)(struct device *dev);
- int (*stop_device)(struct device *dev);
- bool (*active_wakeup)(struct device *dev);
+ s64 power_on_latency_ns;
+ struct gpd_dev_ops dev_ops;
+ s64 break_even_ns; /* Power break even for the entire domain. */
+ s64 max_off_time_ns; /* Maximum allowed "suspended" time. */
+ ktime_t power_off_time;
};
static inline struct generic_pm_domain *pd_to_genpd(struct dev_pm_domain *pd)
@@ -62,8 +84,18 @@ struct gpd_link {
struct list_head slave_node;
};
+struct gpd_timing_data {
+ s64 stop_latency_ns;
+ s64 start_latency_ns;
+ s64 save_state_latency_ns;
+ s64 restore_state_latency_ns;
+ s64 break_even_ns;
+};
+
struct generic_pm_domain_data {
struct pm_domain_data base;
+ struct gpd_dev_ops ops;
+ struct gpd_timing_data td;
bool need_restore;
};
@@ -73,18 +105,54 @@ static inline struct generic_pm_domain_data *to_gpd_data(struct pm_domain_data *
}
#ifdef CONFIG_PM_GENERIC_DOMAINS
-extern int pm_genpd_add_device(struct generic_pm_domain *genpd,
- struct device *dev);
+static inline struct generic_pm_domain_data *dev_gpd_data(struct device *dev)
+{
+ return to_gpd_data(dev->power.subsys_data->domain_data);
+}
+
+extern struct dev_power_governor simple_qos_governor;
+
+extern struct generic_pm_domain *dev_to_genpd(struct device *dev);
+extern int __pm_genpd_add_device(struct generic_pm_domain *genpd,
+ struct device *dev,
+ struct gpd_timing_data *td);
+
+static inline int pm_genpd_add_device(struct generic_pm_domain *genpd,
+ struct device *dev)
+{
+ return __pm_genpd_add_device(genpd, dev, NULL);
+}
+
extern int pm_genpd_remove_device(struct generic_pm_domain *genpd,
struct device *dev);
extern int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
struct generic_pm_domain *new_subdomain);
extern int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
struct generic_pm_domain *target);
+extern int pm_genpd_add_callbacks(struct device *dev,
+ struct gpd_dev_ops *ops,
+ struct gpd_timing_data *td);
+extern int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td);
extern void pm_genpd_init(struct generic_pm_domain *genpd,
struct dev_power_governor *gov, bool is_off);
+
extern int pm_genpd_poweron(struct generic_pm_domain *genpd);
+
+extern bool default_stop_ok(struct device *dev);
+
+extern struct dev_power_governor pm_domain_always_on_gov;
#else
+
+static inline struct generic_pm_domain *dev_to_genpd(struct device *dev)
+{
+ return ERR_PTR(-ENOSYS);
+}
+static inline int __pm_genpd_add_device(struct generic_pm_domain *genpd,
+ struct device *dev,
+ struct gpd_timing_data *td)
+{
+ return -ENOSYS;
+}
static inline int pm_genpd_add_device(struct generic_pm_domain *genpd,
struct device *dev)
{
@@ -105,14 +173,35 @@ static inline int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
{
return -ENOSYS;
}
-static inline void pm_genpd_init(struct generic_pm_domain *genpd,
- struct dev_power_governor *gov, bool is_off) {}
+static inline int pm_genpd_add_callbacks(struct device *dev,
+ struct gpd_dev_ops *ops,
+ struct gpd_timing_data *td)
+{
+ return -ENOSYS;
+}
+static inline int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td)
+{
+ return -ENOSYS;
+}
+static inline void pm_genpd_init(struct generic_pm_domain *genpd,
+				 struct dev_power_governor *gov, bool is_off)
+{
+}
static inline int pm_genpd_poweron(struct generic_pm_domain *genpd)
{
return -ENOSYS;
}
+static inline bool default_stop_ok(struct device *dev)
+{
+ return false;
+}
+#define pm_domain_always_on_gov NULL
#endif
+static inline int pm_genpd_remove_callbacks(struct device *dev)
+{
+ return __pm_genpd_remove_callbacks(dev, true);
+}
+
#ifdef CONFIG_PM_GENERIC_DOMAINS_RUNTIME
extern void genpd_queue_power_off_work(struct generic_pm_domain *genpd);
extern void pm_genpd_poweroff_unused(void);
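Platforms can now attach per-device latency data when registering a device with a domain; governors such as simple_qos_governor use it in stop_ok() to decide whether stopping the device still meets its QoS constraints. A registration sketch; all latency figures are hypothetical:

#include <linux/pm_domain.h>

static struct gpd_timing_data my_dev_td = {
	.stop_latency_ns	  = 50000,	/* 50 us, hypothetical */
	.start_latency_ns	  = 50000,
	.save_state_latency_ns	  = 100000,
	.restore_state_latency_ns = 100000,
	.break_even_ns		  = 200000,
};

static int my_platform_add_dev(struct generic_pm_domain *genpd,
			       struct device *dev)
{
	/* Passing a NULL td here is what pm_genpd_add_device() does. */
	return __pm_genpd_add_device(genpd, dev, &my_dev_td);
}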
diff --git a/include/linux/pm_qos.h b/include/linux/pm_qos.h
index 83b0ea302a80..e5bbcbaa6f57 100644
--- a/include/linux/pm_qos.h
+++ b/include/linux/pm_qos.h
@@ -78,6 +78,7 @@ int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier);
int pm_qos_request_active(struct pm_qos_request *req);
s32 pm_qos_read_value(struct pm_qos_constraints *c);
+s32 __dev_pm_qos_read_value(struct device *dev);
s32 dev_pm_qos_read_value(struct device *dev);
int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
s32 value);
@@ -91,6 +92,8 @@ int dev_pm_qos_add_global_notifier(struct notifier_block *notifier);
int dev_pm_qos_remove_global_notifier(struct notifier_block *notifier);
void dev_pm_qos_constraints_init(struct device *dev);
void dev_pm_qos_constraints_destroy(struct device *dev);
+int dev_pm_qos_add_ancestor_request(struct device *dev,
+ struct dev_pm_qos_request *req, s32 value);
#else
static inline int pm_qos_update_target(struct pm_qos_constraints *c,
struct plist_node *node,
@@ -119,6 +122,8 @@ static inline int pm_qos_request_active(struct pm_qos_request *req)
static inline s32 pm_qos_read_value(struct pm_qos_constraints *c)
{ return 0; }
+static inline s32 __dev_pm_qos_read_value(struct device *dev)
+ { return 0; }
static inline s32 dev_pm_qos_read_value(struct device *dev)
{ return 0; }
static inline int dev_pm_qos_add_request(struct device *dev,
@@ -150,6 +155,9 @@ static inline void dev_pm_qos_constraints_destroy(struct device *dev)
{
dev->power.power_state = PMSG_INVALID;
}
+static inline int dev_pm_qos_add_ancestor_request(struct device *dev,
+ struct dev_pm_qos_request *req, s32 value)
+ { return 0; }
#endif
#endif
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
index d3085e72a0ee..609daae7a014 100644
--- a/include/linux/pm_runtime.h
+++ b/include/linux/pm_runtime.h
@@ -45,6 +45,8 @@ extern void pm_runtime_irq_safe(struct device *dev);
extern void __pm_runtime_use_autosuspend(struct device *dev, bool use);
extern void pm_runtime_set_autosuspend_delay(struct device *dev, int delay);
extern unsigned long pm_runtime_autosuspend_expiration(struct device *dev);
+extern void pm_runtime_update_max_time_suspended(struct device *dev,
+ s64 delta_ns);
static inline bool pm_children_suspended(struct device *dev)
{
@@ -148,6 +150,9 @@ static inline void pm_runtime_set_autosuspend_delay(struct device *dev,
static inline unsigned long pm_runtime_autosuspend_expiration(
struct device *dev) { return 0; }
+static inline void pm_runtime_update_max_time_suspended(struct device *dev,
+ s64 delta_ns) {}
+
#endif /* !CONFIG_PM_RUNTIME */
static inline int pm_runtime_idle(struct device *dev)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index cf0eb342bcba..ad93e1ec8c65 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -220,7 +220,7 @@ extern char ___assert_task_state[1 - 2*!!(
((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
#define task_contributes_to_load(task) \
((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
- (task->flags & PF_FREEZING) == 0)
+ (task->flags & PF_FROZEN) == 0)
#define __set_task_state(tsk, state_value) \
do { (tsk)->state = (state_value); } while (0)
@@ -1787,7 +1787,6 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
#define PF_MEMALLOC 0x00000800 /* Allocating memory */
#define PF_NPROC_EXCEEDED 0x00001000 /* set_user noticed that RLIMIT_NPROC was exceeded */
#define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */
-#define PF_FREEZING 0x00004000 /* freeze in progress. do not account to load */
#define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */
#define PF_FROZEN 0x00010000 /* frozen for system suspend */
#define PF_FSTRANS 0x00020000 /* inside a filesystem transaction */
@@ -1803,7 +1802,6 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
#define PF_MEMPOLICY 0x10000000 /* Non-default NUMA mempolicy */
#define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */
#define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */
-#define PF_FREEZER_NOSIG 0x80000000 /* Freezer won't send signals to it */
/*
* Only the _current_ task can read/write to tsk->flags, but other
diff --git a/include/linux/sh_intc.h b/include/linux/sh_intc.h
index 5812fefbcedf..b160645f5599 100644
--- a/include/linux/sh_intc.h
+++ b/include/linux/sh_intc.h
@@ -95,6 +95,7 @@ struct intc_desc {
unsigned int num_resources;
intc_enum force_enable;
intc_enum force_disable;
+ bool skip_syscore_suspend;
struct intc_hw_desc hw;
};
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 57a692432f8a..95040cc33107 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -6,6 +6,7 @@
#include <linux/init.h>
#include <linux/pm.h>
#include <linux/mm.h>
+#include <linux/freezer.h>
#include <asm/errno.h>
#ifdef CONFIG_VT
@@ -331,6 +332,8 @@ static inline bool system_entering_hibernation(void) { return false; }
#define PM_RESTORE_PREPARE 0x0005 /* Going to restore a saved image */
#define PM_POST_RESTORE 0x0006 /* Restore failed */
+extern struct mutex pm_mutex;
+
#ifdef CONFIG_PM_SLEEP
void save_processor_state(void);
void restore_processor_state(void);
@@ -351,6 +354,19 @@ extern bool events_check_enabled;
extern bool pm_wakeup_pending(void);
extern bool pm_get_wakeup_count(unsigned int *count);
extern bool pm_save_wakeup_count(unsigned int count);
+
+static inline void lock_system_sleep(void)
+{
+ freezer_do_not_count();
+ mutex_lock(&pm_mutex);
+}
+
+static inline void unlock_system_sleep(void)
+{
+ mutex_unlock(&pm_mutex);
+ freezer_count();
+}
+
#else /* !CONFIG_PM_SLEEP */
static inline int register_pm_notifier(struct notifier_block *nb)
@@ -366,28 +382,11 @@ static inline int unregister_pm_notifier(struct notifier_block *nb)
#define pm_notifier(fn, pri) do { (void)(fn); } while (0)
static inline bool pm_wakeup_pending(void) { return false; }
-#endif /* !CONFIG_PM_SLEEP */
-
-extern struct mutex pm_mutex;
-#ifndef CONFIG_HIBERNATE_CALLBACKS
static inline void lock_system_sleep(void) {}
static inline void unlock_system_sleep(void) {}
-#else
-
-/* Let some subsystems like memory hotadd exclude hibernation */
-
-static inline void lock_system_sleep(void)
-{
- mutex_lock(&pm_mutex);
-}
-
-static inline void unlock_system_sleep(void)
-{
- mutex_unlock(&pm_mutex);
-}
-#endif
+#endif /* !CONFIG_PM_SLEEP */
#ifdef CONFIG_ARCH_SAVE_PAGE_KEYS
/*
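lock_system_sleep() now marks the caller with PF_FREEZER_SKIP (via freezer_do_not_count()) before taking pm_mutex, so a task blocked on the mutex during suspend cannot deadlock the freezer; unlock_system_sleep() re-enables freezing via freezer_count(). It is a drop-in replacement for raw pm_mutex locking, as the kexec and hibernate conversions below show:

#include <linux/suspend.h>

static void my_update_excluding_sleep(void)
{
	lock_system_sleep();	/* won't block the freezer while waiting */
	/* ... modify state that must not change across suspend ... */
	unlock_system_sleep();
}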
diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c
index 213c0351dad8..fcb93fca782d 100644
--- a/kernel/cgroup_freezer.c
+++ b/kernel/cgroup_freezer.c
@@ -48,19 +48,17 @@ static inline struct freezer *task_freezer(struct task_struct *task)
struct freezer, css);
}
-static inline int __cgroup_freezing_or_frozen(struct task_struct *task)
+bool cgroup_freezing(struct task_struct *task)
{
- enum freezer_state state = task_freezer(task)->state;
- return (state == CGROUP_FREEZING) || (state == CGROUP_FROZEN);
-}
+ enum freezer_state state;
+ bool ret;
-int cgroup_freezing_or_frozen(struct task_struct *task)
-{
- int result;
- task_lock(task);
- result = __cgroup_freezing_or_frozen(task);
- task_unlock(task);
- return result;
+ rcu_read_lock();
+ state = task_freezer(task)->state;
+ ret = state == CGROUP_FREEZING || state == CGROUP_FROZEN;
+ rcu_read_unlock();
+
+ return ret;
}
/*
@@ -102,9 +100,6 @@ struct cgroup_subsys freezer_subsys;
* freezer_can_attach():
* cgroup_mutex (held by caller of can_attach)
*
- * cgroup_freezing_or_frozen():
- * task->alloc_lock (to get task's cgroup)
- *
* freezer_fork() (preserving fork() performance means can't take cgroup_mutex):
* freezer->lock
* sighand->siglock (if the cgroup is freezing)
@@ -130,7 +125,7 @@ struct cgroup_subsys freezer_subsys;
* write_lock css_set_lock (cgroup iterator start)
* task->alloc_lock
* read_lock css_set_lock (cgroup iterator start)
- * task->alloc_lock (inside thaw_process(), prevents race with refrigerator())
+ * task->alloc_lock (inside __thaw_task(), prevents race with refrigerator())
* sighand->siglock
*/
static struct cgroup_subsys_state *freezer_create(struct cgroup_subsys *ss,
@@ -150,7 +145,11 @@ static struct cgroup_subsys_state *freezer_create(struct cgroup_subsys *ss,
static void freezer_destroy(struct cgroup_subsys *ss,
struct cgroup *cgroup)
{
- kfree(cgroup_freezer(cgroup));
+ struct freezer *freezer = cgroup_freezer(cgroup);
+
+ if (freezer->state != CGROUP_THAWED)
+ atomic_dec(&system_freezing_cnt);
+ kfree(freezer);
}
/* task is frozen or will freeze immediately when next it gets woken */
@@ -184,13 +183,7 @@ static int freezer_can_attach(struct cgroup_subsys *ss,
static int freezer_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
{
- rcu_read_lock();
- if (__cgroup_freezing_or_frozen(tsk)) {
- rcu_read_unlock();
- return -EBUSY;
- }
- rcu_read_unlock();
- return 0;
+ return cgroup_freezing(tsk) ? -EBUSY : 0;
}
static void freezer_fork(struct cgroup_subsys *ss, struct task_struct *task)
@@ -220,7 +213,7 @@ static void freezer_fork(struct cgroup_subsys *ss, struct task_struct *task)
/* Locking avoids race with FREEZING -> THAWED transitions. */
if (freezer->state == CGROUP_FREEZING)
- freeze_task(task, true);
+ freeze_task(task);
spin_unlock_irq(&freezer->lock);
}
@@ -238,7 +231,7 @@ static void update_if_frozen(struct cgroup *cgroup,
cgroup_iter_start(cgroup, &it);
while ((task = cgroup_iter_next(cgroup, &it))) {
ntotal++;
- if (is_task_frozen_enough(task))
+ if (freezing(task) && is_task_frozen_enough(task))
nfrozen++;
}
@@ -286,10 +279,9 @@ static int try_to_freeze_cgroup(struct cgroup *cgroup, struct freezer *freezer)
struct task_struct *task;
unsigned int num_cant_freeze_now = 0;
- freezer->state = CGROUP_FREEZING;
cgroup_iter_start(cgroup, &it);
while ((task = cgroup_iter_next(cgroup, &it))) {
- if (!freeze_task(task, true))
+ if (!freeze_task(task))
continue;
if (is_task_frozen_enough(task))
continue;
@@ -307,12 +299,9 @@ static void unfreeze_cgroup(struct cgroup *cgroup, struct freezer *freezer)
struct task_struct *task;
cgroup_iter_start(cgroup, &it);
- while ((task = cgroup_iter_next(cgroup, &it))) {
- thaw_process(task);
- }
+ while ((task = cgroup_iter_next(cgroup, &it)))
+ __thaw_task(task);
cgroup_iter_end(cgroup, &it);
-
- freezer->state = CGROUP_THAWED;
}
static int freezer_change_state(struct cgroup *cgroup,
@@ -326,20 +315,24 @@ static int freezer_change_state(struct cgroup *cgroup,
spin_lock_irq(&freezer->lock);
update_if_frozen(cgroup, freezer);
- if (goal_state == freezer->state)
- goto out;
switch (goal_state) {
case CGROUP_THAWED:
+ if (freezer->state != CGROUP_THAWED)
+ atomic_dec(&system_freezing_cnt);
+ freezer->state = CGROUP_THAWED;
unfreeze_cgroup(cgroup, freezer);
break;
case CGROUP_FROZEN:
+ if (freezer->state == CGROUP_THAWED)
+ atomic_inc(&system_freezing_cnt);
+ freezer->state = CGROUP_FREEZING;
retval = try_to_freeze_cgroup(cgroup, freezer);
break;
default:
BUG();
}
-out:
+
spin_unlock_irq(&freezer->lock);
return retval;
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 5ca38d5d238a..2060c6e57027 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -470,7 +470,7 @@ out:
cpu_maps_update_done();
}
-static int alloc_frozen_cpus(void)
+static int __init alloc_frozen_cpus(void)
{
if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
return -ENOMEM;
@@ -543,7 +543,7 @@ cpu_hotplug_pm_callback(struct notifier_block *nb,
}
-int cpu_hotplug_pm_sync_init(void)
+static int __init cpu_hotplug_pm_sync_init(void)
{
pm_notifier(cpu_hotplug_pm_callback, 0);
return 0;
diff --git a/kernel/exit.c b/kernel/exit.c
index d579a459309d..d9eab2e4b430 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -679,8 +679,6 @@ static void exit_mm(struct task_struct * tsk)
tsk->mm = NULL;
up_read(&mm->mmap_sem);
enter_lazy_tlb(mm, current);
- /* We don't want this task to be frozen prematurely */
- clear_freeze_flag(tsk);
task_unlock(tsk);
mm_update_next_owner(mm);
mmput(mm);
@@ -1040,6 +1038,7 @@ NORET_TYPE void do_exit(long code)
exit_rcu();
/* causes final put_task_struct in finish_task_switch(). */
tsk->state = TASK_DEAD;
+ tsk->flags |= PF_NOFREEZE; /* tell freezer to ignore us */
schedule();
BUG();
/* Avoid "noreturn function does return". */
diff --git a/kernel/fork.c b/kernel/fork.c
index b058c5820ecd..f34f894c4b98 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -992,7 +992,6 @@ static void copy_flags(unsigned long clone_flags, struct task_struct *p)
new_flags |= PF_FORKNOEXEC;
new_flags |= PF_STARTING;
p->flags = new_flags;
- clear_freeze_flag(p);
}
SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr)
diff --git a/kernel/freezer.c b/kernel/freezer.c
index 7be56c534397..9815b8d1eed5 100644
--- a/kernel/freezer.c
+++ b/kernel/freezer.c
@@ -9,101 +9,114 @@
#include <linux/export.h>
#include <linux/syscalls.h>
#include <linux/freezer.h>
+#include <linux/kthread.h>
-/*
- * freezing is complete, mark current process as frozen
+/* total number of freezing conditions in effect */
+atomic_t system_freezing_cnt = ATOMIC_INIT(0);
+EXPORT_SYMBOL(system_freezing_cnt);
+
+/* indicate whether PM freezing is in effect, protected by pm_mutex */
+bool pm_freezing;
+bool pm_nosig_freezing;
+
+/* protects freezing and frozen transitions */
+static DEFINE_SPINLOCK(freezer_lock);
+
+/**
+ * freezing_slow_path - slow path for testing whether a task needs to be frozen
+ * @p: task to be tested
+ *
+ * This function is called by freezing() if system_freezing_cnt isn't zero
+ * and tests whether @p needs to enter and stay in frozen state. Can be
+ * called in any context. The freezers are responsible for ensuring the
+ * target tasks see the updated state.
*/
-static inline void frozen_process(void)
+bool freezing_slow_path(struct task_struct *p)
{
- if (!unlikely(current->flags & PF_NOFREEZE)) {
- current->flags |= PF_FROZEN;
- smp_wmb();
- }
- clear_freeze_flag(current);
+ if (p->flags & PF_NOFREEZE)
+ return false;
+
+ if (pm_nosig_freezing || cgroup_freezing(p))
+ return true;
+
+ if (pm_freezing && !(p->flags & PF_KTHREAD))
+ return true;
+
+ return false;
}
+EXPORT_SYMBOL(freezing_slow_path);
/* Refrigerator is place where frozen processes are stored :-). */
-void refrigerator(void)
+bool __refrigerator(bool check_kthr_stop)
{
/* Hmm, should we be allowed to suspend when there are realtime
processes around? */
- long save;
+ bool was_frozen = false;
+ long save = current->state;
- task_lock(current);
- if (freezing(current)) {
- frozen_process();
- task_unlock(current);
- } else {
- task_unlock(current);
- return;
- }
- save = current->state;
pr_debug("%s entered refrigerator\n", current->comm);
- spin_lock_irq(&current->sighand->siglock);
- recalc_sigpending(); /* We sent fake signal, clean it up */
- spin_unlock_irq(&current->sighand->siglock);
-
- /* prevent accounting of that task to load */
- current->flags |= PF_FREEZING;
-
for (;;) {
set_current_state(TASK_UNINTERRUPTIBLE);
- if (!frozen(current))
+
+ spin_lock_irq(&freezer_lock);
+ current->flags |= PF_FROZEN;
+ if (!freezing(current) ||
+ (check_kthr_stop && kthread_should_stop()))
+ current->flags &= ~PF_FROZEN;
+ spin_unlock_irq(&freezer_lock);
+
+ if (!(current->flags & PF_FROZEN))
break;
+ was_frozen = true;
schedule();
}
- /* Remove the accounting blocker */
- current->flags &= ~PF_FREEZING;
-
pr_debug("%s left refrigerator\n", current->comm);
- __set_current_state(save);
+
+ /*
+ * Restore saved task state before returning. The mb'd version
+ * needs to be used; otherwise, it might silently break
+ * synchronization which depends on ordered task state change.
+ */
+ set_current_state(save);
+
+ return was_frozen;
}
-EXPORT_SYMBOL(refrigerator);
+EXPORT_SYMBOL(__refrigerator);
static void fake_signal_wake_up(struct task_struct *p)
{
unsigned long flags;
- spin_lock_irqsave(&p->sighand->siglock, flags);
- signal_wake_up(p, 0);
- spin_unlock_irqrestore(&p->sighand->siglock, flags);
+ if (lock_task_sighand(p, &flags)) {
+ signal_wake_up(p, 0);
+ unlock_task_sighand(p, &flags);
+ }
}
/**
- * freeze_task - send a freeze request to given task
- * @p: task to send the request to
- * @sig_only: if set, the request will only be sent if the task has the
- * PF_FREEZER_NOSIG flag unset
- * Return value: 'false', if @sig_only is set and the task has
- * PF_FREEZER_NOSIG set or the task is frozen, 'true', otherwise
+ * freeze_task - send a freeze request to given task
+ * @p: task to send the request to
+ *
+ * If @p is freezing, the freeze request is sent either by sending a fake
+ * signal to it (if it's not a kernel thread) or waking it up (if it's a
+ * kernel thread).
*
- * The freeze request is sent by setting the tasks's TIF_FREEZE flag and
- * either sending a fake signal to it or waking it up, depending on whether
- * or not it has PF_FREEZER_NOSIG set. If @sig_only is set and the task
- * has PF_FREEZER_NOSIG set (ie. it is a typical kernel thread), its
- * TIF_FREEZE flag will not be set.
+ * RETURNS:
+ * %false, if @p is not freezing or already frozen; %true, otherwise
*/
-bool freeze_task(struct task_struct *p, bool sig_only)
+bool freeze_task(struct task_struct *p)
{
- /*
- * We first check if the task is freezing and next if it has already
- * been frozen to avoid the race with frozen_process() which first marks
- * the task as frozen and next clears its TIF_FREEZE.
- */
- if (!freezing(p)) {
- smp_rmb();
- if (frozen(p))
- return false;
-
- if (!sig_only || should_send_signal(p))
- set_freeze_flag(p);
- else
- return false;
+ unsigned long flags;
+
+ spin_lock_irqsave(&freezer_lock, flags);
+ if (!freezing(p) || frozen(p)) {
+ spin_unlock_irqrestore(&freezer_lock, flags);
+ return false;
}
- if (should_send_signal(p)) {
+ if (!(p->flags & PF_KTHREAD)) {
fake_signal_wake_up(p);
/*
* fake_signal_wake_up() goes through p's scheduler
@@ -111,56 +124,48 @@ bool freeze_task(struct task_struct *p, bool sig_only)
* TASK_RUNNING transition can't race with task state
* testing in try_to_freeze_tasks().
*/
- } else if (sig_only) {
- return false;
} else {
wake_up_state(p, TASK_INTERRUPTIBLE);
}
+ spin_unlock_irqrestore(&freezer_lock, flags);
return true;
}
-void cancel_freezing(struct task_struct *p)
+void __thaw_task(struct task_struct *p)
{
unsigned long flags;
- if (freezing(p)) {
- pr_debug(" clean up: %s\n", p->comm);
- clear_freeze_flag(p);
- spin_lock_irqsave(&p->sighand->siglock, flags);
- recalc_sigpending_and_wake(p);
- spin_unlock_irqrestore(&p->sighand->siglock, flags);
- }
-}
-
-static int __thaw_process(struct task_struct *p)
-{
- if (frozen(p)) {
- p->flags &= ~PF_FROZEN;
- return 1;
- }
- clear_freeze_flag(p);
- return 0;
+ /*
+ * Clear freezing and kick @p if FROZEN. Clearing is guaranteed to
+ * be visible to @p as waking up implies wmb. Waking up inside
+ * freezer_lock also prevents wakeups from leaking outside
+ * refrigerator.
+ */
+ spin_lock_irqsave(&freezer_lock, flags);
+ if (frozen(p))
+ wake_up_process(p);
+ spin_unlock_irqrestore(&freezer_lock, flags);
}
-/*
- * Wake up a frozen process
+/**
+ * set_freezable - make %current freezable
*
- * task_lock() is needed to prevent the race with refrigerator() which may
- * occur if the freezing of tasks fails. Namely, without the lock, if the
- * freezing of tasks failed, thaw_tasks() might have run before a task in
- * refrigerator() could call frozen_process(), in which case the task would be
- * frozen and no one would thaw it.
+ * Mark %current freezable and enter refrigerator if necessary.
*/
-int thaw_process(struct task_struct *p)
+bool set_freezable(void)
{
- task_lock(p);
- if (__thaw_process(p) == 1) {
- task_unlock(p);
- wake_up_process(p);
- return 1;
- }
- task_unlock(p);
- return 0;
+ might_sleep();
+
+ /*
+ * Modify flags while holding freezer_lock. This ensures the
+ * freezer notices that we aren't frozen yet or the freezing
+ * condition is visible to try_to_freeze() below.
+ */
+ spin_lock_irq(&freezer_lock);
+ current->flags &= ~PF_NOFREEZE;
+ spin_unlock_irq(&freezer_lock);
+
+ return try_to_freeze();
}
-EXPORT_SYMBOL(thaw_process);
+EXPORT_SYMBOL(set_freezable);
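Putting the reworked core together: a freezer arms the freezing() fast path by bumping system_freezing_cnt, sets the relevant condition flag, and calls freeze_task() on each candidate; thawing reverses the flags and wakes frozen tasks with __thaw_task(). A condensed, hypothetical illustration only; the real logic, including pm_mutex, retries and timeout handling, lives in kernel/power/process.c:

#include <linux/freezer.h>
#include <linux/sched.h>

static void freeze_user_tasks_sketch(void)
{
	struct task_struct *g, *p;

	atomic_inc(&system_freezing_cnt);	/* arm freezing() fast path */
	pm_freezing = true;			/* user tasks now freezable */

	read_lock(&tasklist_lock);
	do_each_thread(g, p) {
		freeze_task(p);		/* fake signal or wake-up */
	} while_each_thread(g, p);
	read_unlock(&tasklist_lock);
}

static void thaw_tasks_sketch(void)
{
	struct task_struct *g, *p;

	pm_freezing = false;
	atomic_dec(&system_freezing_cnt);

	read_lock(&tasklist_lock);
	do_each_thread(g, p) {
		__thaw_task(p);		/* wake anything still FROZEN */
	} while_each_thread(g, p);
	read_unlock(&tasklist_lock);
}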
diff --git a/kernel/kexec.c b/kernel/kexec.c
index dc7bc0829286..090ee10d9604 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -1523,7 +1523,7 @@ int kernel_kexec(void)
#ifdef CONFIG_KEXEC_JUMP
if (kexec_image->preserve_context) {
- mutex_lock(&pm_mutex);
+ lock_system_sleep();
pm_prepare_console();
error = freeze_processes();
if (error) {
@@ -1576,7 +1576,7 @@ int kernel_kexec(void)
thaw_processes();
Restore_console:
pm_restore_console();
- mutex_unlock(&pm_mutex);
+ unlock_system_sleep();
}
#endif
diff --git a/kernel/kmod.c b/kernel/kmod.c
index a4bea97c75b6..a0a88543934e 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -36,6 +36,7 @@
#include <linux/resource.h>
#include <linux/notifier.h>
#include <linux/suspend.h>
+#include <linux/rwsem.h>
#include <asm/uaccess.h>
#include <trace/events/module.h>
@@ -50,6 +51,7 @@ static struct workqueue_struct *khelper_wq;
static kernel_cap_t usermodehelper_bset = CAP_FULL_SET;
static kernel_cap_t usermodehelper_inheritable = CAP_FULL_SET;
static DEFINE_SPINLOCK(umh_sysctl_lock);
+static DECLARE_RWSEM(umhelper_sem);
#ifdef CONFIG_MODULES
@@ -275,6 +277,7 @@ static void __call_usermodehelper(struct work_struct *work)
* If set, call_usermodehelper_exec() will exit immediately returning -EBUSY
* (used for preventing user land processes from being created after the user
* land has been frozen during a system-wide hibernation or suspend operation).
+ * Should always be manipulated under umhelper_sem acquired for write.
*/
static int usermodehelper_disabled = 1;
@@ -282,17 +285,29 @@ static int usermodehelper_disabled = 1;
static atomic_t running_helpers = ATOMIC_INIT(0);
/*
- * Wait queue head used by usermodehelper_pm_callback() to wait for all running
+ * Wait queue head used by usermodehelper_disable() to wait for all running
* helpers to finish.
*/
static DECLARE_WAIT_QUEUE_HEAD(running_helpers_waitq);
/*
* Time to wait for running_helpers to become zero before the setting of
- * usermodehelper_disabled in usermodehelper_pm_callback() fails
+ * usermodehelper_disabled in usermodehelper_disable() fails
*/
#define RUNNING_HELPERS_TIMEOUT (5 * HZ)
+void read_lock_usermodehelper(void)
+{
+ down_read(&umhelper_sem);
+}
+EXPORT_SYMBOL_GPL(read_lock_usermodehelper);
+
+void read_unlock_usermodehelper(void)
+{
+ up_read(&umhelper_sem);
+}
+EXPORT_SYMBOL_GPL(read_unlock_usermodehelper);
+
/**
* usermodehelper_disable - prevent new helpers from being started
*/
@@ -300,8 +315,10 @@ int usermodehelper_disable(void)
{
long retval;
+ down_write(&umhelper_sem);
usermodehelper_disabled = 1;
- smp_mb();
+ up_write(&umhelper_sem);
+
/*
* From now on call_usermodehelper_exec() won't start any new
* helpers, so it is sufficient if running_helpers turns out to
@@ -314,7 +331,9 @@ int usermodehelper_disable(void)
if (retval)
return 0;
+ down_write(&umhelper_sem);
usermodehelper_disabled = 0;
+ up_write(&umhelper_sem);
return -EAGAIN;
}
@@ -323,7 +342,9 @@ int usermodehelper_disable(void)
*/
void usermodehelper_enable(void)
{
+ down_write(&umhelper_sem);
usermodehelper_disabled = 0;
+ up_write(&umhelper_sem);
}
/**
diff --git a/kernel/kthread.c b/kernel/kthread.c
index b6d216a92639..3d3de633702e 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -59,6 +59,31 @@ int kthread_should_stop(void)
EXPORT_SYMBOL(kthread_should_stop);
/**
+ * kthread_freezable_should_stop - should this freezable kthread return now?
+ * @was_frozen: optional out parameter, indicates whether %current was frozen
+ *
+ * kthread_should_stop() for freezable kthreads, which will enter
+ * the refrigerator if necessary. This function is safe from the
+ * kthread_stop() / freezer deadlock; freezable kthreads should use it
+ * instead of calling try_to_freeze() directly.
+ */
+bool kthread_freezable_should_stop(bool *was_frozen)
+{
+ bool frozen = false;
+
+ might_sleep();
+
+ if (unlikely(freezing(current)))
+ frozen = __refrigerator(true);
+
+ if (was_frozen)
+ *was_frozen = frozen;
+
+ return kthread_should_stop();
+}
+EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);
+
+/**
* kthread_data - return data value specified on kthread creation
* @task: kthread task in question
*
@@ -257,7 +282,7 @@ int kthreadd(void *unused)
set_cpus_allowed_ptr(tsk, cpu_all_mask);
set_mems_allowed(node_states[N_HIGH_MEMORY]);
- current->flags |= PF_NOFREEZE | PF_FREEZER_NOSIG;
+ current->flags |= PF_NOFREEZE;
for (;;) {
set_current_state(TASK_INTERRUPTIBLE);
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index a6b0503574ee..6d6d28870335 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -43,8 +43,6 @@ int in_suspend __nosavedata;
enum {
HIBERNATION_INVALID,
HIBERNATION_PLATFORM,
- HIBERNATION_TEST,
- HIBERNATION_TESTPROC,
HIBERNATION_SHUTDOWN,
HIBERNATION_REBOOT,
/* keep last */
@@ -55,7 +53,7 @@ enum {
static int hibernation_mode = HIBERNATION_SHUTDOWN;
-static bool freezer_test_done;
+bool freezer_test_done;
static const struct platform_hibernation_ops *hibernation_ops;
@@ -71,14 +69,14 @@ void hibernation_set_ops(const struct platform_hibernation_ops *ops)
WARN_ON(1);
return;
}
- mutex_lock(&pm_mutex);
+ lock_system_sleep();
hibernation_ops = ops;
if (ops)
hibernation_mode = HIBERNATION_PLATFORM;
else if (hibernation_mode == HIBERNATION_PLATFORM)
hibernation_mode = HIBERNATION_SHUTDOWN;
- mutex_unlock(&pm_mutex);
+ unlock_system_sleep();
}
static bool entering_platform_hibernation;
@@ -96,15 +94,6 @@ static void hibernation_debug_sleep(void)
mdelay(5000);
}
-static int hibernation_testmode(int mode)
-{
- if (hibernation_mode == mode) {
- hibernation_debug_sleep();
- return 1;
- }
- return 0;
-}
-
static int hibernation_test(int level)
{
if (pm_test_level == level) {
@@ -114,7 +103,6 @@ static int hibernation_test(int level)
return 0;
}
#else /* !CONFIG_PM_DEBUG */
-static int hibernation_testmode(int mode) { return 0; }
static int hibernation_test(int level) { return 0; }
#endif /* !CONFIG_PM_DEBUG */
@@ -278,8 +266,7 @@ static int create_image(int platform_mode)
goto Platform_finish;
error = disable_nonboot_cpus();
- if (error || hibernation_test(TEST_CPUS)
- || hibernation_testmode(HIBERNATION_TEST))
+ if (error || hibernation_test(TEST_CPUS))
goto Enable_cpus;
local_irq_disable();
@@ -333,7 +320,7 @@ static int create_image(int platform_mode)
*/
int hibernation_snapshot(int platform_mode)
{
- pm_message_t msg = PMSG_RECOVER;
+ pm_message_t msg;
int error;
error = platform_begin(platform_mode);
@@ -349,8 +336,7 @@ int hibernation_snapshot(int platform_mode)
if (error)
goto Cleanup;
- if (hibernation_test(TEST_FREEZER) ||
- hibernation_testmode(HIBERNATION_TESTPROC)) {
+ if (hibernation_test(TEST_FREEZER)) {
/*
* Indicate to the caller that we are returning due to a
* successful freezer test.
*/
@@ -362,26 +348,26 @@ int hibernation_snapshot(int platform_mode)
error = dpm_prepare(PMSG_FREEZE);
if (error) {
- dpm_complete(msg);
+ dpm_complete(PMSG_RECOVER);
goto Cleanup;
}
suspend_console();
pm_restrict_gfp_mask();
+
error = dpm_suspend(PMSG_FREEZE);
- if (error)
- goto Recover_platform;
- if (hibernation_test(TEST_DEVICES))
- goto Recover_platform;
+ if (error || hibernation_test(TEST_DEVICES))
+ platform_recover(platform_mode);
+ else
+ error = create_image(platform_mode);
- error = create_image(platform_mode);
/*
- * Control returns here (1) after the image has been created or the
+ * If create_image() is called above, control returns here
+ * (1) after the image has been created or the image creation
+ * has failed and (2) after a successful restore.
*/
- Resume_devices:
/* We may need to release the preallocated image pages here. */
if (error || !in_suspend)
swsusp_free();
@@ -399,10 +385,6 @@ int hibernation_snapshot(int platform_mode)
platform_end(platform_mode);
return error;
- Recover_platform:
- platform_recover(platform_mode);
- goto Resume_devices;
-
Cleanup:
swsusp_free();
goto Close;
@@ -590,9 +572,6 @@ int hibernation_platform_enter(void)
static void power_down(void)
{
switch (hibernation_mode) {
- case HIBERNATION_TEST:
- case HIBERNATION_TESTPROC:
- break;
case HIBERNATION_REBOOT:
kernel_restart(NULL);
break;
@@ -611,17 +590,6 @@ static void power_down(void)
while(1);
}
-static int prepare_processes(void)
-{
- int error = 0;
-
- if (freeze_processes()) {
- error = -EBUSY;
- thaw_processes();
- }
- return error;
-}
-
/**
* hibernate - Carry out system hibernation, including saving the image.
*/
@@ -629,7 +597,7 @@ int hibernate(void)
{
int error;
- mutex_lock(&pm_mutex);
+ lock_system_sleep();
/* The snapshot device should not be opened while we're running */
if (!atomic_add_unless(&snapshot_device_available, -1, 0)) {
error = -EBUSY;
@@ -654,7 +622,7 @@ int hibernate(void)
sys_sync();
printk("done.\n");
- error = prepare_processes();
+ error = freeze_processes();
if (error)
goto Finish;
@@ -697,7 +665,7 @@ int hibernate(void)
pm_restore_console();
atomic_inc(&snapshot_device_available);
Unlock:
- mutex_unlock(&pm_mutex);
+ unlock_system_sleep();
return error;
}
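Since freeze_processes() now thaws everything itself on failure, the prepare_processes() wrapper had nothing left to do and callers can use the freezer API directly. The caller-side pattern becomes simply:

	error = freeze_processes();	/* thaws all tasks on failure */
	if (error)
		goto Finish;		/* no explicit thaw_processes() needed */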
@@ -811,11 +779,13 @@ static int software_resume(void)
goto close_finish;
error = create_basic_memory_bitmaps();
- if (error)
+ if (error) {
+ usermodehelper_enable();
goto close_finish;
+ }
pr_debug("PM: Preparing processes for restore.\n");
- error = prepare_processes();
+ error = freeze_processes();
if (error) {
swsusp_close(FMODE_READ);
goto Done;
@@ -855,8 +825,6 @@ static const char * const hibernation_modes[] = {
[HIBERNATION_PLATFORM] = "platform",
[HIBERNATION_SHUTDOWN] = "shutdown",
[HIBERNATION_REBOOT] = "reboot",
- [HIBERNATION_TEST] = "test",
- [HIBERNATION_TESTPROC] = "testproc",
};
/*
@@ -865,17 +833,15 @@ static const char * const hibernation_modes[] = {
* Hibernation can be handled in several ways. There are a few different ways
* to put the system into the sleep state: using the platform driver (e.g. ACPI
* or other hibernation_ops), powering it off or rebooting it (for testing
- * mostly), or using one of the two available test modes.
+ * mostly).
*
* The sysfs file /sys/power/disk provides an interface for selecting the
* hibernation mode to use. Reading from this file causes the available modes
- * to be printed. There are 5 modes that can be supported:
+ * to be printed. There are 3 modes that can be supported:
*
* 'platform'
* 'shutdown'
* 'reboot'
- * 'test'
- * 'testproc'
*
* If a platform hibernation driver is in use, 'platform' will be supported
* and will be used by default. Otherwise, 'shutdown' will be used by default.
@@ -899,8 +865,6 @@ static ssize_t disk_show(struct kobject *kobj, struct kobj_attribute *attr,
switch (i) {
case HIBERNATION_SHUTDOWN:
case HIBERNATION_REBOOT:
- case HIBERNATION_TEST:
- case HIBERNATION_TESTPROC:
break;
case HIBERNATION_PLATFORM:
if (hibernation_ops)
@@ -929,7 +893,7 @@ static ssize_t disk_store(struct kobject *kobj, struct kobj_attribute *attr,
p = memchr(buf, '\n', n);
len = p ? p - buf : n;
- mutex_lock(&pm_mutex);
+ lock_system_sleep();
for (i = HIBERNATION_FIRST; i <= HIBERNATION_MAX; i++) {
if (len == strlen(hibernation_modes[i])
&& !strncmp(buf, hibernation_modes[i], len)) {
@@ -941,8 +905,6 @@ static ssize_t disk_store(struct kobject *kobj, struct kobj_attribute *attr,
switch (mode) {
case HIBERNATION_SHUTDOWN:
case HIBERNATION_REBOOT:
- case HIBERNATION_TEST:
- case HIBERNATION_TESTPROC:
hibernation_mode = mode;
break;
case HIBERNATION_PLATFORM:
@@ -957,7 +919,7 @@ static ssize_t disk_store(struct kobject *kobj, struct kobj_attribute *attr,
if (!error)
pr_debug("PM: Hibernation mode set to '%s'\n",
hibernation_modes[mode]);
- mutex_unlock(&pm_mutex);
+ unlock_system_sleep();
return error ? error : n;
}
@@ -984,9 +946,9 @@ static ssize_t resume_store(struct kobject *kobj, struct kobj_attribute *attr,
if (maj != MAJOR(res) || min != MINOR(res))
goto out;
- mutex_lock(&pm_mutex);
+ lock_system_sleep();
swsusp_resume_device = res;
- mutex_unlock(&pm_mutex);
+ unlock_system_sleep();
printk(KERN_INFO "PM: Starting manual resume from disk\n");
noresume = 0;
software_resume();
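The same pattern drives manual resume: writing a major:minor pair to /sys/power/resume stores the device and kicks off software_resume(). A hypothetical example (the device numbers are made up):

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		/* resume from the swap partition at major 8, minor 3 */
		int fd = open("/sys/power/resume", O_WRONLY);

		if (fd < 0) {
			perror("open /sys/power/resume");
			return 1;
		}
		if (write(fd, "8:3", 3) < 0)
			perror("write");
		close(fd);
		return 0;
	}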
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 36e0f0903c32..9824b41e5a18 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -3,7 +3,7 @@
*
* Copyright (c) 2003 Patrick Mochel
* Copyright (c) 2003 Open Source Development Lab
- *
+ *
* This file is released under the GPLv2
*
*/
@@ -116,7 +116,7 @@ static ssize_t pm_test_store(struct kobject *kobj, struct kobj_attribute *attr,
p = memchr(buf, '\n', n);
len = p ? p - buf : n;
- mutex_lock(&pm_mutex);
+ lock_system_sleep();
level = TEST_FIRST;
for (s = &pm_tests[level]; level <= TEST_MAX; s++, level++)
@@ -126,7 +126,7 @@ static ssize_t pm_test_store(struct kobject *kobj, struct kobj_attribute *attr,
break;
}
- mutex_unlock(&pm_mutex);
+ unlock_system_sleep();
return error ? error : n;
}
@@ -240,7 +240,7 @@ struct kobject *power_kobj;
* 'standby' (Power-On Suspend), 'mem' (Suspend-to-RAM), and
* 'disk' (Suspend-to-Disk).
*
- * store() accepts one of those strings, translates it into the
+ * store() accepts one of those strings, translates it into the
* proper enumerated value, and initiates a suspend transition.
*/
static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr,
@@ -282,7 +282,7 @@ static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr,
/* First, check if we are requested to hibernate */
if (len == 4 && !strncmp(buf, "disk", len)) {
error = hibernate();
- goto Exit;
+ goto Exit;
}
#ifdef CONFIG_SUSPEND
diff --git a/kernel/power/power.h b/kernel/power/power.h
index 23a2db1ec442..0c4defe6d3b8 100644
--- a/kernel/power/power.h
+++ b/kernel/power/power.h
@@ -50,6 +50,8 @@ static inline char *check_image_kernel(struct swsusp_info *info)
#define SPARE_PAGES ((1024 * 1024) >> PAGE_SHIFT)
/* kernel/power/hibernate.c */
+extern bool freezer_test_done;
+
extern int hibernation_snapshot(int platform_mode);
extern int hibernation_restore(int platform_mode);
extern int hibernation_platform_enter(void);
diff --git a/kernel/power/process.c b/kernel/power/process.c
index addbbe5531bc..77274c9ba2f1 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -22,16 +22,7 @@
*/
#define TIMEOUT (20 * HZ)
-static inline int freezable(struct task_struct * p)
-{
- if ((p == current) ||
- (p->flags & PF_NOFREEZE) ||
- (p->exit_state != 0))
- return 0;
- return 1;
-}
-
-static int try_to_freeze_tasks(bool sig_only)
+static int try_to_freeze_tasks(bool user_only)
{
struct task_struct *g, *p;
unsigned long end_time;
@@ -46,17 +37,14 @@ static int try_to_freeze_tasks(bool sig_only)
end_time = jiffies + TIMEOUT;
- if (!sig_only)
+ if (!user_only)
freeze_workqueues_begin();
while (true) {
todo = 0;
read_lock(&tasklist_lock);
do_each_thread(g, p) {
- if (frozen(p) || !freezable(p))
- continue;
-
- if (!freeze_task(p, sig_only))
+ if (p == current || !freeze_task(p))
continue;
/*
@@ -77,7 +65,7 @@ static int try_to_freeze_tasks(bool sig_only)
} while_each_thread(g, p);
read_unlock(&tasklist_lock);
- if (!sig_only) {
+ if (!user_only) {
wq_busy = freeze_workqueues_busy();
todo += wq_busy;
}
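The open-coded freezable()/freeze_task(p, sig_only) pair is replaced by a single freeze_task(p) entry point that decides for itself whether and how to wake the task. A sketch of the reworked helper from this series (assumption: the details of kernel/freezer.c, including the freezer_lock name, may differ):

	bool freeze_task(struct task_struct *p)
	{
		unsigned long flags;

		spin_lock_irqsave(&freezer_lock, flags);
		if (!freezing(p) || frozen(p)) {
			/* not subject to freezing, or already in the refrigerator */
			spin_unlock_irqrestore(&freezer_lock, flags);
			return false;
		}

		if (!(p->flags & PF_KTHREAD))
			fake_signal_wake_up(p);			/* user task: fake a signal */
		else
			wake_up_state(p, TASK_INTERRUPTIBLE);	/* kthread: just wake it */

		spin_unlock_irqrestore(&freezer_lock, flags);
		return true;
	}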
@@ -103,11 +91,6 @@ static int try_to_freeze_tasks(bool sig_only)
elapsed_csecs = elapsed_csecs64;
if (todo) {
- /* This does not unfreeze processes that are already frozen
- * (we have slightly ugly calling convention in that respect,
- * and caller must call thaw_processes() if something fails),
- * but it cleans up leftover PF_FREEZE requests.
- */
printk("\n");
printk(KERN_ERR "Freezing of tasks %s after %d.%02d seconds "
"(%d tasks refusing to freeze, wq_busy=%d):\n",
@@ -115,15 +98,11 @@ static int try_to_freeze_tasks(bool sig_only)
elapsed_csecs / 100, elapsed_csecs % 100,
todo - wq_busy, wq_busy);
- thaw_workqueues();
-
read_lock(&tasklist_lock);
do_each_thread(g, p) {
- task_lock(p);
- if (!wakeup && freezing(p) && !freezer_should_skip(p))
+ if (!wakeup && !freezer_should_skip(p) &&
+ p != current && freezing(p) && !frozen(p))
sched_show_task(p);
- cancel_freezing(p);
- task_unlock(p);
} while_each_thread(g, p);
read_unlock(&tasklist_lock);
} else {
@@ -136,12 +115,18 @@ static int try_to_freeze_tasks(bool sig_only)
/**
* freeze_processes - Signal user space processes to enter the refrigerator.
+ *
+ * On success, returns 0. On failure, returns -errno and the system is fully thawed.
*/
int freeze_processes(void)
{
int error;
+ if (!pm_freezing)
+ atomic_inc(&system_freezing_cnt);
+
printk("Freezing user space processes ... ");
+ pm_freezing = true;
error = try_to_freeze_tasks(true);
if (!error) {
printk("done.");
@@ -150,17 +135,22 @@ int freeze_processes(void)
printk("\n");
BUG_ON(in_atomic());
+ if (error)
+ thaw_processes();
return error;
}
/**
* freeze_kernel_threads - Make freezable kernel threads go to the refrigerator.
+ *
+ * On success, returns 0. On failure, returns -errno and the system is fully thawed.
*/
int freeze_kernel_threads(void)
{
int error;
printk("Freezing remaining freezable tasks ... ");
+ pm_nosig_freezing = true;
error = try_to_freeze_tasks(false);
if (!error)
printk("done.");
@@ -168,37 +158,32 @@ int freeze_kernel_threads(void)
printk("\n");
BUG_ON(in_atomic());
+ if (error)
+ thaw_processes();
return error;
}
-static void thaw_tasks(bool nosig_only)
+void thaw_processes(void)
{
struct task_struct *g, *p;
- read_lock(&tasklist_lock);
- do_each_thread(g, p) {
- if (!freezable(p))
- continue;
+ if (pm_freezing)
+ atomic_dec(&system_freezing_cnt);
+ pm_freezing = false;
+ pm_nosig_freezing = false;
- if (nosig_only && should_send_signal(p))
- continue;
+ oom_killer_enable();
+
+ printk("Restarting tasks ... ");
- if (cgroup_freezing_or_frozen(p))
- continue;
+ thaw_workqueues();
- thaw_process(p);
+ read_lock(&tasklist_lock);
+ do_each_thread(g, p) {
+ __thaw_task(p);
} while_each_thread(g, p);
read_unlock(&tasklist_lock);
-}
-void thaw_processes(void)
-{
- oom_killer_enable();
-
- printk("Restarting tasks ... ");
- thaw_workqueues();
- thaw_tasks(true);
- thaw_tasks(false);
schedule();
printk("done.\n");
}
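thaw_tasks() and its filtering disappear because __thaw_task() is safe to call on every thread; waking a task that was never frozen is a no-op. A sketch of the helper (assumption: simplified from the kernel/freezer.c rework):

	void __thaw_task(struct task_struct *p)
	{
		unsigned long flags;

		spin_lock_irqsave(&freezer_lock, flags);
		if (frozen(p))
			wake_up_process(p);	/* the task leaves the refrigerator itself */
		spin_unlock_irqrestore(&freezer_lock, flags);
	}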
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 4953dc054c53..4fd51beed879 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -42,9 +42,9 @@ static const struct platform_suspend_ops *suspend_ops;
*/
void suspend_set_ops(const struct platform_suspend_ops *ops)
{
- mutex_lock(&pm_mutex);
+ lock_system_sleep();
suspend_ops = ops;
- mutex_unlock(&pm_mutex);
+ unlock_system_sleep();
}
EXPORT_SYMBOL_GPL(suspend_set_ops);
@@ -106,13 +106,11 @@ static int suspend_prepare(void)
goto Finish;
error = suspend_freeze_processes();
- if (error) {
- suspend_stats.failed_freeze++;
- dpm_save_failed_step(SUSPEND_FREEZE);
- } else
+ if (!error)
return 0;
- suspend_thaw_processes();
+ suspend_stats.failed_freeze++;
+ dpm_save_failed_step(SUSPEND_FREEZE);
usermodehelper_enable();
Finish:
pm_notifier_call_chain(PM_POST_SUSPEND);
diff --git a/kernel/power/user.c b/kernel/power/user.c
index 6d8f535c2b88..6b1ab7a88522 100644
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -21,6 +21,7 @@
#include <linux/swapops.h>
#include <linux/pm.h>
#include <linux/fs.h>
+#include <linux/compat.h>
#include <linux/console.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
@@ -30,28 +31,6 @@
#include "power.h"
-/*
- * NOTE: The SNAPSHOT_SET_SWAP_FILE and SNAPSHOT_PMOPS ioctls are obsolete and
- * will be removed in the future. They are only preserved here for
- * compatibility with existing userland utilities.
- */
-#define SNAPSHOT_SET_SWAP_FILE _IOW(SNAPSHOT_IOC_MAGIC, 10, unsigned int)
-#define SNAPSHOT_PMOPS _IOW(SNAPSHOT_IOC_MAGIC, 12, unsigned int)
-
-#define PMOPS_PREPARE 1
-#define PMOPS_ENTER 2
-#define PMOPS_FINISH 3
-
-/*
- * NOTE: The following ioctl definitions are wrong and have been replaced with
- * correct ones. They are only preserved here for compatibility with existing
- * userland utilities and will be removed in the future.
- */
-#define SNAPSHOT_ATOMIC_SNAPSHOT _IOW(SNAPSHOT_IOC_MAGIC, 3, void *)
-#define SNAPSHOT_SET_IMAGE_SIZE _IOW(SNAPSHOT_IOC_MAGIC, 6, unsigned long)
-#define SNAPSHOT_AVAIL_SWAP _IOR(SNAPSHOT_IOC_MAGIC, 7, void *)
-#define SNAPSHOT_GET_SWAP_PAGE _IOR(SNAPSHOT_IOC_MAGIC, 8, void *)
-
#define SNAPSHOT_MINOR 231
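With the obsolete numbers gone, suspend-to-disk utilities must use the current ioctls from <linux/suspend_ioctls.h>. A hypothetical, heavily trimmed image-creation sequence (illustrative only; a real utility writes the image to swap when in_suspend is set, and frees it before unfreezing):

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/suspend_ioctls.h>

	int main(void)
	{
		int in_suspend = 0;
		int fd = open("/dev/snapshot", O_RDONLY);

		if (fd < 0)
			return 1;
		if (ioctl(fd, SNAPSHOT_FREEZE, 0) == 0 &&
		    ioctl(fd, SNAPSHOT_CREATE_IMAGE, &in_suspend) == 0)
			printf("in_suspend = %d\n", in_suspend);
		ioctl(fd, SNAPSHOT_FREE, 0);		/* drop the image pages */
		ioctl(fd, SNAPSHOT_UNFREEZE, 0);
		close(fd);
		return 0;
	}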
@@ -71,7 +50,7 @@ static int snapshot_open(struct inode *inode, struct file *filp)
struct snapshot_data *data;
int error;
- mutex_lock(&pm_mutex);
+ lock_system_sleep();
if (!atomic_add_unless(&snapshot_device_available, -1, 0)) {
error = -EBUSY;
@@ -123,7 +102,7 @@ static int snapshot_open(struct inode *inode, struct file *filp)
data->platform_support = 0;
Unlock:
- mutex_unlock(&pm_mutex);
+ unlock_system_sleep();
return error;
}
@@ -132,7 +111,7 @@ static int snapshot_release(struct inode *inode, struct file *filp)
{
struct snapshot_data *data;
- mutex_lock(&pm_mutex);
+ lock_system_sleep();
swsusp_free();
free_basic_memory_bitmaps();
@@ -146,7 +125,7 @@ static int snapshot_release(struct inode *inode, struct file *filp)
PM_POST_HIBERNATION : PM_POST_RESTORE);
atomic_inc(&snapshot_device_available);
- mutex_unlock(&pm_mutex);
+ unlock_system_sleep();
return 0;
}
@@ -158,7 +137,7 @@ static ssize_t snapshot_read(struct file *filp, char __user *buf,
ssize_t res;
loff_t pg_offp = *offp & ~PAGE_MASK;
- mutex_lock(&pm_mutex);
+ lock_system_sleep();
data = filp->private_data;
if (!data->ready) {
@@ -179,7 +158,7 @@ static ssize_t snapshot_read(struct file *filp, char __user *buf,
*offp += res;
Unlock:
- mutex_unlock(&pm_mutex);
+ unlock_system_sleep();
return res;
}
@@ -191,7 +170,7 @@ static ssize_t snapshot_write(struct file *filp, const char __user *buf,
ssize_t res;
loff_t pg_offp = *offp & ~PAGE_MASK;
- mutex_lock(&pm_mutex);
+ lock_system_sleep();
data = filp->private_data;
@@ -208,20 +187,11 @@ static ssize_t snapshot_write(struct file *filp, const char __user *buf,
if (res > 0)
*offp += res;
unlock:
- mutex_unlock(&pm_mutex);
+ unlock_system_sleep();
return res;
}
-static void snapshot_deprecated_ioctl(unsigned int cmd)
-{
- if (printk_ratelimit())
- printk(KERN_NOTICE "%pf: ioctl '%.8x' is deprecated and will "
- "be removed soon, update your suspend-to-disk "
- "utilities\n",
- __builtin_return_address(0), cmd);
-}
-
static long snapshot_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
@@ -257,11 +227,9 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
break;
error = freeze_processes();
- if (error) {
- thaw_processes();
+ if (error)
usermodehelper_enable();
- }
- if (!error)
+ else
data->frozen = 1;
break;
@@ -274,8 +242,6 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
data->frozen = 0;
break;
- case SNAPSHOT_ATOMIC_SNAPSHOT:
- snapshot_deprecated_ioctl(cmd);
case SNAPSHOT_CREATE_IMAGE:
if (data->mode != O_RDONLY || !data->frozen || data->ready) {
error = -EPERM;
@@ -283,10 +249,15 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
}
pm_restore_gfp_mask();
error = hibernation_snapshot(data->platform_support);
- if (!error)
+ if (!error) {
error = put_user(in_suspend, (int __user *)arg);
- if (!error)
- data->ready = 1;
+ if (!error && !freezer_test_done)
+ data->ready = 1;
+ if (freezer_test_done) {
+ freezer_test_done = false;
+ thaw_processes();
+ }
+ }
break;
case SNAPSHOT_ATOMIC_RESTORE:
@@ -305,8 +276,6 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
data->ready = 0;
break;
- case SNAPSHOT_SET_IMAGE_SIZE:
- snapshot_deprecated_ioctl(cmd);
case SNAPSHOT_PREF_IMAGE_SIZE:
image_size = arg;
break;
@@ -321,16 +290,12 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
error = put_user(size, (loff_t __user *)arg);
break;
- case SNAPSHOT_AVAIL_SWAP:
- snapshot_deprecated_ioctl(cmd);
case SNAPSHOT_AVAIL_SWAP_SIZE:
size = count_swap_pages(data->swap, 1);
size <<= PAGE_SHIFT;
error = put_user(size, (loff_t __user *)arg);
break;
- case SNAPSHOT_GET_SWAP_PAGE:
- snapshot_deprecated_ioctl(cmd);
case SNAPSHOT_ALLOC_SWAP_PAGE:
if (data->swap < 0 || data->swap >= MAX_SWAPFILES) {
error = -ENODEV;
@@ -353,27 +318,6 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
free_all_swap_pages(data->swap);
break;
- case SNAPSHOT_SET_SWAP_FILE: /* This ioctl is deprecated */
- snapshot_deprecated_ioctl(cmd);
- if (!swsusp_swap_in_use()) {
- /*
- * User space encodes device types as two-byte values,
- * so we need to recode them
- */
- if (old_decode_dev(arg)) {
- data->swap = swap_type_of(old_decode_dev(arg),
- 0, NULL);
- if (data->swap < 0)
- error = -ENODEV;
- } else {
- data->swap = -1;
- error = -EINVAL;
- }
- } else {
- error = -EPERM;
- }
- break;
-
case SNAPSHOT_S2RAM:
if (!data->frozen) {
error = -EPERM;
@@ -396,33 +340,6 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
error = hibernation_platform_enter();
break;
- case SNAPSHOT_PMOPS: /* This ioctl is deprecated */
- snapshot_deprecated_ioctl(cmd);
- error = -EINVAL;
-
- switch (arg) {
-
- case PMOPS_PREPARE:
- data->platform_support = 1;
- error = 0;
- break;
-
- case PMOPS_ENTER:
- if (data->platform_support)
- error = hibernation_platform_enter();
- break;
-
- case PMOPS_FINISH:
- if (data->platform_support)
- error = 0;
- break;
-
- default:
- printk(KERN_ERR "SNAPSHOT_PMOPS: invalid argument %ld\n", arg);
-
- }
- break;
-
case SNAPSHOT_SET_SWAP_AREA:
if (swsusp_swap_in_use()) {
error = -EPERM;
@@ -464,6 +381,66 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
return error;
}
+#ifdef CONFIG_COMPAT
+
+struct compat_resume_swap_area {
+ compat_loff_t offset;
+ u32 dev;
+} __packed;
+
+static long
+snapshot_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ BUILD_BUG_ON(sizeof(loff_t) != sizeof(compat_loff_t));
+
+ switch (cmd) {
+ case SNAPSHOT_GET_IMAGE_SIZE:
+ case SNAPSHOT_AVAIL_SWAP_SIZE:
+ case SNAPSHOT_ALLOC_SWAP_PAGE: {
+ compat_loff_t __user *uoffset = compat_ptr(arg);
+ loff_t offset;
+ mm_segment_t old_fs;
+ int err;
+
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+ err = snapshot_ioctl(file, cmd, (unsigned long) &offset);
+ set_fs(old_fs);
+ if (!err && put_user(offset, uoffset))
+ err = -EFAULT;
+ return err;
+ }
+
+ case SNAPSHOT_CREATE_IMAGE:
+ return snapshot_ioctl(file, cmd,
+ (unsigned long) compat_ptr(arg));
+
+ case SNAPSHOT_SET_SWAP_AREA: {
+ struct compat_resume_swap_area __user *u_swap_area =
+ compat_ptr(arg);
+ struct resume_swap_area swap_area;
+ mm_segment_t old_fs;
+ int err;
+
+ err = get_user(swap_area.offset, &u_swap_area->offset);
+ err |= get_user(swap_area.dev, &u_swap_area->dev);
+ if (err)
+ return -EFAULT;
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+ err = snapshot_ioctl(file, SNAPSHOT_SET_SWAP_AREA,
+ (unsigned long) &swap_area);
+ set_fs(old_fs);
+ return err;
+ }
+
+ default:
+ return snapshot_ioctl(file, cmd, arg);
+ }
+}
+
+#endif /* CONFIG_COMPAT */
+
static const struct file_operations snapshot_fops = {
.open = snapshot_open,
.release = snapshot_release,
@@ -471,6 +448,9 @@ static const struct file_operations snapshot_fops = {
.write = snapshot_write,
.llseek = no_llseek,
.unlocked_ioctl = snapshot_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = snapshot_compat_ioctl,
+#endif
};
static struct miscdevice snapshot_device = {
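With .compat_ioctl wired up, a 32-bit snapshot utility runs unchanged on a 64-bit kernel. A hypothetical check built with gcc -m32 (illustrative only; a non-error result needs a prepared image):

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/suspend_ioctls.h>

	int main(void)
	{
		long long size = 0;	/* same width as the kernel's loff_t */
		int fd = open("/dev/snapshot", O_RDONLY);

		if (fd < 0)
			return 1;
		/* routed through snapshot_compat_ioctl(), which fixes up the pointer */
		if (ioctl(fd, SNAPSHOT_GET_IMAGE_SIZE, &size) == 0)
			printf("image size: %lld bytes\n", size);
		close(fd);
		return 0;
	}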
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 71034f41a2ba..7ba8feae11b8 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -600,14 +600,10 @@ static void bdi_wb_shutdown(struct backing_dev_info *bdi)
/*
* Finally, kill the kernel thread. We don't need to be RCU
- * safe anymore, since the bdi is gone from visibility. Force
- * unfreeze of the thread before calling kthread_stop(), otherwise
- * it would never exit if it is currently stuck in the refrigerator.
+ * safe anymore, since the bdi is gone from visibility.
*/
- if (bdi->wb.task) {
- thaw_process(bdi->wb.task);
+ if (bdi->wb.task)
kthread_stop(bdi->wb.task);
- }
}
/*
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 069b64e521fc..eeb27e27dce3 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -328,7 +328,7 @@ static struct task_struct *select_bad_process(unsigned int *ppoints,
*/
if (test_tsk_thread_flag(p, TIF_MEMDIE)) {
if (unlikely(frozen(p)))
- thaw_process(p);
+ __thaw_task(p);
return ERR_PTR(-1UL);
}
if (!p->mm)
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 00a1a2acd587..3341d8962786 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -18,6 +18,7 @@
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
+#include <linux/freezer.h>
#include <linux/sunrpc/clnt.h>
@@ -231,7 +232,7 @@ static int rpc_wait_bit_killable(void *word)
{
if (fatal_signal_pending(current))
return -ERESTARTSYS;
- schedule();
+ freezable_schedule();
return 0;
}