Diffstat (limited to 'drivers/base')
-rw-r--r--  drivers/base/Kconfig            64
-rw-r--r--  drivers/base/core.c              1
-rw-r--r--  drivers/base/firmware_class.c   35
-rw-r--r--  drivers/base/node.c              4
-rw-r--r--  drivers/base/platform.c        296
-rw-r--r--  drivers/base/power/main.c      675
-rw-r--r--  drivers/base/power/power.h       2
-rw-r--r--  drivers/base/power/sysfs.c       3
-rw-r--r--  drivers/base/power/trace.c       6
-rw-r--r--  drivers/base/topology.c         57
10 files changed, 943 insertions(+), 200 deletions(-)
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index d7da109c24fd..d47482fa1d21 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -34,6 +34,70 @@ config FW_LOADER
require userspace firmware loading support, but a module built outside
the kernel tree does.
+config FIRMWARE_IN_KERNEL
+ bool "Include in-kernel firmware blobs in kernel binary"
+ depends on FW_LOADER
+ default y
+ help
+ The kernel source tree includes a number of firmware 'blobs'
+ which are used by various drivers. The recommended way to
+	  use these is to run "make firmware_install" and to copy the
+	  resulting binary files, created in the usr/lib/firmware directory
+	  of the kernel tree, to /lib/firmware on your system so
+	  that they can be loaded by userspace helpers on request.
+
+ Enabling this option will build each required firmware blob
+ into the kernel directly, where request_firmware() will find
+ them without having to call out to userspace. This may be
+ useful if your root file system requires a device which uses
+	  such firmware and you do not wish to use an initrd.
+
+ This single option controls the inclusion of firmware for
+	  every driver which uses request_firmware() and ships its
+ firmware in the kernel source tree, to avoid a proliferation
+ of 'Include firmware for xxx device' options.
+
+ Say 'N' and let firmware be loaded from userspace.
+
+config EXTRA_FIRMWARE
+ string "External firmware blobs to build into the kernel binary"
+ depends on FW_LOADER
+ help
+ This option allows firmware to be built into the kernel, for the
+ cases where the user either cannot or doesn't want to provide it from
+ userspace at runtime (for example, when the firmware in question is
+ required for accessing the boot device, and the user doesn't want to
+ use an initrd).
+
+ This option is a string, and takes the (space-separated) names of the
+ firmware files -- the same names which appear in MODULE_FIRMWARE()
+ and request_firmware() in the source. These files should exist under
+ the directory specified by the EXTRA_FIRMWARE_DIR option, which is
+ by default the firmware/ subdirectory of the kernel source tree.
+
+ So, for example, you might set CONFIG_EXTRA_FIRMWARE="usb8388.bin",
+ copy the usb8388.bin file into the firmware/ directory, and build the
+ kernel. Then any request_firmware("usb8388.bin") will be
+ satisfied internally without needing to call out to userspace.
+
+ WARNING: If you include additional firmware files into your binary
+ kernel image which are not available under the terms of the GPL,
+ then it may be a violation of the GPL to distribute the resulting
+ image -- since it combines both GPL and non-GPL work. You should
+ consult a lawyer of your own before distributing such an image.
+
+config EXTRA_FIRMWARE_DIR
+ string "Firmware blobs root directory"
+ depends on EXTRA_FIRMWARE != ""
+ default "firmware"
+ help
+ This option controls the directory in which the kernel build system
+ looks for the firmware files listed in the EXTRA_FIRMWARE option.
+ The default is the firmware/ directory in the kernel source tree,
+ but by changing this option you can point it elsewhere, such as
+ the /lib/firmware/ directory or another separate directory
+ containing firmware files.
+
config DEBUG_DRIVER
bool "Driver Core verbose debug messages"
depends on DEBUG_KERNEL
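
For context, the driver side of these options does not change: a driver keeps calling request_firmware() and tagging the blob with MODULE_FIRMWARE(), and FIRMWARE_IN_KERNEL / EXTRA_FIRMWARE only decide whether that request is satisfied from the kernel image or via the userspace helper. A minimal sketch of such a consumer (hypothetical example_load_fw(); error handling trimmed):

#include <linux/module.h>
#include <linux/device.h>
#include <linux/firmware.h>

/* The name matches both MODULE_FIRMWARE() and a possible
 * CONFIG_EXTRA_FIRMWARE entry; "usb8388.bin" is just the example
 * used in the help text above. */
MODULE_FIRMWARE("usb8388.bin");

static int example_load_fw(struct device *dev)
{
	const struct firmware *fw;
	int err;

	/* Served from the built-in firmware table when the blob was
	 * linked into the kernel, otherwise via the firmware helper. */
	err = request_firmware(&fw, "usb8388.bin", dev);
	if (err)
		return err;

	/* ... download fw->data / fw->size to the device ... */

	release_firmware(fw);
	return 0;
}
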
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 422cfcad486d..ee0a51a3a41d 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -762,6 +762,7 @@ static void device_remove_class_symlinks(struct device *dev)
/**
* dev_set_name - set a device name
* @dev: device
+ * @fmt: format string for the device's name
*/
int dev_set_name(struct device *dev, const char *fmt, ...)
{
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 9fd4a8534146..b0be1d18fee2 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -49,6 +49,14 @@ struct firmware_priv {
struct timer_list timeout;
};
+#ifdef CONFIG_FW_LOADER
+extern struct builtin_fw __start_builtin_fw[];
+extern struct builtin_fw __end_builtin_fw[];
+#else /* Module case. Avoid ifdefs later; it'll all optimise out */
+static struct builtin_fw *__start_builtin_fw;
+static struct builtin_fw *__end_builtin_fw;
+#endif
+
static void
fw_load_abort(struct firmware_priv *fw_priv)
{
@@ -257,7 +265,7 @@ firmware_data_write(struct kobject *kobj, struct bin_attribute *bin_attr,
if (retval)
goto out;
- memcpy(fw->data + offset, buffer, count);
+ memcpy((u8 *)fw->data + offset, buffer, count);
fw->size = max_t(size_t, offset + count, fw->size);
retval = count;
@@ -391,13 +399,12 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
struct device *f_dev;
struct firmware_priv *fw_priv;
struct firmware *firmware;
+ struct builtin_fw *builtin;
int retval;
if (!firmware_p)
return -EINVAL;
- printk(KERN_INFO "firmware: requesting %s\n", name);
-
*firmware_p = firmware = kzalloc(sizeof(*firmware), GFP_KERNEL);
if (!firmware) {
printk(KERN_ERR "%s: kmalloc(struct firmware) failed\n",
@@ -406,6 +413,20 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
goto out;
}
+ for (builtin = __start_builtin_fw; builtin != __end_builtin_fw;
+ builtin++) {
+ if (strcmp(name, builtin->name))
+ continue;
+ printk(KERN_INFO "firmware: using built-in firmware %s\n",
+ name);
+ firmware->size = builtin->size;
+ firmware->data = builtin->data;
+ return 0;
+ }
+
+ if (uevent)
+ printk(KERN_INFO "firmware: requesting %s\n", name);
+
retval = fw_setup_device(firmware, &f_dev, name, device, uevent);
if (retval)
goto error_kfree_fw;
@@ -473,8 +494,16 @@ request_firmware(const struct firmware **firmware_p, const char *name,
void
release_firmware(const struct firmware *fw)
{
+ struct builtin_fw *builtin;
+
if (fw) {
+ for (builtin = __start_builtin_fw; builtin != __end_builtin_fw;
+ builtin++) {
+ if (fw->data == builtin->data)
+ goto free_fw;
+ }
vfree(fw->data);
+ free_fw:
kfree(fw);
}
}
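
The __start_builtin_fw/__end_builtin_fw markers walked above delimit a linker-assembled table: every blob pulled in by FIRMWARE_IN_KERNEL or EXTRA_FIRMWARE contributes one record naming its payload. A sketch of the record this loop assumes (written to mirror the struct builtin_fw declaration of this series; the real entries are generated by the firmware/ build rules, not written by hand):

/* Sketch only -- assumed layout backing the lookup loops above. */
struct builtin_fw {
	char *name;		/* compared against the request_firmware() name */
	void *data;		/* blob contents linked into the image */
	unsigned long size;	/* blob length in bytes */
};

/*
 * The build emits one such entry per blob into a dedicated section;
 * the linker script provides the __start/__end symbols so that
 * _request_firmware() can scan the table and release_firmware() can
 * recognise built-in data and skip vfree() for it.
 */
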
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 39f3d1b3a213..0f867a083338 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -84,8 +84,8 @@ static ssize_t node_read_meminfo(struct sys_device * dev, char * buf)
nid, K(i.totalram),
nid, K(i.freeram),
nid, K(i.totalram - i.freeram),
- nid, node_page_state(nid, NR_ACTIVE),
- nid, node_page_state(nid, NR_INACTIVE),
+ nid, K(node_page_state(nid, NR_ACTIVE)),
+ nid, K(node_page_state(nid, NR_INACTIVE)),
#ifdef CONFIG_HIGHMEM
nid, K(i.totalhigh),
nid, K(i.freehigh),
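
The node.c hunk is a units fix: the surrounding meminfo fields are printed in kilobytes, while NR_ACTIVE/NR_INACTIVE are page counts, so they need the same conversion. For reference, K() in this file is the usual pages-to-kilobytes shift (a one-line sketch, not part of the patch):

#define K(x) ((x) << (PAGE_SHIFT - 10))	/* pages -> kilobytes */
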
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 911ec600fe71..3f940393d6c7 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -453,6 +453,8 @@ int platform_driver_register(struct platform_driver *drv)
drv->driver.suspend = platform_drv_suspend;
if (drv->resume)
drv->driver.resume = platform_drv_resume;
+ if (drv->pm)
+ drv->driver.pm = &drv->pm->base;
return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(platform_driver_register);
@@ -560,7 +562,9 @@ static int platform_match(struct device *dev, struct device_driver *drv)
return (strncmp(pdev->name, drv->name, BUS_ID_SIZE) == 0);
}
-static int platform_suspend(struct device *dev, pm_message_t mesg)
+#ifdef CONFIG_PM_SLEEP
+
+static int platform_legacy_suspend(struct device *dev, pm_message_t mesg)
{
int ret = 0;
@@ -570,7 +574,7 @@ static int platform_suspend(struct device *dev, pm_message_t mesg)
return ret;
}
-static int platform_suspend_late(struct device *dev, pm_message_t mesg)
+static int platform_legacy_suspend_late(struct device *dev, pm_message_t mesg)
{
struct platform_driver *drv = to_platform_driver(dev->driver);
struct platform_device *pdev;
@@ -583,7 +587,7 @@ static int platform_suspend_late(struct device *dev, pm_message_t mesg)
return ret;
}
-static int platform_resume_early(struct device *dev)
+static int platform_legacy_resume_early(struct device *dev)
{
struct platform_driver *drv = to_platform_driver(dev->driver);
struct platform_device *pdev;
@@ -596,7 +600,7 @@ static int platform_resume_early(struct device *dev)
return ret;
}
-static int platform_resume(struct device *dev)
+static int platform_legacy_resume(struct device *dev)
{
int ret = 0;
@@ -606,15 +610,291 @@ static int platform_resume(struct device *dev)
return ret;
}
+static int platform_pm_prepare(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (drv && drv->pm && drv->pm->prepare)
+ ret = drv->pm->prepare(dev);
+
+ return ret;
+}
+
+static void platform_pm_complete(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+
+ if (drv && drv->pm && drv->pm->complete)
+ drv->pm->complete(dev);
+}
+
+#ifdef CONFIG_SUSPEND
+
+static int platform_pm_suspend(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (drv && drv->pm) {
+ if (drv->pm->suspend)
+ ret = drv->pm->suspend(dev);
+ } else {
+ ret = platform_legacy_suspend(dev, PMSG_SUSPEND);
+ }
+
+ return ret;
+}
+
+static int platform_pm_suspend_noirq(struct device *dev)
+{
+ struct platform_driver *pdrv;
+ int ret = 0;
+
+ if (!dev->driver)
+ return 0;
+
+ pdrv = to_platform_driver(dev->driver);
+ if (pdrv->pm) {
+ if (pdrv->pm->suspend_noirq)
+ ret = pdrv->pm->suspend_noirq(dev);
+ } else {
+ ret = platform_legacy_suspend_late(dev, PMSG_SUSPEND);
+ }
+
+ return ret;
+}
+
+static int platform_pm_resume(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (drv && drv->pm) {
+ if (drv->pm->resume)
+ ret = drv->pm->resume(dev);
+ } else {
+ ret = platform_legacy_resume(dev);
+ }
+
+ return ret;
+}
+
+static int platform_pm_resume_noirq(struct device *dev)
+{
+ struct platform_driver *pdrv;
+ int ret = 0;
+
+ if (!dev->driver)
+ return 0;
+
+ pdrv = to_platform_driver(dev->driver);
+ if (pdrv->pm) {
+ if (pdrv->pm->resume_noirq)
+ ret = pdrv->pm->resume_noirq(dev);
+ } else {
+ ret = platform_legacy_resume_early(dev);
+ }
+
+ return ret;
+}
+
+#else /* !CONFIG_SUSPEND */
+
+#define platform_pm_suspend NULL
+#define platform_pm_resume NULL
+#define platform_pm_suspend_noirq NULL
+#define platform_pm_resume_noirq NULL
+
+#endif /* !CONFIG_SUSPEND */
+
+#ifdef CONFIG_HIBERNATION
+
+static int platform_pm_freeze(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->freeze)
+ ret = drv->pm->freeze(dev);
+ } else {
+ ret = platform_legacy_suspend(dev, PMSG_FREEZE);
+ }
+
+ return ret;
+}
+
+static int platform_pm_freeze_noirq(struct device *dev)
+{
+ struct platform_driver *pdrv;
+ int ret = 0;
+
+ if (!dev->driver)
+ return 0;
+
+ pdrv = to_platform_driver(dev->driver);
+ if (pdrv->pm) {
+ if (pdrv->pm->freeze_noirq)
+ ret = pdrv->pm->freeze_noirq(dev);
+ } else {
+ ret = platform_legacy_suspend_late(dev, PMSG_FREEZE);
+ }
+
+ return ret;
+}
+
+static int platform_pm_thaw(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (drv && drv->pm) {
+ if (drv->pm->thaw)
+ ret = drv->pm->thaw(dev);
+ } else {
+ ret = platform_legacy_resume(dev);
+ }
+
+ return ret;
+}
+
+static int platform_pm_thaw_noirq(struct device *dev)
+{
+ struct platform_driver *pdrv;
+ int ret = 0;
+
+ if (!dev->driver)
+ return 0;
+
+ pdrv = to_platform_driver(dev->driver);
+ if (pdrv->pm) {
+ if (pdrv->pm->thaw_noirq)
+ ret = pdrv->pm->thaw_noirq(dev);
+ } else {
+ ret = platform_legacy_resume_early(dev);
+ }
+
+ return ret;
+}
+
+static int platform_pm_poweroff(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (drv && drv->pm) {
+ if (drv->pm->poweroff)
+ ret = drv->pm->poweroff(dev);
+ } else {
+ ret = platform_legacy_suspend(dev, PMSG_HIBERNATE);
+ }
+
+ return ret;
+}
+
+static int platform_pm_poweroff_noirq(struct device *dev)
+{
+ struct platform_driver *pdrv;
+ int ret = 0;
+
+ if (!dev->driver)
+ return 0;
+
+ pdrv = to_platform_driver(dev->driver);
+ if (pdrv->pm) {
+ if (pdrv->pm->poweroff_noirq)
+ ret = pdrv->pm->poweroff_noirq(dev);
+ } else {
+ ret = platform_legacy_suspend_late(dev, PMSG_HIBERNATE);
+ }
+
+ return ret;
+}
+
+static int platform_pm_restore(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (drv && drv->pm) {
+ if (drv->pm->restore)
+ ret = drv->pm->restore(dev);
+ } else {
+ ret = platform_legacy_resume(dev);
+ }
+
+ return ret;
+}
+
+static int platform_pm_restore_noirq(struct device *dev)
+{
+ struct platform_driver *pdrv;
+ int ret = 0;
+
+ if (!dev->driver)
+ return 0;
+
+ pdrv = to_platform_driver(dev->driver);
+ if (pdrv->pm) {
+ if (pdrv->pm->restore_noirq)
+ ret = pdrv->pm->restore_noirq(dev);
+ } else {
+ ret = platform_legacy_resume_early(dev);
+ }
+
+ return ret;
+}
+
+#else /* !CONFIG_HIBERNATION */
+
+#define platform_pm_freeze NULL
+#define platform_pm_thaw NULL
+#define platform_pm_poweroff NULL
+#define platform_pm_restore NULL
+#define platform_pm_freeze_noirq NULL
+#define platform_pm_thaw_noirq NULL
+#define platform_pm_poweroff_noirq NULL
+#define platform_pm_restore_noirq NULL
+
+#endif /* !CONFIG_HIBERNATION */
+
+struct pm_ext_ops platform_pm_ops = {
+ .base = {
+ .prepare = platform_pm_prepare,
+ .complete = platform_pm_complete,
+ .suspend = platform_pm_suspend,
+ .resume = platform_pm_resume,
+ .freeze = platform_pm_freeze,
+ .thaw = platform_pm_thaw,
+ .poweroff = platform_pm_poweroff,
+ .restore = platform_pm_restore,
+ },
+ .suspend_noirq = platform_pm_suspend_noirq,
+ .resume_noirq = platform_pm_resume_noirq,
+ .freeze_noirq = platform_pm_freeze_noirq,
+ .thaw_noirq = platform_pm_thaw_noirq,
+ .poweroff_noirq = platform_pm_poweroff_noirq,
+ .restore_noirq = platform_pm_restore_noirq,
+};
+
+#define PLATFORM_PM_OPS_PTR &platform_pm_ops
+
+#else /* !CONFIG_PM_SLEEP */
+
+#define PLATFORM_PM_OPS_PTR NULL
+
+#endif /* !CONFIG_PM_SLEEP */
+
struct bus_type platform_bus_type = {
.name = "platform",
.dev_attrs = platform_dev_attrs,
.match = platform_match,
.uevent = platform_uevent,
- .suspend = platform_suspend,
- .suspend_late = platform_suspend_late,
- .resume_early = platform_resume_early,
- .resume = platform_resume,
+ .pm = PLATFORM_PM_OPS_PTR,
};
EXPORT_SYMBOL_GPL(platform_bus_type);
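
With platform_driver_register() now forwarding drv->pm to the driver core, a platform driver can provide the new-style callbacks instead of the legacy suspend()/resume() pair. A hedged sketch of the driver side (hypothetical foo_* callbacks; probe/remove and the hibernation fields omitted), using only the fields exercised above:

#include <linux/pm.h>
#include <linux/platform_device.h>

static int foo_suspend(struct device *dev)
{
	/* quiesce the hardware, interrupts still enabled */
	return 0;
}

static int foo_resume(struct device *dev)
{
	/* reprogram the hardware */
	return 0;
}

static int foo_suspend_noirq(struct device *dev)
{
	/* last-minute work with interrupts disabled */
	return 0;
}

static int foo_resume_noirq(struct device *dev)
{
	/* earliest resume work, interrupts still disabled */
	return 0;
}

static struct pm_ext_ops foo_pm_ops = {
	.base = {
		.suspend = foo_suspend,
		.resume  = foo_resume,
	},
	.suspend_noirq = foo_suspend_noirq,
	.resume_noirq  = foo_resume_noirq,
};

static struct platform_driver foo_driver = {
	.driver = {
		.name = "foo",
	},
	.pm = &foo_pm_ops,	/* picked up by platform_driver_register() */
};

When .pm is absent, the bus falls back to the driver's legacy suspend/suspend_late/resume_early/resume methods, exactly as the platform_legacy_* wrappers above show.
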
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 45cc3d9eacb8..3250c5257b74 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -12,11 +12,9 @@
* and add it to the list of power-controlled devices. sysfs entries for
* controlling device power management will also be added.
*
- * A different set of lists than the global subsystem list are used to
- * keep track of power info because we use different lists to hold
- * devices based on what stage of the power management process they
- * are in. The power domain dependencies may also differ from the
- * ancestral dependencies that the subsystem list maintains.
+ * A separate list is used for keeping track of power info, because the power
+ * domain dependencies may differ from the ancestral dependencies that the
+ * subsystem list maintains.
*/
#include <linux/device.h>
@@ -30,31 +28,40 @@
#include "power.h"
/*
- * The entries in the dpm_active list are in a depth first order, simply
+ * The entries in dpm_list are in depth-first order, simply
* because children are guaranteed to be discovered after parents, and
* are inserted at the back of the list on discovery.
*
- * All the other lists are kept in the same order, for consistency.
- * However the lists aren't always traversed in the same order.
- * Semaphores must be acquired from the top (i.e., front) down
- * and released in the opposite order. Devices must be suspended
- * from the bottom (i.e., end) up and resumed in the opposite order.
- * That way no parent will be suspended while it still has an active
- * child.
- *
* Since device_pm_add() may be called with a device semaphore held,
* we must never try to acquire a device semaphore while holding
* dpm_list_mutex.
*/
-LIST_HEAD(dpm_active);
-static LIST_HEAD(dpm_off);
-static LIST_HEAD(dpm_off_irq);
+LIST_HEAD(dpm_list);
static DEFINE_MUTEX(dpm_list_mtx);
-/* 'true' if all devices have been suspended, protected by dpm_list_mtx */
-static bool all_sleeping;
+/*
+ * Set once the preparation of devices for a PM transition has started, reset
+ * before starting to resume devices. Protected by dpm_list_mtx.
+ */
+static bool transition_started;
+
+/**
+ * device_pm_lock - lock the list of active devices used by the PM core
+ */
+void device_pm_lock(void)
+{
+ mutex_lock(&dpm_list_mtx);
+}
+
+/**
+ * device_pm_unlock - unlock the list of active devices used by the PM core
+ */
+void device_pm_unlock(void)
+{
+ mutex_unlock(&dpm_list_mtx);
+}
/**
* device_pm_add - add a device to the list of active devices
@@ -68,17 +75,25 @@ int device_pm_add(struct device *dev)
dev->bus ? dev->bus->name : "No Bus",
kobject_name(&dev->kobj));
mutex_lock(&dpm_list_mtx);
- if ((dev->parent && dev->parent->power.sleeping) || all_sleeping) {
- if (dev->parent->power.sleeping)
- dev_warn(dev, "parent %s is sleeping\n",
+ if (dev->parent) {
+ if (dev->parent->power.status >= DPM_SUSPENDING) {
+ dev_warn(dev, "parent %s is sleeping, will not add\n",
dev->parent->bus_id);
- else
- dev_warn(dev, "all devices are sleeping\n");
+ WARN_ON(true);
+ }
+ } else if (transition_started) {
+ /*
+ * We refuse to register parentless devices while a PM
+ * transition is in progress in order to avoid leaving them
+ * unhandled down the road
+ */
WARN_ON(true);
}
error = dpm_sysfs_add(dev);
- if (!error)
- list_add_tail(&dev->power.entry, &dpm_active);
+ if (!error) {
+ dev->power.status = DPM_ON;
+ list_add_tail(&dev->power.entry, &dpm_list);
+ }
mutex_unlock(&dpm_list_mtx);
return error;
}
@@ -100,73 +115,243 @@ void device_pm_remove(struct device *dev)
mutex_unlock(&dpm_list_mtx);
}
+/**
+ * pm_op - execute the PM operation appropriate for the given PM event
+ * @dev: Device.
+ * @ops: PM operations to choose from.
+ * @state: PM transition of the system being carried out.
+ */
+static int pm_op(struct device *dev, struct pm_ops *ops, pm_message_t state)
+{
+ int error = 0;
+
+ switch (state.event) {
+#ifdef CONFIG_SUSPEND
+ case PM_EVENT_SUSPEND:
+ if (ops->suspend) {
+ error = ops->suspend(dev);
+ suspend_report_result(ops->suspend, error);
+ }
+ break;
+ case PM_EVENT_RESUME:
+ if (ops->resume) {
+ error = ops->resume(dev);
+ suspend_report_result(ops->resume, error);
+ }
+ break;
+#endif /* CONFIG_SUSPEND */
+#ifdef CONFIG_HIBERNATION
+ case PM_EVENT_FREEZE:
+ case PM_EVENT_QUIESCE:
+ if (ops->freeze) {
+ error = ops->freeze(dev);
+ suspend_report_result(ops->freeze, error);
+ }
+ break;
+ case PM_EVENT_HIBERNATE:
+ if (ops->poweroff) {
+ error = ops->poweroff(dev);
+ suspend_report_result(ops->poweroff, error);
+ }
+ break;
+ case PM_EVENT_THAW:
+ case PM_EVENT_RECOVER:
+ if (ops->thaw) {
+ error = ops->thaw(dev);
+ suspend_report_result(ops->thaw, error);
+ }
+ break;
+ case PM_EVENT_RESTORE:
+ if (ops->restore) {
+ error = ops->restore(dev);
+ suspend_report_result(ops->restore, error);
+ }
+ break;
+#endif /* CONFIG_HIBERNATION */
+ default:
+ error = -EINVAL;
+ }
+ return error;
+}
+
+/**
+ * pm_noirq_op - execute the PM operation appropriate for the given PM event
+ * @dev: Device.
+ * @ops: PM operations to choose from.
+ * @state: PM transition of the system being carried out.
+ *
+ * The operation is executed with interrupts disabled by the only remaining
+ * functional CPU in the system.
+ */
+static int pm_noirq_op(struct device *dev, struct pm_ext_ops *ops,
+ pm_message_t state)
+{
+ int error = 0;
+
+ switch (state.event) {
+#ifdef CONFIG_SUSPEND
+ case PM_EVENT_SUSPEND:
+ if (ops->suspend_noirq) {
+ error = ops->suspend_noirq(dev);
+ suspend_report_result(ops->suspend_noirq, error);
+ }
+ break;
+ case PM_EVENT_RESUME:
+ if (ops->resume_noirq) {
+ error = ops->resume_noirq(dev);
+ suspend_report_result(ops->resume_noirq, error);
+ }
+ break;
+#endif /* CONFIG_SUSPEND */
+#ifdef CONFIG_HIBERNATION
+ case PM_EVENT_FREEZE:
+ case PM_EVENT_QUIESCE:
+ if (ops->freeze_noirq) {
+ error = ops->freeze_noirq(dev);
+ suspend_report_result(ops->freeze_noirq, error);
+ }
+ break;
+ case PM_EVENT_HIBERNATE:
+ if (ops->poweroff_noirq) {
+ error = ops->poweroff_noirq(dev);
+ suspend_report_result(ops->poweroff_noirq, error);
+ }
+ break;
+ case PM_EVENT_THAW:
+ case PM_EVENT_RECOVER:
+ if (ops->thaw_noirq) {
+ error = ops->thaw_noirq(dev);
+ suspend_report_result(ops->thaw_noirq, error);
+ }
+ break;
+ case PM_EVENT_RESTORE:
+ if (ops->restore_noirq) {
+ error = ops->restore_noirq(dev);
+ suspend_report_result(ops->restore_noirq, error);
+ }
+ break;
+#endif /* CONFIG_HIBERNATION */
+ default:
+ error = -EINVAL;
+ }
+ return error;
+}
+
+static char *pm_verb(int event)
+{
+ switch (event) {
+ case PM_EVENT_SUSPEND:
+ return "suspend";
+ case PM_EVENT_RESUME:
+ return "resume";
+ case PM_EVENT_FREEZE:
+ return "freeze";
+ case PM_EVENT_QUIESCE:
+ return "quiesce";
+ case PM_EVENT_HIBERNATE:
+ return "hibernate";
+ case PM_EVENT_THAW:
+ return "thaw";
+ case PM_EVENT_RESTORE:
+ return "restore";
+ case PM_EVENT_RECOVER:
+ return "recover";
+ default:
+ return "(unknown PM event)";
+ }
+}
+
+static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
+{
+ dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
+ ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
+ ", may wakeup" : "");
+}
+
+static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
+ int error)
+{
+ printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
+ kobject_name(&dev->kobj), pm_verb(state.event), info, error);
+}
+
/*------------------------- Resume routines -------------------------*/
/**
- * resume_device_early - Power on one device (early resume).
+ * resume_device_noirq - Power on one device (early resume).
* @dev: Device.
+ * @state: PM transition of the system being carried out.
*
* Must be called with interrupts disabled.
*/
-static int resume_device_early(struct device *dev)
+static int resume_device_noirq(struct device *dev, pm_message_t state)
{
int error = 0;
TRACE_DEVICE(dev);
TRACE_RESUME(0);
- if (dev->bus && dev->bus->resume_early) {
- dev_dbg(dev, "EARLY resume\n");
+ if (!dev->bus)
+ goto End;
+
+ if (dev->bus->pm) {
+ pm_dev_dbg(dev, state, "EARLY ");
+ error = pm_noirq_op(dev, dev->bus->pm, state);
+ } else if (dev->bus->resume_early) {
+ pm_dev_dbg(dev, state, "legacy EARLY ");
error = dev->bus->resume_early(dev);
}
-
+ End:
TRACE_RESUME(error);
return error;
}
/**
* dpm_power_up - Power on all regular (non-sysdev) devices.
+ * @state: PM transition of the system being carried out.
*
- * Walk the dpm_off_irq list and power each device up. This
- * is used for devices that required they be powered down with
- * interrupts disabled. As devices are powered on, they are moved
- * to the dpm_off list.
+ * Execute the appropriate "noirq resume" callback for all devices marked
+ * as DPM_OFF_IRQ.
*
* Must be called with interrupts disabled and only one CPU running.
*/
-static void dpm_power_up(void)
+static void dpm_power_up(pm_message_t state)
{
+ struct device *dev;
- while (!list_empty(&dpm_off_irq)) {
- struct list_head *entry = dpm_off_irq.next;
- struct device *dev = to_device(entry);
+ list_for_each_entry(dev, &dpm_list, power.entry)
+ if (dev->power.status > DPM_OFF) {
+ int error;
- list_move_tail(entry, &dpm_off);
- resume_device_early(dev);
- }
+ dev->power.status = DPM_OFF;
+ error = resume_device_noirq(dev, state);
+ if (error)
+ pm_dev_err(dev, state, " early", error);
+ }
}
/**
* device_power_up - Turn on all devices that need special attention.
+ * @state: PM transition of the system being carried out.
*
* Power on system devices, then devices that required we shut them down
* with interrupts disabled.
*
* Must be called with interrupts disabled.
*/
-void device_power_up(void)
+void device_power_up(pm_message_t state)
{
sysdev_resume();
- dpm_power_up();
+ dpm_power_up(state);
}
EXPORT_SYMBOL_GPL(device_power_up);
/**
* resume_device - Restore state for one device.
* @dev: Device.
- *
+ * @state: PM transition of the system being carried out.
*/
-static int resume_device(struct device *dev)
+static int resume_device(struct device *dev, pm_message_t state)
{
int error = 0;
@@ -175,21 +360,40 @@ static int resume_device(struct device *dev)
down(&dev->sem);
- if (dev->bus && dev->bus->resume) {
- dev_dbg(dev,"resuming\n");
- error = dev->bus->resume(dev);
+ if (dev->bus) {
+ if (dev->bus->pm) {
+ pm_dev_dbg(dev, state, "");
+ error = pm_op(dev, &dev->bus->pm->base, state);
+ } else if (dev->bus->resume) {
+ pm_dev_dbg(dev, state, "legacy ");
+ error = dev->bus->resume(dev);
+ }
+ if (error)
+ goto End;
}
- if (!error && dev->type && dev->type->resume) {
- dev_dbg(dev,"resuming\n");
- error = dev->type->resume(dev);
+ if (dev->type) {
+ if (dev->type->pm) {
+ pm_dev_dbg(dev, state, "type ");
+ error = pm_op(dev, dev->type->pm, state);
+ } else if (dev->type->resume) {
+ pm_dev_dbg(dev, state, "legacy type ");
+ error = dev->type->resume(dev);
+ }
+ if (error)
+ goto End;
}
- if (!error && dev->class && dev->class->resume) {
- dev_dbg(dev,"class resume\n");
- error = dev->class->resume(dev);
+ if (dev->class) {
+ if (dev->class->pm) {
+ pm_dev_dbg(dev, state, "class ");
+ error = pm_op(dev, dev->class->pm, state);
+ } else if (dev->class->resume) {
+ pm_dev_dbg(dev, state, "legacy class ");
+ error = dev->class->resume(dev);
+ }
}
-
+ End:
up(&dev->sem);
TRACE_RESUME(error);
@@ -198,78 +402,161 @@ static int resume_device(struct device *dev)
/**
* dpm_resume - Resume every device.
+ * @state: PM transition of the system being carried out.
*
- * Resume the devices that have either not gone through
- * the late suspend, or that did go through it but also
- * went through the early resume.
+ * Execute the appropriate "resume" callback for all devices whose status
+ * indicates that they are inactive.
+ */
+static void dpm_resume(pm_message_t state)
+{
+ struct list_head list;
+
+ INIT_LIST_HEAD(&list);
+ mutex_lock(&dpm_list_mtx);
+ transition_started = false;
+ while (!list_empty(&dpm_list)) {
+ struct device *dev = to_device(dpm_list.next);
+
+ get_device(dev);
+ if (dev->power.status >= DPM_OFF) {
+ int error;
+
+ dev->power.status = DPM_RESUMING;
+ mutex_unlock(&dpm_list_mtx);
+
+ error = resume_device(dev, state);
+
+ mutex_lock(&dpm_list_mtx);
+ if (error)
+ pm_dev_err(dev, state, "", error);
+ } else if (dev->power.status == DPM_SUSPENDING) {
+ /* Allow new children of the device to be registered */
+ dev->power.status = DPM_RESUMING;
+ }
+ if (!list_empty(&dev->power.entry))
+ list_move_tail(&dev->power.entry, &list);
+ put_device(dev);
+ }
+ list_splice(&list, &dpm_list);
+ mutex_unlock(&dpm_list_mtx);
+}
+
+/**
+ * complete_device - Complete a PM transition for given device
+ * @dev: Device.
+ * @state: PM transition of the system being carried out.
+ */
+static void complete_device(struct device *dev, pm_message_t state)
+{
+ down(&dev->sem);
+
+ if (dev->class && dev->class->pm && dev->class->pm->complete) {
+ pm_dev_dbg(dev, state, "completing class ");
+ dev->class->pm->complete(dev);
+ }
+
+ if (dev->type && dev->type->pm && dev->type->pm->complete) {
+ pm_dev_dbg(dev, state, "completing type ");
+ dev->type->pm->complete(dev);
+ }
+
+ if (dev->bus && dev->bus->pm && dev->bus->pm->base.complete) {
+ pm_dev_dbg(dev, state, "completing ");
+ dev->bus->pm->base.complete(dev);
+ }
+
+ up(&dev->sem);
+}
+
+/**
+ * dpm_complete - Complete a PM transition for all devices.
+ * @state: PM transition of the system being carried out.
*
- * Take devices from the dpm_off_list, resume them,
- * and put them on the dpm_locked list.
+ * Execute the ->complete() callbacks for all devices that are not marked
+ * as DPM_ON.
*/
-static void dpm_resume(void)
+static void dpm_complete(pm_message_t state)
{
+ struct list_head list;
+
+ INIT_LIST_HEAD(&list);
mutex_lock(&dpm_list_mtx);
- all_sleeping = false;
- while(!list_empty(&dpm_off)) {
- struct list_head *entry = dpm_off.next;
- struct device *dev = to_device(entry);
+ while (!list_empty(&dpm_list)) {
+ struct device *dev = to_device(dpm_list.prev);
- list_move_tail(entry, &dpm_active);
- dev->power.sleeping = false;
- mutex_unlock(&dpm_list_mtx);
- resume_device(dev);
- mutex_lock(&dpm_list_mtx);
+ get_device(dev);
+ if (dev->power.status > DPM_ON) {
+ dev->power.status = DPM_ON;
+ mutex_unlock(&dpm_list_mtx);
+
+ complete_device(dev, state);
+
+ mutex_lock(&dpm_list_mtx);
+ }
+ if (!list_empty(&dev->power.entry))
+ list_move(&dev->power.entry, &list);
+ put_device(dev);
}
+ list_splice(&list, &dpm_list);
mutex_unlock(&dpm_list_mtx);
}
/**
* device_resume - Restore state of each device in system.
+ * @state: PM transition of the system being carried out.
*
* Resume all the devices, unlock them all, and allow new
* devices to be registered once again.
*/
-void device_resume(void)
+void device_resume(pm_message_t state)
{
might_sleep();
- dpm_resume();
+ dpm_resume(state);
+ dpm_complete(state);
}
EXPORT_SYMBOL_GPL(device_resume);
/*------------------------- Suspend routines -------------------------*/
-static inline char *suspend_verb(u32 event)
+/**
+ * resume_event - return a PM message representing the resume event
+ * corresponding to given sleep state.
+ * @sleep_state: PM message representing a sleep state.
+ */
+static pm_message_t resume_event(pm_message_t sleep_state)
{
- switch (event) {
- case PM_EVENT_SUSPEND: return "suspend";
- case PM_EVENT_FREEZE: return "freeze";
- case PM_EVENT_PRETHAW: return "prethaw";
- default: return "(unknown suspend event)";
+ switch (sleep_state.event) {
+ case PM_EVENT_SUSPEND:
+ return PMSG_RESUME;
+ case PM_EVENT_FREEZE:
+ case PM_EVENT_QUIESCE:
+ return PMSG_RECOVER;
+ case PM_EVENT_HIBERNATE:
+ return PMSG_RESTORE;
}
-}
-
-static void
-suspend_device_dbg(struct device *dev, pm_message_t state, char *info)
-{
- dev_dbg(dev, "%s%s%s\n", info, suspend_verb(state.event),
- ((state.event == PM_EVENT_SUSPEND) && device_may_wakeup(dev)) ?
- ", may wakeup" : "");
+ return PMSG_ON;
}
/**
- * suspend_device_late - Shut down one device (late suspend).
+ * suspend_device_noirq - Shut down one device (late suspend).
* @dev: Device.
- * @state: Power state device is entering.
+ * @state: PM transition of the system being carried out.
*
* This is called with interrupts off and only a single CPU running.
*/
-static int suspend_device_late(struct device *dev, pm_message_t state)
+static int suspend_device_noirq(struct device *dev, pm_message_t state)
{
int error = 0;
- if (dev->bus && dev->bus->suspend_late) {
- suspend_device_dbg(dev, state, "LATE ");
+ if (!dev->bus)
+ return 0;
+
+ if (dev->bus->pm) {
+ pm_dev_dbg(dev, state, "LATE ");
+ error = pm_noirq_op(dev, dev->bus->pm, state);
+ } else if (dev->bus->suspend_late) {
+ pm_dev_dbg(dev, state, "legacy LATE ");
error = dev->bus->suspend_late(dev, state);
suspend_report_result(dev->bus->suspend_late, error);
}
@@ -278,37 +565,30 @@ static int suspend_device_late(struct device *dev, pm_message_t state)
/**
* device_power_down - Shut down special devices.
- * @state: Power state to enter.
+ * @state: PM transition of the system being carried out.
*
- * Power down devices that require interrupts to be disabled
- * and move them from the dpm_off list to the dpm_off_irq list.
+ * Power down devices that require interrupts to be disabled.
* Then power down system devices.
*
* Must be called with interrupts disabled and only one CPU running.
*/
int device_power_down(pm_message_t state)
{
+ struct device *dev;
int error = 0;
- while (!list_empty(&dpm_off)) {
- struct list_head *entry = dpm_off.prev;
- struct device *dev = to_device(entry);
-
- error = suspend_device_late(dev, state);
+ list_for_each_entry_reverse(dev, &dpm_list, power.entry) {
+ error = suspend_device_noirq(dev, state);
if (error) {
- printk(KERN_ERR "Could not power down device %s: "
- "error %d\n",
- kobject_name(&dev->kobj), error);
+ pm_dev_err(dev, state, " late", error);
break;
}
- if (!list_empty(&dev->power.entry))
- list_move(&dev->power.entry, &dpm_off_irq);
+ dev->power.status = DPM_OFF_IRQ;
}
-
if (!error)
error = sysdev_suspend(state);
if (error)
- dpm_power_up();
+ dpm_power_up(resume_event(state));
return error;
}
EXPORT_SYMBOL_GPL(device_power_down);
@@ -316,7 +596,7 @@ EXPORT_SYMBOL_GPL(device_power_down);
/**
* suspend_device - Save state of one device.
* @dev: Device.
- * @state: Power state device is entering.
+ * @state: PM transition of the system being carried out.
*/
static int suspend_device(struct device *dev, pm_message_t state)
{
@@ -324,24 +604,43 @@ static int suspend_device(struct device *dev, pm_message_t state)
down(&dev->sem);
- if (dev->class && dev->class->suspend) {
- suspend_device_dbg(dev, state, "class ");
- error = dev->class->suspend(dev, state);
- suspend_report_result(dev->class->suspend, error);
+ if (dev->class) {
+ if (dev->class->pm) {
+ pm_dev_dbg(dev, state, "class ");
+ error = pm_op(dev, dev->class->pm, state);
+ } else if (dev->class->suspend) {
+ pm_dev_dbg(dev, state, "legacy class ");
+ error = dev->class->suspend(dev, state);
+ suspend_report_result(dev->class->suspend, error);
+ }
+ if (error)
+ goto End;
}
- if (!error && dev->type && dev->type->suspend) {
- suspend_device_dbg(dev, state, "type ");
- error = dev->type->suspend(dev, state);
- suspend_report_result(dev->type->suspend, error);
+ if (dev->type) {
+ if (dev->type->pm) {
+ pm_dev_dbg(dev, state, "type ");
+ error = pm_op(dev, dev->type->pm, state);
+ } else if (dev->type->suspend) {
+ pm_dev_dbg(dev, state, "legacy type ");
+ error = dev->type->suspend(dev, state);
+ suspend_report_result(dev->type->suspend, error);
+ }
+ if (error)
+ goto End;
}
- if (!error && dev->bus && dev->bus->suspend) {
- suspend_device_dbg(dev, state, "");
- error = dev->bus->suspend(dev, state);
- suspend_report_result(dev->bus->suspend, error);
+ if (dev->bus) {
+ if (dev->bus->pm) {
+ pm_dev_dbg(dev, state, "");
+ error = pm_op(dev, &dev->bus->pm->base, state);
+ } else if (dev->bus->suspend) {
+ pm_dev_dbg(dev, state, "legacy ");
+ error = dev->bus->suspend(dev, state);
+ suspend_report_result(dev->bus->suspend, error);
+ }
}
-
+ End:
up(&dev->sem);
return error;
@@ -349,67 +648,139 @@ static int suspend_device(struct device *dev, pm_message_t state)
/**
* dpm_suspend - Suspend every device.
- * @state: Power state to put each device in.
- *
- * Walk the dpm_locked list. Suspend each device and move it
- * to the dpm_off list.
+ * @state: PM transition of the system being carried out.
*
- * (For historical reasons, if it returns -EAGAIN, that used to mean
- * that the device would be called again with interrupts disabled.
- * These days, we use the "suspend_late()" callback for that, so we
- * print a warning and consider it an error).
+ * Execute the appropriate "suspend" callbacks for all devices.
*/
static int dpm_suspend(pm_message_t state)
{
+ struct list_head list;
int error = 0;
+ INIT_LIST_HEAD(&list);
mutex_lock(&dpm_list_mtx);
- while (!list_empty(&dpm_active)) {
- struct list_head *entry = dpm_active.prev;
- struct device *dev = to_device(entry);
+ while (!list_empty(&dpm_list)) {
+ struct device *dev = to_device(dpm_list.prev);
- WARN_ON(dev->parent && dev->parent->power.sleeping);
-
- dev->power.sleeping = true;
+ get_device(dev);
mutex_unlock(&dpm_list_mtx);
+
error = suspend_device(dev, state);
+
mutex_lock(&dpm_list_mtx);
if (error) {
- printk(KERN_ERR "Could not suspend device %s: "
- "error %d%s\n",
- kobject_name(&dev->kobj),
- error,
- (error == -EAGAIN ?
- " (please convert to suspend_late)" :
- ""));
- dev->power.sleeping = false;
+ pm_dev_err(dev, state, "", error);
+ put_device(dev);
break;
}
+ dev->power.status = DPM_OFF;
if (!list_empty(&dev->power.entry))
- list_move(&dev->power.entry, &dpm_off);
+ list_move(&dev->power.entry, &list);
+ put_device(dev);
}
- if (!error)
- all_sleeping = true;
+ list_splice(&list, dpm_list.prev);
mutex_unlock(&dpm_list_mtx);
+ return error;
+}
+
+/**
+ * prepare_device - Execute the ->prepare() callback(s) for given device.
+ * @dev: Device.
+ * @state: PM transition of the system being carried out.
+ */
+static int prepare_device(struct device *dev, pm_message_t state)
+{
+ int error = 0;
+
+ down(&dev->sem);
+
+ if (dev->bus && dev->bus->pm && dev->bus->pm->base.prepare) {
+ pm_dev_dbg(dev, state, "preparing ");
+ error = dev->bus->pm->base.prepare(dev);
+ suspend_report_result(dev->bus->pm->base.prepare, error);
+ if (error)
+ goto End;
+ }
+
+ if (dev->type && dev->type->pm && dev->type->pm->prepare) {
+ pm_dev_dbg(dev, state, "preparing type ");
+ error = dev->type->pm->prepare(dev);
+ suspend_report_result(dev->type->pm->prepare, error);
+ if (error)
+ goto End;
+ }
+
+ if (dev->class && dev->class->pm && dev->class->pm->prepare) {
+ pm_dev_dbg(dev, state, "preparing class ");
+ error = dev->class->pm->prepare(dev);
+ suspend_report_result(dev->class->pm->prepare, error);
+ }
+ End:
+ up(&dev->sem);
+
+ return error;
+}
+
+/**
+ * dpm_prepare - Prepare all devices for a PM transition.
+ * @state: PM transition of the system being carried out.
+ *
+ * Execute the ->prepare() callback for all devices.
+ */
+static int dpm_prepare(pm_message_t state)
+{
+ struct list_head list;
+ int error = 0;
+
+ INIT_LIST_HEAD(&list);
+ mutex_lock(&dpm_list_mtx);
+ transition_started = true;
+ while (!list_empty(&dpm_list)) {
+ struct device *dev = to_device(dpm_list.next);
+
+ get_device(dev);
+ dev->power.status = DPM_PREPARING;
+ mutex_unlock(&dpm_list_mtx);
+ error = prepare_device(dev, state);
+
+ mutex_lock(&dpm_list_mtx);
+ if (error) {
+ dev->power.status = DPM_ON;
+ if (error == -EAGAIN) {
+ put_device(dev);
+ continue;
+ }
+ printk(KERN_ERR "PM: Failed to prepare device %s "
+ "for power transition: error %d\n",
+ kobject_name(&dev->kobj), error);
+ put_device(dev);
+ break;
+ }
+ dev->power.status = DPM_SUSPENDING;
+ if (!list_empty(&dev->power.entry))
+ list_move_tail(&dev->power.entry, &list);
+ put_device(dev);
+ }
+ list_splice(&list, &dpm_list);
+ mutex_unlock(&dpm_list_mtx);
return error;
}
/**
* device_suspend - Save state and stop all devices in system.
- * @state: new power management state
+ * @state: PM transition of the system being carried out.
*
- * Prevent new devices from being registered, then lock all devices
- * and suspend them.
+ * Prepare and suspend all devices.
*/
int device_suspend(pm_message_t state)
{
int error;
might_sleep();
- error = dpm_suspend(state);
- if (error)
- device_resume();
+ error = dpm_prepare(state);
+ if (!error)
+ error = dpm_suspend(state);
return error;
}
EXPORT_SYMBOL_GPL(device_suspend);
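
Taken together, the reworked entry points split a sleep transition into prepare/suspend and "noirq" phases, mirrored on resume. A rough sketch of the ordering as the PM core's caller would drive it (the real sequencing lives outside this file, in kernel/power/; error paths and platform hooks are omitted, so treat this as an assumption-laden outline):

#include <linux/device.h>
#include <linux/pm.h>

static int example_suspend_cycle(void)
{
	int error;

	/* dpm_prepare() + dpm_suspend(): ->prepare() and ->suspend()
	 * callbacks run with interrupts enabled. */
	error = device_suspend(PMSG_SUSPEND);
	if (error)
		return error;

	/* "noirq" phase with interrupts disabled on the last running CPU,
	 * finishing with sysdev_suspend(). */
	error = device_power_down(PMSG_SUSPEND);
	if (!error) {
		/* ... enter the platform sleep state, then wake up ... */

		/* sysdev_resume() + dpm_power_up() */
		device_power_up(PMSG_RESUME);
	}

	/* dpm_resume() + dpm_complete() for every suspended device. */
	device_resume(PMSG_RESUME);
	return error;
}
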
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
index a6894f2a4b99..a3252c0e2887 100644
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -4,7 +4,7 @@
* main.c
*/
-extern struct list_head dpm_active; /* The active device list */
+extern struct list_head dpm_list; /* The active device list */
static inline struct device *to_device(struct list_head *entry)
{
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index d11f74b038db..596aeecfdffe 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -6,9 +6,6 @@
#include <linux/string.h>
#include "power.h"
-int (*platform_enable_wakeup)(struct device *dev, int is_on);
-
-
/*
* wakeup - Report/change current wakeup option for device
*
diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c
index 2b4b392dcbc1..9b1b20b59e0a 100644
--- a/drivers/base/power/trace.c
+++ b/drivers/base/power/trace.c
@@ -153,7 +153,7 @@ EXPORT_SYMBOL(set_trace_device);
* it's not any guarantee, but it's a high _likelihood_ that
* the match is valid).
*/
-void generate_resume_trace(void *tracedata, unsigned int user)
+void generate_resume_trace(const void *tracedata, unsigned int user)
{
unsigned short lineno = *(unsigned short *)tracedata;
const char *file = *(const char **)(tracedata + 2);
@@ -188,9 +188,9 @@ static int show_file_hash(unsigned int value)
static int show_dev_hash(unsigned int value)
{
int match = 0;
- struct list_head * entry = dpm_active.prev;
+ struct list_head *entry = dpm_list.prev;
- while (entry != &dpm_active) {
+ while (entry != &dpm_list) {
struct device * dev = to_device(entry);
unsigned int hash = hash_string(DEVSEED, dev->bus_id, DEVHASH);
if (hash == value) {
diff --git a/drivers/base/topology.c b/drivers/base/topology.c
index fdf4044d2e74..3f6d9b0a6abe 100644
--- a/drivers/base/topology.c
+++ b/drivers/base/topology.c
@@ -40,6 +40,7 @@ static ssize_t show_##name(struct sys_device *dev, char *buf) \
return sprintf(buf, "%d\n", topology_##name(cpu)); \
}
+#if defined(topology_thread_siblings) || defined(topology_core_siblings)
static ssize_t show_cpumap(int type, cpumask_t *mask, char *buf)
{
ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf;
@@ -54,65 +55,65 @@ static ssize_t show_cpumap(int type, cpumask_t *mask, char *buf)
}
return n;
}
+#endif
+#ifdef arch_provides_topology_pointers
#define define_siblings_show_map(name) \
-static inline ssize_t show_##name(struct sys_device *dev, char *buf) \
+static ssize_t show_##name(struct sys_device *dev, char *buf) \
{ \
unsigned int cpu = dev->id; \
return show_cpumap(0, &(topology_##name(cpu)), buf); \
}
#define define_siblings_show_list(name) \
-static inline ssize_t show_##name##_list(struct sys_device *dev, char *buf) \
+static ssize_t show_##name##_list(struct sys_device *dev, char *buf) \
{ \
unsigned int cpu = dev->id; \
return show_cpumap(1, &(topology_##name(cpu)), buf); \
}
+#else
+#define define_siblings_show_map(name) \
+static ssize_t show_##name(struct sys_device *dev, char *buf) \
+{ \
+ unsigned int cpu = dev->id; \
+ cpumask_t mask = topology_##name(cpu); \
+ return show_cpumap(0, &mask, buf); \
+}
+
+#define define_siblings_show_list(name) \
+static ssize_t show_##name##_list(struct sys_device *dev, char *buf) \
+{ \
+ unsigned int cpu = dev->id; \
+ cpumask_t mask = topology_##name(cpu); \
+ return show_cpumap(1, &mask, buf); \
+}
+#endif
+
#define define_siblings_show_func(name) \
define_siblings_show_map(name); define_siblings_show_list(name)
-#ifdef topology_physical_package_id
define_id_show_func(physical_package_id);
define_one_ro(physical_package_id);
-#define ref_physical_package_id_attr &attr_physical_package_id.attr,
-#else
-#define ref_physical_package_id_attr
-#endif
-#ifdef topology_core_id
define_id_show_func(core_id);
define_one_ro(core_id);
-#define ref_core_id_attr &attr_core_id.attr,
-#else
-#define ref_core_id_attr
-#endif
-#ifdef topology_thread_siblings
define_siblings_show_func(thread_siblings);
define_one_ro(thread_siblings);
define_one_ro(thread_siblings_list);
-#define ref_thread_siblings_attr \
- &attr_thread_siblings.attr, &attr_thread_siblings_list.attr,
-#else
-#define ref_thread_siblings_attr
-#endif
-#ifdef topology_core_siblings
define_siblings_show_func(core_siblings);
define_one_ro(core_siblings);
define_one_ro(core_siblings_list);
-#define ref_core_siblings_attr \
- &attr_core_siblings.attr, &attr_core_siblings_list.attr,
-#else
-#define ref_core_siblings_attr
-#endif
static struct attribute *default_attrs[] = {
- ref_physical_package_id_attr
- ref_core_id_attr
- ref_thread_siblings_attr
- ref_core_siblings_attr
+ &attr_physical_package_id.attr,
+ &attr_core_id.attr,
+ &attr_thread_siblings.attr,
+ &attr_thread_siblings_list.attr,
+ &attr_core_siblings.attr,
+ &attr_core_siblings_list.attr,
NULL
};
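
For reference, in the new #else branch (architectures without arch_provides_topology_pointers) define_siblings_show_func(thread_siblings) expands to roughly the following; the mask is copied onto the stack because the topology macro may return a plain value rather than an addressable lvalue (hand-expanded here, so treat it as a sketch):

static ssize_t show_thread_siblings(struct sys_device *dev, char *buf)
{
	unsigned int cpu = dev->id;
	cpumask_t mask = topology_thread_siblings(cpu);

	return show_cpumap(0, &mask, buf);
}

static ssize_t show_thread_siblings_list(struct sys_device *dev, char *buf)
{
	unsigned int cpu = dev->id;
	cpumask_t mask = topology_thread_siblings(cpu);

	return show_cpumap(1, &mask, buf);
}
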