author     Linus Torvalds <torvalds@linux-foundation.org>   2012-12-11 12:45:35 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>   2012-12-11 12:45:35 -0800
commit     bad73c5aa069f1f14cc07ce7bbae8d463635560c (patch)
tree       db905bb3400e6fe70be95cd20158bed79b2b2c6c /drivers
parent     b58ed041a360ed051fab17e4d9b0f451c6fedba7 (diff)
parent     f316fc56555a5c3bcf6350f3d5ac26dd2c55f4cb (diff)
Merge tag 'pm+acpi-for-3.8-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
Pull ACPI and power management updates from Rafael Wysocki:

 - Introduction of device PM QoS flags.

 - ACPI device power management update allowing subsystems other than
   PCI to use it more easily.

 - ACPI device enumeration rework allowing additional kinds of devices
   to be enumerated via ACPI. From Mika Westerberg, Adrian Hunter,
   Mathias Nyman, Andy Shevchenko, and Rafael J. Wysocki.

 - ACPICA update to version 20121018 from Bob Moore and Lv Zheng.

 - ACPI memory hotplug update from Wen Congyang and Yasuaki Ishimatsu.

 - Introduction of acpi_handle_<level>() messaging macros and
   ACPI-based CPU hot-remove support from Toshi Kani.

 - ACPI EC updates from Feng Tang.

 - cpufreq updates from Viresh Kumar, Fabio Baltieri and others.

 - cpuidle changes to quickly notice governor prediction failure from
   Youquan Song.

 - Support for using multiple cpuidle drivers at the same time and
   cpuidle cleanups from Daniel Lezcano.

 - devfreq updates from Nishanth Menon and others.

 - cpupower update from Thomas Renninger.

 - Fixes and small cleanups all over the place.

* tag 'pm+acpi-for-3.8-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm: (196 commits)
  mmc: sdhci-acpi: enable runtime-pm for device HID INT33C6
  ACPI: add Haswell LPSS devices to acpi_platform_device_ids list
  ACPI: add documentation about ACPI 5 enumeration
  pnpacpi: fix incorrect TEST_ALPHA() test
  ACPI / PM: Fix header of acpi_dev_pm_detach() in acpi.h
  ACPI / video: ignore BIOS initial backlight value for HP Folio 13-2000
  ACPI : do not use Lid and Sleep button for S5 wakeup
  ACPI / PNP: Do not crash due to stale pointer use during system resume
  ACPI / video: Add "Asus UL30VT" to ACPI video detect blacklist
  ACPI: do acpisleep dmi check when CONFIG_ACPI_SLEEP is set
  spi / ACPI: add ACPI enumeration support
  gpio / ACPI: add ACPI support
  PM / devfreq: remove compiler error with module governors (2)
  cpupower: IvyBridge (0x3a and 0x3e models) support
  cpupower: Provide -c param for cpupower monitor to schedule process on all cores
  cpupower tools: Fix warning and a bug with the cpu package count
  cpupower tools: Fix malloc of cpu_info structure
  cpupower tools: Fix issues with sysfs_topology_read_file
  cpupower tools: Fix minor warnings
  cpupower tools: Update .gitignore for files created in the debug directories
  ...
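The acpi_handle_<level>() messaging macros mentioned above (and used later in this patch, e.g. in acpi_memhotplug.c) take an ACPI handle plus a printk-style format string and prefix the message with that handle's object path. A minimal sketch of a caller; only acpi_handle_warn() comes from the kernel, the surrounding helper is illustrative:

#include <linux/acpi.h>

/* Illustrative helper: report an unexpected notify event, letting the
 * macro prefix the message with the ACPI object path of @handle. */
static void example_report_event(acpi_handle handle, u32 event)
{
        acpi_handle_warn(handle, "Unsupported event [0x%x]\n", event);
}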
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/Kconfig | 6
-rw-r--r--  drivers/acpi/Makefile | 6
-rw-r--r--  drivers/acpi/acpi_i2c.c | 103
-rw-r--r--  drivers/acpi/acpi_memhotplug.c | 193
-rw-r--r--  drivers/acpi/acpi_pad.c | 8
-rw-r--r--  drivers/acpi/acpi_platform.c | 104
-rw-r--r--  drivers/acpi/acpica/Makefile | 3
-rw-r--r--  drivers/acpi/acpica/acdebug.h | 94
-rw-r--r--  drivers/acpi/acpica/acdispat.h | 11
-rw-r--r--  drivers/acpi/acpica/acevents.h | 6
-rw-r--r--  drivers/acpi/acpica/acglobal.h | 73
-rw-r--r--  drivers/acpi/acpica/aclocal.h | 16
-rw-r--r--  drivers/acpi/acpica/acmacros.h | 163
-rw-r--r--  drivers/acpi/acpica/acobject.h | 7
-rw-r--r--  drivers/acpi/acpica/acopcode.h | 6
-rw-r--r--  drivers/acpi/acpica/acparser.h | 3
-rw-r--r--  drivers/acpi/acpica/acpredef.h | 11
-rw-r--r--  drivers/acpi/acpica/acstruct.h | 2
-rw-r--r--  drivers/acpi/acpica/acutils.h | 58
-rw-r--r--  drivers/acpi/acpica/amlresrc.h | 1
-rw-r--r--  drivers/acpi/acpica/dscontrol.c | 2
-rw-r--r--  drivers/acpi/acpica/dsfield.c | 2
-rw-r--r--  drivers/acpi/acpica/dsmethod.c | 6
-rw-r--r--  drivers/acpi/acpica/dsmthdat.c | 14
-rw-r--r--  drivers/acpi/acpica/dsobject.c | 6
-rw-r--r--  drivers/acpi/acpica/dsopcode.c | 3
-rw-r--r--  drivers/acpi/acpica/dsutils.c | 33
-rw-r--r--  drivers/acpi/acpica/dswexec.c | 10
-rw-r--r--  drivers/acpi/acpica/dswload2.c | 4
-rw-r--r--  drivers/acpi/acpica/dswstate.c | 26
-rw-r--r--  drivers/acpi/acpica/evgpe.c | 20
-rw-r--r--  drivers/acpi/acpica/evgpeblk.c | 3
-rw-r--r--  drivers/acpi/acpica/evgpeutil.c | 3
-rw-r--r--  drivers/acpi/acpica/evrgnini.c | 7
-rw-r--r--  drivers/acpi/acpica/evxface.c | 2
-rw-r--r--  drivers/acpi/acpica/evxfgpe.c | 13
-rw-r--r--  drivers/acpi/acpica/exconvrt.c | 4
-rw-r--r--  drivers/acpi/acpica/excreate.c | 9
-rw-r--r--  drivers/acpi/acpica/exdebug.c | 10
-rw-r--r--  drivers/acpi/acpica/exdump.c | 20
-rw-r--r--  drivers/acpi/acpica/exfield.c | 4
-rw-r--r--  drivers/acpi/acpica/exfldio.c | 15
-rw-r--r--  drivers/acpi/acpica/exmisc.c | 5
-rw-r--r--  drivers/acpi/acpica/exmutex.c | 9
-rw-r--r--  drivers/acpi/acpica/exnames.c | 9
-rw-r--r--  drivers/acpi/acpica/exoparg1.c | 11
-rw-r--r--  drivers/acpi/acpica/exoparg2.c | 2
-rw-r--r--  drivers/acpi/acpica/exoparg3.c | 3
-rw-r--r--  drivers/acpi/acpica/exoparg6.c | 5
-rw-r--r--  drivers/acpi/acpica/exprep.c | 13
-rw-r--r--  drivers/acpi/acpica/exregion.c | 3
-rw-r--r--  drivers/acpi/acpica/exresnte.c | 9
-rw-r--r--  drivers/acpi/acpica/exresolv.c | 3
-rw-r--r--  drivers/acpi/acpica/exresop.c | 8
-rw-r--r--  drivers/acpi/acpica/exstore.c | 4
-rw-r--r--  drivers/acpi/acpica/exstoren.c | 11
-rw-r--r--  drivers/acpi/acpica/exstorob.c | 5
-rw-r--r--  drivers/acpi/acpica/exsystem.c | 9
-rw-r--r--  drivers/acpi/acpica/exutils.c | 5
-rw-r--r--  drivers/acpi/acpica/hwacpi.c | 3
-rw-r--r--  drivers/acpi/acpica/hwgpe.c | 4
-rw-r--r--  drivers/acpi/acpica/hwpci.c | 4
-rw-r--r--  drivers/acpi/acpica/hwregs.c | 1
-rw-r--r--  drivers/acpi/acpica/hwtimer.c | 6
-rw-r--r--  drivers/acpi/acpica/hwvalid.c | 1
-rw-r--r--  drivers/acpi/acpica/hwxface.c | 1
-rw-r--r--  drivers/acpi/acpica/hwxfsleep.c | 12
-rw-r--r--  drivers/acpi/acpica/nsaccess.c | 7
-rw-r--r--  drivers/acpi/acpica/nsalloc.c | 4
-rw-r--r--  drivers/acpi/acpica/nsdump.c | 10
-rw-r--r--  drivers/acpi/acpica/nsinit.c | 4
-rw-r--r--  drivers/acpi/acpica/nsload.c | 10
-rw-r--r--  drivers/acpi/acpica/nsnames.c | 2
-rw-r--r--  drivers/acpi/acpica/nsobject.c | 8
-rw-r--r--  drivers/acpi/acpica/nsparse.c | 8
-rw-r--r--  drivers/acpi/acpica/nssearch.c | 17
-rw-r--r--  drivers/acpi/acpica/nsutils.c | 18
-rw-r--r--  drivers/acpi/acpica/nswalk.c | 10
-rw-r--r--  drivers/acpi/acpica/nsxfeval.c | 20
-rw-r--r--  drivers/acpi/acpica/nsxfname.c | 66
-rw-r--r--  drivers/acpi/acpica/nsxfobj.c | 4
-rw-r--r--  drivers/acpi/acpica/psargs.c | 8
-rw-r--r--  drivers/acpi/acpica/psloop.c | 61
-rw-r--r--  drivers/acpi/acpica/psopcode.c | 29
-rw-r--r--  drivers/acpi/acpica/psparse.c | 13
-rw-r--r--  drivers/acpi/acpica/psutils.c | 4
-rw-r--r--  drivers/acpi/acpica/rscalc.c | 14
-rw-r--r--  drivers/acpi/acpica/rslist.c | 4
-rw-r--r--  drivers/acpi/acpica/tbfind.c | 2
-rw-r--r--  drivers/acpi/acpica/tbinstal.c | 2
-rw-r--r--  drivers/acpi/acpica/tbutils.c | 2
-rw-r--r--  drivers/acpi/acpica/tbxface.c | 4
-rw-r--r--  drivers/acpi/acpica/tbxfload.c | 2
-rw-r--r--  drivers/acpi/acpica/tbxfroot.c | 3
-rw-r--r--  drivers/acpi/acpica/utcache.c | 323
-rw-r--r--  drivers/acpi/acpica/utclib.c | 749
-rw-r--r--  drivers/acpi/acpica/utdebug.c | 37
-rw-r--r--  drivers/acpi/acpica/utids.c | 104
-rw-r--r--  drivers/acpi/acpica/utmath.c | 2
-rw-r--r--  drivers/acpi/acpica/utmisc.c | 150
-rw-r--r--  drivers/acpi/acpica/utmutex.c | 14
-rw-r--r--  drivers/acpi/acpica/utobject.c | 8
-rw-r--r--  drivers/acpi/acpica/utstate.c | 2
-rw-r--r--  drivers/acpi/acpica/uttrack.c | 692
-rw-r--r--  drivers/acpi/acpica/utxface.c | 5
-rw-r--r--  drivers/acpi/acpica/utxferror.c | 2
-rw-r--r--  drivers/acpi/apei/ghes.c | 2
-rw-r--r--  drivers/acpi/battery.c | 77
-rw-r--r--  drivers/acpi/bus.c | 21
-rw-r--r--  drivers/acpi/container.c | 27
-rw-r--r--  drivers/acpi/device_pm.c | 668
-rw-r--r--  drivers/acpi/dock.c | 56
-rw-r--r--  drivers/acpi/ec.c | 97
-rw-r--r--  drivers/acpi/glue.c | 56
-rw-r--r--  drivers/acpi/hed.c | 2
-rw-r--r--  drivers/acpi/internal.h | 11
-rw-r--r--  drivers/acpi/osl.c | 22
-rw-r--r--  drivers/acpi/pci_irq.c | 15
-rw-r--r--  drivers/acpi/power.c | 2
-rw-r--r--  drivers/acpi/proc.c | 11
-rw-r--r--  drivers/acpi/processor_driver.c | 74
-rw-r--r--  drivers/acpi/processor_idle.c | 57
-rw-r--r--  drivers/acpi/resource.c | 526
-rw-r--r--  drivers/acpi/scan.c | 154
-rw-r--r--  drivers/acpi/sleep.c | 535
-rw-r--r--  drivers/acpi/sysfs.c | 4
-rw-r--r--  drivers/acpi/thermal.c | 34
-rw-r--r--  drivers/acpi/utils.c | 38
-rw-r--r--  drivers/acpi/video.c | 14
-rw-r--r--  drivers/acpi/video_detect.c | 8
-rw-r--r--  drivers/base/core.c | 2
-rw-r--r--  drivers/base/platform.c | 26
-rw-r--r--  drivers/base/power/clock_ops.c | 6
-rw-r--r--  drivers/base/power/domain.c | 11
-rw-r--r--  drivers/base/power/opp.c | 44
-rw-r--r--  drivers/base/power/power.h | 6
-rw-r--r--  drivers/base/power/qos.c | 321
-rw-r--r--  drivers/base/power/sysfs.c | 94
-rw-r--r--  drivers/cpufreq/Kconfig.arm | 7
-rw-r--r--  drivers/cpufreq/Makefile | 5
-rw-r--r--  drivers/cpufreq/cpufreq-cpu0.c | 2
-rw-r--r--  drivers/cpufreq/cpufreq.c | 37
-rw-r--r--  drivers/cpufreq/cpufreq_conservative.c | 558
-rw-r--r--  drivers/cpufreq/cpufreq_governor.c | 318
-rw-r--r--  drivers/cpufreq/cpufreq_governor.h | 176
-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c | 731
-rw-r--r--  drivers/cpufreq/cpufreq_performance.c | 2
-rw-r--r--  drivers/cpufreq/cpufreq_powersave.c | 2
-rw-r--r--  drivers/cpufreq/cpufreq_stats.c | 4
-rw-r--r--  drivers/cpufreq/cpufreq_userspace.c | 2
-rw-r--r--  drivers/cpufreq/exynos-cpufreq.c | 11
-rw-r--r--  drivers/cpufreq/freq_table.c | 2
-rw-r--r--  drivers/cpufreq/longhaul.c | 4
-rw-r--r--  drivers/cpufreq/powernow-k8.c | 4
-rw-r--r--  drivers/cpufreq/spear-cpufreq.c | 291
-rw-r--r--  drivers/cpuidle/Kconfig | 9
-rw-r--r--  drivers/cpuidle/cpuidle.c | 55
-rw-r--r--  drivers/cpuidle/cpuidle.h | 13
-rw-r--r--  drivers/cpuidle/driver.c | 209
-rw-r--r--  drivers/cpuidle/governors/menu.c | 168
-rw-r--r--  drivers/cpuidle/sysfs.c | 201
-rw-r--r--  drivers/devfreq/Kconfig | 8
-rw-r--r--  drivers/devfreq/devfreq.c | 921
-rw-r--r--  drivers/devfreq/exynos4_bus.c | 45
-rw-r--r--  drivers/devfreq/governor.h | 17
-rw-r--r--  drivers/devfreq/governor_performance.c | 38
-rw-r--r--  drivers/devfreq/governor_powersave.c | 38
-rw-r--r--  drivers/devfreq/governor_simpleondemand.c | 55
-rw-r--r--  drivers/devfreq/governor_userspace.c | 45
-rw-r--r--  drivers/gpio/Kconfig | 4
-rw-r--r--  drivers/gpio/Makefile | 1
-rw-r--r--  drivers/gpio/gpiolib-acpi.c | 54
-rw-r--r--  drivers/i2c/i2c-core.c | 6
-rw-r--r--  drivers/idle/intel_idle.c | 14
-rw-r--r--  drivers/mmc/host/Kconfig | 12
-rw-r--r--  drivers/mmc/host/Makefile | 1
-rw-r--r--  drivers/mmc/host/sdhci-acpi.c | 312
-rw-r--r--  drivers/mtd/nand/sh_flctl.c | 4
-rw-r--r--  drivers/pci/pci-acpi.c | 79
-rw-r--r--  drivers/pnp/base.h | 2
-rw-r--r--  drivers/pnp/pnpacpi/core.c | 9
-rw-r--r--  drivers/pnp/pnpacpi/rsparser.c | 296
-rw-r--r--  drivers/pnp/resource.c | 16
-rw-r--r--  drivers/spi/spi.c | 103
184 files changed, 8460 insertions, 3112 deletions
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 119d58db8342..0300bf612946 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -181,6 +181,12 @@ config ACPI_DOCK
This driver supports ACPI-controlled docking stations and removable
drive bays such as the IBM Ultrabay and the Dell Module Bay.
+config ACPI_I2C
+ def_tristate I2C
+ depends on I2C
+ help
+ ACPI I2C enumeration support.
+
config ACPI_PROCESSOR
tristate "Processor"
select THERMAL
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 82422fe90f81..2a4502becd13 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -21,9 +21,10 @@ obj-y += acpi.o \
acpi-y += osl.o utils.o reboot.o
acpi-y += nvs.o
-# sleep related files
+# Power management related files
acpi-y += wakeup.o
acpi-y += sleep.o
+acpi-$(CONFIG_PM) += device_pm.o
acpi-$(CONFIG_ACPI_SLEEP) += proc.o
@@ -32,10 +33,12 @@ acpi-$(CONFIG_ACPI_SLEEP) += proc.o
#
acpi-y += bus.o glue.o
acpi-y += scan.o
+acpi-y += resource.o
acpi-y += processor_core.o
acpi-y += ec.o
acpi-$(CONFIG_ACPI_DOCK) += dock.o
acpi-y += pci_root.o pci_link.o pci_irq.o pci_bind.o
+acpi-y += acpi_platform.o
acpi-y += power.o
acpi-y += event.o
acpi-y += sysfs.o
@@ -67,6 +70,7 @@ obj-$(CONFIG_ACPI_HED) += hed.o
obj-$(CONFIG_ACPI_EC_DEBUGFS) += ec_sys.o
obj-$(CONFIG_ACPI_CUSTOM_METHOD)+= custom_method.o
obj-$(CONFIG_ACPI_BGRT) += bgrt.o
+obj-$(CONFIG_ACPI_I2C) += acpi_i2c.o
# processor has its own "processor." module_param namespace
processor-y := processor_driver.o processor_throttling.o
diff --git a/drivers/acpi/acpi_i2c.c b/drivers/acpi/acpi_i2c.c
new file mode 100644
index 000000000000..82045e3f5cac
--- /dev/null
+++ b/drivers/acpi/acpi_i2c.c
@@ -0,0 +1,103 @@
+/*
+ * ACPI I2C enumeration support
+ *
+ * Copyright (C) 2012, Intel Corporation
+ * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/acpi.h>
+#include <linux/device.h>
+#include <linux/export.h>
+#include <linux/i2c.h>
+#include <linux/ioport.h>
+
+ACPI_MODULE_NAME("i2c");
+
+static int acpi_i2c_add_resource(struct acpi_resource *ares, void *data)
+{
+ struct i2c_board_info *info = data;
+
+ if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
+ struct acpi_resource_i2c_serialbus *sb;
+
+ sb = &ares->data.i2c_serial_bus;
+ if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_I2C) {
+ info->addr = sb->slave_address;
+ if (sb->access_mode == ACPI_I2C_10BIT_MODE)
+ info->flags |= I2C_CLIENT_TEN;
+ }
+ } else if (info->irq < 0) {
+ struct resource r;
+
+ if (acpi_dev_resource_interrupt(ares, 0, &r))
+ info->irq = r.start;
+ }
+
+ /* Tell the ACPI core to skip this resource */
+ return 1;
+}
+
+static acpi_status acpi_i2c_add_device(acpi_handle handle, u32 level,
+ void *data, void **return_value)
+{
+ struct i2c_adapter *adapter = data;
+ struct list_head resource_list;
+ struct i2c_board_info info;
+ struct acpi_device *adev;
+ int ret;
+
+ if (acpi_bus_get_device(handle, &adev))
+ return AE_OK;
+ if (acpi_bus_get_status(adev) || !adev->status.present)
+ return AE_OK;
+
+ memset(&info, 0, sizeof(info));
+ info.acpi_node.handle = handle;
+ info.irq = -1;
+
+ INIT_LIST_HEAD(&resource_list);
+ ret = acpi_dev_get_resources(adev, &resource_list,
+ acpi_i2c_add_resource, &info);
+ acpi_dev_free_resource_list(&resource_list);
+
+ if (ret < 0 || !info.addr)
+ return AE_OK;
+
+ strlcpy(info.type, dev_name(&adev->dev), sizeof(info.type));
+ if (!i2c_new_device(adapter, &info)) {
+ dev_err(&adapter->dev,
+ "failed to add I2C device %s from ACPI\n",
+ dev_name(&adev->dev));
+ }
+
+ return AE_OK;
+}
+
+/**
+ * acpi_i2c_register_devices - enumerate I2C slave devices behind adapter
+ * @adapter: pointer to adapter
+ *
+ * Enumerate all I2C slave devices behind this adapter by walking the ACPI
+ * namespace. When a device is found it will be added to the Linux device
+ * model and bound to the corresponding ACPI handle.
+ */
+void acpi_i2c_register_devices(struct i2c_adapter *adapter)
+{
+ acpi_handle handle;
+ acpi_status status;
+
+ handle = ACPI_HANDLE(&adapter->dev);
+ if (!handle)
+ return;
+
+ status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
+ acpi_i2c_add_device, NULL,
+ adapter, NULL);
+ if (ACPI_FAILURE(status))
+ dev_warn(&adapter->dev, "failed to enumerate I2C slaves\n");
+}
+EXPORT_SYMBOL_GPL(acpi_i2c_register_devices);
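As the kernel-doc above says, a bus driver only needs to hand its registered adapter to acpi_i2c_register_devices(); the namespace walk and i2c_new_device() calls happen inside this file. A minimal sketch of such a caller, in which everything except i2c_add_adapter() and acpi_i2c_register_devices() is illustrative (the function's prototype is declared outside this drivers-only diff):

#include <linux/acpi.h>
#include <linux/i2c.h>

static int example_register_adapter(struct i2c_adapter *adap)
{
        int ret;

        /* Register the adapter with the I2C core as usual */
        ret = i2c_add_adapter(adap);
        if (ret)
                return ret;

        /* Then let ACPI enumerate the slave devices behind it */
        acpi_i2c_register_devices(adap);
        return 0;
}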
diff --git a/drivers/acpi/acpi_memhotplug.c b/drivers/acpi/acpi_memhotplug.c
index 24c807f96636..eb30e5ab4cab 100644
--- a/drivers/acpi/acpi_memhotplug.c
+++ b/drivers/acpi/acpi_memhotplug.c
@@ -31,6 +31,7 @@
#include <linux/types.h>
#include <linux/memory_hotplug.h>
#include <linux/slab.h>
+#include <linux/acpi.h>
#include <acpi/acpi_drivers.h>
#define ACPI_MEMORY_DEVICE_CLASS "memory"
@@ -78,6 +79,7 @@ struct acpi_memory_info {
unsigned short caching; /* memory cache attribute */
unsigned short write_protect; /* memory read/write attribute */
unsigned int enabled:1;
+ unsigned int failed:1;
};
struct acpi_memory_device {
@@ -86,8 +88,6 @@ struct acpi_memory_device {
struct list_head res_list;
};
-static int acpi_hotmem_initialized;
-
static acpi_status
acpi_memory_get_resource(struct acpi_resource *resource, void *context)
{
@@ -125,12 +125,20 @@ acpi_memory_get_resource(struct acpi_resource *resource, void *context)
return AE_OK;
}
+static void
+acpi_memory_free_device_resources(struct acpi_memory_device *mem_device)
+{
+ struct acpi_memory_info *info, *n;
+
+ list_for_each_entry_safe(info, n, &mem_device->res_list, list)
+ kfree(info);
+ INIT_LIST_HEAD(&mem_device->res_list);
+}
+
static int
acpi_memory_get_device_resources(struct acpi_memory_device *mem_device)
{
acpi_status status;
- struct acpi_memory_info *info, *n;
-
if (!list_empty(&mem_device->res_list))
return 0;
@@ -138,9 +146,7 @@ acpi_memory_get_device_resources(struct acpi_memory_device *mem_device)
status = acpi_walk_resources(mem_device->device->handle, METHOD_NAME__CRS,
acpi_memory_get_resource, mem_device);
if (ACPI_FAILURE(status)) {
- list_for_each_entry_safe(info, n, &mem_device->res_list, list)
- kfree(info);
- INIT_LIST_HEAD(&mem_device->res_list);
+ acpi_memory_free_device_resources(mem_device);
return -EINVAL;
}
@@ -170,7 +176,7 @@ acpi_memory_get_device(acpi_handle handle,
/* Get the parent device */
result = acpi_bus_get_device(phandle, &pdevice);
if (result) {
- printk(KERN_WARNING PREFIX "Cannot get acpi bus device");
+ acpi_handle_warn(phandle, "Cannot get acpi bus device\n");
return -EINVAL;
}
@@ -180,14 +186,14 @@ acpi_memory_get_device(acpi_handle handle,
*/
result = acpi_bus_add(&device, pdevice, handle, ACPI_BUS_TYPE_DEVICE);
if (result) {
- printk(KERN_WARNING PREFIX "Cannot add acpi bus");
+ acpi_handle_warn(handle, "Cannot add acpi bus\n");
return -EINVAL;
}
end:
*mem_device = acpi_driver_data(device);
if (!(*mem_device)) {
- printk(KERN_ERR "\n driver data not found");
+ dev_err(&device->dev, "driver data not found\n");
return -ENODEV;
}
@@ -224,7 +230,8 @@ static int acpi_memory_enable_device(struct acpi_memory_device *mem_device)
/* Get the range from the _CRS */
result = acpi_memory_get_device_resources(mem_device);
if (result) {
- printk(KERN_ERR PREFIX "get_device_resources failed\n");
+ dev_err(&mem_device->device->dev,
+ "get_device_resources failed\n");
mem_device->state = MEMORY_INVALID_STATE;
return result;
}
@@ -251,13 +258,27 @@ static int acpi_memory_enable_device(struct acpi_memory_device *mem_device)
node = memory_add_physaddr_to_nid(info->start_addr);
result = add_memory(node, info->start_addr, info->length);
- if (result)
+
+ /*
+ * If the memory block has been used by the kernel, add_memory()
+ * returns -EEXIST. If add_memory() returns the other error, it
+ * means that this memory block is not used by the kernel.
+ */
+ if (result && result != -EEXIST) {
+ info->failed = 1;
continue;
- info->enabled = 1;
+ }
+
+ if (!result)
+ info->enabled = 1;
+ /*
+ * Add num_enable even if add_memory() returns -EEXIST, so the
+ * device is bound to this driver.
+ */
num_enabled++;
}
if (!num_enabled) {
- printk(KERN_ERR PREFIX "add_memory failed\n");
+ dev_err(&mem_device->device->dev, "add_memory failed\n");
mem_device->state = MEMORY_INVALID_STATE;
return -EINVAL;
}
@@ -272,68 +293,31 @@ static int acpi_memory_enable_device(struct acpi_memory_device *mem_device)
return 0;
}
-static int acpi_memory_powerdown_device(struct acpi_memory_device *mem_device)
+static int acpi_memory_remove_memory(struct acpi_memory_device *mem_device)
{
- acpi_status status;
- struct acpi_object_list arg_list;
- union acpi_object arg;
- unsigned long long current_status;
-
-
- /* Issue the _EJ0 command */
- arg_list.count = 1;
- arg_list.pointer = &arg;
- arg.type = ACPI_TYPE_INTEGER;
- arg.integer.value = 1;
- status = acpi_evaluate_object(mem_device->device->handle,
- "_EJ0", &arg_list, NULL);
- /* Return on _EJ0 failure */
- if (ACPI_FAILURE(status)) {
- ACPI_EXCEPTION((AE_INFO, status, "_EJ0 failed"));
- return -ENODEV;
- }
-
- /* Evalute _STA to check if the device is disabled */
- status = acpi_evaluate_integer(mem_device->device->handle, "_STA",
- NULL, &current_status);
- if (ACPI_FAILURE(status))
- return -ENODEV;
-
- /* Check for device status. Device should be disabled */
- if (current_status & ACPI_STA_DEVICE_ENABLED)
- return -EINVAL;
+ int result = 0;
+ struct acpi_memory_info *info, *n;
- return 0;
-}
+ list_for_each_entry_safe(info, n, &mem_device->res_list, list) {
+ if (info->failed)
+ /* The kernel does not use this memory block */
+ continue;
-static int acpi_memory_disable_device(struct acpi_memory_device *mem_device)
-{
- int result;
- struct acpi_memory_info *info, *n;
+ if (!info->enabled)
+ /*
+ * The kernel uses this memory block, but it may be not
+ * managed by us.
+ */
+ return -EBUSY;
+ result = remove_memory(info->start_addr, info->length);
+ if (result)
+ return result;
- /*
- * Ask the VM to offline this memory range.
- * Note: Assume that this function returns zero on success
- */
- list_for_each_entry_safe(info, n, &mem_device->res_list, list) {
- if (info->enabled) {
- result = remove_memory(info->start_addr, info->length);
- if (result)
- return result;
- }
+ list_del(&info->list);
kfree(info);
}
- /* Power-off and eject the device */
- result = acpi_memory_powerdown_device(mem_device);
- if (result) {
- /* Set the status of the device to invalid */
- mem_device->state = MEMORY_INVALID_STATE;
- return result;
- }
-
- mem_device->state = MEMORY_POWER_OFF_STATE;
return result;
}
@@ -341,6 +325,7 @@ static void acpi_memory_device_notify(acpi_handle handle, u32 event, void *data)
{
struct acpi_memory_device *mem_device;
struct acpi_device *device;
+ struct acpi_eject_event *ej_event = NULL;
u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE; /* default */
switch (event) {
@@ -353,7 +338,7 @@ static void acpi_memory_device_notify(acpi_handle handle, u32 event, void *data)
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"\nReceived DEVICE CHECK notification for device\n"));
if (acpi_memory_get_device(handle, &mem_device)) {
- printk(KERN_ERR PREFIX "Cannot find driver data\n");
+ acpi_handle_err(handle, "Cannot find driver data\n");
break;
}
@@ -361,7 +346,7 @@ static void acpi_memory_device_notify(acpi_handle handle, u32 event, void *data)
break;
if (acpi_memory_enable_device(mem_device)) {
- printk(KERN_ERR PREFIX "Cannot enable memory device\n");
+ acpi_handle_err(handle,"Cannot enable memory device\n");
break;
}
@@ -373,40 +358,28 @@ static void acpi_memory_device_notify(acpi_handle handle, u32 event, void *data)
"\nReceived EJECT REQUEST notification for device\n"));
if (acpi_bus_get_device(handle, &device)) {
- printk(KERN_ERR PREFIX "Device doesn't exist\n");
+ acpi_handle_err(handle, "Device doesn't exist\n");
break;
}
mem_device = acpi_driver_data(device);
if (!mem_device) {
- printk(KERN_ERR PREFIX "Driver Data is NULL\n");
+ acpi_handle_err(handle, "Driver Data is NULL\n");
break;
}
- /*
- * Currently disabling memory device from kernel mode
- * TBD: Can also be disabled from user mode scripts
- * TBD: Can also be disabled by Callback registration
- * with generic sysfs driver
- */
- if (acpi_memory_disable_device(mem_device)) {
- printk(KERN_ERR PREFIX "Disable memory device\n");
- /*
- * If _EJ0 was called but failed, _OST is not
- * necessary.
- */
- if (mem_device->state == MEMORY_INVALID_STATE)
- return;
-
+ ej_event = kmalloc(sizeof(*ej_event), GFP_KERNEL);
+ if (!ej_event) {
+ pr_err(PREFIX "No memory, dropping EJECT\n");
break;
}
- /*
- * TBD: Invoke acpi_bus_remove to cleanup data structures
- */
+ ej_event->handle = handle;
+ ej_event->event = ACPI_NOTIFY_EJECT_REQUEST;
+ acpi_os_hotplug_execute(acpi_bus_hot_remove_device,
+ (void *)ej_event);
- /* _EJ0 succeeded; _OST is not necessary */
+ /* eject is performed asynchronously */
return;
-
default:
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Unsupported event [0x%x]\n", event));
@@ -420,6 +393,15 @@ static void acpi_memory_device_notify(acpi_handle handle, u32 event, void *data)
return;
}
+static void acpi_memory_device_free(struct acpi_memory_device *mem_device)
+{
+ if (!mem_device)
+ return;
+
+ acpi_memory_free_device_resources(mem_device);
+ kfree(mem_device);
+}
+
static int acpi_memory_device_add(struct acpi_device *device)
{
int result;
@@ -449,23 +431,16 @@ static int acpi_memory_device_add(struct acpi_device *device)
/* Set the device state */
mem_device->state = MEMORY_POWER_ON_STATE;
- printk(KERN_DEBUG "%s \n", acpi_device_name(device));
-
- /*
- * Early boot code has recognized memory area by EFI/E820.
- * If DSDT shows these memory devices on boot, hotplug is not necessary
- * for them. So, it just returns until completion of this driver's
- * start up.
- */
- if (!acpi_hotmem_initialized)
- return 0;
+ pr_debug("%s\n", acpi_device_name(device));
if (!acpi_memory_check_device(mem_device)) {
/* call add_memory func */
result = acpi_memory_enable_device(mem_device);
- if (result)
- printk(KERN_ERR PREFIX
+ if (result) {
+ dev_err(&device->dev,
"Error in acpi_memory_enable_device\n");
+ acpi_memory_device_free(mem_device);
+ }
}
return result;
}
@@ -473,13 +448,18 @@ static int acpi_memory_device_add(struct acpi_device *device)
static int acpi_memory_device_remove(struct acpi_device *device, int type)
{
struct acpi_memory_device *mem_device = NULL;
-
+ int result;
if (!device || !acpi_driver_data(device))
return -EINVAL;
mem_device = acpi_driver_data(device);
- kfree(mem_device);
+
+ result = acpi_memory_remove_memory(mem_device);
+ if (result)
+ return result;
+
+ acpi_memory_device_free(mem_device);
return 0;
}
@@ -568,7 +548,6 @@ static int __init acpi_memory_device_init(void)
return -ENODEV;
}
- acpi_hotmem_initialized = 1;
return 0;
}
diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
index af4aad6ee2eb..16fa979f7180 100644
--- a/drivers/acpi/acpi_pad.c
+++ b/drivers/acpi/acpi_pad.c
@@ -286,7 +286,7 @@ static ssize_t acpi_pad_rrtime_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
unsigned long num;
- if (strict_strtoul(buf, 0, &num))
+ if (kstrtoul(buf, 0, &num))
return -EINVAL;
if (num < 1 || num >= 100)
return -EINVAL;
@@ -309,7 +309,7 @@ static ssize_t acpi_pad_idlepct_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
unsigned long num;
- if (strict_strtoul(buf, 0, &num))
+ if (kstrtoul(buf, 0, &num))
return -EINVAL;
if (num < 1 || num >= 100)
return -EINVAL;
@@ -332,7 +332,7 @@ static ssize_t acpi_pad_idlecpus_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
unsigned long num;
- if (strict_strtoul(buf, 0, &num))
+ if (kstrtoul(buf, 0, &num))
return -EINVAL;
mutex_lock(&isolated_cpus_lock);
acpi_pad_idle_cpus(num);
@@ -457,7 +457,7 @@ static void acpi_pad_notify(acpi_handle handle, u32 event,
dev_name(&device->dev), event, 0);
break;
default:
- printk(KERN_WARNING "Unsupported event [0x%x]\n", event);
+ pr_warn("Unsupported event [0x%x]\n", event);
break;
}
}
diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c
new file mode 100644
index 000000000000..db129b9f52cb
--- /dev/null
+++ b/drivers/acpi/acpi_platform.c
@@ -0,0 +1,104 @@
+/*
+ * ACPI support for platform bus type.
+ *
+ * Copyright (C) 2012, Intel Corporation
+ * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
+ * Mathias Nyman <mathias.nyman@linux.intel.com>
+ * Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/acpi.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include "internal.h"
+
+ACPI_MODULE_NAME("platform");
+
+/**
+ * acpi_create_platform_device - Create platform device for ACPI device node
+ * @adev: ACPI device node to create a platform device for.
+ *
+ * Check if the given @adev can be represented as a platform device and, if
+ * that's the case, create and register a platform device, populate its common
+ * resources and returns a pointer to it. Otherwise, return %NULL.
+ *
+ * The platform device's name will be taken from the @adev's _HID and _UID.
+ */
+struct platform_device *acpi_create_platform_device(struct acpi_device *adev)
+{
+ struct platform_device *pdev = NULL;
+ struct acpi_device *acpi_parent;
+ struct platform_device_info pdevinfo;
+ struct resource_list_entry *rentry;
+ struct list_head resource_list;
+ struct resource *resources;
+ int count;
+
+ /* If the ACPI node already has a physical device attached, skip it. */
+ if (adev->physical_node_count)
+ return NULL;
+
+ INIT_LIST_HEAD(&resource_list);
+ count = acpi_dev_get_resources(adev, &resource_list, NULL, NULL);
+ if (count <= 0)
+ return NULL;
+
+ resources = kmalloc(count * sizeof(struct resource), GFP_KERNEL);
+ if (!resources) {
+ dev_err(&adev->dev, "No memory for resources\n");
+ acpi_dev_free_resource_list(&resource_list);
+ return NULL;
+ }
+ count = 0;
+ list_for_each_entry(rentry, &resource_list, node)
+ resources[count++] = rentry->res;
+
+ acpi_dev_free_resource_list(&resource_list);
+
+ memset(&pdevinfo, 0, sizeof(pdevinfo));
+ /*
+ * If the ACPI node has a parent and that parent has a physical device
+ * attached to it, that physical device should be the parent of the
+ * platform device we are about to create.
+ */
+ pdevinfo.parent = NULL;
+ acpi_parent = adev->parent;
+ if (acpi_parent) {
+ struct acpi_device_physical_node *entry;
+ struct list_head *list;
+
+ mutex_lock(&acpi_parent->physical_node_lock);
+ list = &acpi_parent->physical_node_list;
+ if (!list_empty(list)) {
+ entry = list_first_entry(list,
+ struct acpi_device_physical_node,
+ node);
+ pdevinfo.parent = entry->dev;
+ }
+ mutex_unlock(&acpi_parent->physical_node_lock);
+ }
+ pdevinfo.name = dev_name(&adev->dev);
+ pdevinfo.id = -1;
+ pdevinfo.res = resources;
+ pdevinfo.num_res = count;
+ pdevinfo.acpi_node.handle = adev->handle;
+ pdev = platform_device_register_full(&pdevinfo);
+ if (IS_ERR(pdev)) {
+ dev_err(&adev->dev, "platform device creation failed: %ld\n",
+ PTR_ERR(pdev));
+ pdev = NULL;
+ } else {
+ dev_dbg(&adev->dev, "created platform device %s\n",
+ dev_name(&pdev->dev));
+ }
+
+ kfree(resources);
+ return pdev;
+}
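A minimal sketch of how an enumerated ACPI device node might be fed to the new helper, assuming its declaration lives in drivers/acpi/internal.h (which this file includes); the wrapper function and its error handling are illustrative:

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/platform_device.h>

#include "internal.h"           /* assumed home of acpi_create_platform_device() */

static int example_add_platform_dev(struct acpi_device *adev)
{
        struct platform_device *pdev;

        /* NULL means the node is already bound or exposes no usable resources */
        pdev = acpi_create_platform_device(adev);
        if (!pdev)
                return -ENODEV;

        dev_dbg(&adev->dev, "created platform device %s\n", dev_name(&pdev->dev));
        return 0;
}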
diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile
index 7f1d40797e80..c8bc24bd1f72 100644
--- a/drivers/acpi/acpica/Makefile
+++ b/drivers/acpi/acpica/Makefile
@@ -161,3 +161,6 @@ acpi-y += \
utxfinit.o \
utxferror.o \
utxfmutex.o
+
+acpi-$(ACPI_FUTURE_USAGE) += uttrack.o utcache.o utclib.o
+
diff --git a/drivers/acpi/acpica/acdebug.h b/drivers/acpi/acpica/acdebug.h
index 5e8abb07724f..432a318c9ed1 100644
--- a/drivers/acpi/acpica/acdebug.h
+++ b/drivers/acpi/acpica/acdebug.h
@@ -44,17 +44,28 @@
#ifndef __ACDEBUG_H__
#define __ACDEBUG_H__
-#define ACPI_DEBUG_BUFFER_SIZE 4196
+#define ACPI_DEBUG_BUFFER_SIZE 0x4000 /* 16K buffer for return objects */
-struct command_info {
+struct acpi_db_command_info {
char *name; /* Command Name */
u8 min_args; /* Minimum arguments required */
};
-struct argument_info {
+struct acpi_db_command_help {
+ u8 line_count; /* Number of help lines */
+ char *invocation; /* Command Invocation */
+ char *description; /* Command Description */
+};
+
+struct acpi_db_argument_info {
char *name; /* Argument Name */
};
+struct acpi_db_execute_walk {
+ u32 count;
+ u32 max_count;
+};
+
#define PARAM_LIST(pl) pl
#define DBTEST_OUTPUT_LEVEL(lvl) if (acpi_gbl_db_opt_verbose)
#define VERBOSE_PRINT(fp) DBTEST_OUTPUT_LEVEL(lvl) {\
@@ -77,59 +88,71 @@ acpi_db_single_step(struct acpi_walk_state *walk_state,
/*
* dbcmds - debug commands and output routines
*/
-acpi_status acpi_db_disassemble_method(char *name);
+struct acpi_namespace_node *acpi_db_convert_to_node(char *in_string);
void acpi_db_display_table_info(char *table_arg);
-void acpi_db_unload_acpi_table(char *table_arg, char *instance_arg);
+void acpi_db_display_template(char *buffer_arg);
-void
-acpi_db_set_method_breakpoint(char *location,
- struct acpi_walk_state *walk_state,
- union acpi_parse_object *op);
+void acpi_db_unload_acpi_table(char *name);
-void acpi_db_set_method_call_breakpoint(union acpi_parse_object *op);
+void acpi_db_send_notify(char *name, u32 value);
-void acpi_db_get_bus_info(void);
+void acpi_db_display_interfaces(char *action_arg, char *interface_name_arg);
-void acpi_db_disassemble_aml(char *statements, union acpi_parse_object *op);
+acpi_status acpi_db_sleep(char *object_arg);
-void acpi_db_dump_namespace(char *start_arg, char *depth_arg);
+void acpi_db_display_locks(void);
-void acpi_db_dump_namespace_by_owner(char *owner_arg, char *depth_arg);
+void acpi_db_display_resources(char *object_arg);
-void acpi_db_send_notify(char *name, u32 value);
+ACPI_HW_DEPENDENT_RETURN_VOID(void acpi_db_display_gpes(void))
+
+void acpi_db_display_handlers(void);
+
+ACPI_HW_DEPENDENT_RETURN_VOID(void
+ acpi_db_generate_gpe(char *gpe_arg,
+ char *block_arg))
+
+/*
+ * dbmethod - control method commands
+ */
+void
+acpi_db_set_method_breakpoint(char *location,
+ struct acpi_walk_state *walk_state,
+ union acpi_parse_object *op);
+
+void acpi_db_set_method_call_breakpoint(union acpi_parse_object *op);
void acpi_db_set_method_data(char *type_arg, char *index_arg, char *value_arg);
-acpi_status
-acpi_db_display_objects(char *obj_type_arg, char *display_count_arg);
+acpi_status acpi_db_disassemble_method(char *name);
-void acpi_db_display_interfaces(char *action_arg, char *interface_name_arg);
+void acpi_db_disassemble_aml(char *statements, union acpi_parse_object *op);
-acpi_status acpi_db_find_name_in_namespace(char *name_arg);
+void acpi_db_batch_execute(char *count_arg);
+/*
+ * dbnames - namespace commands
+ */
void acpi_db_set_scope(char *name);
-ACPI_HW_DEPENDENT_RETURN_OK(acpi_status acpi_db_sleep(char *object_arg))
+void acpi_db_dump_namespace(char *start_arg, char *depth_arg);
-void acpi_db_find_references(char *object_arg);
+void acpi_db_dump_namespace_by_owner(char *owner_arg, char *depth_arg);
-void acpi_db_display_locks(void);
+acpi_status acpi_db_find_name_in_namespace(char *name_arg);
-void acpi_db_display_resources(char *object_arg);
+void acpi_db_check_predefined_names(void);
-ACPI_HW_DEPENDENT_RETURN_VOID(void acpi_db_display_gpes(void))
+acpi_status
+acpi_db_display_objects(char *obj_type_arg, char *display_count_arg);
void acpi_db_check_integrity(void);
-ACPI_HW_DEPENDENT_RETURN_VOID(void
- acpi_db_generate_gpe(char *gpe_arg,
- char *block_arg))
-
-void acpi_db_check_predefined_names(void);
+void acpi_db_find_references(char *object_arg);
-void acpi_db_batch_execute(void);
+void acpi_db_get_bus_info(void);
/*
* dbdisply - debug display commands
@@ -161,7 +184,8 @@ acpi_db_display_argument_object(union acpi_operand_object *obj_desc,
/*
* dbexec - debugger control method execution
*/
-void acpi_db_execute(char *name, char **args, u32 flags);
+void
+acpi_db_execute(char *name, char **args, acpi_object_type * types, u32 flags);
void
acpi_db_create_execution_threads(char *num_threads_arg,
@@ -175,7 +199,8 @@ u32 acpi_db_get_cache_info(struct acpi_memory_list *cache);
* dbfileio - Debugger file I/O commands
*/
acpi_object_type
-acpi_db_match_argument(char *user_argument, struct argument_info *arguments);
+acpi_db_match_argument(char *user_argument,
+ struct acpi_db_argument_info *arguments);
void acpi_db_close_debug_file(void);
@@ -208,6 +233,11 @@ acpi_db_command_dispatch(char *input_buffer,
void ACPI_SYSTEM_XFACE acpi_db_execute_thread(void *context);
+acpi_status acpi_db_user_commands(char prompt, union acpi_parse_object *op);
+
+char *acpi_db_get_next_token(char *string,
+ char **next, acpi_object_type * return_type);
+
/*
* dbstats - Generation and display of ACPI table statistics
*/
diff --git a/drivers/acpi/acpica/acdispat.h b/drivers/acpi/acpica/acdispat.h
index 5935ba6707e2..ed33ebcdaebe 100644
--- a/drivers/acpi/acpica/acdispat.h
+++ b/drivers/acpi/acpica/acdispat.h
@@ -309,10 +309,13 @@ acpi_ds_obj_stack_push(void *object, struct acpi_walk_state *walk_state);
acpi_status
acpi_ds_obj_stack_pop(u32 pop_count, struct acpi_walk_state *walk_state);
-struct acpi_walk_state *acpi_ds_create_walk_state(acpi_owner_id owner_id, union acpi_parse_object
- *origin, union acpi_operand_object
- *mth_desc, struct acpi_thread_state
- *thread);
+struct acpi_walk_state * acpi_ds_create_walk_state(acpi_owner_id owner_id,
+ union acpi_parse_object
+ *origin,
+ union acpi_operand_object
+ *mth_desc,
+ struct acpi_thread_state
+ *thread);
acpi_status
acpi_ds_init_aml_walk(struct acpi_walk_state *walk_state,
diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h
index c0a43b38c6a3..e975c6720448 100644
--- a/drivers/acpi/acpica/acevents.h
+++ b/drivers/acpi/acpica/acevents.h
@@ -84,9 +84,11 @@ acpi_ev_update_gpe_enable_mask(struct acpi_gpe_event_info *gpe_event_info);
acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info);
-acpi_status acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info);
+acpi_status
+acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info);
-acpi_status acpi_ev_remove_gpe_reference(struct acpi_gpe_event_info *gpe_event_info);
+acpi_status
+acpi_ev_remove_gpe_reference(struct acpi_gpe_event_info *gpe_event_info);
struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device,
u32 gpe_number);
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index ce79100fb5eb..64472e4ec329 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -70,7 +70,7 @@
/*
* Enable "slack" in the AML interpreter? Default is FALSE, and the
- * interpreter strictly follows the ACPI specification. Setting to TRUE
+ * interpreter strictly follows the ACPI specification. Setting to TRUE
* allows the interpreter to ignore certain errors and/or bad AML constructs.
*
* Currently, these features are enabled by this flag:
@@ -155,26 +155,6 @@ ACPI_EXTERN u8 ACPI_INIT_GLOBAL(acpi_gbl_no_resource_disassembly, FALSE);
/*****************************************************************************
*
- * Debug support
- *
- ****************************************************************************/
-
-/* Procedure nesting level for debug output */
-
-extern u32 acpi_gbl_nesting_level;
-
-ACPI_EXTERN u32 acpi_gpe_count;
-ACPI_EXTERN u32 acpi_fixed_event_count[ACPI_NUM_FIXED_EVENTS];
-
-/* Support for dynamic control method tracing mechanism */
-
-ACPI_EXTERN u32 acpi_gbl_original_dbg_level;
-ACPI_EXTERN u32 acpi_gbl_original_dbg_layer;
-ACPI_EXTERN u32 acpi_gbl_trace_dbg_level;
-ACPI_EXTERN u32 acpi_gbl_trace_dbg_layer;
-
-/*****************************************************************************
- *
* ACPI Table globals
*
****************************************************************************/
@@ -259,15 +239,6 @@ ACPI_EXTERN acpi_spinlock acpi_gbl_hardware_lock; /* For ACPI H/W except GPE reg
*
****************************************************************************/
-#ifdef ACPI_DBG_TRACK_ALLOCATIONS
-
-/* Lists for tracking memory allocations */
-
-ACPI_EXTERN struct acpi_memory_list *acpi_gbl_global_list;
-ACPI_EXTERN struct acpi_memory_list *acpi_gbl_ns_node_list;
-ACPI_EXTERN u8 acpi_gbl_display_final_mem_stats;
-#endif
-
/* Object caches */
ACPI_EXTERN acpi_cache_t *acpi_gbl_namespace_cache;
@@ -326,6 +297,15 @@ extern const char *acpi_gbl_region_types[ACPI_NUM_PREDEFINED_REGIONS];
#endif
+#ifdef ACPI_DBG_TRACK_ALLOCATIONS
+
+/* Lists for tracking memory allocations */
+
+ACPI_EXTERN struct acpi_memory_list *acpi_gbl_global_list;
+ACPI_EXTERN struct acpi_memory_list *acpi_gbl_ns_node_list;
+ACPI_EXTERN u8 acpi_gbl_display_final_mem_stats;
+#endif
+
/*****************************************************************************
*
* Namespace globals
@@ -396,13 +376,35 @@ ACPI_EXTERN struct acpi_gpe_block_info
#if (!ACPI_REDUCED_HARDWARE)
ACPI_EXTERN u8 acpi_gbl_all_gpes_initialized;
-ACPI_EXTERN ACPI_GBL_EVENT_HANDLER acpi_gbl_global_event_handler;
+ACPI_EXTERN acpi_gbl_event_handler acpi_gbl_global_event_handler;
ACPI_EXTERN void *acpi_gbl_global_event_handler_context;
#endif /* !ACPI_REDUCED_HARDWARE */
/*****************************************************************************
*
+ * Debug support
+ *
+ ****************************************************************************/
+
+/* Procedure nesting level for debug output */
+
+extern u32 acpi_gbl_nesting_level;
+
+/* Event counters */
+
+ACPI_EXTERN u32 acpi_gpe_count;
+ACPI_EXTERN u32 acpi_fixed_event_count[ACPI_NUM_FIXED_EVENTS];
+
+/* Support for dynamic control method tracing mechanism */
+
+ACPI_EXTERN u32 acpi_gbl_original_dbg_level;
+ACPI_EXTERN u32 acpi_gbl_original_dbg_layer;
+ACPI_EXTERN u32 acpi_gbl_trace_dbg_level;
+ACPI_EXTERN u32 acpi_gbl_trace_dbg_layer;
+
+/*****************************************************************************
+ *
* Debugger globals
*
****************************************************************************/
@@ -426,10 +428,11 @@ ACPI_EXTERN u8 acpi_gbl_db_opt_stats;
ACPI_EXTERN u8 acpi_gbl_db_opt_ini_methods;
ACPI_EXTERN char *acpi_gbl_db_args[ACPI_DEBUGGER_MAX_ARGS];
-ACPI_EXTERN char acpi_gbl_db_line_buf[80];
-ACPI_EXTERN char acpi_gbl_db_parsed_buf[80];
-ACPI_EXTERN char acpi_gbl_db_scope_buf[40];
-ACPI_EXTERN char acpi_gbl_db_debug_filename[40];
+ACPI_EXTERN acpi_object_type acpi_gbl_db_arg_types[ACPI_DEBUGGER_MAX_ARGS];
+ACPI_EXTERN char acpi_gbl_db_line_buf[ACPI_DB_LINE_BUFFER_SIZE];
+ACPI_EXTERN char acpi_gbl_db_parsed_buf[ACPI_DB_LINE_BUFFER_SIZE];
+ACPI_EXTERN char acpi_gbl_db_scope_buf[80];
+ACPI_EXTERN char acpi_gbl_db_debug_filename[80];
ACPI_EXTERN u8 acpi_gbl_db_output_to_file;
ACPI_EXTERN char *acpi_gbl_db_buffer;
ACPI_EXTERN char *acpi_gbl_db_filename;
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index c816ee675094..ff8bd0061e8b 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -262,10 +262,10 @@ struct acpi_create_field_info {
};
typedef
-acpi_status(*ACPI_INTERNAL_METHOD) (struct acpi_walk_state * walk_state);
+acpi_status(*acpi_internal_method) (struct acpi_walk_state * walk_state);
/*
- * Bitmapped ACPI types. Used internally only
+ * Bitmapped ACPI types. Used internally only
*/
#define ACPI_BTYPE_ANY 0x00000000
#define ACPI_BTYPE_INTEGER 0x00000001
@@ -486,8 +486,10 @@ struct acpi_gpe_device_info {
struct acpi_namespace_node *gpe_device;
};
-typedef acpi_status(*acpi_gpe_callback) (struct acpi_gpe_xrupt_info *gpe_xrupt_info,
- struct acpi_gpe_block_info *gpe_block, void *context);
+typedef acpi_status(*acpi_gpe_callback) (struct acpi_gpe_xrupt_info *
+ gpe_xrupt_info,
+ struct acpi_gpe_block_info *gpe_block,
+ void *context);
/* Information about each particular fixed event */
@@ -582,7 +584,7 @@ struct acpi_pscope_state {
};
/*
- * Thread state - one per thread across multiple walk states. Multiple walk
+ * Thread state - one per thread across multiple walk states. Multiple walk
* states are created when there are nested control methods executing.
*/
struct acpi_thread_state {
@@ -645,7 +647,7 @@ union acpi_generic_state {
*
****************************************************************************/
-typedef acpi_status(*ACPI_EXECUTE_OP) (struct acpi_walk_state * walk_state);
+typedef acpi_status(*acpi_execute_op) (struct acpi_walk_state * walk_state);
/* Address Range info block */
@@ -1031,6 +1033,7 @@ struct acpi_db_method_info {
acpi_handle method;
acpi_handle main_thread_gate;
acpi_handle thread_complete_gate;
+ acpi_handle info_gate;
acpi_thread_id *threads;
u32 num_threads;
u32 num_created;
@@ -1041,6 +1044,7 @@ struct acpi_db_method_info {
u32 num_loops;
char pathname[128];
char **args;
+ acpi_object_type *types;
/*
* Arguments to be passed to method for the command
diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h
index a7f68c47f517..5efad99f2169 100644
--- a/drivers/acpi/acpica/acmacros.h
+++ b/drivers/acpi/acpica/acmacros.h
@@ -84,29 +84,29 @@
/* These macros reverse the bytes during the move, converting little-endian to big endian */
- /* Big Endian <== Little Endian */
- /* Hi...Lo Lo...Hi */
+ /* Big Endian <== Little Endian */
+ /* Hi...Lo Lo...Hi */
/* 16-bit source, 16/32/64 destination */
#define ACPI_MOVE_16_TO_16(d, s) {(( u8 *)(void *)(d))[0] = ((u8 *)(void *)(s))[1];\
- (( u8 *)(void *)(d))[1] = ((u8 *)(void *)(s))[0];}
+ (( u8 *)(void *)(d))[1] = ((u8 *)(void *)(s))[0];}
#define ACPI_MOVE_16_TO_32(d, s) {(*(u32 *)(void *)(d))=0;\
- ((u8 *)(void *)(d))[2] = ((u8 *)(void *)(s))[1];\
- ((u8 *)(void *)(d))[3] = ((u8 *)(void *)(s))[0];}
+ ((u8 *)(void *)(d))[2] = ((u8 *)(void *)(s))[1];\
+ ((u8 *)(void *)(d))[3] = ((u8 *)(void *)(s))[0];}
#define ACPI_MOVE_16_TO_64(d, s) {(*(u64 *)(void *)(d))=0;\
- ((u8 *)(void *)(d))[6] = ((u8 *)(void *)(s))[1];\
- ((u8 *)(void *)(d))[7] = ((u8 *)(void *)(s))[0];}
+ ((u8 *)(void *)(d))[6] = ((u8 *)(void *)(s))[1];\
+ ((u8 *)(void *)(d))[7] = ((u8 *)(void *)(s))[0];}
/* 32-bit source, 16/32/64 destination */
#define ACPI_MOVE_32_TO_16(d, s) ACPI_MOVE_16_TO_16(d, s) /* Truncate to 16 */
#define ACPI_MOVE_32_TO_32(d, s) {(( u8 *)(void *)(d))[0] = ((u8 *)(void *)(s))[3];\
- (( u8 *)(void *)(d))[1] = ((u8 *)(void *)(s))[2];\
- (( u8 *)(void *)(d))[2] = ((u8 *)(void *)(s))[1];\
- (( u8 *)(void *)(d))[3] = ((u8 *)(void *)(s))[0];}
+ (( u8 *)(void *)(d))[1] = ((u8 *)(void *)(s))[2];\
+ (( u8 *)(void *)(d))[2] = ((u8 *)(void *)(s))[1];\
+ (( u8 *)(void *)(d))[3] = ((u8 *)(void *)(s))[0];}
#define ACPI_MOVE_32_TO_64(d, s) {(*(u64 *)(void *)(d))=0;\
((u8 *)(void *)(d))[4] = ((u8 *)(void *)(s))[3];\
@@ -196,24 +196,12 @@
#endif
#endif
-/* Macros based on machine integer width */
-
-#if ACPI_MACHINE_WIDTH == 32
-#define ACPI_MOVE_SIZE_TO_16(d, s) ACPI_MOVE_32_TO_16(d, s)
-
-#elif ACPI_MACHINE_WIDTH == 64
-#define ACPI_MOVE_SIZE_TO_16(d, s) ACPI_MOVE_64_TO_16(d, s)
-
-#else
-#error unknown ACPI_MACHINE_WIDTH
-#endif
-
/*
* Fast power-of-two math macros for non-optimized compilers
*/
-#define _ACPI_DIV(value, power_of2) ((u32) ((value) >> (power_of2)))
-#define _ACPI_MUL(value, power_of2) ((u32) ((value) << (power_of2)))
-#define _ACPI_MOD(value, divisor) ((u32) ((value) & ((divisor) -1)))
+#define _ACPI_DIV(value, power_of2) ((u32) ((value) >> (power_of2)))
+#define _ACPI_MUL(value, power_of2) ((u32) ((value) << (power_of2)))
+#define _ACPI_MOD(value, divisor) ((u32) ((value) & ((divisor) -1)))
#define ACPI_DIV_2(a) _ACPI_DIV(a, 1)
#define ACPI_MUL_2(a) _ACPI_MUL(a, 1)
@@ -238,12 +226,12 @@
/*
* Rounding macros (Power of two boundaries only)
*/
-#define ACPI_ROUND_DOWN(value, boundary) (((acpi_size)(value)) & \
- (~(((acpi_size) boundary)-1)))
+#define ACPI_ROUND_DOWN(value, boundary) (((acpi_size)(value)) & \
+ (~(((acpi_size) boundary)-1)))
-#define ACPI_ROUND_UP(value, boundary) ((((acpi_size)(value)) + \
- (((acpi_size) boundary)-1)) & \
- (~(((acpi_size) boundary)-1)))
+#define ACPI_ROUND_UP(value, boundary) ((((acpi_size)(value)) + \
+ (((acpi_size) boundary)-1)) & \
+ (~(((acpi_size) boundary)-1)))
/* Note: sizeof(acpi_size) evaluates to either 4 or 8 (32- vs 64-bit mode) */
@@ -264,7 +252,7 @@
#define ACPI_ROUND_UP_TO(value, boundary) (((value) + ((boundary)-1)) / (boundary))
-#define ACPI_IS_MISALIGNED(value) (((acpi_size) value) & (sizeof(acpi_size)-1))
+#define ACPI_IS_MISALIGNED(value) (((acpi_size) value) & (sizeof(acpi_size)-1))
/*
* Bitmask creation
@@ -355,7 +343,6 @@
* Ascii error messages can be configured out
*/
#ifndef ACPI_NO_ERROR_MESSAGES
-
/*
* Error reporting. Callers module and line number are inserted by AE_INFO,
* the plist contains a set of parens to allow variable-length lists.
@@ -375,18 +362,15 @@
#define ACPI_WARN_PREDEFINED(plist)
#define ACPI_INFO_PREDEFINED(plist)
-#endif /* ACPI_NO_ERROR_MESSAGES */
+#endif /* ACPI_NO_ERROR_MESSAGES */
/*
* Debug macros that are conditionally compiled
*/
#ifdef ACPI_DEBUG_OUTPUT
-
/*
* Function entry tracing
*/
-#ifdef CONFIG_ACPI_DEBUG_FUNC_TRACE
-
#define ACPI_FUNCTION_TRACE(a) ACPI_FUNCTION_NAME(a) \
acpi_ut_trace(ACPI_DEBUG_PARAMETERS)
#define ACPI_FUNCTION_TRACE_PTR(a, b) ACPI_FUNCTION_NAME(a) \
@@ -464,45 +448,19 @@
#endif /* ACPI_SIMPLE_RETURN_MACROS */
-#else /* !CONFIG_ACPI_DEBUG_FUNC_TRACE */
-
-#define ACPI_FUNCTION_TRACE(a)
-#define ACPI_FUNCTION_TRACE_PTR(a,b)
-#define ACPI_FUNCTION_TRACE_U32(a,b)
-#define ACPI_FUNCTION_TRACE_STR(a,b)
-#define ACPI_FUNCTION_EXIT
-#define ACPI_FUNCTION_STATUS_EXIT(s)
-#define ACPI_FUNCTION_VALUE_EXIT(s)
-#define ACPI_FUNCTION_TRACE(a)
-#define ACPI_FUNCTION_ENTRY()
-
-#define return_VOID return
-#define return_ACPI_STATUS(s) return(s)
-#define return_VALUE(s) return(s)
-#define return_UINT8(s) return(s)
-#define return_UINT32(s) return(s)
-#define return_PTR(s) return(s)
-
-#endif /* CONFIG_ACPI_DEBUG_FUNC_TRACE */
-
/* Conditional execution */
#define ACPI_DEBUG_EXEC(a) a
-#define ACPI_NORMAL_EXEC(a)
-
-#define ACPI_DEBUG_DEFINE(a) a;
#define ACPI_DEBUG_ONLY_MEMBERS(a) a;
#define _VERBOSE_STRUCTURES
-/* Stack and buffer dumping */
+/* Various object display routines for debug */
#define ACPI_DUMP_STACK_ENTRY(a) acpi_ex_dump_operand((a), 0)
-#define ACPI_DUMP_OPERANDS(a, b, c) acpi_ex_dump_operands(a, b, c)
-
+#define ACPI_DUMP_OPERANDS(a, b ,c) acpi_ex_dump_operands(a, b, c)
#define ACPI_DUMP_ENTRY(a, b) acpi_ns_dump_entry (a, b)
#define ACPI_DUMP_PATHNAME(a, b, c, d) acpi_ns_dump_pathname(a, b, c, d)
-#define ACPI_DUMP_RESOURCE_LIST(a) acpi_rs_dump_resource_list(a)
-#define ACPI_DUMP_BUFFER(a, b) acpi_ut_dump_buffer((u8 *) a, b, DB_BYTE_DISPLAY, _COMPONENT)
+#define ACPI_DUMP_BUFFER(a, b) acpi_ut_debug_dump_buffer((u8 *) a, b, DB_BYTE_DISPLAY, _COMPONENT)
#else
/*
@@ -510,25 +468,23 @@
* leaving no executable debug code!
*/
#define ACPI_DEBUG_EXEC(a)
-#define ACPI_NORMAL_EXEC(a) a;
-
-#define ACPI_DEBUG_DEFINE(a) do { } while(0)
-#define ACPI_DEBUG_ONLY_MEMBERS(a) do { } while(0)
-#define ACPI_FUNCTION_TRACE(a) do { } while(0)
-#define ACPI_FUNCTION_TRACE_PTR(a, b) do { } while(0)
-#define ACPI_FUNCTION_TRACE_U32(a, b) do { } while(0)
-#define ACPI_FUNCTION_TRACE_STR(a, b) do { } while(0)
-#define ACPI_FUNCTION_EXIT do { } while(0)
-#define ACPI_FUNCTION_STATUS_EXIT(s) do { } while(0)
-#define ACPI_FUNCTION_VALUE_EXIT(s) do { } while(0)
-#define ACPI_FUNCTION_ENTRY() do { } while(0)
-#define ACPI_DUMP_STACK_ENTRY(a) do { } while(0)
-#define ACPI_DUMP_OPERANDS(a, b, c) do { } while(0)
-#define ACPI_DUMP_ENTRY(a, b) do { } while(0)
-#define ACPI_DUMP_TABLES(a, b) do { } while(0)
-#define ACPI_DUMP_PATHNAME(a, b, c, d) do { } while(0)
-#define ACPI_DUMP_RESOURCE_LIST(a) do { } while(0)
-#define ACPI_DUMP_BUFFER(a, b) do { } while(0)
+#define ACPI_DEBUG_ONLY_MEMBERS(a)
+#define ACPI_FUNCTION_TRACE(a)
+#define ACPI_FUNCTION_TRACE_PTR(a, b)
+#define ACPI_FUNCTION_TRACE_U32(a, b)
+#define ACPI_FUNCTION_TRACE_STR(a, b)
+#define ACPI_FUNCTION_EXIT
+#define ACPI_FUNCTION_STATUS_EXIT(s)
+#define ACPI_FUNCTION_VALUE_EXIT(s)
+#define ACPI_FUNCTION_ENTRY()
+#define ACPI_DUMP_STACK_ENTRY(a)
+#define ACPI_DUMP_OPERANDS(a, b, c)
+#define ACPI_DUMP_ENTRY(a, b)
+#define ACPI_DUMP_TABLES(a, b)
+#define ACPI_DUMP_PATHNAME(a, b, c, d)
+#define ACPI_DUMP_BUFFER(a, b)
+#define ACPI_DEBUG_PRINT(pl)
+#define ACPI_DEBUG_PRINT_RAW(pl)
#define return_VOID return
#define return_ACPI_STATUS(s) return(s)
@@ -556,18 +512,6 @@
#define ACPI_DEBUGGER_EXEC(a)
#endif
-#ifdef ACPI_DEBUG_OUTPUT
-/*
- * 1) Set name to blanks
- * 2) Copy the object name
- */
-#define ACPI_ADD_OBJECT_NAME(a,b) ACPI_MEMSET (a->common.name, ' ', sizeof (a->common.name));\
- ACPI_STRNCPY (a->common.name, acpi_gbl_ns_type_names[b], sizeof (a->common.name))
-#else
-
-#define ACPI_ADD_OBJECT_NAME(a,b)
-#endif
-
/*
* Memory allocation tracking (DEBUG ONLY)
*/
@@ -578,13 +522,13 @@
/* Memory allocation */
#ifndef ACPI_ALLOCATE
-#define ACPI_ALLOCATE(a) acpi_ut_allocate((acpi_size)(a), ACPI_MEM_PARAMETERS)
+#define ACPI_ALLOCATE(a) acpi_ut_allocate((acpi_size) (a), ACPI_MEM_PARAMETERS)
#endif
#ifndef ACPI_ALLOCATE_ZEROED
-#define ACPI_ALLOCATE_ZEROED(a) acpi_ut_allocate_zeroed((acpi_size)(a), ACPI_MEM_PARAMETERS)
+#define ACPI_ALLOCATE_ZEROED(a) acpi_ut_allocate_zeroed((acpi_size) (a), ACPI_MEM_PARAMETERS)
#endif
#ifndef ACPI_FREE
-#define ACPI_FREE(a) acpio_os_free(a)
+#define ACPI_FREE(a) acpi_os_free(a)
#endif
#define ACPI_MEM_TRACKING(a)
@@ -592,16 +536,25 @@
/* Memory allocation */
-#define ACPI_ALLOCATE(a) acpi_ut_allocate_and_track((acpi_size)(a), ACPI_MEM_PARAMETERS)
-#define ACPI_ALLOCATE_ZEROED(a) acpi_ut_allocate_zeroed_and_track((acpi_size)(a), ACPI_MEM_PARAMETERS)
+#define ACPI_ALLOCATE(a) acpi_ut_allocate_and_track((acpi_size) (a), ACPI_MEM_PARAMETERS)
+#define ACPI_ALLOCATE_ZEROED(a) acpi_ut_allocate_zeroed_and_track((acpi_size) (a), ACPI_MEM_PARAMETERS)
#define ACPI_FREE(a) acpi_ut_free_and_track(a, ACPI_MEM_PARAMETERS)
#define ACPI_MEM_TRACKING(a) a
#endif /* ACPI_DBG_TRACK_ALLOCATIONS */
-/* Preemption point */
-#ifndef ACPI_PREEMPTION_POINT
-#define ACPI_PREEMPTION_POINT() /* no preemption */
-#endif
+/*
+ * Macros used for ACPICA utilities only
+ */
+
+/* Generate a UUID */
+
+#define ACPI_INIT_UUID(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7) \
+ (a) & 0xFF, ((a) >> 8) & 0xFF, ((a) >> 16) & 0xFF, ((a) >> 24) & 0xFF, \
+ (b) & 0xFF, ((b) >> 8) & 0xFF, \
+ (c) & 0xFF, ((c) >> 8) & 0xFF, \
+ (d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7)
+
+#define ACPI_IS_OCTAL_DIGIT(d) (((char)(d) >= '0') && ((char)(d) <= '7'))
#endif /* ACMACROS_H */
diff --git a/drivers/acpi/acpica/acobject.h b/drivers/acpi/acpica/acobject.h
index 364a1303fb8f..24eb9eac9514 100644
--- a/drivers/acpi/acpica/acobject.h
+++ b/drivers/acpi/acpica/acobject.h
@@ -1,4 +1,3 @@
-
/******************************************************************************
*
* Name: acobject.h - Definition of union acpi_operand_object (Internal object only)
@@ -179,7 +178,7 @@ struct acpi_object_method {
union acpi_operand_object *mutex;
u8 *aml_start;
union {
- ACPI_INTERNAL_METHOD implementation;
+ acpi_internal_method implementation;
union acpi_operand_object *handler;
} dispatch;
@@ -198,7 +197,7 @@ struct acpi_object_method {
/******************************************************************************
*
- * Objects that can be notified. All share a common notify_info area.
+ * Objects that can be notified. All share a common notify_info area.
*
*****************************************************************************/
@@ -235,7 +234,7 @@ ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_NOTIFY_INFO};
/******************************************************************************
*
- * Fields. All share a common header/info field.
+ * Fields. All share a common header/info field.
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/acopcode.h b/drivers/acpi/acpica/acopcode.h
index 9440d053fbb3..d786a5128b78 100644
--- a/drivers/acpi/acpica/acopcode.h
+++ b/drivers/acpi/acpica/acopcode.h
@@ -54,7 +54,7 @@
#define _UNK 0x6B
/*
- * Reserved ASCII characters. Do not use any of these for
+ * Reserved ASCII characters. Do not use any of these for
* internal opcodes, since they are used to differentiate
* name strings from AML opcodes
*/
@@ -63,7 +63,7 @@
#define _PFX 0x6D
/*
- * All AML opcodes and the parse-time arguments for each. Used by the AML
+ * All AML opcodes and the parse-time arguments for each. Used by the AML
* parser Each list is compressed into a 32-bit number and stored in the
* master opcode table (in psopcode.c).
*/
@@ -193,7 +193,7 @@
#define ARGP_ZERO_OP ARG_NONE
/*
- * All AML opcodes and the runtime arguments for each. Used by the AML
+ * All AML opcodes and the runtime arguments for each. Used by the AML
* interpreter Each list is compressed into a 32-bit number and stored
* in the master opcode table (in psopcode.c).
*
diff --git a/drivers/acpi/acpica/acparser.h b/drivers/acpi/acpica/acparser.h
index b725d780d34d..eefcf47a61a0 100644
--- a/drivers/acpi/acpica/acparser.h
+++ b/drivers/acpi/acpica/acparser.h
@@ -150,8 +150,7 @@ u8 acpi_ps_has_completed_scope(struct acpi_parse_state *parser_state);
void
acpi_ps_pop_scope(struct acpi_parse_state *parser_state,
- union acpi_parse_object **op,
- u32 * arg_list, u32 * arg_count);
+ union acpi_parse_object **op, u32 *arg_list, u32 *arg_count);
acpi_status
acpi_ps_push_scope(struct acpi_parse_state *parser_state,
diff --git a/drivers/acpi/acpica/acpredef.h b/drivers/acpi/acpica/acpredef.h
index 3080c017f5ba..9dfa1c83bd4e 100644
--- a/drivers/acpi/acpica/acpredef.h
+++ b/drivers/acpi/acpica/acpredef.h
@@ -150,8 +150,7 @@ enum acpi_return_package_types {
* is saved here (rather than in a separate table) in order to minimize the
* overall size of the stored data.
*/
-static const union acpi_predefined_info predefined_names[] =
-{
+static const union acpi_predefined_info predefined_names[] = {
{{"_AC0", 0, ACPI_RTYPE_INTEGER}},
{{"_AC1", 0, ACPI_RTYPE_INTEGER}},
{{"_AC2", 0, ACPI_RTYPE_INTEGER}},
@@ -538,7 +537,8 @@ static const union acpi_predefined_info predefined_names[] =
/* Acpi 1.0 defined _WAK with no return value. Later, it was changed to return a package */
- {{"_WAK", 1, ACPI_RTYPE_NONE | ACPI_RTYPE_INTEGER | ACPI_RTYPE_PACKAGE}},
+ {{"_WAK", 1,
+ ACPI_RTYPE_NONE | ACPI_RTYPE_INTEGER | ACPI_RTYPE_PACKAGE}},
{{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 2,0}, 0,0}}, /* Fixed-length (2 Int), but is optional */
/* _WDG/_WED are MS extensions defined by "Windows Instrumentation" */
@@ -551,11 +551,12 @@ static const union acpi_predefined_info predefined_names[] =
};
#if 0
+
/* This is an internally implemented control method, no need to check */
- {{"_OSI", 1, ACPI_RTYPE_INTEGER}},
+{ {
+"_OSI", 1, ACPI_RTYPE_INTEGER}},
/* TBD: */
-
_PRT - currently ignore reversed entries. attempt to fix here?
think about possibly fixing package elements like _BIF, etc.
#endif
diff --git a/drivers/acpi/acpica/acstruct.h b/drivers/acpi/acpica/acstruct.h
index f196e2c9a71f..937e66c65d1e 100644
--- a/drivers/acpi/acpica/acstruct.h
+++ b/drivers/acpi/acpica/acstruct.h
@@ -53,7 +53,7 @@
****************************************************************************/
/*
- * Walk state - current state of a parse tree walk. Used for both a leisurely
+ * Walk state - current state of a parse tree walk. Used for both a leisurely
* stroll through the tree (for whatever reason), and for control method
* execution.
*/
diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h
index 5035327ebccc..b0f5f92b674a 100644
--- a/drivers/acpi/acpica/acutils.h
+++ b/drivers/acpi/acpica/acutils.h
@@ -69,6 +69,22 @@ extern const char *acpi_gbl_siz_decode[];
extern const char *acpi_gbl_trs_decode[];
extern const char *acpi_gbl_ttp_decode[];
extern const char *acpi_gbl_typ_decode[];
+extern const char *acpi_gbl_ppc_decode[];
+extern const char *acpi_gbl_ior_decode[];
+extern const char *acpi_gbl_dts_decode[];
+extern const char *acpi_gbl_ct_decode[];
+extern const char *acpi_gbl_sbt_decode[];
+extern const char *acpi_gbl_am_decode[];
+extern const char *acpi_gbl_sm_decode[];
+extern const char *acpi_gbl_wm_decode[];
+extern const char *acpi_gbl_cph_decode[];
+extern const char *acpi_gbl_cpo_decode[];
+extern const char *acpi_gbl_dp_decode[];
+extern const char *acpi_gbl_ed_decode[];
+extern const char *acpi_gbl_bpb_decode[];
+extern const char *acpi_gbl_sb_decode[];
+extern const char *acpi_gbl_fc_decode[];
+extern const char *acpi_gbl_pt_decode[];
#endif
/* Types for Resource descriptor entries */
@@ -79,14 +95,14 @@ extern const char *acpi_gbl_typ_decode[];
#define ACPI_SMALL_VARIABLE_LENGTH 3
typedef
-acpi_status(*acpi_walk_aml_callback) (u8 * aml,
+acpi_status(*acpi_walk_aml_callback) (u8 *aml,
u32 length,
u32 offset,
u8 resource_index, void **context);
typedef
acpi_status(*acpi_pkg_callback) (u8 object_type,
- union acpi_operand_object * source_object,
+ union acpi_operand_object *source_object,
union acpi_generic_state * state,
void *context);
@@ -202,7 +218,9 @@ extern const u8 _acpi_ctype[];
#define ACPI_IS_PRINT(c) (_acpi_ctype[(unsigned char)(c)] & (_ACPI_LO | _ACPI_UP | _ACPI_DI | _ACPI_SP | _ACPI_PU))
#define ACPI_IS_ALPHA(c) (_acpi_ctype[(unsigned char)(c)] & (_ACPI_LO | _ACPI_UP))
-#endif /* ACPI_USE_SYSTEM_CLIBRARY */
+#endif /* !ACPI_USE_SYSTEM_CLIBRARY */
+
+#define ACPI_IS_ASCII(c) ((c) < 0x80)
/*
* utcopy - Object construction and conversion interfaces
@@ -210,11 +228,11 @@ extern const u8 _acpi_ctype[];
acpi_status
acpi_ut_build_simple_object(union acpi_operand_object *obj,
union acpi_object *user_obj,
- u8 * data_space, u32 * buffer_space_used);
+ u8 *data_space, u32 *buffer_space_used);
acpi_status
acpi_ut_build_package_object(union acpi_operand_object *obj,
- u8 * buffer, u32 * space_used);
+ u8 *buffer, u32 *space_used);
acpi_status
acpi_ut_copy_iobject_to_eobject(union acpi_operand_object *obj,
@@ -287,9 +305,10 @@ acpi_ut_ptr_exit(u32 line_number,
const char *function_name,
const char *module_name, u32 component_id, u8 *ptr);
-void acpi_ut_dump_buffer(u8 * buffer, u32 count, u32 display, u32 component_id);
+void
+acpi_ut_debug_dump_buffer(u8 *buffer, u32 count, u32 display, u32 component_id);
-void acpi_ut_dump_buffer2(u8 * buffer, u32 count, u32 display);
+void acpi_ut_dump_buffer(u8 *buffer, u32 count, u32 display, u32 offset);
void acpi_ut_report_error(char *module_name, u32 line_number);
@@ -337,15 +356,19 @@ acpi_ut_execute_power_methods(struct acpi_namespace_node *device_node,
*/
acpi_status
acpi_ut_execute_HID(struct acpi_namespace_node *device_node,
- struct acpica_device_id **return_id);
+ struct acpi_pnp_device_id ** return_id);
acpi_status
acpi_ut_execute_UID(struct acpi_namespace_node *device_node,
- struct acpica_device_id **return_id);
+ struct acpi_pnp_device_id ** return_id);
+
+acpi_status
+acpi_ut_execute_SUB(struct acpi_namespace_node *device_node,
+ struct acpi_pnp_device_id **return_id);
acpi_status
acpi_ut_execute_CID(struct acpi_namespace_node *device_node,
- struct acpica_device_id_list **return_cid_list);
+ struct acpi_pnp_device_id_list ** return_cid_list);
/*
* utlock - reader/writer locks
@@ -479,15 +502,19 @@ acpi_ut_walk_package_tree(union acpi_operand_object *source_object,
void acpi_ut_strupr(char *src_string);
+void acpi_ut_strlwr(char *src_string);
+
+int acpi_ut_stricmp(char *string1, char *string2);
+
void acpi_ut_print_string(char *string, u8 max_length);
u8 acpi_ut_valid_acpi_name(u32 name);
-acpi_name acpi_ut_repair_name(char *name);
+void acpi_ut_repair_name(char *name);
u8 acpi_ut_valid_acpi_char(char character, u32 position);
-acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 * ret_integer);
+acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer);
/* Values for Base above (16=Hex, 10=Decimal) */
@@ -508,12 +535,12 @@ acpi_ut_display_init_pathname(u8 type,
* utresrc
*/
acpi_status
-acpi_ut_walk_aml_resources(u8 * aml,
+acpi_ut_walk_aml_resources(u8 *aml,
acpi_size aml_length,
acpi_walk_aml_callback user_function,
void **context);
-acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index);
+acpi_status acpi_ut_validate_resource(void *aml, u8 *return_index);
u32 acpi_ut_get_descriptor_length(void *aml);
@@ -524,8 +551,7 @@ u8 acpi_ut_get_resource_header_length(void *aml);
u8 acpi_ut_get_resource_type(void *aml);
acpi_status
-acpi_ut_get_resource_end_tag(union acpi_operand_object *obj_desc,
- u8 ** end_tag);
+acpi_ut_get_resource_end_tag(union acpi_operand_object *obj_desc, u8 **end_tag);
/*
* utmutex - mutex support
diff --git a/drivers/acpi/acpica/amlresrc.h b/drivers/acpi/acpica/amlresrc.h
index af4947956ec2..968449685e06 100644
--- a/drivers/acpi/acpica/amlresrc.h
+++ b/drivers/acpi/acpica/amlresrc.h
@@ -1,4 +1,3 @@
-
/******************************************************************************
*
* Module Name: amlresrc.h - AML resource descriptors
diff --git a/drivers/acpi/acpica/dscontrol.c b/drivers/acpi/acpica/dscontrol.c
index 465f02134b89..57895db3231a 100644
--- a/drivers/acpi/acpica/dscontrol.c
+++ b/drivers/acpi/acpica/dscontrol.c
@@ -280,7 +280,7 @@ acpi_ds_exec_end_control_op(struct acpi_walk_state * walk_state,
/*
* Get the return value and save as the last result
- * value. This is the only place where walk_state->return_desc
+ * value. This is the only place where walk_state->return_desc
* is set to anything other than zero!
*/
walk_state->return_desc = walk_state->operands[0];
diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c
index 3da6fd8530c5..b5b904ee815f 100644
--- a/drivers/acpi/acpica/dsfield.c
+++ b/drivers/acpi/acpica/dsfield.c
@@ -277,7 +277,7 @@ acpi_ds_create_buffer_field(union acpi_parse_object *op,
*
* RETURN: Status
*
- * DESCRIPTION: Process all named fields in a field declaration. Names are
+ * DESCRIPTION: Process all named fields in a field declaration. Names are
* entered into the namespace.
*
******************************************************************************/
diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
index aa9a5d4e4052..52eb4e01622a 100644
--- a/drivers/acpi/acpica/dsmethod.c
+++ b/drivers/acpi/acpica/dsmethod.c
@@ -170,7 +170,7 @@ acpi_ds_create_method_mutex(union acpi_operand_object *method_desc)
*
* RETURN: Status
*
- * DESCRIPTION: Prepare a method for execution. Parses the method if necessary,
+ * DESCRIPTION: Prepare a method for execution. Parses the method if necessary,
* increments the thread count, and waits at the method semaphore
* for clearance to execute.
*
@@ -444,7 +444,7 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
* RETURN: Status
*
* DESCRIPTION: Restart a method that was preempted by another (nested) method
- * invocation. Handle the return value (if any) from the callee.
+ * invocation. Handle the return value (if any) from the callee.
*
******************************************************************************/
@@ -530,7 +530,7 @@ acpi_ds_restart_control_method(struct acpi_walk_state *walk_state,
*
* RETURN: None
*
- * DESCRIPTION: Terminate a control method. Delete everything that the method
+ * DESCRIPTION: Terminate a control method. Delete everything that the method
* created, delete all locals and arguments, and delete the parse
* tree if requested.
*
diff --git a/drivers/acpi/acpica/dsmthdat.c b/drivers/acpi/acpica/dsmthdat.c
index 8d55cebaa656..9a83b7e0f3ba 100644
--- a/drivers/acpi/acpica/dsmthdat.c
+++ b/drivers/acpi/acpica/dsmthdat.c
@@ -76,7 +76,7 @@ acpi_ds_method_data_get_type(u16 opcode,
* RETURN: Status
*
* DESCRIPTION: Initialize the data structures that hold the method's arguments
- * and locals. The data struct is an array of namespace nodes for
+ * and locals. The data struct is an array of namespace nodes for
* each - this allows ref_of and de_ref_of to work properly for these
* special data types.
*
@@ -129,7 +129,7 @@ void acpi_ds_method_data_init(struct acpi_walk_state *walk_state)
*
* RETURN: None
*
- * DESCRIPTION: Delete method locals and arguments. Arguments are only
+ * DESCRIPTION: Delete method locals and arguments. Arguments are only
* deleted if this method was called from another method.
*
******************************************************************************/
@@ -183,7 +183,7 @@ void acpi_ds_method_data_delete_all(struct acpi_walk_state *walk_state)
*
* RETURN: Status
*
- * DESCRIPTION: Initialize arguments for a method. The parameter list is a list
+ * DESCRIPTION: Initialize arguments for a method. The parameter list is a list
* of ACPI operand objects, either null terminated or whose length
* is defined by max_param_count.
*
@@ -401,7 +401,7 @@ acpi_ds_method_data_get_value(u8 type,
* This means that either 1) The expected argument was
* not passed to the method, or 2) A local variable
* was referenced by the method (via the ASL)
- * before it was initialized. Either case is an error.
+ * before it was initialized. Either case is an error.
*/
/* If slack enabled, init the local_x/arg_x to an Integer of value zero */
@@ -465,7 +465,7 @@ acpi_ds_method_data_get_value(u8 type,
*
* RETURN: None
*
- * DESCRIPTION: Delete the entry at Opcode:Index. Inserts
+ * DESCRIPTION: Delete the entry at Opcode:Index. Inserts
* a null into the stack slot after the object is deleted.
*
******************************************************************************/
@@ -523,7 +523,7 @@ acpi_ds_method_data_delete_value(u8 type,
*
* RETURN: Status
*
- * DESCRIPTION: Store a value in an Arg or Local. The obj_desc is installed
+ * DESCRIPTION: Store a value in an Arg or Local. The obj_desc is installed
* as the new value for the Arg or Local and the reference count
* for obj_desc is incremented.
*
@@ -566,7 +566,7 @@ acpi_ds_store_object_to_local(u8 type,
/*
* If the reference count on the object is more than one, we must
- * take a copy of the object before we store. A reference count
+ * take a copy of the object before we store. A reference count
* of exactly 1 means that the object was just created during the
* evaluation of an expression, and we can safely use it since it
* is not used anywhere else.
diff --git a/drivers/acpi/acpica/dsobject.c b/drivers/acpi/acpica/dsobject.c
index 68592dd34960..c9f15d3a3686 100644
--- a/drivers/acpi/acpica/dsobject.c
+++ b/drivers/acpi/acpica/dsobject.c
@@ -293,7 +293,7 @@ acpi_ds_build_internal_buffer_obj(struct acpi_walk_state *walk_state,
/*
* Second arg is the buffer data (optional) byte_list can be either
- * individual bytes or a string initializer. In either case, a
+ * individual bytes or a string initializer. In either case, a
* byte_list appears in the AML.
*/
arg = op->common.value.arg; /* skip first arg */
@@ -568,7 +568,7 @@ acpi_ds_create_node(struct acpi_walk_state *walk_state,
/*
* Because of the execution pass through the non-control-method
- * parts of the table, we can arrive here twice. Only init
+ * parts of the table, we can arrive here twice. Only init
* the named object node the first time through
*/
if (acpi_ns_get_attached_object(node)) {
@@ -618,7 +618,7 @@ acpi_ds_create_node(struct acpi_walk_state *walk_state,
* RETURN: Status
*
* DESCRIPTION: Initialize a namespace object from a parser Op and its
- * associated arguments. The namespace object is a more compact
+ * associated arguments. The namespace object is a more compact
* representation of the Op and its arguments.
*
******************************************************************************/
diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c
index aa34d8984d34..0df024e5fb63 100644
--- a/drivers/acpi/acpica/dsopcode.c
+++ b/drivers/acpi/acpica/dsopcode.c
@@ -649,7 +649,8 @@ acpi_ds_eval_data_object_operands(struct acpi_walk_state *walk_state,
((op->common.parent->common.aml_opcode != AML_PACKAGE_OP) &&
(op->common.parent->common.aml_opcode !=
AML_VAR_PACKAGE_OP)
- && (op->common.parent->common.aml_opcode != AML_NAME_OP))) {
+ && (op->common.parent->common.aml_opcode !=
+ AML_NAME_OP))) {
walk_state->result_obj = obj_desc;
}
}
diff --git a/drivers/acpi/acpica/dsutils.c b/drivers/acpi/acpica/dsutils.c
index 73a5447475f5..afeb99f49482 100644
--- a/drivers/acpi/acpica/dsutils.c
+++ b/drivers/acpi/acpica/dsutils.c
@@ -61,7 +61,7 @@ ACPI_MODULE_NAME("dsutils")
*
* RETURN: None.
*
- * DESCRIPTION: Clear and remove a reference on an implicit return value. Used
+ * DESCRIPTION: Clear and remove a reference on an implicit return value. Used
* to delete "stale" return values (if enabled, the return value
* from every operator is saved at least momentarily, in case the
* parent method exits.)
@@ -107,7 +107,7 @@ void acpi_ds_clear_implicit_return(struct acpi_walk_state *walk_state)
*
* DESCRIPTION: Implements the optional "implicit return". We save the result
* of every ASL operator and control method invocation in case the
- * parent method exit. Before storing a new return value, we
+ * parent method exit. Before storing a new return value, we
* delete the previous return value.
*
******************************************************************************/
@@ -198,7 +198,7 @@ acpi_ds_is_result_used(union acpi_parse_object * op,
*
* If there is no parent, or the parent is a scope_op, we are executing
* at the method level. An executing method typically has no parent,
- * since each method is parsed separately. A method invoked externally
+ * since each method is parsed separately. A method invoked externally
* via execute_control_method has a scope_op as the parent.
*/
if ((!op->common.parent) ||
@@ -223,7 +223,7 @@ acpi_ds_is_result_used(union acpi_parse_object * op,
}
/*
- * Decide what to do with the result based on the parent. If
+ * Decide what to do with the result based on the parent. If
* the parent opcode will not use the result, delete the object.
* Otherwise leave it as is, it will be deleted when it is used
* as an operand later.
@@ -266,7 +266,7 @@ acpi_ds_is_result_used(union acpi_parse_object * op,
/*
* These opcodes allow term_arg(s) as operands and therefore
- * the operands can be method calls. The result is used.
+ * the operands can be method calls. The result is used.
*/
goto result_used;
@@ -284,7 +284,7 @@ acpi_ds_is_result_used(union acpi_parse_object * op,
AML_BANK_FIELD_OP)) {
/*
* These opcodes allow term_arg(s) as operands and therefore
- * the operands can be method calls. The result is used.
+ * the operands can be method calls. The result is used.
*/
goto result_used;
}
@@ -329,9 +329,9 @@ acpi_ds_is_result_used(union acpi_parse_object * op,
*
* RETURN: Status
*
- * DESCRIPTION: Used after interpretation of an opcode. If there is an internal
+ * DESCRIPTION: Used after interpretation of an opcode. If there is an internal
* result descriptor, check if the parent opcode will actually use
- * this result. If not, delete the result now so that it will
+ * this result. If not, delete the result now so that it will
* not become orphaned.
*
******************************************************************************/
@@ -376,7 +376,7 @@ acpi_ds_delete_result_if_not_used(union acpi_parse_object *op,
*
* RETURN: Status
*
- * DESCRIPTION: Resolve all operands to their values. Used to prepare
+ * DESCRIPTION: Resolve all operands to their values. Used to prepare
* arguments to a control method invocation (a call from one
* method to another.)
*
@@ -391,7 +391,7 @@ acpi_status acpi_ds_resolve_operands(struct acpi_walk_state *walk_state)
/*
* Attempt to resolve each of the valid operands
- * Method arguments are passed by reference, not by value. This means
+ * Method arguments are passed by reference, not by value. This means
* that the actual objects are passed, not copies of the objects.
*/
for (i = 0; i < walk_state->num_operands; i++) {
@@ -451,7 +451,7 @@ void acpi_ds_clear_operands(struct acpi_walk_state *walk_state)
* RETURN: Status
*
* DESCRIPTION: Translate a parse tree object that is an argument to an AML
- * opcode to the equivalent interpreter object. This may include
+ * opcode to the equivalent interpreter object. This may include
* looking up a name or entering a new name into the internal
* namespace.
*
@@ -496,9 +496,9 @@ acpi_ds_create_operand(struct acpi_walk_state *walk_state,
/*
* Special handling for buffer_field declarations. This is a deferred
* opcode that unfortunately defines the field name as the last
- * parameter instead of the first. We get here when we are performing
+ * parameter instead of the first. We get here when we are performing
* the deferred execution, so the actual name of the field is already
- * in the namespace. We don't want to attempt to look it up again
+ * in the namespace. We don't want to attempt to look it up again
* because we may be executing in a different scope than where the
* actual opcode exists.
*/
@@ -560,7 +560,8 @@ acpi_ds_create_operand(struct acpi_walk_state *walk_state,
* indicate this to the interpreter, set the
* object to the root
*/
- obj_desc = ACPI_CAST_PTR(union
+ obj_desc =
+ ACPI_CAST_PTR(union
acpi_operand_object,
acpi_gbl_root_node);
status = AE_OK;
@@ -604,8 +605,8 @@ acpi_ds_create_operand(struct acpi_walk_state *walk_state,
/*
* If the name is null, this means that this is an
* optional result parameter that was not specified
- * in the original ASL. Create a Zero Constant for a
- * placeholder. (Store to a constant is a Noop.)
+ * in the original ASL. Create a Zero Constant for a
+ * placeholder. (Store to a constant is a Noop.)
*/
opcode = AML_ZERO_OP; /* Has no arguments! */
diff --git a/drivers/acpi/acpica/dswexec.c b/drivers/acpi/acpica/dswexec.c
index 642f3c053e87..58593931be96 100644
--- a/drivers/acpi/acpica/dswexec.c
+++ b/drivers/acpi/acpica/dswexec.c
@@ -57,7 +57,7 @@ ACPI_MODULE_NAME("dswexec")
/*
* Dispatch table for opcode classes
*/
-static ACPI_EXECUTE_OP acpi_gbl_op_type_dispatch[] = {
+static acpi_execute_op acpi_gbl_op_type_dispatch[] = {
acpi_ex_opcode_0A_0T_1R,
acpi_ex_opcode_1A_0T_0R,
acpi_ex_opcode_1A_0T_1R,
@@ -204,7 +204,7 @@ acpi_ds_get_predicate_value(struct acpi_walk_state *walk_state,
* RETURN: Status
*
* DESCRIPTION: Descending callback used during the execution of control
- * methods. This is where most operators and operands are
+ * methods. This is where most operators and operands are
* dispatched to the interpreter.
*
****************************************************************************/
@@ -297,7 +297,7 @@ acpi_ds_exec_begin_op(struct acpi_walk_state *walk_state,
if (walk_state->walk_type & ACPI_WALK_METHOD) {
/*
* Found a named object declaration during method execution;
- * we must enter this object into the namespace. The created
+ * we must enter this object into the namespace. The created
* object is temporary and will be deleted upon completion of
* the execution of this method.
*
@@ -348,7 +348,7 @@ acpi_ds_exec_begin_op(struct acpi_walk_state *walk_state,
* RETURN: Status
*
* DESCRIPTION: Ascending callback used during the execution of control
- * methods. The only thing we really need to do here is to
+ * methods. The only thing we really need to do here is to
* notice the beginning of IF, ELSE, and WHILE blocks.
*
****************************************************************************/
@@ -432,7 +432,7 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
if (ACPI_SUCCESS(status)) {
/*
* Dispatch the request to the appropriate interpreter handler
- * routine. There is one routine per opcode "type" based upon the
+ * routine. There is one routine per opcode "type" based upon the
* number of opcode arguments and return type.
*/
status =
diff --git a/drivers/acpi/acpica/dswload2.c b/drivers/acpi/acpica/dswload2.c
index 89c0114210c0..379835748357 100644
--- a/drivers/acpi/acpica/dswload2.c
+++ b/drivers/acpi/acpica/dswload2.c
@@ -254,7 +254,7 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
acpi_ut_get_type_name(node->type),
acpi_ut_get_node_name(node)));
- return (AE_AML_OPERAND_TYPE);
+ return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
}
break;
@@ -602,7 +602,7 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
region_space,
walk_state);
if (ACPI_FAILURE(status)) {
- return (status);
+ return_ACPI_STATUS(status);
}
acpi_ex_exit_interpreter();
diff --git a/drivers/acpi/acpica/dswstate.c b/drivers/acpi/acpica/dswstate.c
index d0e6555061e4..3e65a15a735f 100644
--- a/drivers/acpi/acpica/dswstate.c
+++ b/drivers/acpi/acpica/dswstate.c
@@ -51,8 +51,9 @@
ACPI_MODULE_NAME("dswstate")
/* Local prototypes */
-static acpi_status acpi_ds_result_stack_push(struct acpi_walk_state *ws);
-static acpi_status acpi_ds_result_stack_pop(struct acpi_walk_state *ws);
+static acpi_status
+acpi_ds_result_stack_push(struct acpi_walk_state *walk_state);
+static acpi_status acpi_ds_result_stack_pop(struct acpi_walk_state *walk_state);
/*******************************************************************************
*
@@ -347,7 +348,7 @@ acpi_ds_obj_stack_push(void *object, struct acpi_walk_state * walk_state)
*
* RETURN: Status
*
- * DESCRIPTION: Pop this walk's object stack. Objects on the stack are NOT
+ * DESCRIPTION: Pop this walk's object stack. Objects on the stack are NOT
* deleted by this routine.
*
******************************************************************************/
@@ -491,7 +492,7 @@ acpi_ds_push_walk_state(struct acpi_walk_state *walk_state,
* RETURN: A walk_state object popped from the thread's stack
*
* DESCRIPTION: Remove and return the walkstate object that is at the head of
- * the walk stack for the given walk list. NULL indicates that
+ * the walk stack for the given walk list. NULL indicates that
* the list is empty.
*
******************************************************************************/
@@ -531,14 +532,17 @@ struct acpi_walk_state *acpi_ds_pop_walk_state(struct acpi_thread_state *thread)
*
* RETURN: Pointer to the new walk state.
*
- * DESCRIPTION: Allocate and initialize a new walk state. The current walk
+ * DESCRIPTION: Allocate and initialize a new walk state. The current walk
* state is set to this new state.
*
******************************************************************************/
-struct acpi_walk_state *acpi_ds_create_walk_state(acpi_owner_id owner_id, union acpi_parse_object
- *origin, union acpi_operand_object
- *method_desc, struct acpi_thread_state
+struct acpi_walk_state *acpi_ds_create_walk_state(acpi_owner_id owner_id,
+ union acpi_parse_object
+ *origin,
+ union acpi_operand_object
+ *method_desc,
+ struct acpi_thread_state
*thread)
{
struct acpi_walk_state *walk_state;
@@ -653,7 +657,7 @@ acpi_ds_init_aml_walk(struct acpi_walk_state *walk_state,
/*
* Setup the current scope.
* Find a Named Op that has a namespace node associated with it.
- * search upwards from this Op. Current scope is the first
+ * search upwards from this Op. Current scope is the first
* Op with a namespace node.
*/
extra_op = parser_state->start_op;
@@ -704,13 +708,13 @@ void acpi_ds_delete_walk_state(struct acpi_walk_state *walk_state)
ACPI_FUNCTION_TRACE_PTR(ds_delete_walk_state, walk_state);
if (!walk_state) {
- return;
+ return_VOID;
}
if (walk_state->descriptor_type != ACPI_DESC_TYPE_WALK) {
ACPI_ERROR((AE_INFO, "%p is not a valid walk state",
walk_state));
- return;
+ return_VOID;
}
/* There should not be any open scopes */
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
index ef0193d74b5d..36d120574423 100644
--- a/drivers/acpi/acpica/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -89,7 +89,8 @@ acpi_ev_update_gpe_enable_mask(struct acpi_gpe_event_info *gpe_event_info)
/* Set the mask bit only if there are references to this GPE */
if (gpe_event_info->runtime_count) {
- ACPI_SET_BIT(gpe_register_info->enable_for_run, (u8)register_bit);
+ ACPI_SET_BIT(gpe_register_info->enable_for_run,
+ (u8)register_bit);
}
return_ACPI_STATUS(AE_OK);
@@ -106,8 +107,7 @@ acpi_ev_update_gpe_enable_mask(struct acpi_gpe_event_info *gpe_event_info)
* DESCRIPTION: Clear a GPE of stale events and enable it.
*
******************************************************************************/
-acpi_status
-acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
+acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
{
acpi_status status;
@@ -131,8 +131,8 @@ acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
}
/* Enable the requested GPE */
- status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE);
+ status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE);
return_ACPI_STATUS(status);
}
@@ -150,7 +150,8 @@ acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
*
******************************************************************************/
-acpi_status acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info)
+acpi_status
+acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info)
{
acpi_status status = AE_OK;
@@ -191,7 +192,8 @@ acpi_status acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info
*
******************************************************************************/
-acpi_status acpi_ev_remove_gpe_reference(struct acpi_gpe_event_info *gpe_event_info)
+acpi_status
+acpi_ev_remove_gpe_reference(struct acpi_gpe_event_info *gpe_event_info)
{
acpi_status status = AE_OK;
@@ -208,7 +210,8 @@ acpi_status acpi_ev_remove_gpe_reference(struct acpi_gpe_event_info *gpe_event_i
status = acpi_ev_update_gpe_enable_mask(gpe_event_info);
if (ACPI_SUCCESS(status)) {
- status = acpi_hw_low_set_gpe(gpe_event_info,
+ status =
+ acpi_hw_low_set_gpe(gpe_event_info,
ACPI_GPE_DISABLE);
}
@@ -306,7 +309,8 @@ struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device,
/* A Non-NULL gpe_device means this is a GPE Block Device */
- obj_desc = acpi_ns_get_attached_object((struct acpi_namespace_node *)
+ obj_desc =
+ acpi_ns_get_attached_object((struct acpi_namespace_node *)
gpe_device);
if (!obj_desc || !obj_desc->device.gpe_block) {
return (NULL);
diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c
index 8cf4c104c7b7..1571a61a7833 100644
--- a/drivers/acpi/acpica/evgpeblk.c
+++ b/drivers/acpi/acpica/evgpeblk.c
@@ -486,7 +486,8 @@ acpi_ev_initialize_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
"Could not enable GPE 0x%02X",
- gpe_index + gpe_block->block_base_number));
+ gpe_index +
+ gpe_block->block_base_number));
continue;
}
diff --git a/drivers/acpi/acpica/evgpeutil.c b/drivers/acpi/acpica/evgpeutil.c
index cb50dd91bc18..228a0c3b1d49 100644
--- a/drivers/acpi/acpica/evgpeutil.c
+++ b/drivers/acpi/acpica/evgpeutil.c
@@ -374,7 +374,8 @@ acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
gpe_event_info->dispatch.handler = NULL;
gpe_event_info->flags &=
~ACPI_GPE_DISPATCH_MASK;
- } else if ((gpe_event_info->
+ } else
+ if ((gpe_event_info->
flags & ACPI_GPE_DISPATCH_MASK) ==
ACPI_GPE_DISPATCH_NOTIFY) {
diff --git a/drivers/acpi/acpica/evrgnini.c b/drivers/acpi/acpica/evrgnini.c
index 4c1c8261166f..1474241bfc7e 100644
--- a/drivers/acpi/acpica/evrgnini.c
+++ b/drivers/acpi/acpica/evrgnini.c
@@ -227,8 +227,7 @@ acpi_ev_pci_config_region_setup(acpi_handle handle,
/* Install a handler for this PCI root bridge */
- status =
- acpi_install_address_space_handler((acpi_handle) pci_root_node, ACPI_ADR_SPACE_PCI_CONFIG, ACPI_DEFAULT_HANDLER, NULL, NULL);
+ status = acpi_install_address_space_handler((acpi_handle) pci_root_node, ACPI_ADR_SPACE_PCI_CONFIG, ACPI_DEFAULT_HANDLER, NULL, NULL);
if (ACPI_FAILURE(status)) {
if (status == AE_SAME_HANDLER) {
/*
@@ -350,8 +349,8 @@ acpi_ev_pci_config_region_setup(acpi_handle handle,
static u8 acpi_ev_is_pci_root_bridge(struct acpi_namespace_node *node)
{
acpi_status status;
- struct acpica_device_id *hid;
- struct acpica_device_id_list *cid;
+ struct acpi_pnp_device_id *hid;
+ struct acpi_pnp_device_id_list *cid;
u32 i;
u8 match;
diff --git a/drivers/acpi/acpica/evxface.c b/drivers/acpi/acpica/evxface.c
index 7587eb6c9584..ae668f32cf16 100644
--- a/drivers/acpi/acpica/evxface.c
+++ b/drivers/acpi/acpica/evxface.c
@@ -398,7 +398,7 @@ ACPI_EXPORT_SYMBOL(acpi_install_exception_handler)
*
******************************************************************************/
acpi_status
-acpi_install_global_event_handler(ACPI_GBL_EVENT_HANDLER handler, void *context)
+acpi_install_global_event_handler(acpi_gbl_event_handler handler, void *context)
{
acpi_status status;
diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c
index 87c5f2332260..3f30e753b652 100644
--- a/drivers/acpi/acpica/evxfgpe.c
+++ b/drivers/acpi/acpica/evxfgpe.c
@@ -221,7 +221,8 @@ acpi_setup_gpe_for_wake(acpi_handle wake_device,
if (wake_device == ACPI_ROOT_OBJECT) {
device_node = acpi_gbl_root_node;
} else {
- device_node = ACPI_CAST_PTR(struct acpi_namespace_node, wake_device);
+ device_node =
+ ACPI_CAST_PTR(struct acpi_namespace_node, wake_device);
}
/* Validate WakeDevice is of type Device */
@@ -324,7 +325,8 @@ ACPI_EXPORT_SYMBOL(acpi_setup_gpe_for_wake)
*
******************************************************************************/
-acpi_status acpi_set_gpe_wake_mask(acpi_handle gpe_device, u32 gpe_number, u8 action)
+acpi_status
+acpi_set_gpe_wake_mask(acpi_handle gpe_device, u32 gpe_number, u8 action)
{
acpi_status status = AE_OK;
struct acpi_gpe_event_info *gpe_event_info;
@@ -567,7 +569,7 @@ acpi_install_gpe_block(acpi_handle gpe_device,
status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
if (ACPI_FAILURE(status)) {
- return (status);
+ return_ACPI_STATUS(status);
}
node = acpi_ns_validate_handle(gpe_device);
@@ -650,7 +652,7 @@ acpi_status acpi_remove_gpe_block(acpi_handle gpe_device)
status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
if (ACPI_FAILURE(status)) {
- return (status);
+ return_ACPI_STATUS(status);
}
node = acpi_ns_validate_handle(gpe_device);
@@ -694,8 +696,7 @@ ACPI_EXPORT_SYMBOL(acpi_remove_gpe_block)
* the FADT-defined gpe blocks. Otherwise, the GPE block device.
*
******************************************************************************/
-acpi_status
-acpi_get_gpe_device(u32 index, acpi_handle *gpe_device)
+acpi_status acpi_get_gpe_device(u32 index, acpi_handle * gpe_device)
{
struct acpi_gpe_device_info info;
acpi_status status;
diff --git a/drivers/acpi/acpica/exconvrt.c b/drivers/acpi/acpica/exconvrt.c
index bfb062e4c4b4..4492a4e03022 100644
--- a/drivers/acpi/acpica/exconvrt.c
+++ b/drivers/acpi/acpica/exconvrt.c
@@ -516,8 +516,8 @@ acpi_ex_convert_to_string(union acpi_operand_object * obj_desc,
string_length--;
}
- return_desc = acpi_ut_create_string_object((acpi_size)
- string_length);
+ return_desc =
+ acpi_ut_create_string_object((acpi_size) string_length);
if (!return_desc) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
diff --git a/drivers/acpi/acpica/excreate.c b/drivers/acpi/acpica/excreate.c
index 691d4763102c..66554bc6f9a8 100644
--- a/drivers/acpi/acpica/excreate.c
+++ b/drivers/acpi/acpica/excreate.c
@@ -78,7 +78,7 @@ acpi_status acpi_ex_create_alias(struct acpi_walk_state *walk_state)
(target_node->type == ACPI_TYPE_LOCAL_METHOD_ALIAS)) {
/*
* Dereference an existing alias so that we don't create a chain
- * of aliases. With this code, we guarantee that an alias is
+ * of aliases. With this code, we guarantee that an alias is
* always exactly one level of indirection away from the
* actual aliased name.
*/
@@ -90,7 +90,7 @@ acpi_status acpi_ex_create_alias(struct acpi_walk_state *walk_state)
/*
* For objects that can never change (i.e., the NS node will
* permanently point to the same object), we can simply attach
- * the object to the new NS node. For other objects (such as
+ * the object to the new NS node. For other objects (such as
* Integers, buffers, etc.), we have to point the Alias node
* to the original Node.
*/
@@ -139,7 +139,7 @@ acpi_status acpi_ex_create_alias(struct acpi_walk_state *walk_state)
/*
* The new alias assumes the type of the target, and it points
- * to the same object. The reference count of the object has an
+ * to the same object. The reference count of the object has an
* additional reference to prevent deletion out from under either the
* target node or the alias Node
*/
@@ -243,8 +243,7 @@ acpi_status acpi_ex_create_mutex(struct acpi_walk_state *walk_state)
/* Init object and attach to NS node */
- obj_desc->mutex.sync_level =
- (u8) walk_state->operands[1]->integer.value;
+ obj_desc->mutex.sync_level = (u8)walk_state->operands[1]->integer.value;
obj_desc->mutex.node =
(struct acpi_namespace_node *)walk_state->operands[0];
diff --git a/drivers/acpi/acpica/exdebug.c b/drivers/acpi/acpica/exdebug.c
index bc5b9a6a1316..d7c9f51608a7 100644
--- a/drivers/acpi/acpica/exdebug.c
+++ b/drivers/acpi/acpica/exdebug.c
@@ -145,10 +145,10 @@ acpi_ex_do_debug_object(union acpi_operand_object *source_desc,
case ACPI_TYPE_BUFFER:
acpi_os_printf("[0x%.2X]\n", (u32)source_desc->buffer.length);
- acpi_ut_dump_buffer2(source_desc->buffer.pointer,
- (source_desc->buffer.length < 256) ?
- source_desc->buffer.length : 256,
- DB_BYTE_DISPLAY);
+ acpi_ut_dump_buffer(source_desc->buffer.pointer,
+ (source_desc->buffer.length < 256) ?
+ source_desc->buffer.length : 256,
+ DB_BYTE_DISPLAY, 0);
break;
case ACPI_TYPE_STRING:
@@ -190,7 +190,7 @@ acpi_ex_do_debug_object(union acpi_operand_object *source_desc,
acpi_os_printf("Table Index 0x%X\n",
source_desc->reference.value);
- return;
+ return_VOID;
default:
break;
diff --git a/drivers/acpi/acpica/exdump.c b/drivers/acpi/acpica/exdump.c
index 213c081776fc..858b43a7dcf6 100644
--- a/drivers/acpi/acpica/exdump.c
+++ b/drivers/acpi/acpica/exdump.c
@@ -464,7 +464,8 @@ void acpi_ex_dump_operand(union acpi_operand_object *obj_desc, u32 depth)
ACPI_FUNCTION_NAME(ex_dump_operand)
- if (!((ACPI_LV_EXEC & acpi_dbg_level)
+ if (!
+ ((ACPI_LV_EXEC & acpi_dbg_level)
&& (_COMPONENT & acpi_dbg_layer))) {
return;
}
@@ -777,7 +778,7 @@ acpi_ex_dump_operands(union acpi_operand_object **operands,
* PARAMETERS: title - Descriptive text
* value - Value to be displayed
*
- * DESCRIPTION: Object dump output formatting functions. These functions
+ * DESCRIPTION: Object dump output formatting functions. These functions
* reduce the number of format strings required and keeps them
* all in one place for easy modification.
*
@@ -810,7 +811,8 @@ void acpi_ex_dump_namespace_node(struct acpi_namespace_node *node, u32 flags)
ACPI_FUNCTION_ENTRY();
if (!flags) {
- if (!((ACPI_LV_OBJECTS & acpi_dbg_level)
+ if (!
+ ((ACPI_LV_OBJECTS & acpi_dbg_level)
&& (_COMPONENT & acpi_dbg_layer))) {
return;
}
@@ -940,10 +942,11 @@ acpi_ex_dump_package_obj(union acpi_operand_object *obj_desc,
acpi_os_printf("[Buffer] Length %.2X = ",
obj_desc->buffer.length);
if (obj_desc->buffer.length) {
- acpi_ut_dump_buffer(ACPI_CAST_PTR
- (u8, obj_desc->buffer.pointer),
- obj_desc->buffer.length,
- DB_DWORD_DISPLAY, _COMPONENT);
+ acpi_ut_debug_dump_buffer(ACPI_CAST_PTR
+ (u8,
+ obj_desc->buffer.pointer),
+ obj_desc->buffer.length,
+ DB_DWORD_DISPLAY, _COMPONENT);
} else {
acpi_os_printf("\n");
}
@@ -996,7 +999,8 @@ acpi_ex_dump_object_descriptor(union acpi_operand_object *obj_desc, u32 flags)
}
if (!flags) {
- if (!((ACPI_LV_OBJECTS & acpi_dbg_level)
+ if (!
+ ((ACPI_LV_OBJECTS & acpi_dbg_level)
&& (_COMPONENT & acpi_dbg_layer))) {
return_VOID;
}
diff --git a/drivers/acpi/acpica/exfield.c b/drivers/acpi/acpica/exfield.c
index dc092f5b35d6..ebc55fbf3ff7 100644
--- a/drivers/acpi/acpica/exfield.c
+++ b/drivers/acpi/acpica/exfield.c
@@ -59,7 +59,7 @@ ACPI_MODULE_NAME("exfield")
*
* RETURN: Status
*
- * DESCRIPTION: Read from a named field. Returns either an Integer or a
+ * DESCRIPTION: Read from a named field. Returns either an Integer or a
* Buffer, depending on the size of the field.
*
******************************************************************************/
@@ -149,7 +149,7 @@ acpi_ex_read_data_from_field(struct acpi_walk_state *walk_state,
* Allocate a buffer for the contents of the field.
*
* If the field is larger than the current integer width, create
- * a BUFFER to hold it. Otherwise, use an INTEGER. This allows
+ * a BUFFER to hold it. Otherwise, use an INTEGER. This allows
* the use of arithmetic operators on the returned value if the
* field size is equal or smaller than an Integer.
*
diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c
index a7784152ed30..aa2ccfb7cb61 100644
--- a/drivers/acpi/acpica/exfldio.c
+++ b/drivers/acpi/acpica/exfldio.c
@@ -54,8 +54,7 @@ ACPI_MODULE_NAME("exfldio")
/* Local prototypes */
static acpi_status
acpi_ex_field_datum_io(union acpi_operand_object *obj_desc,
- u32 field_datum_byte_offset,
- u64 *value, u32 read_write);
+ u32 field_datum_byte_offset, u64 *value, u32 read_write);
static u8
acpi_ex_register_overflow(union acpi_operand_object *obj_desc, u64 value);
@@ -155,7 +154,7 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc,
#endif
/*
- * Validate the request. The entire request from the byte offset for a
+ * Validate the request. The entire request from the byte offset for a
* length of one field datum (access width) must fit within the region.
* (Region length is specified in bytes)
*/
@@ -183,7 +182,7 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc,
obj_desc->common_field.access_byte_width) {
/*
* This is the case where the access_type (acc_word, etc.) is wider
- * than the region itself. For example, a region of length one
+ * than the region itself. For example, a region of length one
* byte, and a field with Dword access specified.
*/
ACPI_ERROR((AE_INFO,
@@ -321,7 +320,7 @@ acpi_ex_access_region(union acpi_operand_object *obj_desc,
*
* DESCRIPTION: Check if a value is out of range of the field being written.
* Used to check if the values written to Index and Bank registers
- * are out of range. Normally, the value is simply truncated
+ * are out of range. Normally, the value is simply truncated
* to fit the field, but this case is most likely a serious
* coding error in the ASL.
*
@@ -370,7 +369,7 @@ acpi_ex_register_overflow(union acpi_operand_object *obj_desc, u64 value)
*
* RETURN: Status
*
- * DESCRIPTION: Read or Write a single datum of a field. The field_type is
+ * DESCRIPTION: Read or Write a single datum of a field. The field_type is
* demultiplexed here to handle the different types of fields
* (buffer_field, region_field, index_field, bank_field)
*
@@ -860,7 +859,7 @@ acpi_ex_insert_into_field(union acpi_operand_object *obj_desc,
ACPI_ROUND_BITS_UP_TO_BYTES(obj_desc->common_field.bit_length);
/*
* We must have a buffer that is at least as long as the field
- * we are writing to. This is because individual fields are
+ * we are writing to. This is because individual fields are
* indivisible and partial writes are not supported -- as per
* the ACPI specification.
*/
@@ -875,7 +874,7 @@ acpi_ex_insert_into_field(union acpi_operand_object *obj_desc,
/*
* Copy the original data to the new buffer, starting
- * at Byte zero. All unused (upper) bytes of the
+ * at Byte zero. All unused (upper) bytes of the
* buffer will be 0.
*/
ACPI_MEMCPY((char *)new_buffer, (char *)buffer, buffer_length);
diff --git a/drivers/acpi/acpica/exmisc.c b/drivers/acpi/acpica/exmisc.c
index 271c0c57ea10..84058705ed12 100644
--- a/drivers/acpi/acpica/exmisc.c
+++ b/drivers/acpi/acpica/exmisc.c
@@ -1,4 +1,3 @@
-
/******************************************************************************
*
* Module Name: exmisc - ACPI AML (p-code) execution - specific opcodes
@@ -254,7 +253,7 @@ acpi_ex_do_concatenate(union acpi_operand_object *operand0,
ACPI_FUNCTION_TRACE(ex_do_concatenate);
/*
- * Convert the second operand if necessary. The first operand
+ * Convert the second operand if necessary. The first operand
* determines the type of the second operand, (See the Data Types
* section of the ACPI specification.) Both object types are
* guaranteed to be either Integer/String/Buffer by the operand
@@ -573,7 +572,7 @@ acpi_ex_do_logical_op(u16 opcode,
ACPI_FUNCTION_TRACE(ex_do_logical_op);
/*
- * Convert the second operand if necessary. The first operand
+ * Convert the second operand if necessary. The first operand
* determines the type of the second operand, (See the Data Types
* section of the ACPI 3.0+ specification.) Both object types are
* guaranteed to be either Integer/String/Buffer by the operand
diff --git a/drivers/acpi/acpica/exmutex.c b/drivers/acpi/acpica/exmutex.c
index bcceda5be9e3..d1f449d93dcf 100644
--- a/drivers/acpi/acpica/exmutex.c
+++ b/drivers/acpi/acpica/exmutex.c
@@ -1,4 +1,3 @@
-
/******************************************************************************
*
* Module Name: exmutex - ASL Mutex Acquire/Release functions
@@ -305,7 +304,7 @@ acpi_status acpi_ex_release_mutex_object(union acpi_operand_object *obj_desc)
ACPI_FUNCTION_TRACE(ex_release_mutex_object);
if (obj_desc->mutex.acquisition_depth == 0) {
- return (AE_NOT_ACQUIRED);
+ return_ACPI_STATUS(AE_NOT_ACQUIRED);
}
/* Match multiple Acquires with multiple Releases */
@@ -462,7 +461,7 @@ void acpi_ex_release_all_mutexes(struct acpi_thread_state *thread)
union acpi_operand_object *next = thread->acquired_mutex_list;
union acpi_operand_object *obj_desc;
- ACPI_FUNCTION_ENTRY();
+ ACPI_FUNCTION_NAME(ex_release_all_mutexes);
/* Traverse the list of owned mutexes, releasing each one */
@@ -474,6 +473,10 @@ void acpi_ex_release_all_mutexes(struct acpi_thread_state *thread)
obj_desc->mutex.next = NULL;
obj_desc->mutex.acquisition_depth = 0;
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "Force-releasing held mutex: %p\n",
+ obj_desc));
+
/* Release the mutex, special case for Global Lock */
if (obj_desc == acpi_gbl_global_lock_mutex) {
diff --git a/drivers/acpi/acpica/exnames.c b/drivers/acpi/acpica/exnames.c
index fcc75fa27d32..2ff578a16adc 100644
--- a/drivers/acpi/acpica/exnames.c
+++ b/drivers/acpi/acpica/exnames.c
@@ -1,4 +1,3 @@
-
/******************************************************************************
*
* Module Name: exnames - interpreter/scanner name load/execute
@@ -53,8 +52,7 @@ ACPI_MODULE_NAME("exnames")
/* Local prototypes */
static char *acpi_ex_allocate_name_string(u32 prefix_count, u32 num_name_segs);
-static acpi_status
-acpi_ex_name_segment(u8 ** in_aml_address, char *name_string);
+static acpi_status acpi_ex_name_segment(u8 **in_aml_address, char *name_string);
/*******************************************************************************
*
@@ -64,7 +62,7 @@ acpi_ex_name_segment(u8 ** in_aml_address, char *name_string);
* (-1)==root, 0==none
* num_name_segs - count of 4-character name segments
*
- * RETURN: A pointer to the allocated string segment. This segment must
+ * RETURN: A pointer to the allocated string segment. This segment must
* be deleted by the caller.
*
* DESCRIPTION: Allocate a buffer for a name string. Ensure allocated name
@@ -178,7 +176,8 @@ static acpi_status acpi_ex_name_segment(u8 ** in_aml_address, char *name_string)
ACPI_DEBUG_PRINT((ACPI_DB_LOAD, "Bytes from stream:\n"));
- for (index = 0; (index < ACPI_NAME_SIZE)
+ for (index = 0;
+ (index < ACPI_NAME_SIZE)
&& (acpi_ut_valid_acpi_char(*aml_address, 0)); index++) {
char_buf[index] = *aml_address++;
ACPI_DEBUG_PRINT((ACPI_DB_LOAD, "%c\n", char_buf[index]));
diff --git a/drivers/acpi/acpica/exoparg1.c b/drivers/acpi/acpica/exoparg1.c
index 9ba8c73cea16..bbf01e9bf057 100644
--- a/drivers/acpi/acpica/exoparg1.c
+++ b/drivers/acpi/acpica/exoparg1.c
@@ -1,4 +1,3 @@
-
/******************************************************************************
*
* Module Name: exoparg1 - AML execution - opcodes with 1 argument
@@ -606,7 +605,7 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
}
/*
- * Set result to ONES (TRUE) if Value == 0. Note:
+ * Set result to ONES (TRUE) if Value == 0. Note:
* return_desc->Integer.Value is initially == 0 (FALSE) from above.
*/
if (!operand[0]->integer.value) {
@@ -618,7 +617,7 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
case AML_INCREMENT_OP: /* Increment (Operand) */
/*
- * Create a new integer. Can't just get the base integer and
+ * Create a new integer. Can't just get the base integer and
* increment it because it may be an Arg or Field.
*/
return_desc = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
@@ -686,7 +685,7 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
/*
* Note: The operand is not resolved at this point because we want to
- * get the associated object, not its value. For example, we don't
+ * get the associated object, not its value. For example, we don't
* want to resolve a field_unit to its value, we want the actual
* field_unit object.
*/
@@ -727,7 +726,7 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
/*
* The type of the base object must be integer, buffer, string, or
- * package. All others are not supported.
+ * package. All others are not supported.
*
* NOTE: Integer is not specifically supported by the ACPI spec,
* but is supported implicitly via implicit operand conversion.
@@ -965,7 +964,7 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
case ACPI_TYPE_PACKAGE:
/*
- * Return the referenced element of the package. We must
+ * Return the referenced element of the package. We must
* add another reference to the referenced object, however.
*/
return_desc =
diff --git a/drivers/acpi/acpica/exoparg2.c b/drivers/acpi/acpica/exoparg2.c
index 879e8a277b94..ee5634a074c4 100644
--- a/drivers/acpi/acpica/exoparg2.c
+++ b/drivers/acpi/acpica/exoparg2.c
@@ -123,7 +123,7 @@ acpi_status acpi_ex_opcode_2A_0T_0R(struct acpi_walk_state *walk_state)
/*
* Dispatch the notify to the appropriate handler
* NOTE: the request is queued for execution after this method
- * completes. The notify handlers are NOT invoked synchronously
+ * completes. The notify handlers are NOT invoked synchronously
* from this thread -- because handlers may in turn run other
* control methods.
*/
diff --git a/drivers/acpi/acpica/exoparg3.c b/drivers/acpi/acpica/exoparg3.c
index 71fcc65c9ffa..2c89b4651f08 100644
--- a/drivers/acpi/acpica/exoparg3.c
+++ b/drivers/acpi/acpica/exoparg3.c
@@ -1,4 +1,3 @@
-
/******************************************************************************
*
* Module Name: exoparg3 - AML execution - opcodes with 3 arguments
@@ -158,7 +157,7 @@ acpi_status acpi_ex_opcode_3A_1T_1R(struct acpi_walk_state *walk_state)
case AML_MID_OP: /* Mid (Source[0], Index[1], Length[2], Result[3]) */
/*
- * Create the return object. The Source operand is guaranteed to be
+ * Create the return object. The Source operand is guaranteed to be
* either a String or a Buffer, so just use its type.
*/
return_desc = acpi_ut_create_internal_object((operand[0])->
diff --git a/drivers/acpi/acpica/exoparg6.c b/drivers/acpi/acpica/exoparg6.c
index 0786b8659061..3e08695c3b30 100644
--- a/drivers/acpi/acpica/exoparg6.c
+++ b/drivers/acpi/acpica/exoparg6.c
@@ -1,4 +1,3 @@
-
/******************************************************************************
*
* Module Name: exoparg6 - AML execution - opcodes with 6 arguments
@@ -198,7 +197,7 @@ acpi_ex_do_match(u32 match_op,
return (FALSE);
}
- return logical_result;
+ return (logical_result);
}
/*******************************************************************************
@@ -269,7 +268,7 @@ acpi_status acpi_ex_opcode_6A_0T_1R(struct acpi_walk_state * walk_state)
* and the next should be examined.
*
* Upon finding a match, the loop will terminate via "break" at
- * the bottom. If it terminates "normally", match_value will be
+ * the bottom. If it terminates "normally", match_value will be
* ACPI_UINT64_MAX (Ones) (its initial value) indicating that no
* match was found.
*/
diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c
index 81eca60d2748..ba9db4de7c89 100644
--- a/drivers/acpi/acpica/exprep.c
+++ b/drivers/acpi/acpica/exprep.c
@@ -1,4 +1,3 @@
-
/******************************************************************************
*
* Module Name: exprep - ACPI AML (p-code) execution - field prep utilities
@@ -78,8 +77,8 @@ acpi_ex_generate_access(u32 field_bit_offset,
* any_acc keyword.
*
* NOTE: Need to have the region_length in order to check for boundary
- * conditions (end-of-region). However, the region_length is a deferred
- * operation. Therefore, to complete this implementation, the generation
+ * conditions (end-of-region). However, the region_length is a deferred
+ * operation. Therefore, to complete this implementation, the generation
* of this access width must be deferred until the region length has
* been evaluated.
*
@@ -308,7 +307,7 @@ acpi_ex_decode_field_access(union acpi_operand_object *obj_desc,
* RETURN: Status
*
* DESCRIPTION: Initialize the areas of the field object that are common
- * to the various types of fields. Note: This is very "sensitive"
+ * to the various types of fields. Note: This is very "sensitive"
* code because we are solving the general case for field
* alignment.
*
@@ -336,13 +335,13 @@ acpi_ex_prep_common_field_object(union acpi_operand_object *obj_desc,
obj_desc->common_field.bit_length = field_bit_length;
/*
- * Decode the access type so we can compute offsets. The access type gives
+ * Decode the access type so we can compute offsets. The access type gives
* two pieces of information - the width of each field access and the
* necessary byte_alignment (address granularity) of the access.
*
* For any_acc, the access_bit_width is the largest width that is both
* necessary and possible in an attempt to access the whole field in one
- * I/O operation. However, for any_acc, the byte_alignment is always one
+ * I/O operation. However, for any_acc, the byte_alignment is always one
* byte.
*
* For all Buffer Fields, the byte_alignment is always one byte.
@@ -363,7 +362,7 @@ acpi_ex_prep_common_field_object(union acpi_operand_object *obj_desc,
/*
* base_byte_offset is the address of the start of the field within the
- * region. It is the byte address of the first *datum* (field-width data
+ * region. It is the byte address of the first *datum* (field-width data
* unit) of the field. (i.e., the first datum that contains at least the
* first *bit* of the field.)
*
diff --git a/drivers/acpi/acpica/exregion.c b/drivers/acpi/acpica/exregion.c
index 1f1ce0c3d2f8..1db2c0bfde0b 100644
--- a/drivers/acpi/acpica/exregion.c
+++ b/drivers/acpi/acpica/exregion.c
@@ -1,4 +1,3 @@
-
/******************************************************************************
*
* Module Name: exregion - ACPI default op_region (address space) handlers
@@ -202,7 +201,7 @@ acpi_ex_system_memory_space_handler(u32 function,
* Perform the memory read or write
*
* Note: For machines that do not support non-aligned transfers, the target
- * address was checked for alignment above. We do not attempt to break the
+ * address was checked for alignment above. We do not attempt to break the
* transfer up into smaller (byte-size) chunks because the AML specifically
* asked for a transfer width that the hardware may require.
*/
diff --git a/drivers/acpi/acpica/exresnte.c b/drivers/acpi/acpica/exresnte.c
index fa50e77e64a8..6239956786eb 100644
--- a/drivers/acpi/acpica/exresnte.c
+++ b/drivers/acpi/acpica/exresnte.c
@@ -1,4 +1,3 @@
-
/******************************************************************************
*
* Module Name: exresnte - AML Interpreter object resolution
@@ -58,8 +57,8 @@ ACPI_MODULE_NAME("exresnte")
* PARAMETERS: object_ptr - Pointer to a location that contains
* a pointer to a NS node, and will receive a
* pointer to the resolved object.
- * walk_state - Current state. Valid only if executing AML
- * code. NULL if simply resolving an object
+ * walk_state - Current state. Valid only if executing AML
+ * code. NULL if simply resolving an object
*
* RETURN: Status
*
@@ -67,7 +66,7 @@ ACPI_MODULE_NAME("exresnte")
*
* Note: for some of the data types, the pointer attached to the Node
* can be either a pointer to an actual internal object or a pointer into the
- * AML stream itself. These types are currently:
+ * AML stream itself. These types are currently:
*
* ACPI_TYPE_INTEGER
* ACPI_TYPE_STRING
@@ -89,7 +88,7 @@ acpi_ex_resolve_node_to_value(struct acpi_namespace_node **object_ptr,
ACPI_FUNCTION_TRACE(ex_resolve_node_to_value);
/*
- * The stack pointer points to a struct acpi_namespace_node (Node). Get the
+ * The stack pointer points to a struct acpi_namespace_node (Node). Get the
* object that is attached to the Node.
*/
node = *object_ptr;
diff --git a/drivers/acpi/acpica/exresolv.c b/drivers/acpi/acpica/exresolv.c
index bbf40ac27585..cc176b245e22 100644
--- a/drivers/acpi/acpica/exresolv.c
+++ b/drivers/acpi/acpica/exresolv.c
@@ -1,4 +1,3 @@
-
/******************************************************************************
*
* Module Name: exresolv - AML Interpreter object resolution
@@ -327,7 +326,7 @@ acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr,
*
* RETURN: Status
*
- * DESCRIPTION: Return the base object and type. Traverse a reference list if
+ * DESCRIPTION: Return the base object and type. Traverse a reference list if
* necessary to get to the base object.
*
******************************************************************************/
diff --git a/drivers/acpi/acpica/exresop.c b/drivers/acpi/acpica/exresop.c
index f232fbabdea8..b9ebff2f6a09 100644
--- a/drivers/acpi/acpica/exresop.c
+++ b/drivers/acpi/acpica/exresop.c
@@ -1,4 +1,3 @@
-
/******************************************************************************
*
* Module Name: exresop - AML Interpreter operand/object resolution
@@ -87,7 +86,7 @@ acpi_ex_check_object_type(acpi_object_type type_needed,
if (type_needed == ACPI_TYPE_LOCAL_REFERENCE) {
/*
* Allow the AML "Constant" opcodes (Zero, One, etc.) to be reference
- * objects and thus allow them to be targets. (As per the ACPI
+ * objects and thus allow them to be targets. (As per the ACPI
* specification, a store to a constant is a noop.)
*/
if ((this_type == ACPI_TYPE_INTEGER) &&
@@ -337,7 +336,8 @@ acpi_ex_resolve_operands(u16 opcode,
if ((opcode == AML_STORE_OP) &&
((*stack_ptr)->common.type ==
ACPI_TYPE_LOCAL_REFERENCE)
- && ((*stack_ptr)->reference.class == ACPI_REFCLASS_INDEX)) {
+ && ((*stack_ptr)->reference.class ==
+ ACPI_REFCLASS_INDEX)) {
goto next_operand;
}
break;
@@ -638,7 +638,7 @@ acpi_ex_resolve_operands(u16 opcode,
if (acpi_gbl_enable_interpreter_slack) {
/*
* Enable original behavior of Store(), allowing any and all
- * objects as the source operand. The ACPI spec does not
+ * objects as the source operand. The ACPI spec does not
* allow this, however.
*/
break;
diff --git a/drivers/acpi/acpica/exstore.c b/drivers/acpi/acpica/exstore.c
index 5fffe7ab5ece..90431f12f831 100644
--- a/drivers/acpi/acpica/exstore.c
+++ b/drivers/acpi/acpica/exstore.c
@@ -374,7 +374,7 @@ acpi_ex_store_object_to_index(union acpi_operand_object *source_desc,
* with the input value.
*
* When storing into an object the data is converted to the
- * target object type then stored in the object. This means
+ * target object type then stored in the object. This means
* that the target object type (for an initialized target) will
* not be changed by a store operation.
*
@@ -491,7 +491,7 @@ acpi_ex_store_object_to_node(union acpi_operand_object *source_desc,
acpi_ut_get_object_type_name(source_desc),
source_desc, node));
- /* No conversions for all other types. Just attach the source object */
+ /* No conversions for all other types. Just attach the source object */
status = acpi_ns_attach_object(node, source_desc,
source_desc->common.type);
diff --git a/drivers/acpi/acpica/exstoren.c b/drivers/acpi/acpica/exstoren.c
index b35bed52e061..87153bbc4b43 100644
--- a/drivers/acpi/acpica/exstoren.c
+++ b/drivers/acpi/acpica/exstoren.c
@@ -1,4 +1,3 @@
-
/******************************************************************************
*
* Module Name: exstoren - AML Interpreter object store support,
@@ -61,7 +60,7 @@ ACPI_MODULE_NAME("exstoren")
*
* RETURN: Status, resolved object in source_desc_ptr.
*
- * DESCRIPTION: Resolve an object. If the object is a reference, dereference
+ * DESCRIPTION: Resolve an object. If the object is a reference, dereference
* it and return the actual object in the source_desc_ptr.
*
******************************************************************************/
@@ -93,7 +92,7 @@ acpi_ex_resolve_object(union acpi_operand_object **source_desc_ptr,
/*
* Stores into a Field/Region or into a Integer/Buffer/String
- * are all essentially the same. This case handles the
+ * are all essentially the same. This case handles the
* "interchangeable" types Integer, String, and Buffer.
*/
if (source_desc->common.type == ACPI_TYPE_LOCAL_REFERENCE) {
@@ -167,7 +166,7 @@ acpi_ex_resolve_object(union acpi_operand_object **source_desc_ptr,
*
* RETURN: Status
*
- * DESCRIPTION: "Store" an object to another object. This may include
+ * DESCRIPTION: "Store" an object to another object. This may include
* converting the source type to the target type (implicit
* conversion), and a copy of the value of the source to
* the target.
@@ -178,14 +177,14 @@ acpi_ex_resolve_object(union acpi_operand_object **source_desc_ptr,
* with the input value.
*
* When storing into an object the data is converted to the
- * target object type then stored in the object. This means
+ * target object type then stored in the object. This means
* that the target object type (for an initialized target) will
* not be changed by a store operation.
*
* This module allows destination types of Number, String,
* Buffer, and Package.
*
- * Assumes parameters are already validated. NOTE: source_desc
+ * Assumes parameters are already validated. NOTE: source_desc
* resolution (from a reference object) must be performed by
* the caller if necessary.
*
diff --git a/drivers/acpi/acpica/exstorob.c b/drivers/acpi/acpica/exstorob.c
index 53c248473547..b5f339cb1305 100644
--- a/drivers/acpi/acpica/exstorob.c
+++ b/drivers/acpi/acpica/exstorob.c
@@ -1,4 +1,3 @@
-
/******************************************************************************
*
* Module Name: exstorob - AML Interpreter object store support, store to object
@@ -108,7 +107,7 @@ acpi_ex_store_buffer_to_buffer(union acpi_operand_object *source_desc,
#ifdef ACPI_OBSOLETE_BEHAVIOR
/*
* NOTE: ACPI versions up to 3.0 specified that the buffer must be
- * truncated if the string is smaller than the buffer. However, "other"
+ * truncated if the string is smaller than the buffer. However, "other"
* implementations of ACPI never did this and thus became the defacto
* standard. ACPI 3.0A changes this behavior such that the buffer
* is no longer truncated.
@@ -117,7 +116,7 @@ acpi_ex_store_buffer_to_buffer(union acpi_operand_object *source_desc,
/*
* OBSOLETE BEHAVIOR:
* If the original source was a string, we must truncate the buffer,
- * according to the ACPI spec. Integer-to-Buffer and Buffer-to-Buffer
+ * according to the ACPI spec. Integer-to-Buffer and Buffer-to-Buffer
* copy must not truncate the original buffer.
*/
if (original_src_type == ACPI_TYPE_STRING) {
diff --git a/drivers/acpi/acpica/exsystem.c b/drivers/acpi/acpica/exsystem.c
index b760641e2fc6..c8a0ad5c1f55 100644
--- a/drivers/acpi/acpica/exsystem.c
+++ b/drivers/acpi/acpica/exsystem.c
@@ -1,4 +1,3 @@
-
/******************************************************************************
*
* Module Name: exsystem - Interface to OS services
@@ -59,7 +58,7 @@ ACPI_MODULE_NAME("exsystem")
* RETURN: Status
*
* DESCRIPTION: Implements a semaphore wait with a check to see if the
- * semaphore is available immediately. If it is not, the
+ * semaphore is available immediately. If it is not, the
* interpreter is released before waiting.
*
******************************************************************************/
@@ -104,7 +103,7 @@ acpi_status acpi_ex_system_wait_semaphore(acpi_semaphore semaphore, u16 timeout)
* RETURN: Status
*
* DESCRIPTION: Implements a mutex wait with a check to see if the
- * mutex is available immediately. If it is not, the
+ * mutex is available immediately. If it is not, the
* interpreter is released before waiting.
*
******************************************************************************/
@@ -152,7 +151,7 @@ acpi_status acpi_ex_system_wait_mutex(acpi_mutex mutex, u16 timeout)
* DESCRIPTION: Suspend running thread for specified amount of time.
* Note: ACPI specification requires that Stall() does not
* relinquish the processor, and delays longer than 100 usec
- * should use Sleep() instead. We allow stalls up to 255 usec
+ * should use Sleep() instead. We allow stalls up to 255 usec
* for compatibility with other interpreters and existing BIOSs.
*
******************************************************************************/
@@ -254,7 +253,7 @@ acpi_status acpi_ex_system_signal_event(union acpi_operand_object * obj_desc)
* RETURN: Status
*
* DESCRIPTION: Provides an access point to perform synchronization operations
- * within the AML. This operation is a request to wait for an
+ * within the AML. This operation is a request to wait for an
* event.
*
******************************************************************************/
diff --git a/drivers/acpi/acpica/exutils.c b/drivers/acpi/acpica/exutils.c
index d1ab7917eed7..264d22d8018c 100644
--- a/drivers/acpi/acpica/exutils.c
+++ b/drivers/acpi/acpica/exutils.c
@@ -1,4 +1,3 @@
-
/******************************************************************************
*
* Module Name: exutils - interpreter/scanner utilities
@@ -45,12 +44,12 @@
/*
* DEFINE_AML_GLOBALS is tested in amlcode.h
* to determine whether certain global names should be "defined" or only
- * "declared" in the current compilation. This enhances maintainability
+ * "declared" in the current compilation. This enhances maintainability
* by enabling a single header file to embody all knowledge of the names
* in question.
*
* Exactly one module of any executable should #define DEFINE_GLOBALS
- * before #including the header files which use this convention. The
+ * before #including the header files which use this convention. The
* names in question will be defined and initialized in that module,
* and declared as extern in all other modules which #include those
* header files.
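
A minimal sketch of the define-or-declare convention described in this exutils.c comment, using invented names rather than the real ACPICA macros:

/* my_globals.h -- hypothetical header following the convention */
#ifdef DEFINE_GLOBALS
#define EXTERN                  /* the one defining module emits real definitions */
#define INIT_GLOBAL(var, init)  var = (init)
#else
#define EXTERN extern           /* every other module sees declarations only */
#define INIT_GLOBAL(var, init)  var
#endif

EXTERN INIT_GLOBAL(int my_global_counter, 0);

/*
 * Exactly one .c file does:
 *     #define DEFINE_GLOBALS
 *     #include "my_globals.h"
 * All other .c files simply #include the header and get extern declarations.
 */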
diff --git a/drivers/acpi/acpica/hwacpi.c b/drivers/acpi/acpica/hwacpi.c
index a1e71d0ef57b..90a9aea1cee9 100644
--- a/drivers/acpi/acpica/hwacpi.c
+++ b/drivers/acpi/acpica/hwacpi.c
@@ -1,4 +1,3 @@
-
/******************************************************************************
*
* Module Name: hwacpi - ACPI Hardware Initialization/Mode Interface
@@ -136,7 +135,7 @@ acpi_status acpi_hw_set_mode(u32 mode)
*
* RETURN: SYS_MODE_ACPI or SYS_MODE_LEGACY
*
- * DESCRIPTION: Return current operating state of system. Determined by
+ * DESCRIPTION: Return current operating state of system. Determined by
* querying the SCI_EN bit.
*
******************************************************************************/
diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c
index db4076580e2b..64560045052d 100644
--- a/drivers/acpi/acpica/hwgpe.c
+++ b/drivers/acpi/acpica/hwgpe.c
@@ -1,4 +1,3 @@
-
/******************************************************************************
*
* Module Name: hwgpe - Low level GPE enable/disable/clear functions
@@ -339,7 +338,8 @@ acpi_hw_clear_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
acpi_status
acpi_hw_enable_runtime_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
- struct acpi_gpe_block_info *gpe_block, void *context)
+ struct acpi_gpe_block_info * gpe_block,
+ void *context)
{
u32 i;
acpi_status status;
diff --git a/drivers/acpi/acpica/hwpci.c b/drivers/acpi/acpica/hwpci.c
index 1455ddcdc32c..65bc3453a29c 100644
--- a/drivers/acpi/acpica/hwpci.c
+++ b/drivers/acpi/acpica/hwpci.c
@@ -259,7 +259,7 @@ acpi_hw_process_pci_list(struct acpi_pci_id *pci_id,
status = acpi_hw_get_pci_device_info(pci_id, info->device,
&bus_number, &is_bridge);
if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
+ return (status);
}
info = info->next;
@@ -271,7 +271,7 @@ acpi_hw_process_pci_list(struct acpi_pci_id *pci_id,
pci_id->segment, pci_id->bus, pci_id->device,
pci_id->function, status, bus_number, is_bridge));
- return_ACPI_STATUS(AE_OK);
+ return (AE_OK);
}
/*******************************************************************************
diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c
index 4af6d20ef077..f4e57503576b 100644
--- a/drivers/acpi/acpica/hwregs.c
+++ b/drivers/acpi/acpica/hwregs.c
@@ -1,4 +1,3 @@
-
/*******************************************************************************
*
* Module Name: hwregs - Read/write access functions for the various ACPI
diff --git a/drivers/acpi/acpica/hwtimer.c b/drivers/acpi/acpica/hwtimer.c
index b6411f16832f..bfdce22f3798 100644
--- a/drivers/acpi/acpica/hwtimer.c
+++ b/drivers/acpi/acpica/hwtimer.c
@@ -1,4 +1,3 @@
-
/******************************************************************************
*
* Name: hwtimer.c - ACPI Power Management Timer Interface
@@ -101,8 +100,7 @@ acpi_status acpi_get_timer(u32 * ticks)
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
- status =
- acpi_hw_read(ticks, &acpi_gbl_FADT.xpm_timer_block);
+ status = acpi_hw_read(ticks, &acpi_gbl_FADT.xpm_timer_block);
return_ACPI_STATUS(status);
}
@@ -129,7 +127,7 @@ ACPI_EXPORT_SYMBOL(acpi_get_timer)
* a versatile and accurate timer.
*
* Note that this function accommodates only a single timer
- * rollover. Thus for 24-bit timers, this function should only
+ * rollover. Thus for 24-bit timers, this function should only
* be used for calculating durations less than ~4.6 seconds
* (~20 minutes for 32-bit timers) -- calculations below:
*
diff --git a/drivers/acpi/acpica/hwvalid.c b/drivers/acpi/acpica/hwvalid.c
index c99d546b217f..b6aae58299dc 100644
--- a/drivers/acpi/acpica/hwvalid.c
+++ b/drivers/acpi/acpica/hwvalid.c
@@ -1,4 +1,3 @@
-
/******************************************************************************
*
* Module Name: hwvalid - I/O request validation
diff --git a/drivers/acpi/acpica/hwxface.c b/drivers/acpi/acpica/hwxface.c
index 7bfd649d1996..05a154c3c9ac 100644
--- a/drivers/acpi/acpica/hwxface.c
+++ b/drivers/acpi/acpica/hwxface.c
@@ -1,4 +1,3 @@
-
/******************************************************************************
*
* Module Name: hwxface - Public ACPICA hardware interfaces
diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c
index 0ff1ecea5c3a..ae443fe2ebf6 100644
--- a/drivers/acpi/acpica/hwxfsleep.c
+++ b/drivers/acpi/acpica/hwxfsleep.c
@@ -49,8 +49,7 @@
ACPI_MODULE_NAME("hwxfsleep")
/* Local prototypes */
-static acpi_status
-acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id);
+static acpi_status acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id);
/*
* Dispatch table used to efficiently branch to the various sleep
@@ -234,8 +233,7 @@ ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state_s4bios)
* function.
*
******************************************************************************/
-static acpi_status
-acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id)
+static acpi_status acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id)
{
acpi_status status;
struct acpi_sleep_functions *sleep_functions =
@@ -369,8 +367,7 @@ acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state)
return_ACPI_STATUS(AE_AML_OPERAND_VALUE);
}
- status =
- acpi_hw_sleep_dispatch(sleep_state, ACPI_SLEEP_FUNCTION_ID);
+ status = acpi_hw_sleep_dispatch(sleep_state, ACPI_SLEEP_FUNCTION_ID);
return_ACPI_STATUS(status);
}
@@ -396,8 +393,7 @@ acpi_status acpi_leave_sleep_state_prep(u8 sleep_state)
ACPI_FUNCTION_TRACE(acpi_leave_sleep_state_prep);
status =
- acpi_hw_sleep_dispatch(sleep_state,
- ACPI_WAKE_PREP_FUNCTION_ID);
+ acpi_hw_sleep_dispatch(sleep_state, ACPI_WAKE_PREP_FUNCTION_ID);
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/nsaccess.c b/drivers/acpi/acpica/nsaccess.c
index 23db53ce2293..d70eaf39dfdf 100644
--- a/drivers/acpi/acpica/nsaccess.c
+++ b/drivers/acpi/acpica/nsaccess.c
@@ -110,11 +110,11 @@ acpi_status acpi_ns_root_initialize(void)
status = acpi_ns_lookup(NULL, init_val->name, init_val->type,
ACPI_IMODE_LOAD_PASS2,
ACPI_NS_NO_UPSEARCH, NULL, &new_node);
-
- if (ACPI_FAILURE(status) || (!new_node)) { /* Must be on same line for code converter */
+ if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
"Could not create predefined name %s",
init_val->name));
+ continue;
}
/*
@@ -179,8 +179,7 @@ acpi_status acpi_ns_root_initialize(void)
/* Build an object around the static string */
- obj_desc->string.length =
- (u32) ACPI_STRLEN(val);
+ obj_desc->string.length = (u32)ACPI_STRLEN(val);
obj_desc->string.pointer = val;
obj_desc->common.flags |= AOPOBJ_STATIC_POINTER;
break;
diff --git a/drivers/acpi/acpica/nsalloc.c b/drivers/acpi/acpica/nsalloc.c
index ac389e5bb594..15143c44f5e5 100644
--- a/drivers/acpi/acpica/nsalloc.c
+++ b/drivers/acpi/acpica/nsalloc.c
@@ -332,7 +332,7 @@ void acpi_ns_delete_children(struct acpi_namespace_node *parent_node)
*
* RETURN: None.
*
- * DESCRIPTION: Delete a subtree of the namespace. This includes all objects
+ * DESCRIPTION: Delete a subtree of the namespace. This includes all objects
* stored within the subtree.
*
******************************************************************************/
@@ -418,7 +418,7 @@ void acpi_ns_delete_namespace_subtree(struct acpi_namespace_node *parent_node)
* RETURN: Status
*
* DESCRIPTION: Delete entries within the namespace that are owned by a
- * specific ID. Used to delete entire ACPI tables. All
+ * specific ID. Used to delete entire ACPI tables. All
* reference counts are updated.
*
* MUTEX: Locks namespace during deletion walk.
diff --git a/drivers/acpi/acpica/nsdump.c b/drivers/acpi/acpica/nsdump.c
index 2526aaf945ee..924b3c71473a 100644
--- a/drivers/acpi/acpica/nsdump.c
+++ b/drivers/acpi/acpica/nsdump.c
@@ -209,14 +209,6 @@ acpi_ns_dump_one_object(acpi_handle obj_handle,
"Invalid ACPI Object Type 0x%08X", type));
}
- if (!acpi_ut_valid_acpi_name(this_node->name.integer)) {
- this_node->name.integer =
- acpi_ut_repair_name(this_node->name.ascii);
-
- ACPI_WARNING((AE_INFO, "Invalid ACPI Name %08X",
- this_node->name.integer));
- }
-
acpi_os_printf("%4.4s", acpi_ut_get_node_name(this_node));
}
@@ -700,7 +692,7 @@ void acpi_ns_dump_entry(acpi_handle handle, u32 debug_level)
*
* PARAMETERS: search_base - Root of subtree to be dumped, or
* NS_ALL to dump the entire namespace
- * max_depth - Maximum depth of dump. Use INT_MAX
+ * max_depth - Maximum depth of dump. Use INT_MAX
* for an effectively unlimited depth.
*
* RETURN: None
diff --git a/drivers/acpi/acpica/nsinit.c b/drivers/acpi/acpica/nsinit.c
index 95ffe8dfa1f1..4328e2adfeb9 100644
--- a/drivers/acpi/acpica/nsinit.c
+++ b/drivers/acpi/acpica/nsinit.c
@@ -96,8 +96,8 @@ acpi_status acpi_ns_initialize_objects(void)
/* Walk entire namespace from the supplied root */
status = acpi_walk_namespace(ACPI_TYPE_ANY, ACPI_ROOT_OBJECT,
- ACPI_UINT32_MAX, acpi_ns_init_one_object, NULL,
- &info, NULL);
+ ACPI_UINT32_MAX, acpi_ns_init_one_object,
+ NULL, &info, NULL);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status, "During WalkNamespace"));
}
diff --git a/drivers/acpi/acpica/nsload.c b/drivers/acpi/acpica/nsload.c
index 76935ff29289..911f99127b99 100644
--- a/drivers/acpi/acpica/nsload.c
+++ b/drivers/acpi/acpica/nsload.c
@@ -80,8 +80,8 @@ acpi_ns_load_table(u32 table_index, struct acpi_namespace_node *node)
/*
* Parse the table and load the namespace with all named
- * objects found within. Control methods are NOT parsed
- * at this time. In fact, the control methods cannot be
+ * objects found within. Control methods are NOT parsed
+ * at this time. In fact, the control methods cannot be
* parsed until the entire namespace is loaded, because
* if a control method makes a forward reference (call)
* to another control method, we can't continue parsing
@@ -122,7 +122,7 @@ acpi_ns_load_table(u32 table_index, struct acpi_namespace_node *node)
}
/*
- * Now we can parse the control methods. We always parse
+ * Now we can parse the control methods. We always parse
* them here for a sanity check, and if configured for
* just-in-time parsing, we delete the control method
* parse trees.
@@ -166,7 +166,7 @@ acpi_status acpi_ns_load_namespace(void)
}
/*
- * Load the namespace. The DSDT is required,
+ * Load the namespace. The DSDT is required,
* but the SSDT and PSDT tables are optional.
*/
status = acpi_ns_load_table_by_type(ACPI_TABLE_ID_DSDT);
@@ -283,7 +283,7 @@ static acpi_status acpi_ns_delete_subtree(acpi_handle start_handle)
* RETURN: Status
*
* DESCRIPTION: Shrinks the namespace, typically in response to an undocking
- * event. Deletes an entire subtree starting from (and
+ * event. Deletes an entire subtree starting from (and
* including) the given handle.
*
******************************************************************************/
diff --git a/drivers/acpi/acpica/nsnames.c b/drivers/acpi/acpica/nsnames.c
index 96e0eb609bb4..55a175eadcc3 100644
--- a/drivers/acpi/acpica/nsnames.c
+++ b/drivers/acpi/acpica/nsnames.c
@@ -195,7 +195,7 @@ acpi_size acpi_ns_get_pathname_length(struct acpi_namespace_node *node)
ACPI_ERROR((AE_INFO,
"Invalid Namespace Node (%p) while traversing namespace",
next_node));
- return 0;
+ return (0);
}
size += ACPI_PATH_SEGMENT_LENGTH;
next_node = next_node->parent;
diff --git a/drivers/acpi/acpica/nsobject.c b/drivers/acpi/acpica/nsobject.c
index d6c9a3cc6716..e69f7fa2579d 100644
--- a/drivers/acpi/acpica/nsobject.c
+++ b/drivers/acpi/acpica/nsobject.c
@@ -61,7 +61,7 @@ ACPI_MODULE_NAME("nsobject")
* RETURN: Status
*
* DESCRIPTION: Record the given object as the value associated with the
- * name whose acpi_handle is passed. If Object is NULL
+ * name whose acpi_handle is passed. If Object is NULL
* and Type is ACPI_TYPE_ANY, set the name as having no value.
* Note: Future may require that the Node->Flags field be passed
* as a parameter.
@@ -133,7 +133,7 @@ acpi_ns_attach_object(struct acpi_namespace_node *node,
((struct acpi_namespace_node *)object)->object) {
/*
* Value passed is a name handle and that name has a
- * non-null value. Use that name's value and type.
+ * non-null value. Use that name's value and type.
*/
obj_desc = ((struct acpi_namespace_node *)object)->object;
object_type = ((struct acpi_namespace_node *)object)->type;
@@ -321,7 +321,7 @@ union acpi_operand_object *acpi_ns_get_secondary_object(union
*
* RETURN: Status
*
- * DESCRIPTION: Low-level attach data. Create and attach a Data object.
+ * DESCRIPTION: Low-level attach data. Create and attach a Data object.
*
******************************************************************************/
@@ -377,7 +377,7 @@ acpi_ns_attach_data(struct acpi_namespace_node *node,
*
* RETURN: Status
*
- * DESCRIPTION: Low-level detach data. Delete the data node, but the caller
+ * DESCRIPTION: Low-level detach data. Delete the data node, but the caller
* is responsible for the actual data.
*
******************************************************************************/
diff --git a/drivers/acpi/acpica/nsparse.c b/drivers/acpi/acpica/nsparse.c
index ec7ba2d3463c..233f756d5cfa 100644
--- a/drivers/acpi/acpica/nsparse.c
+++ b/drivers/acpi/acpica/nsparse.c
@@ -168,11 +168,11 @@ acpi_ns_parse_table(u32 table_index, struct acpi_namespace_node *start_node)
/*
* AML Parse, pass 1
*
- * In this pass, we load most of the namespace. Control methods
- * are not parsed until later. A parse tree is not created. Instead,
- * each Parser Op subtree is deleted when it is finished. This saves
+ * In this pass, we load most of the namespace. Control methods
+ * are not parsed until later. A parse tree is not created. Instead,
+ * each Parser Op subtree is deleted when it is finished. This saves
* a great deal of memory, and allows a small cache of parse objects
- * to service the entire parse. The second pass of the parse then
+ * to service the entire parse. The second pass of the parse then
* performs another complete parse of the AML.
*/
ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "**** Start pass 1\n"));
diff --git a/drivers/acpi/acpica/nssearch.c b/drivers/acpi/acpica/nssearch.c
index 456cc859f869..1d2d8ffc1bc5 100644
--- a/drivers/acpi/acpica/nssearch.c
+++ b/drivers/acpi/acpica/nssearch.c
@@ -314,22 +314,7 @@ acpi_ns_search_and_enter(u32 target_name,
* this problem, and we want to be able to enable ACPI support for them,
* even though there are a few bad names.
*/
- if (!acpi_ut_valid_acpi_name(target_name)) {
- target_name =
- acpi_ut_repair_name(ACPI_CAST_PTR(char, &target_name));
-
- /* Report warning only if in strict mode or debug mode */
-
- if (!acpi_gbl_enable_interpreter_slack) {
- ACPI_WARNING((AE_INFO,
- "Found bad character(s) in name, repaired: [%4.4s]\n",
- ACPI_CAST_PTR(char, &target_name)));
- } else {
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Found bad character(s) in name, repaired: [%4.4s]\n",
- ACPI_CAST_PTR(char, &target_name)));
- }
- }
+ acpi_ut_repair_name(ACPI_CAST_PTR(char, &target_name));
/* Try to find the name in the namespace level specified by the caller */
diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c
index ef753a41e087..b5b4cb72a8a8 100644
--- a/drivers/acpi/acpica/nsutils.c
+++ b/drivers/acpi/acpica/nsutils.c
@@ -530,7 +530,7 @@ acpi_ns_externalize_name(u32 internal_name_length,
((num_segments > 0) ? (num_segments - 1) : 0) + 1;
/*
- * Check to see if we're still in bounds. If not, there's a problem
+ * Check to see if we're still in bounds. If not, there's a problem
* with internal_name (invalid format).
*/
if (required_length > internal_name_length) {
@@ -557,10 +557,14 @@ acpi_ns_externalize_name(u32 internal_name_length,
(*converted_name)[j++] = '.';
}
- (*converted_name)[j++] = internal_name[names_index++];
- (*converted_name)[j++] = internal_name[names_index++];
- (*converted_name)[j++] = internal_name[names_index++];
- (*converted_name)[j++] = internal_name[names_index++];
+ /* Copy and validate the 4-char name segment */
+
+ ACPI_MOVE_NAME(&(*converted_name)[j],
+ &internal_name[names_index]);
+ acpi_ut_repair_name(&(*converted_name)[j]);
+
+ j += ACPI_NAME_SIZE;
+ names_index += ACPI_NAME_SIZE;
}
}
@@ -681,7 +685,7 @@ u32 acpi_ns_opens_scope(acpi_object_type type)
* \ (backslash) and ^ (carat) prefixes, and the
* . (period) to separate segments are supported.
* prefix_node - Root of subtree to be searched, or NS_ALL for the
- * root of the name space. If Name is fully
+ * root of the name space. If Name is fully
* qualified (first s8 is '\'), the passed value
* of Scope will not be accessed.
* flags - Used to indicate whether to perform upsearch or
@@ -689,7 +693,7 @@ u32 acpi_ns_opens_scope(acpi_object_type type)
* return_node - Where the Node is returned
*
* DESCRIPTION: Look up a name relative to a given scope and return the
- * corresponding Node. NOTE: Scope can be null.
+ * corresponding Node. NOTE: Scope can be null.
*
* MUTEX: Locks namespace
*
diff --git a/drivers/acpi/acpica/nswalk.c b/drivers/acpi/acpica/nswalk.c
index 730bccc5e7f7..0483877f26b8 100644
--- a/drivers/acpi/acpica/nswalk.c
+++ b/drivers/acpi/acpica/nswalk.c
@@ -60,8 +60,8 @@ ACPI_MODULE_NAME("nswalk")
* RETURN: struct acpi_namespace_node - Pointer to the NEXT child or NULL if
* none is found.
*
- * DESCRIPTION: Return the next peer node within the namespace. If Handle
- * is valid, Scope is ignored. Otherwise, the first node
+ * DESCRIPTION: Return the next peer node within the namespace. If Handle
+ * is valid, Scope is ignored. Otherwise, the first node
* within Scope is returned.
*
******************************************************************************/
@@ -97,8 +97,8 @@ struct acpi_namespace_node *acpi_ns_get_next_node(struct acpi_namespace_node
* RETURN: struct acpi_namespace_node - Pointer to the NEXT child or NULL if
* none is found.
*
- * DESCRIPTION: Return the next peer node within the namespace. If Handle
- * is valid, Scope is ignored. Otherwise, the first node
+ * DESCRIPTION: Return the next peer node within the namespace. If Handle
+ * is valid, Scope is ignored. Otherwise, the first node
* within Scope is returned.
*
******************************************************************************/
@@ -305,7 +305,7 @@ acpi_ns_walk_namespace(acpi_object_type type,
/*
* Depth first search: Attempt to go down another level in the
- * namespace if we are allowed to. Don't go any further if we have
+ * namespace if we are allowed to. Don't go any further if we have
* reached the caller specified maximum depth or if the user
* function has specified that the maximum depth has been reached.
*/
diff --git a/drivers/acpi/acpica/nsxfeval.c b/drivers/acpi/acpica/nsxfeval.c
index 9692e6702333..d6a9f77972b6 100644
--- a/drivers/acpi/acpica/nsxfeval.c
+++ b/drivers/acpi/acpica/nsxfeval.c
@@ -61,16 +61,16 @@ static void acpi_ns_resolve_references(struct acpi_evaluate_info *info);
* PARAMETERS: handle - Object handle (optional)
* pathname - Object pathname (optional)
* external_params - List of parameters to pass to method,
- * terminated by NULL. May be NULL
+ * terminated by NULL. May be NULL
* if no parameters are being passed.
* return_buffer - Where to put method's return value (if
- * any). If NULL, no value is returned.
+ * any). If NULL, no value is returned.
* return_type - Expected type of return object
*
* RETURN: Status
*
* DESCRIPTION: Find and evaluate the given object, passing the given
- * parameters if necessary. One of "Handle" or "Pathname" must
+ * parameters if necessary. One of "Handle" or "Pathname" must
* be valid (non-null)
*
******************************************************************************/
@@ -155,15 +155,15 @@ ACPI_EXPORT_SYMBOL(acpi_evaluate_object_typed)
* PARAMETERS: handle - Object handle (optional)
* pathname - Object pathname (optional)
* external_params - List of parameters to pass to method,
- * terminated by NULL. May be NULL
+ * terminated by NULL. May be NULL
* if no parameters are being passed.
* return_buffer - Where to put method's return value (if
- * any). If NULL, no value is returned.
+ * any). If NULL, no value is returned.
*
* RETURN: Status
*
* DESCRIPTION: Find and evaluate the given object, passing the given
- * parameters if necessary. One of "Handle" or "Pathname" must
+ * parameters if necessary. One of "Handle" or "Pathname" must
* be valid (non-null)
*
******************************************************************************/
@@ -542,15 +542,15 @@ acpi_ns_get_device_callback(acpi_handle obj_handle,
acpi_status status;
struct acpi_namespace_node *node;
u32 flags;
- struct acpica_device_id *hid;
- struct acpica_device_id_list *cid;
+ struct acpi_pnp_device_id *hid;
+ struct acpi_pnp_device_id_list *cid;
u32 i;
u8 found;
int no_match;
status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
if (ACPI_FAILURE(status)) {
- return (status);
+ return_ACPI_STATUS(status);
}
node = acpi_ns_validate_handle(obj_handle);
@@ -656,7 +656,7 @@ acpi_ns_get_device_callback(acpi_handle obj_handle,
* DESCRIPTION: Performs a modified depth-first walk of the namespace tree,
* starting (and ending) at the object specified by start_handle.
* The user_function is called whenever an object of type
- * Device is found. If the user function returns
+ * Device is found. If the user function returns
* a non-zero value, the search is terminated immediately and this
* value is returned to the caller.
*
diff --git a/drivers/acpi/acpica/nsxfname.c b/drivers/acpi/acpica/nsxfname.c
index 08e9610b34ca..811c6f13f476 100644
--- a/drivers/acpi/acpica/nsxfname.c
+++ b/drivers/acpi/acpica/nsxfname.c
@@ -53,8 +53,8 @@
ACPI_MODULE_NAME("nsxfname")
/* Local prototypes */
-static char *acpi_ns_copy_device_id(struct acpica_device_id *dest,
- struct acpica_device_id *source,
+static char *acpi_ns_copy_device_id(struct acpi_pnp_device_id *dest,
+ struct acpi_pnp_device_id *source,
char *string_area);
/******************************************************************************
@@ -69,8 +69,8 @@ static char *acpi_ns_copy_device_id(struct acpica_device_id *dest,
* RETURN: Status
*
* DESCRIPTION: This routine will search for a caller specified name in the
- * name space. The caller can restrict the search region by
- * specifying a non NULL parent. The parent value is itself a
+ * name space. The caller can restrict the search region by
+ * specifying a non NULL parent. The parent value is itself a
* namespace handle.
*
******************************************************************************/
@@ -149,7 +149,7 @@ ACPI_EXPORT_SYMBOL(acpi_get_handle)
* RETURN: Pointer to a string containing the fully qualified Name.
*
* DESCRIPTION: This routine returns the fully qualified name associated with
- * the Handle parameter. This and the acpi_pathname_to_handle are
+ * the Handle parameter. This and the acpi_pathname_to_handle are
* complementary functions.
*
******************************************************************************/
@@ -202,8 +202,7 @@ acpi_get_name(acpi_handle handle, u32 name_type, struct acpi_buffer * buffer)
/* Just copy the ACPI name from the Node and zero terminate it */
- ACPI_STRNCPY(buffer->pointer, acpi_ut_get_node_name(node),
- ACPI_NAME_SIZE);
+ ACPI_MOVE_NAME(buffer->pointer, acpi_ut_get_node_name(node));
((char *)buffer->pointer)[ACPI_NAME_SIZE] = 0;
status = AE_OK;
@@ -219,20 +218,21 @@ ACPI_EXPORT_SYMBOL(acpi_get_name)
*
* FUNCTION: acpi_ns_copy_device_id
*
- * PARAMETERS: dest - Pointer to the destination DEVICE_ID
- * source - Pointer to the source DEVICE_ID
+ * PARAMETERS: dest - Pointer to the destination PNP_DEVICE_ID
+ * source - Pointer to the source PNP_DEVICE_ID
* string_area - Pointer to where to copy the dest string
*
* RETURN: Pointer to the next string area
*
- * DESCRIPTION: Copy a single DEVICE_ID, including the string data.
+ * DESCRIPTION: Copy a single PNP_DEVICE_ID, including the string data.
*
******************************************************************************/
-static char *acpi_ns_copy_device_id(struct acpica_device_id *dest,
- struct acpica_device_id *source,
+static char *acpi_ns_copy_device_id(struct acpi_pnp_device_id *dest,
+ struct acpi_pnp_device_id *source,
char *string_area)
{
- /* Create the destination DEVICE_ID */
+
+ /* Create the destination PNP_DEVICE_ID */
dest->string = string_area;
dest->length = source->length;
@@ -256,8 +256,8 @@ static char *acpi_ns_copy_device_id(struct acpica_device_id *dest,
* namespace node and possibly by running several standard
* control methods (Such as in the case of a device.)
*
- * For Device and Processor objects, run the Device _HID, _UID, _CID, _STA,
- * _ADR, _sx_w, and _sx_d methods.
+ * For Device and Processor objects, run the Device _HID, _UID, _CID, _SUB,
+ * _STA, _ADR, _sx_w, and _sx_d methods.
*
* Note: Allocates the return buffer, must be freed by the caller.
*
@@ -269,9 +269,10 @@ acpi_get_object_info(acpi_handle handle,
{
struct acpi_namespace_node *node;
struct acpi_device_info *info;
- struct acpica_device_id_list *cid_list = NULL;
- struct acpica_device_id *hid = NULL;
- struct acpica_device_id *uid = NULL;
+ struct acpi_pnp_device_id_list *cid_list = NULL;
+ struct acpi_pnp_device_id *hid = NULL;
+ struct acpi_pnp_device_id *uid = NULL;
+ struct acpi_pnp_device_id *sub = NULL;
char *next_id_string;
acpi_object_type type;
acpi_name name;
@@ -316,7 +317,7 @@ acpi_get_object_info(acpi_handle handle,
if ((type == ACPI_TYPE_DEVICE) || (type == ACPI_TYPE_PROCESSOR)) {
/*
* Get extra info for ACPI Device/Processor objects only:
- * Run the Device _HID, _UID, and _CID methods.
+ * Run the Device _HID, _UID, _SUB, and _CID methods.
*
* Note: none of these methods are required, so they may or may
* not be present for this device. The Info->Valid bitfield is used
@@ -339,6 +340,14 @@ acpi_get_object_info(acpi_handle handle,
valid |= ACPI_VALID_UID;
}
+ /* Execute the Device._SUB method */
+
+ status = acpi_ut_execute_SUB(node, &sub);
+ if (ACPI_SUCCESS(status)) {
+ info_size += sub->length;
+ valid |= ACPI_VALID_SUB;
+ }
+
/* Execute the Device._CID method */
status = acpi_ut_execute_CID(node, &cid_list);
@@ -348,7 +357,7 @@ acpi_get_object_info(acpi_handle handle,
info_size +=
(cid_list->list_size -
- sizeof(struct acpica_device_id_list));
+ sizeof(struct acpi_pnp_device_id_list));
valid |= ACPI_VALID_CID;
}
}
@@ -418,16 +427,17 @@ acpi_get_object_info(acpi_handle handle,
next_id_string = ACPI_CAST_PTR(char, info->compatible_id_list.ids);
if (cid_list) {
- /* Point past the CID DEVICE_ID array */
+ /* Point past the CID PNP_DEVICE_ID array */
next_id_string +=
((acpi_size) cid_list->count *
- sizeof(struct acpica_device_id));
+ sizeof(struct acpi_pnp_device_id));
}
/*
- * Copy the HID, UID, and CIDs to the return buffer. The variable-length
- * strings are copied to the reserved area at the end of the buffer.
+ * Copy the HID, UID, SUB, and CIDs to the return buffer.
+ * The variable-length strings are copied to the reserved area
+ * at the end of the buffer.
*
* For HID and CID, check if the ID is a PCI Root Bridge.
*/
@@ -445,6 +455,11 @@ acpi_get_object_info(acpi_handle handle,
uid, next_id_string);
}
+ if (sub) {
+ next_id_string = acpi_ns_copy_device_id(&info->subsystem_id,
+ sub, next_id_string);
+ }
+
if (cid_list) {
info->compatible_id_list.count = cid_list->count;
info->compatible_id_list.list_size = cid_list->list_size;
@@ -481,6 +496,9 @@ acpi_get_object_info(acpi_handle handle,
if (uid) {
ACPI_FREE(uid);
}
+ if (sub) {
+ ACPI_FREE(sub);
+ }
if (cid_list) {
ACPI_FREE(cid_list);
}
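
A sketch of how the new _SUB data reaches a caller of acpi_get_object_info(), assuming only the acpi_device_info fields introduced above (subsystem_id, ACPI_VALID_SUB) and the ACPICA allocate/free conventions; the helper name is invented:

static acpi_status show_subsystem_id(acpi_handle handle)
{
	struct acpi_device_info *info;
	acpi_status status;

	status = acpi_get_object_info(handle, &info);
	if (ACPI_FAILURE(status)) {
		return (status);
	}

	if (info->valid & ACPI_VALID_SUB) {

		/* _SUB string copied into the buffer by acpi_ns_copy_device_id() */

		acpi_os_printf("_SUB: %s\n", info->subsystem_id.string);
	}

	/* acpi_get_object_info() allocates the return buffer; the caller must free it */

	ACPI_FREE(info);
	return (AE_OK);
}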
diff --git a/drivers/acpi/acpica/nsxfobj.c b/drivers/acpi/acpica/nsxfobj.c
index 6766fc4f088f..9d029dac6b64 100644
--- a/drivers/acpi/acpica/nsxfobj.c
+++ b/drivers/acpi/acpica/nsxfobj.c
@@ -220,8 +220,8 @@ ACPI_EXPORT_SYMBOL(acpi_get_parent)
*
* RETURN: Status
*
- * DESCRIPTION: Return the next peer object within the namespace. If Handle is
- * valid, Scope is ignored. Otherwise, the first object within
+ * DESCRIPTION: Return the next peer object within the namespace. If Handle is
+ * valid, Scope is ignored. Otherwise, the first object within
* Scope is returned.
*
******************************************************************************/
diff --git a/drivers/acpi/acpica/psargs.c b/drivers/acpi/acpica/psargs.c
index 844464c4f901..cb79e2d4d743 100644
--- a/drivers/acpi/acpica/psargs.c
+++ b/drivers/acpi/acpica/psargs.c
@@ -120,7 +120,7 @@ acpi_ps_get_next_package_length(struct acpi_parse_state *parser_state)
* RETURN: Pointer to end-of-package +1
*
* DESCRIPTION: Get next package length and return a pointer past the end of
- * the package. Consumes the package length field
+ * the package. Consumes the package length field
*
******************************************************************************/
@@ -147,8 +147,8 @@ u8 *acpi_ps_get_next_package_end(struct acpi_parse_state *parser_state)
* RETURN: Pointer to the start of the name string (pointer points into
* the AML.
*
- * DESCRIPTION: Get next raw namestring within the AML stream. Handles all name
- * prefix characters. Set parser state to point past the string.
+ * DESCRIPTION: Get next raw namestring within the AML stream. Handles all name
+ * prefix characters. Set parser state to point past the string.
* (Name is consumed from the AML.)
*
******************************************************************************/
@@ -220,7 +220,7 @@ char *acpi_ps_get_next_namestring(struct acpi_parse_state *parser_state)
*
* DESCRIPTION: Get next name (if method call, return # of required args).
* Names are looked up in the internal namespace to determine
- * if the name represents a control method. If a method
+ * if the name represents a control method. If a method
* is found, the number of arguments to the method is returned.
* This information is critical for parsing to continue correctly.
*
diff --git a/drivers/acpi/acpica/psloop.c b/drivers/acpi/acpica/psloop.c
index 799162c1b6df..5607805aab26 100644
--- a/drivers/acpi/acpica/psloop.c
+++ b/drivers/acpi/acpica/psloop.c
@@ -133,18 +133,46 @@ static acpi_status acpi_ps_get_aml_opcode(struct acpi_walk_state *walk_state)
case AML_CLASS_UNKNOWN:
- /* The opcode is unrecognized. Just skip unknown opcodes */
+ /* The opcode is unrecognized. Complain and skip unknown opcodes */
- ACPI_ERROR((AE_INFO,
- "Found unknown opcode 0x%X at AML address %p offset 0x%X, ignoring",
- walk_state->opcode, walk_state->parser_state.aml,
- walk_state->aml_offset));
+ if (walk_state->pass_number == 2) {
+ ACPI_ERROR((AE_INFO,
+ "Unknown opcode 0x%.2X at table offset 0x%.4X, ignoring",
+ walk_state->opcode,
+ (u32)(walk_state->aml_offset +
+ sizeof(struct acpi_table_header))));
- ACPI_DUMP_BUFFER(walk_state->parser_state.aml, 128);
+ ACPI_DUMP_BUFFER(walk_state->parser_state.aml - 16, 48);
- /* Assume one-byte bad opcode */
+#ifdef ACPI_ASL_COMPILER
+ /*
+ * This is executed for the disassembler only. Output goes
+ * to the disassembled ASL output file.
+ */
+ acpi_os_printf
+ ("/*\nError: Unknown opcode 0x%.2X at table offset 0x%.4X, context:\n",
+ walk_state->opcode,
+ (u32)(walk_state->aml_offset +
+ sizeof(struct acpi_table_header)));
+
+ /* Dump the context surrounding the invalid opcode */
+
+ acpi_ut_dump_buffer(((u8 *)walk_state->parser_state.
+ aml - 16), 48, DB_BYTE_DISPLAY,
+ walk_state->aml_offset +
+ sizeof(struct acpi_table_header) -
+ 16);
+ acpi_os_printf(" */\n");
+#endif
+ }
+
+ /* Increment past one-byte or two-byte opcode */
walk_state->parser_state.aml++;
+ if (walk_state->opcode > 0xFF) { /* Can only happen if first byte is 0x5B */
+ walk_state->parser_state.aml++;
+ }
+
return_ACPI_STATUS(AE_CTRL_PARSE_CONTINUE);
default:
@@ -519,11 +547,18 @@ acpi_ps_get_arguments(struct acpi_walk_state *walk_state,
if ((op_info->class ==
AML_CLASS_EXECUTE) && (!arg)) {
ACPI_WARNING((AE_INFO,
- "Detected an unsupported executable opcode "
- "at module-level: [0x%.4X] at table offset 0x%.4X",
- op->common.aml_opcode,
- (u32)((aml_op_start - walk_state->parser_state.aml_start)
- + sizeof(struct acpi_table_header))));
+ "Unsupported module-level executable opcode "
+ "0x%.2X at table offset 0x%.4X",
+ op->common.
+ aml_opcode,
+ (u32)
+ (ACPI_PTR_DIFF
+ (aml_op_start,
+ walk_state->
+ parser_state.
+ aml_start) +
+ sizeof(struct
+ acpi_table_header))));
}
}
break;
@@ -843,8 +878,6 @@ acpi_ps_complete_op(struct acpi_walk_state *walk_state,
*op = NULL;
}
- ACPI_PREEMPTION_POINT();
-
return_ACPI_STATUS(AE_OK);
}
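
The two-byte skip in the unknown-opcode path works because ACPICA stores extended AML opcodes as 16-bit values with the 0x5B ExtOpPrefix in the high byte (Sleep, for example, is 0x5B22), so any stored opcode above 0xFF consumed two bytes of AML. A minimal sketch of that size rule, illustrative only:

/* Bytes consumed in the AML stream by a given stored opcode value */
static u32 aml_opcode_byte_length(u16 aml_opcode)
{
	return ((aml_opcode > 0x00FF) ? 2 : 1);	/* 0x5Bxx opcodes are two bytes */
}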
diff --git a/drivers/acpi/acpica/psopcode.c b/drivers/acpi/acpica/psopcode.c
index ed1d457bd5ca..1793d934aa30 100644
--- a/drivers/acpi/acpica/psopcode.c
+++ b/drivers/acpi/acpica/psopcode.c
@@ -59,7 +59,7 @@ static const u8 acpi_gbl_argument_count[] =
*
* DESCRIPTION: Opcode table. Each entry contains <opcode, type, name, operands>
* The name is a simple ascii string, the operand specifier is an
- * ascii string with one letter per operand. The letter specifies
+ * ascii string with one letter per operand. The letter specifies
* the operand type.
*
******************************************************************************/
@@ -183,7 +183,7 @@ static const u8 acpi_gbl_argument_count[] =
******************************************************************************/
/*
- * Master Opcode information table. A summary of everything we know about each
+ * Master Opcode information table. A summary of everything we know about each
* opcode, all in one place.
*/
const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES] = {
@@ -392,10 +392,12 @@ const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES] = {
AML_FLAGS_EXEC_1A_0T_1R | AML_NO_OPERAND_RESOLVE),
/* 38 */ ACPI_OP("LAnd", ARGP_LAND_OP, ARGI_LAND_OP, ACPI_TYPE_ANY,
AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_0T_1R,
- AML_FLAGS_EXEC_2A_0T_1R | AML_LOGICAL_NUMERIC | AML_CONSTANT),
+ AML_FLAGS_EXEC_2A_0T_1R | AML_LOGICAL_NUMERIC |
+ AML_CONSTANT),
/* 39 */ ACPI_OP("LOr", ARGP_LOR_OP, ARGI_LOR_OP, ACPI_TYPE_ANY,
AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_0T_1R,
- AML_FLAGS_EXEC_2A_0T_1R | AML_LOGICAL_NUMERIC | AML_CONSTANT),
+ AML_FLAGS_EXEC_2A_0T_1R | AML_LOGICAL_NUMERIC |
+ AML_CONSTANT),
/* 3A */ ACPI_OP("LNot", ARGP_LNOT_OP, ARGI_LNOT_OP, ACPI_TYPE_ANY,
AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_0T_1R,
AML_FLAGS_EXEC_1A_0T_1R | AML_CONSTANT),
@@ -495,7 +497,8 @@ const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES] = {
AML_NSNODE | AML_NAMED | AML_DEFER),
/* 59 */ ACPI_OP("Field", ARGP_FIELD_OP, ARGI_FIELD_OP, ACPI_TYPE_ANY,
AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_FIELD,
- AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | AML_FIELD),
+ AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
+ AML_FIELD),
/* 5A */ ACPI_OP("Device", ARGP_DEVICE_OP, ARGI_DEVICE_OP,
ACPI_TYPE_DEVICE, AML_CLASS_NAMED_OBJECT,
AML_TYPE_NAMED_NO_OBJ,
@@ -519,12 +522,13 @@ const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES] = {
/* 5E */ ACPI_OP("IndexField", ARGP_INDEX_FIELD_OP, ARGI_INDEX_FIELD_OP,
ACPI_TYPE_ANY, AML_CLASS_NAMED_OBJECT,
AML_TYPE_NAMED_FIELD,
- AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | AML_FIELD),
+ AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
+ AML_FIELD),
/* 5F */ ACPI_OP("BankField", ARGP_BANK_FIELD_OP, ARGI_BANK_FIELD_OP,
- ACPI_TYPE_LOCAL_BANK_FIELD, AML_CLASS_NAMED_OBJECT,
- AML_TYPE_NAMED_FIELD,
- AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | AML_FIELD |
- AML_DEFER),
+ ACPI_TYPE_LOCAL_BANK_FIELD,
+ AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_FIELD,
+ AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
+ AML_FIELD | AML_DEFER),
/* Internal opcodes that map to invalid AML opcodes */
@@ -632,7 +636,8 @@ const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES] = {
/* 7D */ ACPI_OP("[EvalSubTree]", ARGP_SCOPE_OP, ARGI_SCOPE_OP,
ACPI_TYPE_ANY, AML_CLASS_NAMED_OBJECT,
AML_TYPE_NAMED_NO_OBJ,
- AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | AML_NSNODE),
+ AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
+ AML_NSNODE),
/* ACPI 3.0 opcodes */
@@ -695,7 +700,7 @@ static const u8 acpi_gbl_short_op_index[256] = {
/*
* This table is indexed by the second opcode of the extended opcode
- * pair. It returns an index into the opcode table (acpi_gbl_aml_op_info)
+ * pair. It returns an index into the opcode table (acpi_gbl_aml_op_info)
*/
static const u8 acpi_gbl_long_op_index[NUM_EXTENDED_OPCODE] = {
/* 0 1 2 3 4 5 6 7 */
diff --git a/drivers/acpi/acpica/psparse.c b/drivers/acpi/acpica/psparse.c
index 01985703bb98..2494caf47755 100644
--- a/drivers/acpi/acpica/psparse.c
+++ b/drivers/acpi/acpica/psparse.c
@@ -43,9 +43,9 @@
/*
* Parse the AML and build an operation tree as most interpreters,
- * like Perl, do. Parsing is done by hand rather than with a YACC
+ * like Perl, do. Parsing is done by hand rather than with a YACC
* generated parser to tightly constrain stack and dynamic memory
- * usage. At the same time, parsing is kept flexible and the code
+ * usage. At the same time, parsing is kept flexible and the code
* fairly compact by parsing based on a list of AML opcode
* templates in aml_op_info[]
*/
@@ -379,7 +379,7 @@ acpi_ps_next_parse_state(struct acpi_walk_state *walk_state,
case AE_CTRL_FALSE:
/*
* Either an IF/WHILE Predicate was false or we encountered a BREAK
- * opcode. In both cases, we do not execute the rest of the
+ * opcode. In both cases, we do not execute the rest of the
* package; We simply close out the parent (finishing the walk of
* this branch of the tree) and continue execution at the parent
* level.
@@ -459,8 +459,9 @@ acpi_status acpi_ps_parse_aml(struct acpi_walk_state *walk_state)
/* Executing a control method - additional cleanup */
- acpi_ds_terminate_control_method(
- walk_state->method_desc, walk_state);
+ acpi_ds_terminate_control_method(walk_state->
+ method_desc,
+ walk_state);
}
acpi_ds_delete_walk_state(walk_state);
@@ -487,7 +488,7 @@ acpi_status acpi_ps_parse_aml(struct acpi_walk_state *walk_state)
acpi_gbl_current_walk_list = thread;
/*
- * Execute the walk loop as long as there is a valid Walk State. This
+ * Execute the walk loop as long as there is a valid Walk State. This
* handles nested control method invocations without recursion.
*/
ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "State=%p\n", walk_state));
diff --git a/drivers/acpi/acpica/psutils.c b/drivers/acpi/acpica/psutils.c
index 8736ad5f04d3..4137dcb352d1 100644
--- a/drivers/acpi/acpica/psutils.c
+++ b/drivers/acpi/acpica/psutils.c
@@ -108,7 +108,7 @@ void acpi_ps_init_op(union acpi_parse_object *op, u16 opcode)
* RETURN: Pointer to the new Op, null on failure
*
* DESCRIPTION: Allocate an acpi_op, choose op type (and thus size) based on
- * opcode. A cache of opcodes is available for the pure
+ * opcode. A cache of opcodes is available for the pure
* GENERIC_OP, since this is by far the most commonly used.
*
******************************************************************************/
@@ -164,7 +164,7 @@ union acpi_parse_object *acpi_ps_alloc_op(u16 opcode)
*
* RETURN: None.
*
- * DESCRIPTION: Free an Op object. Either put it on the GENERIC_OP cache list
+ * DESCRIPTION: Free an Op object. Either put it on the GENERIC_OP cache list
* or actually free it.
*
******************************************************************************/
diff --git a/drivers/acpi/acpica/rscalc.c b/drivers/acpi/acpica/rscalc.c
index de12469d1c9c..147feb6aa2a0 100644
--- a/drivers/acpi/acpica/rscalc.c
+++ b/drivers/acpi/acpica/rscalc.c
@@ -457,6 +457,15 @@ acpi_rs_get_list_length(u8 * aml_buffer,
* Get the number of vendor data bytes
*/
extra_struct_bytes = resource_length;
+
+ /*
+ * There is already one byte included in the minimum
+ * descriptor size. If there are extra struct bytes,
+ * subtract one from the count.
+ */
+ if (extra_struct_bytes) {
+ extra_struct_bytes--;
+ }
break;
case ACPI_RESOURCE_NAME_END_TAG:
@@ -601,7 +610,7 @@ acpi_rs_get_pci_routing_table_length(union acpi_operand_object *package_object,
/*
* Calculate the size of the return buffer.
* The base size is the number of elements * the sizes of the
- * structures. Additional space for the strings is added below.
+ * structures. Additional space for the strings is added below.
* The minus one is to subtract the size of the u8 Source[1]
* member because it is added below.
*
@@ -664,8 +673,7 @@ acpi_rs_get_pci_routing_table_length(union acpi_operand_object *package_object,
(*sub_object_list)->string.
length + 1);
} else {
- temp_size_needed +=
- acpi_ns_get_pathname_length((*sub_object_list)->reference.node);
+ temp_size_needed += acpi_ns_get_pathname_length((*sub_object_list)->reference.node);
}
} else {
/*
diff --git a/drivers/acpi/acpica/rslist.c b/drivers/acpi/acpica/rslist.c
index 46b5324b22d6..8b64db9a3fd2 100644
--- a/drivers/acpi/acpica/rslist.c
+++ b/drivers/acpi/acpica/rslist.c
@@ -109,7 +109,7 @@ acpi_rs_convert_aml_to_resources(u8 * aml,
ACPI_ERROR((AE_INFO,
"Invalid/unsupported resource descriptor: Type 0x%2.2X",
resource_index));
- return (AE_AML_INVALID_RESOURCE_TYPE);
+ return_ACPI_STATUS(AE_AML_INVALID_RESOURCE_TYPE);
}
/* Convert the AML byte stream resource to a local resource struct */
@@ -200,7 +200,7 @@ acpi_rs_convert_resources_to_aml(struct acpi_resource *resource,
ACPI_ERROR((AE_INFO,
"Invalid/unsupported resource descriptor: Type 0x%2.2X",
resource->type));
- return (AE_AML_INVALID_RESOURCE_TYPE);
+ return_ACPI_STATUS(AE_AML_INVALID_RESOURCE_TYPE);
}
status = acpi_rs_convert_resource_to_aml(resource,
diff --git a/drivers/acpi/acpica/tbfind.c b/drivers/acpi/acpica/tbfind.c
index 57deae166577..77d1db29a725 100644
--- a/drivers/acpi/acpica/tbfind.c
+++ b/drivers/acpi/acpica/tbfind.c
@@ -77,7 +77,7 @@ acpi_tb_find_table(char *signature,
/* Normalize the input strings */
ACPI_MEMSET(&header, 0, sizeof(struct acpi_table_header));
- ACPI_STRNCPY(header.signature, signature, ACPI_NAME_SIZE);
+ ACPI_MOVE_NAME(header.signature, signature);
ACPI_STRNCPY(header.oem_id, oem_id, ACPI_OEM_ID_SIZE);
ACPI_STRNCPY(header.oem_table_id, oem_table_id, ACPI_OEM_TABLE_ID_SIZE);
diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
index 70f9d787c82c..f540ae462925 100644
--- a/drivers/acpi/acpica/tbinstal.c
+++ b/drivers/acpi/acpica/tbinstal.c
@@ -526,6 +526,8 @@ void acpi_tb_terminate(void)
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "ACPI Tables freed\n"));
(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
+
+ return_VOID;
}
/*******************************************************************************
diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c
index b6cea30da638..285e24b97382 100644
--- a/drivers/acpi/acpica/tbutils.c
+++ b/drivers/acpi/acpica/tbutils.c
@@ -354,7 +354,7 @@ u8 acpi_tb_checksum(u8 *buffer, u32 length)
sum = (u8) (sum + *(buffer++));
}
- return sum;
+ return (sum);
}
/*******************************************************************************
diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c
index 21101262e47a..f5632780421d 100644
--- a/drivers/acpi/acpica/tbxface.c
+++ b/drivers/acpi/acpica/tbxface.c
@@ -236,7 +236,7 @@ acpi_get_table_header(char *signature,
sizeof(struct
acpi_table_header));
if (!header) {
- return AE_NO_MEMORY;
+ return (AE_NO_MEMORY);
}
ACPI_MEMCPY(out_table_header, header,
sizeof(struct acpi_table_header));
@@ -244,7 +244,7 @@ acpi_get_table_header(char *signature,
sizeof(struct
acpi_table_header));
} else {
- return AE_NOT_FOUND;
+ return (AE_NOT_FOUND);
}
} else {
ACPI_MEMCPY(out_table_header,
diff --git a/drivers/acpi/acpica/tbxfload.c b/drivers/acpi/acpica/tbxfload.c
index f87cc63e69a1..a5e1e4e47098 100644
--- a/drivers/acpi/acpica/tbxfload.c
+++ b/drivers/acpi/acpica/tbxfload.c
@@ -211,7 +211,7 @@ static acpi_status acpi_tb_load_namespace(void)
* DESCRIPTION: Dynamically load an ACPI table from the caller's buffer. Must
* be a valid ACPI table with a valid ACPI table header.
* Note1: Mainly intended to support hotplug addition of SSDTs.
- * Note2: Does not copy the incoming table. User is reponsible
+ * Note2: Does not copy the incoming table. User is responsible
* to ensure that the table is not deleted or unmapped.
*
******************************************************************************/
diff --git a/drivers/acpi/acpica/tbxfroot.c b/drivers/acpi/acpica/tbxfroot.c
index 74e720800037..28f330230f99 100644
--- a/drivers/acpi/acpica/tbxfroot.c
+++ b/drivers/acpi/acpica/tbxfroot.c
@@ -67,7 +67,6 @@ static acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp *rsdp);
static acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp *rsdp)
{
- ACPI_FUNCTION_ENTRY();
/*
* The signature and checksum must both be correct
@@ -108,7 +107,7 @@ static acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp *rsdp)
* RETURN: Status, RSDP physical address
*
* DESCRIPTION: Search lower 1Mbyte of memory for the root system descriptor
- * pointer structure. If it is found, set *RSDP to point to it.
+ * pointer structure. If it is found, set *RSDP to point to it.
*
* NOTE1: The RSDP must be either in the first 1K of the Extended
* BIOS Data Area or between E0000 and FFFFF (From ACPI Spec.)
diff --git a/drivers/acpi/acpica/utcache.c b/drivers/acpi/acpica/utcache.c
new file mode 100644
index 000000000000..e1d40ed26390
--- /dev/null
+++ b/drivers/acpi/acpica/utcache.c
@@ -0,0 +1,323 @@
+/******************************************************************************
+ *
+ * Module Name: utcache - local cache allocation routines
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2012, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+
+#define _COMPONENT ACPI_UTILITIES
+ACPI_MODULE_NAME("utcache")
+
+#ifdef ACPI_USE_LOCAL_CACHE
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_os_create_cache
+ *
+ * PARAMETERS: cache_name - Ascii name for the cache
+ * object_size - Size of each cached object
+ * max_depth - Maximum depth of the cache (in objects)
+ * return_cache - Where the new cache object is returned
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Create a cache object
+ *
+ ******************************************************************************/
+acpi_status
+acpi_os_create_cache(char *cache_name,
+ u16 object_size,
+ u16 max_depth, struct acpi_memory_list ** return_cache)
+{
+ struct acpi_memory_list *cache;
+
+ ACPI_FUNCTION_ENTRY();
+
+ if (!cache_name || !return_cache || (object_size < 16)) {
+ return (AE_BAD_PARAMETER);
+ }
+
+ /* Create the cache object */
+
+ cache = acpi_os_allocate(sizeof(struct acpi_memory_list));
+ if (!cache) {
+ return (AE_NO_MEMORY);
+ }
+
+ /* Populate the cache object and return it */
+
+ ACPI_MEMSET(cache, 0, sizeof(struct acpi_memory_list));
+ cache->link_offset = 8;
+ cache->list_name = cache_name;
+ cache->object_size = object_size;
+ cache->max_depth = max_depth;
+
+ *return_cache = cache;
+ return (AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_os_purge_cache
+ *
+ * PARAMETERS: cache - Handle to cache object
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Free all objects within the requested cache.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_os_purge_cache(struct acpi_memory_list * cache)
+{
+ char *next;
+ acpi_status status;
+
+ ACPI_FUNCTION_ENTRY();
+
+ if (!cache) {
+ return (AE_BAD_PARAMETER);
+ }
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_CACHES);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+ /* Walk the list of objects in this cache */
+
+ while (cache->list_head) {
+
+ /* Delete and unlink one cached state object */
+
+ next = *(ACPI_CAST_INDIRECT_PTR(char,
+ &(((char *)cache->
+ list_head)[cache->
+ link_offset])));
+ ACPI_FREE(cache->list_head);
+
+ cache->list_head = next;
+ cache->current_depth--;
+ }
+
+ (void)acpi_ut_release_mutex(ACPI_MTX_CACHES);
+ return (AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_os_delete_cache
+ *
+ * PARAMETERS: cache - Handle to cache object
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Free all objects within the requested cache and delete the
+ * cache object.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_os_delete_cache(struct acpi_memory_list * cache)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_ENTRY();
+
+ /* Purge all objects in the cache */
+
+ status = acpi_os_purge_cache(cache);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+ /* Now we can delete the cache object */
+
+ acpi_os_free(cache);
+ return (AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_os_release_object
+ *
+ * PARAMETERS: cache - Handle to cache object
+ * object - The object to be released
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Release an object to the specified cache. If cache is full,
+ * the object is deleted.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_os_release_object(struct acpi_memory_list * cache, void *object)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_ENTRY();
+
+ if (!cache || !object) {
+ return (AE_BAD_PARAMETER);
+ }
+
+ /* If cache is full, just free this object */
+
+ if (cache->current_depth >= cache->max_depth) {
+ ACPI_FREE(object);
+ ACPI_MEM_TRACKING(cache->total_freed++);
+ }
+
+ /* Otherwise put this object back into the cache */
+
+ else {
+ status = acpi_ut_acquire_mutex(ACPI_MTX_CACHES);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+ /* Mark the object as cached */
+
+ ACPI_MEMSET(object, 0xCA, cache->object_size);
+ ACPI_SET_DESCRIPTOR_TYPE(object, ACPI_DESC_TYPE_CACHED);
+
+ /* Put the object at the head of the cache list */
+
+ *(ACPI_CAST_INDIRECT_PTR(char,
+ &(((char *)object)[cache->
+ link_offset]))) =
+ cache->list_head;
+ cache->list_head = object;
+ cache->current_depth++;
+
+ (void)acpi_ut_release_mutex(ACPI_MTX_CACHES);
+ }
+
+ return (AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_os_acquire_object
+ *
+ * PARAMETERS: cache - Handle to cache object
+ *
+ * RETURN: the acquired object. NULL on error
+ *
+ * DESCRIPTION: Get an object from the specified cache. If cache is empty,
+ * the object is allocated.
+ *
+ ******************************************************************************/
+
+void *acpi_os_acquire_object(struct acpi_memory_list *cache)
+{
+ acpi_status status;
+ void *object;
+
+ ACPI_FUNCTION_NAME(os_acquire_object);
+
+ if (!cache) {
+ return (NULL);
+ }
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_CACHES);
+ if (ACPI_FAILURE(status)) {
+ return (NULL);
+ }
+
+ ACPI_MEM_TRACKING(cache->requests++);
+
+ /* Check the cache first */
+
+ if (cache->list_head) {
+
+ /* There is an object available, use it */
+
+ object = cache->list_head;
+ cache->list_head = *(ACPI_CAST_INDIRECT_PTR(char,
+ &(((char *)
+ object)[cache->
+ link_offset])));
+
+ cache->current_depth--;
+
+ ACPI_MEM_TRACKING(cache->hits++);
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "Object %p from %s cache\n", object,
+ cache->list_name));
+
+ status = acpi_ut_release_mutex(ACPI_MTX_CACHES);
+ if (ACPI_FAILURE(status)) {
+ return (NULL);
+ }
+
+ /* Clear (zero) the previously used Object */
+
+ ACPI_MEMSET(object, 0, cache->object_size);
+ } else {
+ /* The cache is empty, create a new object */
+
+ ACPI_MEM_TRACKING(cache->total_allocated++);
+
+#ifdef ACPI_DBG_TRACK_ALLOCATIONS
+ if ((cache->total_allocated - cache->total_freed) >
+ cache->max_occupied) {
+ cache->max_occupied =
+ cache->total_allocated - cache->total_freed;
+ }
+#endif
+
+ /* Avoid deadlock with ACPI_ALLOCATE_ZEROED */
+
+ status = acpi_ut_release_mutex(ACPI_MTX_CACHES);
+ if (ACPI_FAILURE(status)) {
+ return (NULL);
+ }
+
+ object = ACPI_ALLOCATE_ZEROED(cache->object_size);
+ if (!object) {
+ return (NULL);
+ }
+ }
+
+ return (object);
+}
+#endif /* ACPI_USE_LOCAL_CACHE */
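/*
 * Usage sketch (not part of this patch): typical lifecycle of a local object
 * cache built from the routines above. The cache name, object size and depth
 * below are illustrative values, not taken from this commit.
 */

struct acpi_memory_list *my_cache;
void *obj;

if (ACPI_SUCCESS(acpi_os_create_cache("MyCache", 48, 16, &my_cache))) {
	obj = acpi_os_acquire_object(my_cache);	/* reuse a cached object or allocate a zeroed one */
	if (obj) {
		/* ... use the object ... */
		(void)acpi_os_release_object(my_cache, obj);	/* re-queue, or free if cache is full */
	}
	(void)acpi_os_delete_cache(my_cache);	/* purge the list and free the cache itself */
}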
diff --git a/drivers/acpi/acpica/utclib.c b/drivers/acpi/acpica/utclib.c
new file mode 100644
index 000000000000..19ea4755aa73
--- /dev/null
+++ b/drivers/acpi/acpica/utclib.c
@@ -0,0 +1,749 @@
+/******************************************************************************
+ *
+ * Module Name: cmclib - Local implementation of C library functions
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2012, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+
+/*
+ * These implementations of standard C Library routines can optionally be
+ * used if a C library is not available. In general, they are less efficient
+ * than an inline or assembly implementation
+ */
+
+#define _COMPONENT ACPI_UTILITIES
+ACPI_MODULE_NAME("cmclib")
+
+#ifndef ACPI_USE_SYSTEM_CLIBRARY
+#define NEGATIVE 1
+#define POSITIVE 0
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_memcmp (memcmp)
+ *
+ * PARAMETERS: buffer1 - First Buffer
+ * buffer2 - Second Buffer
+ * count - Maximum # of bytes to compare
+ *
+ * RETURN: Index where Buffers mismatched, or 0 if Buffers matched
+ *
+ * DESCRIPTION: Compare two Buffers, with a maximum length
+ *
+ ******************************************************************************/
+int acpi_ut_memcmp(const char *buffer1, const char *buffer2, acpi_size count)
+{
+
+ for (; count-- && (*buffer1 == *buffer2); buffer1++, buffer2++) {;
+ }
+
+ return ((count == ACPI_SIZE_MAX) ? 0 : ((unsigned char)*buffer1 -
+ (unsigned char)*buffer2));
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_memcpy (memcpy)
+ *
+ * PARAMETERS: dest - Target of the copy
+ * src - Source buffer to copy
+ * count - Number of bytes to copy
+ *
+ * RETURN: Dest
+ *
+ * DESCRIPTION: Copy arbitrary bytes of memory
+ *
+ ******************************************************************************/
+
+void *acpi_ut_memcpy(void *dest, const void *src, acpi_size count)
+{
+ char *new = (char *)dest;
+ char *old = (char *)src;
+
+ while (count) {
+ *new = *old;
+ new++;
+ old++;
+ count--;
+ }
+
+ return (dest);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_memset (memset)
+ *
+ * PARAMETERS: dest - Buffer to set
+ * value - Value to set each byte of memory
+ * count - Number of bytes to set
+ *
+ * RETURN: Dest
+ *
+ * DESCRIPTION: Initialize a buffer to a known value.
+ *
+ ******************************************************************************/
+
+void *acpi_ut_memset(void *dest, u8 value, acpi_size count)
+{
+ char *new = (char *)dest;
+
+ while (count) {
+ *new = (char)value;
+ new++;
+ count--;
+ }
+
+ return (dest);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_strlen (strlen)
+ *
+ * PARAMETERS: string - Null terminated string
+ *
+ * RETURN: Length
+ *
+ * DESCRIPTION: Returns the length of the input string
+ *
+ ******************************************************************************/
+
+acpi_size acpi_ut_strlen(const char *string)
+{
+ u32 length = 0;
+
+ /* Count the string until a null is encountered */
+
+ while (*string) {
+ length++;
+ string++;
+ }
+
+ return (length);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_strcpy (strcpy)
+ *
+ * PARAMETERS: dst_string - Target of the copy
+ * src_string - The source string to copy
+ *
+ * RETURN: dst_string
+ *
+ * DESCRIPTION: Copy a null terminated string
+ *
+ ******************************************************************************/
+
+char *acpi_ut_strcpy(char *dst_string, const char *src_string)
+{
+ char *string = dst_string;
+
+ /* Move bytes brute force */
+
+ while (*src_string) {
+ *string = *src_string;
+
+ string++;
+ src_string++;
+ }
+
+ /* Null terminate */
+
+ *string = 0;
+ return (dst_string);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_strncpy (strncpy)
+ *
+ * PARAMETERS: dst_string - Target of the copy
+ * src_string - The source string to copy
+ * count - Maximum # of bytes to copy
+ *
+ * RETURN: dst_string
+ *
+ * DESCRIPTION: Copy a null terminated string, with a maximum length
+ *
+ ******************************************************************************/
+
+char *acpi_ut_strncpy(char *dst_string, const char *src_string, acpi_size count)
+{
+ char *string = dst_string;
+
+ /* Copy the string */
+
+ for (string = dst_string;
+ count && (count--, (*string++ = *src_string++));) {;
+ }
+
+ /* Pad with nulls if necessary */
+
+ while (count--) {
+ *string = 0;
+ string++;
+ }
+
+ /* Return original pointer */
+
+ return (dst_string);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_strcmp (strcmp)
+ *
+ * PARAMETERS: string1 - First string
+ * string2 - Second string
+ *
+ * RETURN: Index where strings mismatched, or 0 if strings matched
+ *
+ * DESCRIPTION: Compare two null terminated strings
+ *
+ ******************************************************************************/
+
+int acpi_ut_strcmp(const char *string1, const char *string2)
+{
+
+ for (; (*string1 == *string2); string2++) {
+ if (!*string1++) {
+ return (0);
+ }
+ }
+
+ return ((unsigned char)*string1 - (unsigned char)*string2);
+}
+
+#ifdef ACPI_FUTURE_IMPLEMENTATION
+/* Not used at this time */
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_strchr (strchr)
+ *
+ * PARAMETERS: string - Search string
+ * ch - character to search for
+ *
+ * RETURN: Ptr to char or NULL if not found
+ *
+ * DESCRIPTION: Search a string for a character
+ *
+ ******************************************************************************/
+
+char *acpi_ut_strchr(const char *string, int ch)
+{
+
+ for (; (*string); string++) {
+ if ((*string) == (char)ch) {
+ return ((char *)string);
+ }
+ }
+
+ return (NULL);
+}
+#endif
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_strncmp (strncmp)
+ *
+ * PARAMETERS: string1 - First string
+ * string2 - Second string
+ * count - Maximum # of bytes to compare
+ *
+ * RETURN: Index where strings mismatched, or 0 if strings matched
+ *
+ * DESCRIPTION: Compare two null terminated strings, with a maximum length
+ *
+ ******************************************************************************/
+
+int acpi_ut_strncmp(const char *string1, const char *string2, acpi_size count)
+{
+
+ for (; count-- && (*string1 == *string2); string2++) {
+ if (!*string1++) {
+ return (0);
+ }
+ }
+
+ return ((count == ACPI_SIZE_MAX) ? 0 : ((unsigned char)*string1 -
+ (unsigned char)*string2));
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_strcat (Strcat)
+ *
+ * PARAMETERS: dst_string - Target of the copy
+ * src_string - The source string to copy
+ *
+ * RETURN: dst_string
+ *
+ * DESCRIPTION: Append a null terminated string to a null terminated string
+ *
+ ******************************************************************************/
+
+char *acpi_ut_strcat(char *dst_string, const char *src_string)
+{
+ char *string;
+
+ /* Find end of the destination string */
+
+ for (string = dst_string; *string++;) {;
+ }
+
+ /* Concatenate the string */
+
+ for (--string; (*string++ = *src_string++);) {;
+ }
+
+ return (dst_string);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_strncat (strncat)
+ *
+ * PARAMETERS: dst_string - Target of the copy
+ * src_string - The source string to copy
+ * count - Maximum # of bytes to copy
+ *
+ * RETURN: dst_string
+ *
+ * DESCRIPTION: Append a null terminated string to a null terminated string,
+ * with a maximum count.
+ *
+ ******************************************************************************/
+
+char *acpi_ut_strncat(char *dst_string, const char *src_string, acpi_size count)
+{
+ char *string;
+
+ if (count) {
+
+ /* Find end of the destination string */
+
+ for (string = dst_string; *string++;) {;
+ }
+
+ /* Concatenate the string */
+
+ for (--string; (*string++ = *src_string++) && --count;) {;
+ }
+
+ /* Null terminate if necessary */
+
+ if (!count) {
+ *string = 0;
+ }
+ }
+
+ return (dst_string);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_strstr (strstr)
+ *
+ * PARAMETERS: string1 - Target string
+ * string2 - Substring to search for
+ *
+ * RETURN: Where substring match starts, Null if no match found
+ *
+ * DESCRIPTION: Checks if String2 occurs in String1. This is not really a
+ * full implementation of strstr, only sufficient for command
+ * matching
+ *
+ ******************************************************************************/
+
+char *acpi_ut_strstr(char *string1, char *string2)
+{
+ char *string;
+
+ if (acpi_ut_strlen(string2) > acpi_ut_strlen(string1)) {
+ return (NULL);
+ }
+
+ /* Walk entire string, comparing the letters */
+
+ for (string = string1; *string2;) {
+ if (*string2 != *string) {
+ return (NULL);
+ }
+
+ string2++;
+ string++;
+ }
+
+ return (string1);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_strtoul (strtoul)
+ *
+ * PARAMETERS: string - Null terminated string
+ * terminator - Where a pointer to the terminating byte is
+ * returned
+ * base - Radix of the string
+ *
+ * RETURN: Converted value
+ *
+ * DESCRIPTION: Convert a string into a 32-bit unsigned value.
+ * Note: use acpi_ut_strtoul64 for 64-bit integers.
+ *
+ ******************************************************************************/
+
+u32 acpi_ut_strtoul(const char *string, char **terminator, u32 base)
+{
+ u32 converted = 0;
+ u32 index;
+ u32 sign;
+ const char *string_start;
+ u32 return_value = 0;
+ acpi_status status = AE_OK;
+
+ /*
+ * Save the value of the pointer to the buffer's first
+ * character, save the current errno value, and then
+ * skip over any white space in the buffer:
+ */
+ string_start = string;
+ while (ACPI_IS_SPACE(*string) || *string == '\t') {
+ ++string;
+ }
+
+ /*
+ * The buffer may contain an optional plus or minus sign.
+ * If it does, then skip over it but remember what is was:
+ */
+ if (*string == '-') {
+ sign = NEGATIVE;
+ ++string;
+ } else if (*string == '+') {
+ ++string;
+ sign = POSITIVE;
+ } else {
+ sign = POSITIVE;
+ }
+
+ /*
+ * If the input parameter Base is zero, then we need to
+ * determine if it is octal, decimal, or hexadecimal:
+ */
+ if (base == 0) {
+ if (*string == '0') {
+ if (acpi_ut_to_lower(*(++string)) == 'x') {
+ base = 16;
+ ++string;
+ } else {
+ base = 8;
+ }
+ } else {
+ base = 10;
+ }
+ } else if (base < 2 || base > 36) {
+ /*
+ * The specified Base parameter is not in the domain of
+ * this function:
+ */
+ goto done;
+ }
+
+ /*
+ * For octal and hexadecimal bases, skip over the leading
+ * 0 or 0x, if they are present.
+ */
+ if (base == 8 && *string == '0') {
+ string++;
+ }
+
+ if (base == 16 &&
+ *string == '0' && acpi_ut_to_lower(*(++string)) == 'x') {
+ string++;
+ }
+
+ /*
+ * Main loop: convert the string to an unsigned long:
+ */
+ while (*string) {
+ if (ACPI_IS_DIGIT(*string)) {
+ index = (u32)((u8)*string - '0');
+ } else {
+ index = (u32)acpi_ut_to_upper(*string);
+ if (ACPI_IS_UPPER(index)) {
+ index = index - 'A' + 10;
+ } else {
+ goto done;
+ }
+ }
+
+ if (index >= base) {
+ goto done;
+ }
+
+ /*
+ * Check to see if value is out of range:
+ */
+
+ if (return_value > ((ACPI_UINT32_MAX - (u32)index) / (u32)base)) {
+ status = AE_ERROR;
+ return_value = 0; /* reset */
+ } else {
+ return_value *= base;
+ return_value += index;
+ converted = 1;
+ }
+
+ ++string;
+ }
+
+ done:
+ /*
+ * If appropriate, update the caller's pointer to the next
+ * unconverted character in the buffer.
+ */
+ if (terminator) {
+ if (converted == 0 && return_value == 0 && string != NULL) {
+ *terminator = (char *)string_start;
+ } else {
+ *terminator = (char *)string;
+ }
+ }
+
+ if (status == AE_ERROR) {
+ return_value = ACPI_UINT32_MAX;
+ }
+
+ /*
+ * If a minus sign was present, then "the conversion is negated":
+ */
+ if (sign == NEGATIVE) {
+ return_value = (ACPI_UINT32_MAX - return_value) + 1;
+ }
+
+ return (return_value);
+}
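/*
 * Worked examples (illustrative, assuming this fallback library is compiled
 * in because ACPI_USE_SYSTEM_CLIBRARY is not defined):
 */

char *end;
u32 v;

v = acpi_ut_strtoul("  0x1A", &end, 0);	/* base auto-detected as 16, v == 26 */
v = acpi_ut_strtoul("0755", &end, 0);	/* leading 0 selects octal, v == 493 */
v = acpi_ut_strtoul("-5", &end, 10);	/* result is negated, v == 0xFFFFFFFB */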
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_to_upper (TOUPPER)
+ *
+ * PARAMETERS: c - Character to convert
+ *
+ * RETURN: Converted character as an int
+ *
+ * DESCRIPTION: Convert character to uppercase
+ *
+ ******************************************************************************/
+
+int acpi_ut_to_upper(int c)
+{
+
+ return (ACPI_IS_LOWER(c) ? ((c) - 0x20) : (c));
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_to_lower (TOLOWER)
+ *
+ * PARAMETERS: c - Character to convert
+ *
+ * RETURN: Converted character as an int
+ *
+ * DESCRIPTION: Convert character to lowercase
+ *
+ ******************************************************************************/
+
+int acpi_ut_to_lower(int c)
+{
+
+ return (ACPI_IS_UPPER(c) ? ((c) + 0x20) : (c));
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: is* functions
+ *
+ * DESCRIPTION: is* functions use the ctype table below
+ *
+ ******************************************************************************/
+
+const u8 _acpi_ctype[257] = {
+ _ACPI_CN, /* 0x00 0 NUL */
+ _ACPI_CN, /* 0x01 1 SOH */
+ _ACPI_CN, /* 0x02 2 STX */
+ _ACPI_CN, /* 0x03 3 ETX */
+ _ACPI_CN, /* 0x04 4 EOT */
+ _ACPI_CN, /* 0x05 5 ENQ */
+ _ACPI_CN, /* 0x06 6 ACK */
+ _ACPI_CN, /* 0x07 7 BEL */
+ _ACPI_CN, /* 0x08 8 BS */
+ _ACPI_CN | _ACPI_SP, /* 0x09 9 TAB */
+ _ACPI_CN | _ACPI_SP, /* 0x0A 10 LF */
+ _ACPI_CN | _ACPI_SP, /* 0x0B 11 VT */
+ _ACPI_CN | _ACPI_SP, /* 0x0C 12 FF */
+ _ACPI_CN | _ACPI_SP, /* 0x0D 13 CR */
+ _ACPI_CN, /* 0x0E 14 SO */
+ _ACPI_CN, /* 0x0F 15 SI */
+ _ACPI_CN, /* 0x10 16 DLE */
+ _ACPI_CN, /* 0x11 17 DC1 */
+ _ACPI_CN, /* 0x12 18 DC2 */
+ _ACPI_CN, /* 0x13 19 DC3 */
+ _ACPI_CN, /* 0x14 20 DC4 */
+ _ACPI_CN, /* 0x15 21 NAK */
+ _ACPI_CN, /* 0x16 22 SYN */
+ _ACPI_CN, /* 0x17 23 ETB */
+ _ACPI_CN, /* 0x18 24 CAN */
+ _ACPI_CN, /* 0x19 25 EM */
+ _ACPI_CN, /* 0x1A 26 SUB */
+ _ACPI_CN, /* 0x1B 27 ESC */
+ _ACPI_CN, /* 0x1C 28 FS */
+ _ACPI_CN, /* 0x1D 29 GS */
+ _ACPI_CN, /* 0x1E 30 RS */
+ _ACPI_CN, /* 0x1F 31 US */
+ _ACPI_XS | _ACPI_SP, /* 0x20 32 ' ' */
+ _ACPI_PU, /* 0x21 33 '!' */
+ _ACPI_PU, /* 0x22 34 '"' */
+ _ACPI_PU, /* 0x23 35 '#' */
+ _ACPI_PU, /* 0x24 36 '$' */
+ _ACPI_PU, /* 0x25 37 '%' */
+ _ACPI_PU, /* 0x26 38 '&' */
+ _ACPI_PU, /* 0x27 39 ''' */
+ _ACPI_PU, /* 0x28 40 '(' */
+ _ACPI_PU, /* 0x29 41 ')' */
+ _ACPI_PU, /* 0x2A 42 '*' */
+ _ACPI_PU, /* 0x2B 43 '+' */
+ _ACPI_PU, /* 0x2C 44 ',' */
+ _ACPI_PU, /* 0x2D 45 '-' */
+ _ACPI_PU, /* 0x2E 46 '.' */
+ _ACPI_PU, /* 0x2F 47 '/' */
+ _ACPI_XD | _ACPI_DI, /* 0x30 48 '0' */
+ _ACPI_XD | _ACPI_DI, /* 0x31 49 '1' */
+ _ACPI_XD | _ACPI_DI, /* 0x32 50 '2' */
+ _ACPI_XD | _ACPI_DI, /* 0x33 51 '3' */
+ _ACPI_XD | _ACPI_DI, /* 0x34 52 '4' */
+ _ACPI_XD | _ACPI_DI, /* 0x35 53 '5' */
+ _ACPI_XD | _ACPI_DI, /* 0x36 54 '6' */
+ _ACPI_XD | _ACPI_DI, /* 0x37 55 '7' */
+ _ACPI_XD | _ACPI_DI, /* 0x38 56 '8' */
+ _ACPI_XD | _ACPI_DI, /* 0x39 57 '9' */
+ _ACPI_PU, /* 0x3A 58 ':' */
+ _ACPI_PU, /* 0x3B 59 ';' */
+ _ACPI_PU, /* 0x3C 60 '<' */
+ _ACPI_PU, /* 0x3D 61 '=' */
+ _ACPI_PU, /* 0x3E 62 '>' */
+ _ACPI_PU, /* 0x3F 63 '?' */
+ _ACPI_PU, /* 0x40 64 '@' */
+ _ACPI_XD | _ACPI_UP, /* 0x41 65 'A' */
+ _ACPI_XD | _ACPI_UP, /* 0x42 66 'B' */
+ _ACPI_XD | _ACPI_UP, /* 0x43 67 'C' */
+ _ACPI_XD | _ACPI_UP, /* 0x44 68 'D' */
+ _ACPI_XD | _ACPI_UP, /* 0x45 69 'E' */
+ _ACPI_XD | _ACPI_UP, /* 0x46 70 'F' */
+ _ACPI_UP, /* 0x47 71 'G' */
+ _ACPI_UP, /* 0x48 72 'H' */
+ _ACPI_UP, /* 0x49 73 'I' */
+ _ACPI_UP, /* 0x4A 74 'J' */
+ _ACPI_UP, /* 0x4B 75 'K' */
+ _ACPI_UP, /* 0x4C 76 'L' */
+ _ACPI_UP, /* 0x4D 77 'M' */
+ _ACPI_UP, /* 0x4E 78 'N' */
+ _ACPI_UP, /* 0x4F 79 'O' */
+ _ACPI_UP, /* 0x50 80 'P' */
+ _ACPI_UP, /* 0x51 81 'Q' */
+ _ACPI_UP, /* 0x52 82 'R' */
+ _ACPI_UP, /* 0x53 83 'S' */
+ _ACPI_UP, /* 0x54 84 'T' */
+ _ACPI_UP, /* 0x55 85 'U' */
+ _ACPI_UP, /* 0x56 86 'V' */
+ _ACPI_UP, /* 0x57 87 'W' */
+ _ACPI_UP, /* 0x58 88 'X' */
+ _ACPI_UP, /* 0x59 89 'Y' */
+ _ACPI_UP, /* 0x5A 90 'Z' */
+ _ACPI_PU, /* 0x5B 91 '[' */
+ _ACPI_PU, /* 0x5C 92 '\' */
+ _ACPI_PU, /* 0x5D 93 ']' */
+ _ACPI_PU, /* 0x5E 94 '^' */
+ _ACPI_PU, /* 0x5F 95 '_' */
+ _ACPI_PU, /* 0x60 96 '`' */
+ _ACPI_XD | _ACPI_LO, /* 0x61 97 'a' */
+ _ACPI_XD | _ACPI_LO, /* 0x62 98 'b' */
+ _ACPI_XD | _ACPI_LO, /* 0x63 99 'c' */
+ _ACPI_XD | _ACPI_LO, /* 0x64 100 'd' */
+ _ACPI_XD | _ACPI_LO, /* 0x65 101 'e' */
+ _ACPI_XD | _ACPI_LO, /* 0x66 102 'f' */
+ _ACPI_LO, /* 0x67 103 'g' */
+ _ACPI_LO, /* 0x68 104 'h' */
+ _ACPI_LO, /* 0x69 105 'i' */
+ _ACPI_LO, /* 0x6A 106 'j' */
+ _ACPI_LO, /* 0x6B 107 'k' */
+ _ACPI_LO, /* 0x6C 108 'l' */
+ _ACPI_LO, /* 0x6D 109 'm' */
+ _ACPI_LO, /* 0x6E 110 'n' */
+ _ACPI_LO, /* 0x6F 111 'o' */
+ _ACPI_LO, /* 0x70 112 'p' */
+ _ACPI_LO, /* 0x71 113 'q' */
+ _ACPI_LO, /* 0x72 114 'r' */
+ _ACPI_LO, /* 0x73 115 's' */
+ _ACPI_LO, /* 0x74 116 't' */
+ _ACPI_LO, /* 0x75 117 'u' */
+ _ACPI_LO, /* 0x76 118 'v' */
+ _ACPI_LO, /* 0x77 119 'w' */
+ _ACPI_LO, /* 0x78 120 'x' */
+ _ACPI_LO, /* 0x79 121 'y' */
+ _ACPI_LO, /* 0x7A 122 'z' */
+ _ACPI_PU, /* 0x7B 123 '{' */
+ _ACPI_PU, /* 0x7C 124 '|' */
+ _ACPI_PU, /* 0x7D 125 '}' */
+ _ACPI_PU, /* 0x7E 126 '~' */
+ _ACPI_CN, /* 0x7F 127 DEL */
+
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0x80 to 0x8F */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0x90 to 0x9F */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0xA0 to 0xAF */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0xB0 to 0xBF */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0xC0 to 0xCF */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0xD0 to 0xDF */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0xE0 to 0xEF */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0xF0 to 0xFF */
+ 0 /* 0x100 */
+};
+
+#endif /* ACPI_USE_SYSTEM_CLIBRARY */
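/*
 * Sketch only: the character-class macros that consume the table above are
 * defined elsewhere (acmacros.h, also touched by this series); they follow
 * the usual pattern of masking one table entry, roughly as below. The MY_*
 * names are hypothetical.
 */

#define MY_IS_DIGIT(c)  (_acpi_ctype[(unsigned char)(c)] & _ACPI_DI)
#define MY_IS_SPACE(c)  (_acpi_ctype[(unsigned char)(c)] & _ACPI_SP)
#define MY_IS_XDIGIT(c) (_acpi_ctype[(unsigned char)(c)] & _ACPI_XD)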
diff --git a/drivers/acpi/acpica/utdebug.c b/drivers/acpi/acpica/utdebug.c
index e810894149ae..5d95166245ae 100644
--- a/drivers/acpi/acpica/utdebug.c
+++ b/drivers/acpi/acpica/utdebug.c
@@ -47,8 +47,9 @@
#define _COMPONENT ACPI_UTILITIES
ACPI_MODULE_NAME("utdebug")
+
#ifdef ACPI_DEBUG_OUTPUT
-static acpi_thread_id acpi_gbl_prev_thread_id;
+static acpi_thread_id acpi_gbl_prev_thread_id = (acpi_thread_id) 0xFFFFFFFF;
static char *acpi_gbl_fn_entry_str = "----Entry";
static char *acpi_gbl_fn_exit_str = "----Exit-";
@@ -109,7 +110,7 @@ void acpi_ut_track_stack_ptr(void)
* RETURN: Updated pointer to the function name
*
* DESCRIPTION: Remove the "Acpi" prefix from the function name, if present.
- * This allows compiler macros such as __func__ to be used
+ * This allows compiler macros such as __FUNCTION__ to be used
* with no change to the debug output.
*
******************************************************************************/
@@ -222,7 +223,7 @@ ACPI_EXPORT_SYMBOL(acpi_debug_print)
*
* RETURN: None
*
- * DESCRIPTION: Print message with no headers. Has same interface as
+ * DESCRIPTION: Print message with no headers. Has same interface as
* debug_print so that the same macros can be used.
*
******************************************************************************/
@@ -258,7 +259,7 @@ ACPI_EXPORT_SYMBOL(acpi_debug_print_raw)
*
* RETURN: None
*
- * DESCRIPTION: Function entry trace. Prints only if TRACE_FUNCTIONS bit is
+ * DESCRIPTION: Function entry trace. Prints only if TRACE_FUNCTIONS bit is
* set in debug_level
*
******************************************************************************/
@@ -290,7 +291,7 @@ ACPI_EXPORT_SYMBOL(acpi_ut_trace)
*
* RETURN: None
*
- * DESCRIPTION: Function entry trace. Prints only if TRACE_FUNCTIONS bit is
+ * DESCRIPTION: Function entry trace. Prints only if TRACE_FUNCTIONS bit is
* set in debug_level
*
******************************************************************************/
@@ -299,6 +300,7 @@ acpi_ut_trace_ptr(u32 line_number,
const char *function_name,
const char *module_name, u32 component_id, void *pointer)
{
+
acpi_gbl_nesting_level++;
acpi_ut_track_stack_ptr();
@@ -319,7 +321,7 @@ acpi_ut_trace_ptr(u32 line_number,
*
* RETURN: None
*
- * DESCRIPTION: Function entry trace. Prints only if TRACE_FUNCTIONS bit is
+ * DESCRIPTION: Function entry trace. Prints only if TRACE_FUNCTIONS bit is
* set in debug_level
*
******************************************************************************/
@@ -350,7 +352,7 @@ acpi_ut_trace_str(u32 line_number,
*
* RETURN: None
*
- * DESCRIPTION: Function entry trace. Prints only if TRACE_FUNCTIONS bit is
+ * DESCRIPTION: Function entry trace. Prints only if TRACE_FUNCTIONS bit is
* set in debug_level
*
******************************************************************************/
@@ -380,7 +382,7 @@ acpi_ut_trace_u32(u32 line_number,
*
* RETURN: None
*
- * DESCRIPTION: Function exit trace. Prints only if TRACE_FUNCTIONS bit is
+ * DESCRIPTION: Function exit trace. Prints only if TRACE_FUNCTIONS bit is
* set in debug_level
*
******************************************************************************/
@@ -412,7 +414,7 @@ ACPI_EXPORT_SYMBOL(acpi_ut_exit)
*
* RETURN: None
*
- * DESCRIPTION: Function exit trace. Prints only if TRACE_FUNCTIONS bit is
+ * DESCRIPTION: Function exit trace. Prints only if TRACE_FUNCTIONS bit is
* set in debug_level. Prints exit status also.
*
******************************************************************************/
@@ -453,7 +455,7 @@ ACPI_EXPORT_SYMBOL(acpi_ut_status_exit)
*
* RETURN: None
*
- * DESCRIPTION: Function exit trace. Prints only if TRACE_FUNCTIONS bit is
+ * DESCRIPTION: Function exit trace. Prints only if TRACE_FUNCTIONS bit is
* set in debug_level. Prints exit value also.
*
******************************************************************************/
@@ -485,7 +487,7 @@ ACPI_EXPORT_SYMBOL(acpi_ut_value_exit)
*
* RETURN: None
*
- * DESCRIPTION: Function exit trace. Prints only if TRACE_FUNCTIONS bit is
+ * DESCRIPTION: Function exit trace. Prints only if TRACE_FUNCTIONS bit is
* set in debug_level. Prints exit value also.
*
******************************************************************************/
@@ -511,7 +513,7 @@ acpi_ut_ptr_exit(u32 line_number,
* PARAMETERS: buffer - Buffer to dump
* count - Amount to dump, in bytes
* display - BYTE, WORD, DWORD, or QWORD display
- * component_ID - Caller's component ID
+ * base_offset - Beginning buffer offset (display only)
*
* RETURN: None
*
@@ -519,7 +521,7 @@ acpi_ut_ptr_exit(u32 line_number,
*
******************************************************************************/
-void acpi_ut_dump_buffer2(u8 * buffer, u32 count, u32 display)
+void acpi_ut_dump_buffer(u8 *buffer, u32 count, u32 display, u32 base_offset)
{
u32 i = 0;
u32 j;
@@ -541,7 +543,7 @@ void acpi_ut_dump_buffer2(u8 * buffer, u32 count, u32 display)
/* Print current offset */
- acpi_os_printf("%6.4X: ", i);
+ acpi_os_printf("%6.4X: ", (base_offset + i));
/* Print 16 hex chars */
@@ -623,7 +625,7 @@ void acpi_ut_dump_buffer2(u8 * buffer, u32 count, u32 display)
/*******************************************************************************
*
- * FUNCTION: acpi_ut_dump_buffer
+ * FUNCTION: acpi_ut_debug_dump_buffer
*
* PARAMETERS: buffer - Buffer to dump
* count - Amount to dump, in bytes
@@ -636,7 +638,8 @@ void acpi_ut_dump_buffer2(u8 * buffer, u32 count, u32 display)
*
******************************************************************************/
-void acpi_ut_dump_buffer(u8 * buffer, u32 count, u32 display, u32 component_id)
+void
+acpi_ut_debug_dump_buffer(u8 *buffer, u32 count, u32 display, u32 component_id)
{
/* Only dump the buffer if tracing is enabled */
@@ -646,5 +649,5 @@ void acpi_ut_dump_buffer(u8 * buffer, u32 count, u32 display, u32 component_id)
return;
}
- acpi_ut_dump_buffer2(buffer, count, display);
+ acpi_ut_dump_buffer(buffer, count, display, 0);
}
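/*
 * Illustrative callers (not in this hunk) after the rename: raw dumps take an
 * explicit starting offset for display, while debug-level-gated dumps keep a
 * component ID. DB_BYTE_DISPLAY is assumed to be the byte-wide display
 * selector.
 */

u8 data[32] = { 0 };

acpi_ut_dump_buffer(data, sizeof(data), DB_BYTE_DISPLAY, 0x40);
acpi_ut_debug_dump_buffer(data, sizeof(data), DB_BYTE_DISPLAY, ACPI_UTILITIES);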
diff --git a/drivers/acpi/acpica/utids.c b/drivers/acpi/acpica/utids.c
index 5d84e1954575..774c3aefbf5d 100644
--- a/drivers/acpi/acpica/utids.c
+++ b/drivers/acpi/acpica/utids.c
@@ -67,10 +67,10 @@ ACPI_MODULE_NAME("utids")
******************************************************************************/
acpi_status
acpi_ut_execute_HID(struct acpi_namespace_node *device_node,
- struct acpica_device_id **return_id)
+ struct acpi_pnp_device_id **return_id)
{
union acpi_operand_object *obj_desc;
- struct acpica_device_id *hid;
+ struct acpi_pnp_device_id *hid;
u32 length;
acpi_status status;
@@ -94,16 +94,17 @@ acpi_ut_execute_HID(struct acpi_namespace_node *device_node,
/* Allocate a buffer for the HID */
hid =
- ACPI_ALLOCATE_ZEROED(sizeof(struct acpica_device_id) +
+ ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_pnp_device_id) +
(acpi_size) length);
if (!hid) {
status = AE_NO_MEMORY;
goto cleanup;
}
- /* Area for the string starts after DEVICE_ID struct */
+ /* Area for the string starts after PNP_DEVICE_ID struct */
- hid->string = ACPI_ADD_PTR(char, hid, sizeof(struct acpica_device_id));
+ hid->string =
+ ACPI_ADD_PTR(char, hid, sizeof(struct acpi_pnp_device_id));
/* Convert EISAID to a string or simply copy existing string */
@@ -126,6 +127,73 @@ cleanup:
/*******************************************************************************
*
+ * FUNCTION: acpi_ut_execute_SUB
+ *
+ * PARAMETERS: device_node - Node for the device
+ * return_id - Where the _SUB is returned
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Executes the _SUB control method that returns the subsystem
+ * ID of the device. The _SUB value is always a string containing
+ * either a valid PNP or ACPI ID.
+ *
+ * NOTE: Internal function, no parameter validation
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ut_execute_SUB(struct acpi_namespace_node *device_node,
+ struct acpi_pnp_device_id **return_id)
+{
+ union acpi_operand_object *obj_desc;
+ struct acpi_pnp_device_id *sub;
+ u32 length;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(ut_execute_SUB);
+
+ status = acpi_ut_evaluate_object(device_node, METHOD_NAME__SUB,
+ ACPI_BTYPE_STRING, &obj_desc);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Get the size of the String to be returned, includes null terminator */
+
+ length = obj_desc->string.length + 1;
+
+ /* Allocate a buffer for the SUB */
+
+ sub =
+ ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_pnp_device_id) +
+ (acpi_size) length);
+ if (!sub) {
+ status = AE_NO_MEMORY;
+ goto cleanup;
+ }
+
+ /* Area for the string starts after PNP_DEVICE_ID struct */
+
+ sub->string =
+ ACPI_ADD_PTR(char, sub, sizeof(struct acpi_pnp_device_id));
+
+ /* Simply copy existing string */
+
+ ACPI_STRCPY(sub->string, obj_desc->string.pointer);
+ sub->length = length;
+ *return_id = sub;
+
+ cleanup:
+
+ /* On exit, we must delete the return object */
+
+ acpi_ut_remove_reference(obj_desc);
+ return_ACPI_STATUS(status);
+}
+
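/*
 * Caller sketch (illustrative, not part of this patch): the returned
 * acpi_pnp_device_id is a single allocation, so one ACPI_FREE releases it.
 * "device_node" stands for an already-resolved namespace node.
 */

struct acpi_namespace_node *device_node;	/* assumed: obtained from the namespace */
struct acpi_pnp_device_id *sub;
acpi_status status;

status = acpi_ut_execute_SUB(device_node, &sub);
if (ACPI_SUCCESS(status)) {
	/* sub->string holds the subsystem ID, sub->length includes the null */
	ACPI_FREE(sub);
}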
+/*******************************************************************************
+ *
* FUNCTION: acpi_ut_execute_UID
*
* PARAMETERS: device_node - Node for the device
@@ -144,10 +212,10 @@ cleanup:
acpi_status
acpi_ut_execute_UID(struct acpi_namespace_node *device_node,
- struct acpica_device_id **return_id)
+ struct acpi_pnp_device_id **return_id)
{
union acpi_operand_object *obj_desc;
- struct acpica_device_id *uid;
+ struct acpi_pnp_device_id *uid;
u32 length;
acpi_status status;
@@ -171,16 +239,17 @@ acpi_ut_execute_UID(struct acpi_namespace_node *device_node,
/* Allocate a buffer for the UID */
uid =
- ACPI_ALLOCATE_ZEROED(sizeof(struct acpica_device_id) +
+ ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_pnp_device_id) +
(acpi_size) length);
if (!uid) {
status = AE_NO_MEMORY;
goto cleanup;
}
- /* Area for the string starts after DEVICE_ID struct */
+ /* Area for the string starts after PNP_DEVICE_ID struct */
- uid->string = ACPI_ADD_PTR(char, uid, sizeof(struct acpica_device_id));
+ uid->string =
+ ACPI_ADD_PTR(char, uid, sizeof(struct acpi_pnp_device_id));
/* Convert an Integer to string, or just copy an existing string */
@@ -226,11 +295,11 @@ cleanup:
acpi_status
acpi_ut_execute_CID(struct acpi_namespace_node *device_node,
- struct acpica_device_id_list **return_cid_list)
+ struct acpi_pnp_device_id_list **return_cid_list)
{
union acpi_operand_object **cid_objects;
union acpi_operand_object *obj_desc;
- struct acpica_device_id_list *cid_list;
+ struct acpi_pnp_device_id_list *cid_list;
char *next_id_string;
u32 string_area_size;
u32 length;
@@ -288,11 +357,12 @@ acpi_ut_execute_CID(struct acpi_namespace_node *device_node,
/*
* Now that we know the length of the CIDs, allocate return buffer:
* 1) Size of the base structure +
- * 2) Size of the CID DEVICE_ID array +
+ * 2) Size of the CID PNP_DEVICE_ID array +
* 3) Size of the actual CID strings
*/
- cid_list_size = sizeof(struct acpica_device_id_list) +
- ((count - 1) * sizeof(struct acpica_device_id)) + string_area_size;
+ cid_list_size = sizeof(struct acpi_pnp_device_id_list) +
+ ((count - 1) * sizeof(struct acpi_pnp_device_id)) +
+ string_area_size;
cid_list = ACPI_ALLOCATE_ZEROED(cid_list_size);
if (!cid_list) {
@@ -300,10 +370,10 @@ acpi_ut_execute_CID(struct acpi_namespace_node *device_node,
goto cleanup;
}
- /* Area for CID strings starts after the CID DEVICE_ID array */
+ /* Area for CID strings starts after the CID PNP_DEVICE_ID array */
next_id_string = ACPI_CAST_PTR(char, cid_list->ids) +
- ((acpi_size) count * sizeof(struct acpica_device_id));
+ ((acpi_size) count * sizeof(struct acpi_pnp_device_id));
/* Copy/convert the CIDs to the return buffer */
diff --git a/drivers/acpi/acpica/utmath.c b/drivers/acpi/acpica/utmath.c
index d88a8aaab2a6..49563674833a 100644
--- a/drivers/acpi/acpica/utmath.c
+++ b/drivers/acpi/acpica/utmath.c
@@ -81,7 +81,7 @@ typedef union uint64_overlay {
* RETURN: Status (Checks for divide-by-zero)
*
* DESCRIPTION: Perform a short (maximum 64 bits divided by 32 bits)
- * divide and modulo. The result is a 64-bit quotient and a
+ * divide and modulo. The result is a 64-bit quotient and a
* 32-bit remainder.
*
******************************************************************************/
diff --git a/drivers/acpi/acpica/utmisc.c b/drivers/acpi/acpica/utmisc.c
index 33c6cf7ff467..9286a69eb9aa 100644
--- a/drivers/acpi/acpica/utmisc.c
+++ b/drivers/acpi/acpica/utmisc.c
@@ -41,8 +41,6 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
-#include <linux/module.h>
-
#include <acpi/acpi.h>
#include "accommon.h"
#include "acnamesp.h"
@@ -201,8 +199,8 @@ acpi_status acpi_ut_allocate_owner_id(acpi_owner_id * owner_id)
*/
acpi_gbl_owner_id_mask[j] |= (1 << k);
- acpi_gbl_last_owner_id_index = (u8) j;
- acpi_gbl_next_owner_id_offset = (u8) (k + 1);
+ acpi_gbl_last_owner_id_index = (u8)j;
+ acpi_gbl_next_owner_id_offset = (u8)(k + 1);
/*
* Construct encoded ID from the index and bit position
@@ -252,7 +250,7 @@ acpi_status acpi_ut_allocate_owner_id(acpi_owner_id * owner_id)
* control method or unloading a table. Either way, we would
* ignore any error anyway.
*
- * DESCRIPTION: Release a table or method owner ID. Valid IDs are 1 - 255
+ * DESCRIPTION: Release a table or method owner ID. Valid IDs are 1 - 255
*
******************************************************************************/
@@ -339,6 +337,73 @@ void acpi_ut_strupr(char *src_string)
return;
}
+#ifdef ACPI_ASL_COMPILER
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_strlwr (strlwr)
+ *
+ * PARAMETERS: src_string - The source string to convert
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Convert string to lowercase
+ *
+ * NOTE: This is not a POSIX function, so it appears here, not in utclib.c
+ *
+ ******************************************************************************/
+
+void acpi_ut_strlwr(char *src_string)
+{
+ char *string;
+
+ ACPI_FUNCTION_ENTRY();
+
+ if (!src_string) {
+ return;
+ }
+
+ /* Walk entire string, lowercasing the letters */
+
+ for (string = src_string; *string; string++) {
+ *string = (char)ACPI_TOLOWER(*string);
+ }
+
+ return;
+}
+
+/******************************************************************************
+ *
+ * FUNCTION: acpi_ut_stricmp
+ *
+ * PARAMETERS: string1 - first string to compare
+ * string2 - second string to compare
+ *
+ * RETURN: int that signifies string relationship. Zero means strings
+ * are equal.
+ *
+ * DESCRIPTION: Implementation of the non-ANSI stricmp function (compare
+ * strings with no case sensitivity)
+ *
+ ******************************************************************************/
+
+int acpi_ut_stricmp(char *string1, char *string2)
+{
+ int c1;
+ int c2;
+
+ do {
+ c1 = tolower((int)*string1);
+ c2 = tolower((int)*string2);
+
+ string1++;
+ string2++;
+ }
+ while ((c1 == c2) && (c1));
+
+ return (c1 - c2);
+}
+#endif
+
/*******************************************************************************
*
* FUNCTION: acpi_ut_print_string
@@ -469,8 +534,8 @@ u32 acpi_ut_dword_byte_swap(u32 value)
* RETURN: None
*
* DESCRIPTION: Set the global integer bit width based upon the revision
- * of the DSDT. For Revision 1 and 0, Integers are 32 bits.
- * For Revision 2 and above, Integers are 64 bits. Yes, this
+ * of the DSDT. For Revision 1 and 0, Integers are 32 bits.
+ * For Revision 2 and above, Integers are 64 bits. Yes, this
* makes a difference.
*
******************************************************************************/
@@ -606,7 +671,7 @@ u8 acpi_ut_valid_acpi_char(char character, u32 position)
*
* RETURN: TRUE if the name is valid, FALSE otherwise
*
- * DESCRIPTION: Check for a valid ACPI name. Each character must be one of:
+ * DESCRIPTION: Check for a valid ACPI name. Each character must be one of:
* 1) Upper case alpha
* 2) numeric
* 3) underscore
@@ -638,29 +703,59 @@ u8 acpi_ut_valid_acpi_name(u32 name)
* RETURN: Repaired version of the name
*
* DESCRIPTION: Repair an ACPI name: Change invalid characters to '*' and
- * return the new name.
+ * return the new name. NOTE: the Name parameter must reside in
+ * read/write memory, cannot be a const.
+ *
+ * An ACPI Name must consist of valid ACPI characters. We will repair the name
+ * if necessary because we don't want to abort because of this, but we want
+ * all namespace names to be printable. A warning message is appropriate.
+ *
+ * This issue came up because there are in fact machines that exhibit
+ * this problem, and we want to be able to enable ACPI support for them,
+ * even though there are a few bad names.
*
******************************************************************************/
-acpi_name acpi_ut_repair_name(char *name)
+void acpi_ut_repair_name(char *name)
{
- u32 i;
- char new_name[ACPI_NAME_SIZE];
+ u32 i;
+ u8 found_bad_char = FALSE;
+ u32 original_name;
+
+ ACPI_FUNCTION_NAME(ut_repair_name);
+
+ ACPI_MOVE_NAME(&original_name, name);
+
+ /* Check each character in the name */
for (i = 0; i < ACPI_NAME_SIZE; i++) {
- new_name[i] = name[i];
+ if (acpi_ut_valid_acpi_char(name[i], i)) {
+ continue;
+ }
/*
* Replace a bad character with something printable, yet technically
* still invalid. This prevents any collisions with existing "good"
* names in the namespace.
*/
- if (!acpi_ut_valid_acpi_char(name[i], i)) {
- new_name[i] = '*';
- }
+ name[i] = '*';
+ found_bad_char = TRUE;
}
- return (*(u32 *) new_name);
+ if (found_bad_char) {
+
+ /* Report warning only if in strict mode or debug mode */
+
+ if (!acpi_gbl_enable_interpreter_slack) {
+ ACPI_WARNING((AE_INFO,
+ "Found bad character(s) in name, repaired: [%4.4s]\n",
+ name));
+ } else {
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Found bad character(s) in name, repaired: [%4.4s]\n",
+ name));
+ }
+ }
}
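/*
 * Worked example (hypothetical name, not from this patch): the repair is done
 * in place, so the buffer must be writable.
 */

char name[ACPI_NAME_SIZE + 1] = "AB?_";

acpi_ut_repair_name(name);	/* '?' is not a valid ACPI name character, name becomes "AB*_" */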
/*******************************************************************************
@@ -681,7 +776,7 @@ acpi_name acpi_ut_repair_name(char *name)
*
******************************************************************************/
-acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 * ret_integer)
+acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer)
{
u32 this_digit = 0;
u64 return_value = 0;
@@ -754,14 +849,14 @@ acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 * ret_integer)
/* Convert ASCII 0-9 to Decimal value */
- this_digit = ((u8) * string) - '0';
+ this_digit = ((u8)*string) - '0';
} else if (base == 10) {
/* Digit is out of range; possible in to_integer case only */
term = 1;
} else {
- this_digit = (u8) ACPI_TOUPPER(*string);
+ this_digit = (u8)ACPI_TOUPPER(*string);
if (ACPI_IS_XDIGIT((char)this_digit)) {
/* Convert ASCII Hex char to value */
@@ -788,8 +883,9 @@ acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 * ret_integer)
valid_digits++;
- if (sign_of0x && ((valid_digits > 16)
- || ((valid_digits > 8) && mode32))) {
+ if (sign_of0x
+ && ((valid_digits > 16)
+ || ((valid_digits > 8) && mode32))) {
/*
* This is to_integer operation case.
* No any restrictions for string-to-integer conversion,
@@ -800,7 +896,7 @@ acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 * ret_integer)
/* Divide the digit into the correct position */
- (void)acpi_ut_short_divide((dividend - (u64) this_digit),
+ (void)acpi_ut_short_divide((dividend - (u64)this_digit),
base, &quotient, NULL);
if (return_value > quotient) {
@@ -890,7 +986,7 @@ acpi_ut_create_update_state_and_push(union acpi_operand_object *object,
******************************************************************************/
acpi_status
-acpi_ut_walk_package_tree(union acpi_operand_object * source_object,
+acpi_ut_walk_package_tree(union acpi_operand_object *source_object,
void *target_object,
acpi_pkg_callback walk_callback, void *context)
{
@@ -917,10 +1013,10 @@ acpi_ut_walk_package_tree(union acpi_operand_object * source_object,
/*
* Check for:
- * 1) An uninitialized package element. It is completely
+ * 1) An uninitialized package element. It is completely
* legal to declare a package and leave it uninitialized
* 2) Not an internal object - can be a namespace node instead
- * 3) Any type other than a package. Packages are handled in else
+ * 3) Any type other than a package. Packages are handled in else
* case below.
*/
if ((!this_source_obj) ||
@@ -939,7 +1035,7 @@ acpi_ut_walk_package_tree(union acpi_operand_object * source_object,
state->pkg.source_object->package.count) {
/*
* We've handled all of the objects at this level, This means
- * that we have just completed a package. That package may
+ * that we have just completed a package. That package may
* have contained one or more packages itself.
*
* Delete this state and pop the previous state (package).
diff --git a/drivers/acpi/acpica/utmutex.c b/drivers/acpi/acpica/utmutex.c
index 296baa676bc5..5ccf57c0d87e 100644
--- a/drivers/acpi/acpica/utmutex.c
+++ b/drivers/acpi/acpica/utmutex.c
@@ -193,6 +193,8 @@ static void acpi_ut_delete_mutex(acpi_mutex_handle mutex_id)
acpi_gbl_mutex_info[mutex_id].mutex = NULL;
acpi_gbl_mutex_info[mutex_id].thread_id = ACPI_MUTEX_NOT_ACQUIRED;
+
+ return_VOID;
}
/*******************************************************************************
@@ -226,9 +228,9 @@ acpi_status acpi_ut_acquire_mutex(acpi_mutex_handle mutex_id)
/*
* Mutex debug code, for internal debugging only.
*
- * Deadlock prevention. Check if this thread owns any mutexes of value
- * greater than or equal to this one. If so, the thread has violated
- * the mutex ordering rule. This indicates a coding error somewhere in
+ * Deadlock prevention. Check if this thread owns any mutexes of value
+ * greater than or equal to this one. If so, the thread has violated
+ * the mutex ordering rule. This indicates a coding error somewhere in
* the ACPI subsystem code.
*/
for (i = mutex_id; i < ACPI_NUM_MUTEX; i++) {
@@ -319,9 +321,9 @@ acpi_status acpi_ut_release_mutex(acpi_mutex_handle mutex_id)
/*
* Mutex debug code, for internal debugging only.
*
- * Deadlock prevention. Check if this thread owns any mutexes of value
- * greater than this one. If so, the thread has violated the mutex
- * ordering rule. This indicates a coding error somewhere in
+ * Deadlock prevention. Check if this thread owns any mutexes of value
+ * greater than this one. If so, the thread has violated the mutex
+ * ordering rule. This indicates a coding error somewhere in
* the ACPI subsystem code.
*/
for (i = mutex_id; i < ACPI_NUM_MUTEX; i++) {
diff --git a/drivers/acpi/acpica/utobject.c b/drivers/acpi/acpica/utobject.c
index 655f0799a391..5c52ca78f6fa 100644
--- a/drivers/acpi/acpica/utobject.c
+++ b/drivers/acpi/acpica/utobject.c
@@ -77,7 +77,7 @@ acpi_ut_get_element_length(u8 object_type,
*
* NOTE: We always allocate the worst-case object descriptor because
* these objects are cached, and we want them to be
- * one-size-satisifies-any-request. This in itself may not be
+ * one-size-satisifies-any-request. This in itself may not be
* the most memory efficient, but the efficiency of the object
* cache should more than make up for this!
*
@@ -370,9 +370,9 @@ u8 acpi_ut_valid_internal_object(void *object)
* line_number - Caller's line number (for error output)
* component_id - Caller's component ID (for error output)
*
- * RETURN: Pointer to newly allocated object descriptor. Null on error
+ * RETURN: Pointer to newly allocated object descriptor. Null on error
*
- * DESCRIPTION: Allocate a new object descriptor. Gracefully handle
+ * DESCRIPTION: Allocate a new object descriptor. Gracefully handle
* error conditions.
*
******************************************************************************/
@@ -554,7 +554,7 @@ acpi_ut_get_simple_object_size(union acpi_operand_object *internal_object,
/*
* Account for the space required by the object rounded up to the next
- * multiple of the machine word size. This keeps each object aligned
+ * multiple of the machine word size. This keeps each object aligned
* on a machine word boundary. (preventing alignment faults on some
* machines.)
*/
diff --git a/drivers/acpi/acpica/utstate.c b/drivers/acpi/acpica/utstate.c
index a1c988260073..cee0473ba813 100644
--- a/drivers/acpi/acpica/utstate.c
+++ b/drivers/acpi/acpica/utstate.c
@@ -147,7 +147,7 @@ union acpi_generic_state *acpi_ut_pop_generic_state(union acpi_generic_state
*
* RETURN: The new state object. NULL on failure.
*
- * DESCRIPTION: Create a generic state object. Attempt to obtain one from
+ * DESCRIPTION: Create a generic state object. Attempt to obtain one from
* the global state cache; If none available, create a new one.
*
******************************************************************************/
diff --git a/drivers/acpi/acpica/uttrack.c b/drivers/acpi/acpica/uttrack.c
new file mode 100644
index 000000000000..a424a9e3fea4
--- /dev/null
+++ b/drivers/acpi/acpica/uttrack.c
@@ -0,0 +1,692 @@
+/******************************************************************************
+ *
+ * Module Name: uttrack - Memory allocation tracking routines (debug only)
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2012, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+/*
+ * These procedures are used for tracking memory leaks in the subsystem, and
+ * they get compiled out when ACPI_DBG_TRACK_ALLOCATIONS is not set.
+ *
+ * Each memory allocation is tracked via a doubly linked list. Each
+ * element contains the caller's component, module name, function name, and
+ * line number. acpi_ut_allocate and acpi_ut_allocate_zeroed call
+ * acpi_ut_track_allocation to add an element to the list; deletion
+ * occurs in the body of acpi_ut_free.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+
+#ifdef ACPI_DBG_TRACK_ALLOCATIONS
+
+#define _COMPONENT ACPI_UTILITIES
+ACPI_MODULE_NAME("uttrack")
+
+/* Local prototypes */
+static struct acpi_debug_mem_block *acpi_ut_find_allocation(struct
+ acpi_debug_mem_block
+ *allocation);
+
+static acpi_status
+acpi_ut_track_allocation(struct acpi_debug_mem_block *address,
+ acpi_size size,
+ u8 alloc_type,
+ u32 component, const char *module, u32 line);
+
+static acpi_status
+acpi_ut_remove_allocation(struct acpi_debug_mem_block *address,
+ u32 component, const char *module, u32 line);
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_create_list
+ *
+ * PARAMETERS: list_name - Ascii name for the list
+ * object_size - Size of each cached object
+ * return_cache - Where the new cache object is returned
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Create a local memory list for tracking purposes
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ut_create_list(char *list_name,
+ u16 object_size, struct acpi_memory_list **return_cache)
+{
+ struct acpi_memory_list *cache;
+
+ cache = acpi_os_allocate(sizeof(struct acpi_memory_list));
+ if (!cache) {
+ return (AE_NO_MEMORY);
+ }
+
+ ACPI_MEMSET(cache, 0, sizeof(struct acpi_memory_list));
+
+ cache->list_name = list_name;
+ cache->object_size = object_size;
+
+ *return_cache = cache;
+ return (AE_OK);
+}
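/*
 * Sketch only: the acpi_gbl_global_list referenced throughout this file is
 * expected to be created with this helper during utility initialization,
 * along these lines (the list name shown is illustrative):
 */

acpi_status status;

status = acpi_ut_create_list("Acpi-Global", 0, &acpi_gbl_global_list);
if (ACPI_FAILURE(status)) {
	/* handle the failure at initialization time */
}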
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_allocate_and_track
+ *
+ * PARAMETERS: size - Size of the allocation
+ * component - Component type of caller
+ * module - Source file name of caller
+ * line - Line number of caller
+ *
+ * RETURN: Address of the allocated memory on success, NULL on failure.
+ *
+ * DESCRIPTION: The subsystem's equivalent of malloc.
+ *
+ ******************************************************************************/
+
+void *acpi_ut_allocate_and_track(acpi_size size,
+ u32 component, const char *module, u32 line)
+{
+ struct acpi_debug_mem_block *allocation;
+ acpi_status status;
+
+ allocation =
+ acpi_ut_allocate(size + sizeof(struct acpi_debug_mem_header),
+ component, module, line);
+ if (!allocation) {
+ return (NULL);
+ }
+
+ status = acpi_ut_track_allocation(allocation, size,
+ ACPI_MEM_MALLOC, component, module,
+ line);
+ if (ACPI_FAILURE(status)) {
+ acpi_os_free(allocation);
+ return (NULL);
+ }
+
+ acpi_gbl_global_list->total_allocated++;
+ acpi_gbl_global_list->total_size += (u32)size;
+ acpi_gbl_global_list->current_total_size += (u32)size;
+ if (acpi_gbl_global_list->current_total_size >
+ acpi_gbl_global_list->max_occupied) {
+ acpi_gbl_global_list->max_occupied =
+ acpi_gbl_global_list->current_total_size;
+ }
+
+ return ((void *)&allocation->user_space);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_allocate_zeroed_and_track
+ *
+ * PARAMETERS: size - Size of the allocation
+ * component - Component type of caller
+ * module - Source file name of caller
+ * line - Line number of caller
+ *
+ * RETURN: Address of the allocated memory on success, NULL on failure.
+ *
+ * DESCRIPTION: Subsystem equivalent of calloc.
+ *
+ ******************************************************************************/
+
+void *acpi_ut_allocate_zeroed_and_track(acpi_size size,
+ u32 component,
+ const char *module, u32 line)
+{
+ struct acpi_debug_mem_block *allocation;
+ acpi_status status;
+
+ allocation =
+ acpi_ut_allocate_zeroed(size + sizeof(struct acpi_debug_mem_header),
+ component, module, line);
+ if (!allocation) {
+
+ /* Report allocation error */
+
+ ACPI_ERROR((module, line,
+ "Could not allocate size %u", (u32)size));
+ return (NULL);
+ }
+
+ status = acpi_ut_track_allocation(allocation, size,
+ ACPI_MEM_CALLOC, component, module,
+ line);
+ if (ACPI_FAILURE(status)) {
+ acpi_os_free(allocation);
+ return (NULL);
+ }
+
+ acpi_gbl_global_list->total_allocated++;
+ acpi_gbl_global_list->total_size += (u32)size;
+ acpi_gbl_global_list->current_total_size += (u32)size;
+ if (acpi_gbl_global_list->current_total_size >
+ acpi_gbl_global_list->max_occupied) {
+ acpi_gbl_global_list->max_occupied =
+ acpi_gbl_global_list->current_total_size;
+ }
+
+ return ((void *)&allocation->user_space);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_free_and_track
+ *
+ * PARAMETERS: allocation - Address of the memory to deallocate
+ * component - Component type of caller
+ * module - Source file name of caller
+ * line - Line number of caller
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Frees the memory at Allocation
+ *
+ ******************************************************************************/
+
+void
+acpi_ut_free_and_track(void *allocation,
+ u32 component, const char *module, u32 line)
+{
+ struct acpi_debug_mem_block *debug_block;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE_PTR(ut_free, allocation);
+
+ if (NULL == allocation) {
+ ACPI_ERROR((module, line, "Attempt to delete a NULL address"));
+
+ return_VOID;
+ }
+
+ debug_block = ACPI_CAST_PTR(struct acpi_debug_mem_block,
+ (((char *)allocation) -
+ sizeof(struct acpi_debug_mem_header)));
+
+ acpi_gbl_global_list->total_freed++;
+ acpi_gbl_global_list->current_total_size -= debug_block->size;
+
+ status = acpi_ut_remove_allocation(debug_block,
+ component, module, line);
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status, "Could not free memory"));
+ }
+
+ acpi_os_free(debug_block);
+ ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS, "%p freed\n", allocation));
+ return_VOID;
+}
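
The alloc/free pair above relies on a hidden-header layout: each tracked block is allocated with a debug header in front of the bytes handed back to the caller, and the free path recovers that header by stepping back from the user pointer. A minimal standalone sketch of the same pattern, with illustrative (non-ACPICA) names, assuming the header keeps the user region suitably aligned:

#include <stdlib.h>
#include <stddef.h>

struct dbg_header {
	size_t size;			/* size of the user region */
	/* the real header also records module, line and list links */
};

struct dbg_block {
	struct dbg_header header;
	char user_space;		/* first byte handed to the caller */
};

static void *dbg_alloc(size_t size)
{
	/* Reserve room for the header in front of the user region. */
	struct dbg_block *block = malloc(size + sizeof(struct dbg_header));

	if (!block)
		return NULL;

	block->header.size = size;
	return &block->user_space;
}

static void dbg_free(void *user_ptr)
{
	/* Step back over the header to get the original allocation. */
	struct dbg_block *block = (struct dbg_block *)
	    ((char *)user_ptr - sizeof(struct dbg_header));

	free(block);
}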
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_find_allocation
+ *
+ * PARAMETERS: allocation - Address of allocated memory
+ *
+ * RETURN: Three cases:
+ * 1) List is empty, NULL is returned.
+ * 2) Element was found. Returns Allocation parameter.
+ * 3) Element was not found. Returns position where it should be
+ * inserted into the list.
+ *
+ * DESCRIPTION: Searches for an element in the global allocation tracking list.
+ * If the element is not found, returns the location within the
+ * list where the element should be inserted.
+ *
+ * Note: The list is ordered by larger-to-smaller addresses.
+ *
+ * This global list is used to detect memory leaks in ACPICA as
+ * well as other issues such as an attempt to release the same
+ * internal object more than once. Although expensive as far
+ * as cpu time, this list is much more helpful for finding these
+ * types of issues than using memory leak detectors outside of
+ * the ACPICA code.
+ *
+ ******************************************************************************/
+
+static struct acpi_debug_mem_block *acpi_ut_find_allocation(struct
+ acpi_debug_mem_block
+ *allocation)
+{
+ struct acpi_debug_mem_block *element;
+
+ element = acpi_gbl_global_list->list_head;
+ if (!element) {
+ return (NULL);
+ }
+
+ /*
+ * Search for the address.
+ *
+ * Note: List is ordered by larger-to-smaller addresses, on the
+ * assumption that a new allocation usually has a larger address
+ * than previous allocations.
+ */
+ while (element > allocation) {
+
+ /* Check for end-of-list */
+
+ if (!element->next) {
+ return (element);
+ }
+
+ element = element->next;
+ }
+
+ if (element == allocation) {
+ return (element);
+ }
+
+ return (element->previous);
+}
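
The three return cases described above can be modelled with a small standalone version of the same descending-address search; the node layout and names here are illustrative only, not the ACPICA structures:

#include <stddef.h>

struct node {
	struct node *next;		/* toward smaller addresses */
	struct node *previous;		/* toward larger addresses */
};

/*
 * Returns the node itself if 'target' is already on the list, otherwise
 * the node after which 'target' would have to be inserted to keep the
 * list ordered from larger to smaller addresses.  NULL means either the
 * list is empty or 'target' belongs at the head.
 */
static struct node *find_position(struct node *head, struct node *target)
{
	struct node *element = head;

	if (!element)
		return NULL;			/* case 1: empty list */

	while (element > target) {
		if (!element->next)
			return element;		/* insert after the tail */

		element = element->next;
	}

	if (element == target)
		return element;			/* case 2: already present */

	return element->previous;		/* case 3: insertion point */
}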
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_track_allocation
+ *
+ * PARAMETERS: allocation - Address of allocated memory
+ * size - Size of the allocation
+ * alloc_type - MEM_MALLOC or MEM_CALLOC
+ * component - Component type of caller
+ * module - Source file name of caller
+ * line - Line number of caller
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Inserts an element into the global allocation tracking list.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ut_track_allocation(struct acpi_debug_mem_block *allocation,
+ acpi_size size,
+ u8 alloc_type,
+ u32 component, const char *module, u32 line)
+{
+ struct acpi_memory_list *mem_list;
+ struct acpi_debug_mem_block *element;
+ acpi_status status = AE_OK;
+
+ ACPI_FUNCTION_TRACE_PTR(ut_track_allocation, allocation);
+
+ if (acpi_gbl_disable_mem_tracking) {
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ mem_list = acpi_gbl_global_list;
+ status = acpi_ut_acquire_mutex(ACPI_MTX_MEMORY);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /*
+ * Search the global list for this address to make sure it is not
+ * already present. This will catch several kinds of problems.
+ */
+ element = acpi_ut_find_allocation(allocation);
+ if (element == allocation) {
+ ACPI_ERROR((AE_INFO,
+ "UtTrackAllocation: Allocation (%p) already present in global list!",
+ allocation));
+ goto unlock_and_exit;
+ }
+
+ /* Fill in the instance data */
+
+ allocation->size = (u32)size;
+ allocation->alloc_type = alloc_type;
+ allocation->component = component;
+ allocation->line = line;
+
+ ACPI_STRNCPY(allocation->module, module, ACPI_MAX_MODULE_NAME);
+ allocation->module[ACPI_MAX_MODULE_NAME - 1] = 0;
+
+ if (!element) {
+
+ /* Insert at list head */
+
+ if (mem_list->list_head) {
+ ((struct acpi_debug_mem_block *)(mem_list->list_head))->
+ previous = allocation;
+ }
+
+ allocation->next = mem_list->list_head;
+ allocation->previous = NULL;
+
+ mem_list->list_head = allocation;
+ } else {
+ /* Insert after element */
+
+ allocation->next = element->next;
+ allocation->previous = element;
+
+ if (element->next) {
+ (element->next)->previous = allocation;
+ }
+
+ element->next = allocation;
+ }
+
+ unlock_and_exit:
+ status = acpi_ut_release_mutex(ACPI_MTX_MEMORY);
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_remove_allocation
+ *
+ * PARAMETERS: allocation - Address of allocated memory
+ * component - Component type of caller
+ * module - Source file name of caller
+ * line - Line number of caller
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Deletes an element from the global allocation tracking list.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ut_remove_allocation(struct acpi_debug_mem_block *allocation,
+ u32 component, const char *module, u32 line)
+{
+ struct acpi_memory_list *mem_list;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(ut_remove_allocation);
+
+ if (acpi_gbl_disable_mem_tracking) {
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ mem_list = acpi_gbl_global_list;
+ if (NULL == mem_list->list_head) {
+
+ /* No allocations! */
+
+ ACPI_ERROR((module, line,
+ "Empty allocation list, nothing to free!"));
+
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_MEMORY);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Unlink */
+
+ if (allocation->previous) {
+ (allocation->previous)->next = allocation->next;
+ } else {
+ mem_list->list_head = allocation->next;
+ }
+
+ if (allocation->next) {
+ (allocation->next)->previous = allocation->previous;
+ }
+
+ /* Mark the segment as deleted */
+
+ ACPI_MEMSET(&allocation->user_space, 0xEA, allocation->size);
+
+ ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS, "Freeing size 0%X\n",
+ allocation->size));
+
+ status = acpi_ut_release_mutex(ACPI_MTX_MEMORY);
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_dump_allocation_info
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Print some info about the outstanding allocations.
+ *
+ ******************************************************************************/
+
+void acpi_ut_dump_allocation_info(void)
+{
+/*
+ struct acpi_memory_list *mem_list;
+*/
+
+ ACPI_FUNCTION_TRACE(ut_dump_allocation_info);
+
+/*
+ ACPI_DEBUG_PRINT (TRACE_ALLOCATIONS | TRACE_TABLES,
+ ("%30s: %4d (%3d Kb)\n", "Current allocations",
+ mem_list->current_count,
+ ROUND_UP_TO_1K (mem_list->current_size)));
+
+ ACPI_DEBUG_PRINT (TRACE_ALLOCATIONS | TRACE_TABLES,
+ ("%30s: %4d (%3d Kb)\n", "Max concurrent allocations",
+ mem_list->max_concurrent_count,
+ ROUND_UP_TO_1K (mem_list->max_concurrent_size)));
+
+ ACPI_DEBUG_PRINT (TRACE_ALLOCATIONS | TRACE_TABLES,
+ ("%30s: %4d (%3d Kb)\n", "Total (all) internal objects",
+ running_object_count,
+ ROUND_UP_TO_1K (running_object_size)));
+
+ ACPI_DEBUG_PRINT (TRACE_ALLOCATIONS | TRACE_TABLES,
+ ("%30s: %4d (%3d Kb)\n", "Total (all) allocations",
+ running_alloc_count,
+ ROUND_UP_TO_1K (running_alloc_size)));
+
+ ACPI_DEBUG_PRINT (TRACE_ALLOCATIONS | TRACE_TABLES,
+ ("%30s: %4d (%3d Kb)\n", "Current Nodes",
+ acpi_gbl_current_node_count,
+ ROUND_UP_TO_1K (acpi_gbl_current_node_size)));
+
+ ACPI_DEBUG_PRINT (TRACE_ALLOCATIONS | TRACE_TABLES,
+ ("%30s: %4d (%3d Kb)\n", "Max Nodes",
+ acpi_gbl_max_concurrent_node_count,
+ ROUND_UP_TO_1K ((acpi_gbl_max_concurrent_node_count *
+ sizeof (struct acpi_namespace_node)))));
+*/
+ return_VOID;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_dump_allocations
+ *
+ * PARAMETERS: component - Component(s) to dump info for.
+ * module - Module to dump info for. NULL means all.
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Print a list of all outstanding allocations.
+ *
+ ******************************************************************************/
+
+void acpi_ut_dump_allocations(u32 component, const char *module)
+{
+ struct acpi_debug_mem_block *element;
+ union acpi_descriptor *descriptor;
+ u32 num_outstanding = 0;
+ u8 descriptor_type;
+
+ ACPI_FUNCTION_TRACE(ut_dump_allocations);
+
+ if (acpi_gbl_disable_mem_tracking) {
+ return_VOID;
+ }
+
+ /*
+ * Walk the allocation list.
+ */
+ if (ACPI_FAILURE(acpi_ut_acquire_mutex(ACPI_MTX_MEMORY))) {
+ return_VOID;
+ }
+
+ element = acpi_gbl_global_list->list_head;
+ while (element) {
+ if ((element->component & component) &&
+ ((module == NULL)
+ || (0 == ACPI_STRCMP(module, element->module)))) {
+ descriptor =
+ ACPI_CAST_PTR(union acpi_descriptor,
+ &element->user_space);
+
+ if (element->size <
+ sizeof(struct acpi_common_descriptor)) {
+ acpi_os_printf("%p Length 0x%04X %9.9s-%u "
+ "[Not a Descriptor - too small]\n",
+ descriptor, element->size,
+ element->module, element->line);
+ } else {
+ /* Ignore allocated objects that are in a cache */
+
+ if (ACPI_GET_DESCRIPTOR_TYPE(descriptor) !=
+ ACPI_DESC_TYPE_CACHED) {
+ acpi_os_printf
+ ("%p Length 0x%04X %9.9s-%u [%s] ",
+ descriptor, element->size,
+ element->module, element->line,
+ acpi_ut_get_descriptor_name
+ (descriptor));
+
+ /* Validate the descriptor type using Type field and length */
+
+ descriptor_type = 0; /* Not a valid descriptor type */
+
+ switch (ACPI_GET_DESCRIPTOR_TYPE
+ (descriptor)) {
+ case ACPI_DESC_TYPE_OPERAND:
+ if (element->size ==
+ sizeof(union
+ acpi_operand_object))
+ {
+ descriptor_type =
+ ACPI_DESC_TYPE_OPERAND;
+ }
+ break;
+
+ case ACPI_DESC_TYPE_PARSER:
+ if (element->size ==
+ sizeof(union
+ acpi_parse_object)) {
+ descriptor_type =
+ ACPI_DESC_TYPE_PARSER;
+ }
+ break;
+
+ case ACPI_DESC_TYPE_NAMED:
+ if (element->size ==
+ sizeof(struct
+ acpi_namespace_node))
+ {
+ descriptor_type =
+ ACPI_DESC_TYPE_NAMED;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ /* Display additional info for the major descriptor types */
+
+ switch (descriptor_type) {
+ case ACPI_DESC_TYPE_OPERAND:
+ acpi_os_printf
+ ("%12.12s RefCount 0x%04X\n",
+ acpi_ut_get_type_name
+ (descriptor->object.common.
+ type),
+ descriptor->object.common.
+ reference_count);
+ break;
+
+ case ACPI_DESC_TYPE_PARSER:
+ acpi_os_printf
+ ("AmlOpcode 0x%04hX\n",
+ descriptor->op.asl.
+ aml_opcode);
+ break;
+
+ case ACPI_DESC_TYPE_NAMED:
+ acpi_os_printf("%4.4s\n",
+ acpi_ut_get_node_name
+ (&descriptor->
+ node));
+ break;
+
+ default:
+ acpi_os_printf("\n");
+ break;
+ }
+ }
+ }
+
+ num_outstanding++;
+ }
+
+ element = element->next;
+ }
+
+ (void)acpi_ut_release_mutex(ACPI_MTX_MEMORY);
+
+ /* Print summary */
+
+ if (!num_outstanding) {
+ ACPI_INFO((AE_INFO, "No outstanding allocations"));
+ } else {
+ ACPI_ERROR((AE_INFO, "%u(0x%X) Outstanding allocations",
+ num_outstanding, num_outstanding));
+ }
+
+ return_VOID;
+}
+
+#endif /* ACPI_DBG_TRACK_ALLOCATIONS */
diff --git a/drivers/acpi/acpica/utxface.c b/drivers/acpi/acpica/utxface.c
index b09632b4f5b3..390db0ca5e2e 100644
--- a/drivers/acpi/acpica/utxface.c
+++ b/drivers/acpi/acpica/utxface.c
@@ -147,7 +147,7 @@ ACPI_EXPORT_SYMBOL(acpi_subsystem_status)
* RETURN: status - the status of the call
*
* DESCRIPTION: This function is called to get information about the current
- * state of the ACPI subsystem. It will return system information
+ * state of the ACPI subsystem. It will return system information
* in the out_buffer.
*
* If the function fails an appropriate status will be returned
@@ -238,7 +238,7 @@ acpi_install_initialization_handler(acpi_init_handler handler, u32 function)
}
acpi_gbl_init_handler = handler;
- return AE_OK;
+ return (AE_OK);
}
ACPI_EXPORT_SYMBOL(acpi_install_initialization_handler)
@@ -263,6 +263,7 @@ acpi_status acpi_purge_cached_objects(void)
(void)acpi_os_purge_cache(acpi_gbl_operand_cache);
(void)acpi_os_purge_cache(acpi_gbl_ps_node_cache);
(void)acpi_os_purge_cache(acpi_gbl_ps_node_ext_cache);
+
return_ACPI_STATUS(AE_OK);
}
diff --git a/drivers/acpi/acpica/utxferror.c b/drivers/acpi/acpica/utxferror.c
index 6d63cc39b9ae..d4d3826140d8 100644
--- a/drivers/acpi/acpica/utxferror.c
+++ b/drivers/acpi/acpica/utxferror.c
@@ -408,7 +408,7 @@ acpi_ut_namespace_error(const char *module_name,
ACPI_MOVE_32_TO_32(&bad_name,
ACPI_CAST_PTR(u32, internal_name));
- acpi_os_printf("[0x%4.4X] (NON-ASCII)", bad_name);
+ acpi_os_printf("[0x%.8X] (NON-ASCII)", bad_name);
} else {
/* Convert path to external format */
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index 1599566ed1fe..da93c003e953 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -994,7 +994,7 @@ err:
return rc;
}
-static int __devexit ghes_remove(struct platform_device *ghes_dev)
+static int ghes_remove(struct platform_device *ghes_dev)
{
struct ghes *ghes;
struct acpi_hest_generic *generic;
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 45e3e1759fb8..7efaeaa53b88 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -34,6 +34,7 @@
#include <linux/dmi.h>
#include <linux/slab.h>
#include <linux/suspend.h>
+#include <asm/unaligned.h>
#ifdef CONFIG_ACPI_PROCFS_POWER
#include <linux/proc_fs.h>
@@ -95,6 +96,18 @@ enum {
ACPI_BATTERY_ALARM_PRESENT,
ACPI_BATTERY_XINFO_PRESENT,
ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY,
+ /* On Lenovo Thinkpad models from 2010 and 2011, the power unit
+ switches between mWh and mAh depending on whether the system
+ is running on battery or not. When mAh is the unit, most
+ reported values are incorrect and need to be adjusted by
+ 10000/design_voltage. Verified on x201, t410, t410s, and x220.
+ Pre-2010 and 2012 models appear to always report in mWh and
+ are thus unaffected (tested with t42, t61, t500, x200, x300,
+ and x230). Also, in mid-2012 Lenovo issued a BIOS update for
+ the 2011 models that fixes the issue (tested on x220 with a
+ post-1.29 BIOS), but as of Nov. 2012, no such update is
+ available for the 2010 models. */
+ ACPI_BATTERY_QUIRK_THINKPAD_MAH,
};
struct acpi_battery {
@@ -438,6 +451,21 @@ static int acpi_battery_get_info(struct acpi_battery *battery)
kfree(buffer.pointer);
if (test_bit(ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, &battery->flags))
battery->full_charge_capacity = battery->design_capacity;
+ if (test_bit(ACPI_BATTERY_QUIRK_THINKPAD_MAH, &battery->flags) &&
+ battery->power_unit && battery->design_voltage) {
+ battery->design_capacity = battery->design_capacity *
+ 10000 / battery->design_voltage;
+ battery->full_charge_capacity = battery->full_charge_capacity *
+ 10000 / battery->design_voltage;
+ battery->design_capacity_warning =
+ battery->design_capacity_warning *
+ 10000 / battery->design_voltage;
+ /* Curiously, design_capacity_low, unlike the rest of them,
+ is correct. */
+ /* capacity_granularity_* equal 1 on the systems tested, so
+ it's impossible to tell if they would need an adjustment
+ or not if their values were higher. */
+ }
return result;
}
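
The adjustment applied above (and again below for capacity_now and in acpi_battery_quirks()) always has the same shape, so it can be read as one helper; the sample figures in the comment are hypothetical, chosen only to show the arithmetic:

/*
 * value and design_voltage are the raw ACPI figures; design_voltage is
 * in mV.  Example: a reported design_capacity of 62160 with an 11100 mV
 * design voltage becomes 62160 * 10000 / 11100 = 56000.
 */
static int thinkpad_mah_fixup(int value, int design_voltage)
{
	if (!design_voltage)
		return value;	/* quirk is only applied when non-zero */

	return value * 10000 / design_voltage;
}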
@@ -486,6 +514,11 @@ static int acpi_battery_get_state(struct acpi_battery *battery)
&& battery->capacity_now >= 0 && battery->capacity_now <= 100)
battery->capacity_now = (battery->capacity_now *
battery->full_charge_capacity) / 100;
+ if (test_bit(ACPI_BATTERY_QUIRK_THINKPAD_MAH, &battery->flags) &&
+ battery->power_unit && battery->design_voltage) {
+ battery->capacity_now = battery->capacity_now *
+ 10000 / battery->design_voltage;
+ }
return result;
}
@@ -595,6 +628,24 @@ static void sysfs_remove_battery(struct acpi_battery *battery)
mutex_unlock(&battery->sysfs_lock);
}
+static void find_battery(const struct dmi_header *dm, void *private)
+{
+ struct acpi_battery *battery = (struct acpi_battery *)private;
+ /* Note: the hardcoded offsets below have been extracted from
+ the source code of dmidecode. */
+ if (dm->type == DMI_ENTRY_PORTABLE_BATTERY && dm->length >= 8) {
+ const u8 *dmi_data = (const u8 *)(dm + 1);
+ int dmi_capacity = get_unaligned((const u16 *)(dmi_data + 6));
+ if (dm->length >= 18)
+ dmi_capacity *= dmi_data[17];
+ if (battery->design_capacity * battery->design_voltage / 1000
+ != dmi_capacity &&
+ battery->design_capacity * 10 == dmi_capacity)
+ set_bit(ACPI_BATTERY_QUIRK_THINKPAD_MAH,
+ &battery->flags);
+ }
+}
+
/*
* According to the ACPI spec, some kinds of primary batteries can
* report percentage battery remaining capacity directly to OS.
@@ -620,6 +671,32 @@ static void acpi_battery_quirks(struct acpi_battery *battery)
battery->capacity_now = (battery->capacity_now *
battery->full_charge_capacity) / 100;
}
+
+ if (test_bit(ACPI_BATTERY_QUIRK_THINKPAD_MAH, &battery->flags))
+ return;
+
+ if (battery->power_unit && dmi_name_in_vendors("LENOVO")) {
+ const char *s;
+ s = dmi_get_system_info(DMI_PRODUCT_VERSION);
+ if (s && !strnicmp(s, "ThinkPad", 8)) {
+ dmi_walk(find_battery, battery);
+ if (test_bit(ACPI_BATTERY_QUIRK_THINKPAD_MAH,
+ &battery->flags) &&
+ battery->design_voltage) {
+ battery->design_capacity =
+ battery->design_capacity *
+ 10000 / battery->design_voltage;
+ battery->full_charge_capacity =
+ battery->full_charge_capacity *
+ 10000 / battery->design_voltage;
+ battery->design_capacity_warning =
+ battery->design_capacity_warning *
+ 10000 / battery->design_voltage;
+ battery->capacity_now = battery->capacity_now *
+ 10000 / battery->design_voltage;
+ }
+ }
+ }
}
static int acpi_battery_update(struct acpi_battery *battery)
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index d59175efc428..1f0d457ecbcf 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -257,7 +257,15 @@ static int __acpi_bus_get_power(struct acpi_device *device, int *state)
}
-static int __acpi_bus_set_power(struct acpi_device *device, int state)
+/**
+ * acpi_device_set_power - Set power state of an ACPI device.
+ * @device: Device to set the power state of.
+ * @state: New power state to set.
+ *
+ * Callers must ensure that the device is power manageable before using this
+ * function.
+ */
+int acpi_device_set_power(struct acpi_device *device, int state)
{
int result = 0;
acpi_status status = AE_OK;
@@ -298,6 +306,12 @@ static int __acpi_bus_set_power(struct acpi_device *device, int state)
* a lower-powered state.
*/
if (state < device->power.state) {
+ if (device->power.state >= ACPI_STATE_D3_HOT &&
+ state != ACPI_STATE_D0) {
+ printk(KERN_WARNING PREFIX
+ "Cannot transition to non-D0 state from D3\n");
+ return -ENODEV;
+ }
if (device->power.flags.power_resources) {
result = acpi_power_transition(device, state);
if (result)
@@ -341,6 +355,7 @@ static int __acpi_bus_set_power(struct acpi_device *device, int state)
return result;
}
+EXPORT_SYMBOL(acpi_device_set_power);
int acpi_bus_set_power(acpi_handle handle, int state)
@@ -359,7 +374,7 @@ int acpi_bus_set_power(acpi_handle handle, int state)
return -ENODEV;
}
- return __acpi_bus_set_power(device, state);
+ return acpi_device_set_power(device, state);
}
EXPORT_SYMBOL(acpi_bus_set_power);
@@ -402,7 +417,7 @@ int acpi_bus_update_power(acpi_handle handle, int *state_p)
if (result)
return result;
- result = __acpi_bus_set_power(device, state);
+ result = acpi_device_set_power(device, state);
if (!result && state_p)
*state_p = state;
diff --git a/drivers/acpi/container.c b/drivers/acpi/container.c
index 1f9f7d7d7bc5..811910b50b75 100644
--- a/drivers/acpi/container.c
+++ b/drivers/acpi/container.c
@@ -92,17 +92,24 @@ static int is_device_present(acpi_handle handle)
return ((sta & ACPI_STA_DEVICE_PRESENT) == ACPI_STA_DEVICE_PRESENT);
}
+static bool is_container_device(const char *hid)
+{
+ const struct acpi_device_id *container_id;
+
+ for (container_id = container_device_ids;
+ container_id->id[0]; container_id++) {
+ if (!strcmp((char *)container_id->id, hid))
+ return true;
+ }
+
+ return false;
+}
+
/*******************************************************************/
static int acpi_container_add(struct acpi_device *device)
{
struct acpi_container *container;
-
- if (!device) {
- printk(KERN_ERR PREFIX "device is NULL\n");
- return -EINVAL;
- }
-
container = kzalloc(sizeof(struct acpi_container), GFP_KERNEL);
if (!container)
return -ENOMEM;
@@ -164,7 +171,7 @@ static void container_notify_cb(acpi_handle handle, u32 type, void *context)
case ACPI_NOTIFY_BUS_CHECK:
/* Fall through */
case ACPI_NOTIFY_DEVICE_CHECK:
- printk(KERN_WARNING "Container driver received %s event\n",
+ pr_debug("Container driver received %s event\n",
(type == ACPI_NOTIFY_BUS_CHECK) ?
"ACPI_NOTIFY_BUS_CHECK" : "ACPI_NOTIFY_DEVICE_CHECK");
@@ -185,7 +192,7 @@ static void container_notify_cb(acpi_handle handle, u32 type, void *context)
result = container_device_add(&device, handle);
if (result) {
- printk(KERN_WARNING "Failed to add container\n");
+ acpi_handle_warn(handle, "Failed to add container\n");
break;
}
@@ -232,10 +239,8 @@ container_walk_namespace_cb(acpi_handle handle,
goto end;
}
- if (strcmp(hid, "ACPI0004") && strcmp(hid, "PNP0A05") &&
- strcmp(hid, "PNP0A06")) {
+ if (!is_container_device(hid))
goto end;
- }
switch (*action) {
case INSTALL_NOTIFY_HANDLER:
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
new file mode 100644
index 000000000000..f09dc987cf17
--- /dev/null
+++ b/drivers/acpi/device_pm.c
@@ -0,0 +1,668 @@
+/*
+ * drivers/acpi/device_pm.c - ACPI device power management routines.
+ *
+ * Copyright (C) 2012, Intel Corp.
+ * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+#include <linux/device.h>
+#include <linux/export.h>
+#include <linux/mutex.h>
+#include <linux/pm_qos.h>
+#include <linux/pm_runtime.h>
+
+#include <acpi/acpi.h>
+#include <acpi/acpi_bus.h>
+
+static DEFINE_MUTEX(acpi_pm_notifier_lock);
+
+/**
+ * acpi_add_pm_notifier - Register PM notifier for given ACPI device.
+ * @adev: ACPI device to add the notifier for.
+ * @handler: Notification handler to install.
+ * @context: Context information to pass to the notifier routine.
+ *
+ * NOTE: @adev need not be a run-wake or wakeup device to be a valid source of
+ * PM wakeup events. For example, wakeup events may be generated for bridges
+ * if one of the devices below the bridge is signaling wakeup, even if the
+ * bridge itself doesn't have a wakeup GPE associated with it.
+ */
+acpi_status acpi_add_pm_notifier(struct acpi_device *adev,
+ acpi_notify_handler handler, void *context)
+{
+ acpi_status status = AE_ALREADY_EXISTS;
+
+ mutex_lock(&acpi_pm_notifier_lock);
+
+ if (adev->wakeup.flags.notifier_present)
+ goto out;
+
+ status = acpi_install_notify_handler(adev->handle,
+ ACPI_SYSTEM_NOTIFY,
+ handler, context);
+ if (ACPI_FAILURE(status))
+ goto out;
+
+ adev->wakeup.flags.notifier_present = true;
+
+ out:
+ mutex_unlock(&acpi_pm_notifier_lock);
+ return status;
+}
+
+/**
+ * acpi_remove_pm_notifier - Unregister PM notifier from given ACPI device.
+ * @adev: ACPI device to remove the notifier from.
+ * @handler: Notification handler to remove.
+ */
+acpi_status acpi_remove_pm_notifier(struct acpi_device *adev,
+ acpi_notify_handler handler)
+{
+ acpi_status status = AE_BAD_PARAMETER;
+
+ mutex_lock(&acpi_pm_notifier_lock);
+
+ if (!adev->wakeup.flags.notifier_present)
+ goto out;
+
+ status = acpi_remove_notify_handler(adev->handle,
+ ACPI_SYSTEM_NOTIFY,
+ handler);
+ if (ACPI_FAILURE(status))
+ goto out;
+
+ adev->wakeup.flags.notifier_present = false;
+
+ out:
+ mutex_unlock(&acpi_pm_notifier_lock);
+ return status;
+}
+
+/**
+ * acpi_device_power_state - Get preferred power state of ACPI device.
+ * @dev: Device whose preferred target power state to return.
+ * @adev: ACPI device node corresponding to @dev.
+ * @target_state: System state to match the resultant device state.
+ * @d_max_in: Deepest low-power state to take into consideration.
+ * @d_min_p: Location to store the upper limit of the allowed states range.
+ * Return value: Preferred power state of the device on success, -ENODEV
+ * (if there's no 'struct acpi_device' for @dev) or -EINVAL on failure
+ *
+ * Find the lowest power (highest number) ACPI device power state that the
+ * device can be in while the system is in the state represented by
+ * @target_state. If @d_min_p is set, the highest power (lowest number) device
+ * power state that @dev can be in for the given system sleep state is stored
+ * at the location pointed to by it.
+ *
+ * Callers must ensure that @dev and @adev are valid pointers and that @adev
+ * actually corresponds to @dev before using this function.
+ */
+int acpi_device_power_state(struct device *dev, struct acpi_device *adev,
+ u32 target_state, int d_max_in, int *d_min_p)
+{
+ char acpi_method[] = "_SxD";
+ unsigned long long d_min, d_max;
+ bool wakeup = false;
+
+ if (d_max_in < ACPI_STATE_D0 || d_max_in > ACPI_STATE_D3)
+ return -EINVAL;
+
+ if (d_max_in > ACPI_STATE_D3_HOT) {
+ enum pm_qos_flags_status stat;
+
+ stat = dev_pm_qos_flags(dev, PM_QOS_FLAG_NO_POWER_OFF);
+ if (stat == PM_QOS_FLAGS_ALL)
+ d_max_in = ACPI_STATE_D3_HOT;
+ }
+
+ acpi_method[2] = '0' + target_state;
+ /*
+ * If the sleep state is S0, the lowest limit from ACPI is D3,
+ * but if the device has _S0W, we will use the value from _S0W
+ * as the lowest limit from ACPI. Finally, we will constrain
+ * the lowest limit with the specified one.
+ */
+ d_min = ACPI_STATE_D0;
+ d_max = ACPI_STATE_D3;
+
+ /*
+ * If present, _SxD methods return the minimum D-state (highest power
+ * state) we can use for the corresponding S-states. Otherwise, the
+ * minimum D-state is D0 (ACPI 3.x).
+ *
+ * NOTE: We rely on acpi_evaluate_integer() not clobbering the integer
+ * provided -- that's our fault recovery, we ignore retval.
+ */
+ if (target_state > ACPI_STATE_S0) {
+ acpi_evaluate_integer(adev->handle, acpi_method, NULL, &d_min);
+ wakeup = device_may_wakeup(dev) && adev->wakeup.flags.valid
+ && adev->wakeup.sleep_state >= target_state;
+ } else if (dev_pm_qos_flags(dev, PM_QOS_FLAG_REMOTE_WAKEUP) !=
+ PM_QOS_FLAGS_NONE) {
+ wakeup = adev->wakeup.flags.valid;
+ }
+
+ /*
+ * If _PRW says we can wake up the system from the target sleep state,
+ * the D-state returned by _SxD is sufficient for that (we assume a
+ * wakeup-aware driver if wake is set). Still, if _SxW exists
+ * (ACPI 3.x), it should return the maximum (lowest power) D-state that
+ * can wake the system. _S0W may be valid, too.
+ */
+ if (wakeup) {
+ acpi_status status;
+
+ acpi_method[3] = 'W';
+ status = acpi_evaluate_integer(adev->handle, acpi_method, NULL,
+ &d_max);
+ if (ACPI_FAILURE(status)) {
+ if (target_state != ACPI_STATE_S0 ||
+ status != AE_NOT_FOUND)
+ d_max = d_min;
+ } else if (d_max < d_min) {
+ /* Warn the user of the broken DSDT */
+ printk(KERN_WARNING "ACPI: Wrong value from %s\n",
+ acpi_method);
+ /* Sanitize it */
+ d_min = d_max;
+ }
+ }
+
+ if (d_max_in < d_min)
+ return -EINVAL;
+ if (d_min_p)
+ *d_min_p = d_min;
+ /* constrain d_max with specified lowest limit (max number) */
+ if (d_max > d_max_in) {
+ for (d_max = d_max_in; d_max > d_min; d_max--) {
+ if (adev->power.states[d_max].flags.valid)
+ break;
+ }
+ }
+ return d_max;
+}
+EXPORT_SYMBOL_GPL(acpi_device_power_state);
+
+/**
+ * acpi_pm_device_sleep_state - Get preferred power state of ACPI device.
+ * @dev: Device whose preferred target power state to return.
+ * @d_min_p: Location to store the upper limit of the allowed states range.
+ * @d_max_in: Deepest low-power state to take into consideration.
+ * Return value: Preferred power state of the device on success, -ENODEV
+ * (if there's no 'struct acpi_device' for @dev) or -EINVAL on failure
+ *
+ * The caller must ensure that @dev is valid before using this function.
+ */
+int acpi_pm_device_sleep_state(struct device *dev, int *d_min_p, int d_max_in)
+{
+ acpi_handle handle = DEVICE_ACPI_HANDLE(dev);
+ struct acpi_device *adev;
+
+ if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &adev))) {
+ dev_dbg(dev, "ACPI handle without context in %s!\n", __func__);
+ return -ENODEV;
+ }
+
+ return acpi_device_power_state(dev, adev, acpi_target_system_state(),
+ d_max_in, d_min_p);
+}
+EXPORT_SYMBOL(acpi_pm_device_sleep_state);
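
As a usage sketch (the driver and callback names are hypothetical, error handling trimmed), a system-suspend path could query the allowed D-state range like this:

#include <linux/acpi.h>
#include <linux/device.h>

static int foo_suspend(struct device *dev)
{
	int d_min, d_max;

	/* Consider states down to D3; d_min receives the upper limit. */
	d_max = acpi_pm_device_sleep_state(dev, &d_min, ACPI_STATE_D3);
	if (d_max < 0)
		return 0;	/* no ACPI companion, nothing to decide */

	/* Put the device into a state between d_min and d_max here. */
	return 0;
}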
+
+#ifdef CONFIG_PM_RUNTIME
+/**
+ * acpi_wakeup_device - Wakeup notification handler for ACPI devices.
+ * @handle: ACPI handle of the device the notification is for.
+ * @event: Type of the signaled event.
+ * @context: Device corresponding to @handle.
+ */
+static void acpi_wakeup_device(acpi_handle handle, u32 event, void *context)
+{
+ struct device *dev = context;
+
+ if (event == ACPI_NOTIFY_DEVICE_WAKE && dev) {
+ pm_wakeup_event(dev, 0);
+ pm_runtime_resume(dev);
+ }
+}
+
+/**
+ * __acpi_device_run_wake - Enable/disable runtime remote wakeup for device.
+ * @adev: ACPI device to enable/disable the remote wakeup for.
+ * @enable: Whether to enable or disable the wakeup functionality.
+ *
+ * Enable/disable the GPE associated with @adev so that it can generate
+ * wakeup signals for the device in response to external (remote) events and
+ * enable/disable device wakeup power.
+ *
+ * Callers must ensure that @adev is a valid ACPI device node before executing
+ * this function.
+ */
+int __acpi_device_run_wake(struct acpi_device *adev, bool enable)
+{
+ struct acpi_device_wakeup *wakeup = &adev->wakeup;
+
+ if (enable) {
+ acpi_status res;
+ int error;
+
+ error = acpi_enable_wakeup_device_power(adev, ACPI_STATE_S0);
+ if (error)
+ return error;
+
+ res = acpi_enable_gpe(wakeup->gpe_device, wakeup->gpe_number);
+ if (ACPI_FAILURE(res)) {
+ acpi_disable_wakeup_device_power(adev);
+ return -EIO;
+ }
+ } else {
+ acpi_disable_gpe(wakeup->gpe_device, wakeup->gpe_number);
+ acpi_disable_wakeup_device_power(adev);
+ }
+ return 0;
+}
+
+/**
+ * acpi_pm_device_run_wake - Enable/disable remote wakeup for given device.
+ * @phys_dev: Device to enable/disable remote wakeup for.
+ * @enable: Whether to enable or disable the wakeup functionality.
+ */
+int acpi_pm_device_run_wake(struct device *phys_dev, bool enable)
+{
+ struct acpi_device *adev;
+ acpi_handle handle;
+
+ if (!device_run_wake(phys_dev))
+ return -EINVAL;
+
+ handle = DEVICE_ACPI_HANDLE(phys_dev);
+ if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &adev))) {
+ dev_dbg(phys_dev, "ACPI handle without context in %s!\n",
+ __func__);
+ return -ENODEV;
+ }
+
+ return __acpi_device_run_wake(adev, enable);
+}
+EXPORT_SYMBOL(acpi_pm_device_run_wake);
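
A hedged sketch of how a driver's runtime PM callbacks might pair these calls (driver structure hypothetical, device-specific steps elided):

#include <linux/acpi.h>
#include <linux/pm_runtime.h>

static int foo_runtime_suspend(struct device *dev)
{
	/* Arm the wakeup GPE before dropping to a low-power state. */
	if (device_run_wake(dev))
		acpi_pm_device_run_wake(dev, true);

	/* ...put the device into a low-power state... */
	return 0;
}

static int foo_runtime_resume(struct device *dev)
{
	/* ...bring the device back to full power... */

	/* Disarm remote wakeup once the device is active again. */
	if (device_run_wake(dev))
		acpi_pm_device_run_wake(dev, false);

	return 0;
}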
+#else
+static inline void acpi_wakeup_device(acpi_handle handle, u32 event,
+ void *context) {}
+#endif /* CONFIG_PM_RUNTIME */
+
+#ifdef CONFIG_PM_SLEEP
+/**
+ * __acpi_device_sleep_wake - Enable or disable device to wake up the system.
+ * @adev: ACPI device to enable/disable to wake up the system.
+ * @target_state: System state the device is supposed to wake up from.
+ * @enable: Whether to enable or disable @adev to wake up the system.
+ */
+int __acpi_device_sleep_wake(struct acpi_device *adev, u32 target_state,
+ bool enable)
+{
+ return enable ?
+ acpi_enable_wakeup_device_power(adev, target_state) :
+ acpi_disable_wakeup_device_power(adev);
+}
+
+/**
+ * acpi_pm_device_sleep_wake - Enable or disable device to wake up the system.
+ * @dev: Device to enable/disable to wake up the system from sleep states.
+ * @enable: Whether to enable or disable @dev to wake up the system.
+ */
+int acpi_pm_device_sleep_wake(struct device *dev, bool enable)
+{
+ acpi_handle handle;
+ struct acpi_device *adev;
+ int error;
+
+ if (!device_can_wakeup(dev))
+ return -EINVAL;
+
+ handle = DEVICE_ACPI_HANDLE(dev);
+ if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &adev))) {
+ dev_dbg(dev, "ACPI handle without context in %s!\n", __func__);
+ return -ENODEV;
+ }
+
+ error = __acpi_device_sleep_wake(adev, acpi_target_system_state(),
+ enable);
+ if (!error)
+ dev_info(dev, "System wakeup %s by ACPI\n",
+ enable ? "enabled" : "disabled");
+
+ return error;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+/**
+ * acpi_dev_pm_get_node - Get ACPI device node for the given physical device.
+ * @dev: Device to get the ACPI node for.
+ */
+static struct acpi_device *acpi_dev_pm_get_node(struct device *dev)
+{
+ acpi_handle handle = DEVICE_ACPI_HANDLE(dev);
+ struct acpi_device *adev;
+
+ return handle && ACPI_SUCCESS(acpi_bus_get_device(handle, &adev)) ?
+ adev : NULL;
+}
+
+/**
+ * acpi_dev_pm_low_power - Put ACPI device into a low-power state.
+ * @dev: Device to put into a low-power state.
+ * @adev: ACPI device node corresponding to @dev.
+ * @system_state: System state to choose the device state for.
+ */
+static int acpi_dev_pm_low_power(struct device *dev, struct acpi_device *adev,
+ u32 system_state)
+{
+ int power_state;
+
+ if (!acpi_device_power_manageable(adev))
+ return 0;
+
+ power_state = acpi_device_power_state(dev, adev, system_state,
+ ACPI_STATE_D3, NULL);
+ if (power_state < ACPI_STATE_D0 || power_state > ACPI_STATE_D3)
+ return -EIO;
+
+ return acpi_device_set_power(adev, power_state);
+}
+
+/**
+ * acpi_dev_pm_full_power - Put ACPI device into the full-power state.
+ * @adev: ACPI device node to put into the full-power state.
+ */
+static int acpi_dev_pm_full_power(struct acpi_device *adev)
+{
+ return acpi_device_power_manageable(adev) ?
+ acpi_device_set_power(adev, ACPI_STATE_D0) : 0;
+}
+
+#ifdef CONFIG_PM_RUNTIME
+/**
+ * acpi_dev_runtime_suspend - Put device into a low-power state using ACPI.
+ * @dev: Device to put into a low-power state.
+ *
+ * Put the given device into a runtime low-power state using the standard ACPI
+ * mechanism. Set up remote wakeup if desired, choose the state to put the
+ * device into (this checks if remote wakeup is expected to work too), and set
+ * the power state of the device.
+ */
+int acpi_dev_runtime_suspend(struct device *dev)
+{
+ struct acpi_device *adev = acpi_dev_pm_get_node(dev);
+ bool remote_wakeup;
+ int error;
+
+ if (!adev)
+ return 0;
+
+ remote_wakeup = dev_pm_qos_flags(dev, PM_QOS_FLAG_REMOTE_WAKEUP) >
+ PM_QOS_FLAGS_NONE;
+ error = __acpi_device_run_wake(adev, remote_wakeup);
+ if (remote_wakeup && error)
+ return -EAGAIN;
+
+ error = acpi_dev_pm_low_power(dev, adev, ACPI_STATE_S0);
+ if (error)
+ __acpi_device_run_wake(adev, false);
+
+ return error;
+}
+EXPORT_SYMBOL_GPL(acpi_dev_runtime_suspend);
+
+/**
+ * acpi_dev_runtime_resume - Put device into the full-power state using ACPI.
+ * @dev: Device to put into the full-power state.
+ *
+ * Put the given device into the full-power state using the standard ACPI
+ * mechanism at run time. Set the power state of the device to ACPI D0 and
+ * disable remote wakeup.
+ */
+int acpi_dev_runtime_resume(struct device *dev)
+{
+ struct acpi_device *adev = acpi_dev_pm_get_node(dev);
+ int error;
+
+ if (!adev)
+ return 0;
+
+ error = acpi_dev_pm_full_power(adev);
+ __acpi_device_run_wake(adev, false);
+ return error;
+}
+EXPORT_SYMBOL_GPL(acpi_dev_runtime_resume);
+
+/**
+ * acpi_subsys_runtime_suspend - Suspend device using ACPI.
+ * @dev: Device to suspend.
+ *
+ * Carry out the generic runtime suspend procedure for @dev and use ACPI to put
+ * it into a runtime low-power state.
+ */
+int acpi_subsys_runtime_suspend(struct device *dev)
+{
+ int ret = pm_generic_runtime_suspend(dev);
+ return ret ? ret : acpi_dev_runtime_suspend(dev);
+}
+EXPORT_SYMBOL_GPL(acpi_subsys_runtime_suspend);
+
+/**
+ * acpi_subsys_runtime_resume - Resume device using ACPI.
+ * @dev: Device to resume.
+ *
+ * Use ACPI to put the given device into the full-power state and carry out the
+ * generic runtime resume procedure for it.
+ */
+int acpi_subsys_runtime_resume(struct device *dev)
+{
+ int ret = acpi_dev_runtime_resume(dev);
+ return ret ? ret : pm_generic_runtime_resume(dev);
+}
+EXPORT_SYMBOL_GPL(acpi_subsys_runtime_resume);
+#endif /* CONFIG_PM_RUNTIME */
+
+#ifdef CONFIG_PM_SLEEP
+/**
+ * acpi_dev_suspend_late - Put device into a low-power state using ACPI.
+ * @dev: Device to put into a low-power state.
+ *
+ * Put the given device into a low-power state during system transition to a
+ * sleep state using the standard ACPI mechanism. Set up system wakeup if
+ * desired, choose the state to put the device into (this checks if system
+ * wakeup is expected to work too), and set the power state of the device.
+ */
+int acpi_dev_suspend_late(struct device *dev)
+{
+ struct acpi_device *adev = acpi_dev_pm_get_node(dev);
+ u32 target_state;
+ bool wakeup;
+ int error;
+
+ if (!adev)
+ return 0;
+
+ target_state = acpi_target_system_state();
+ wakeup = device_may_wakeup(dev);
+ error = __acpi_device_sleep_wake(adev, target_state, wakeup);
+ if (wakeup && error)
+ return error;
+
+ error = acpi_dev_pm_low_power(dev, adev, target_state);
+ if (error)
+ __acpi_device_sleep_wake(adev, ACPI_STATE_UNKNOWN, false);
+
+ return error;
+}
+EXPORT_SYMBOL_GPL(acpi_dev_suspend_late);
+
+/**
+ * acpi_dev_resume_early - Put device into the full-power state using ACPI.
+ * @dev: Device to put into the full-power state.
+ *
+ * Put the given device into the full-power state using the standard ACPI
+ * mechanism during system transition to the working state. Set the power
+ * state of the device to ACPI D0 and disable remote wakeup.
+ */
+int acpi_dev_resume_early(struct device *dev)
+{
+ struct acpi_device *adev = acpi_dev_pm_get_node(dev);
+ int error;
+
+ if (!adev)
+ return 0;
+
+ error = acpi_dev_pm_full_power(adev);
+ __acpi_device_sleep_wake(adev, ACPI_STATE_UNKNOWN, false);
+ return error;
+}
+EXPORT_SYMBOL_GPL(acpi_dev_resume_early);
+
+/**
+ * acpi_subsys_prepare - Prepare device for system transition to a sleep state.
+ * @dev: Device to prepare.
+ */
+int acpi_subsys_prepare(struct device *dev)
+{
+ /*
+ * Follow PCI and resume devices suspended at run time before running
+ * their system suspend callbacks.
+ */
+ pm_runtime_resume(dev);
+ return pm_generic_prepare(dev);
+}
+EXPORT_SYMBOL_GPL(acpi_subsys_prepare);
+
+/**
+ * acpi_subsys_suspend_late - Suspend device using ACPI.
+ * @dev: Device to suspend.
+ *
+ * Carry out the generic late suspend procedure for @dev and use ACPI to put
+ * it into a low-power state during system transition into a sleep state.
+ */
+int acpi_subsys_suspend_late(struct device *dev)
+{
+ int ret = pm_generic_suspend_late(dev);
+ return ret ? ret : acpi_dev_suspend_late(dev);
+}
+EXPORT_SYMBOL_GPL(acpi_subsys_suspend_late);
+
+/**
+ * acpi_subsys_resume_early - Resume device using ACPI.
+ * @dev: Device to resume.
+ *
+ * Use ACPI to put the given device into the full-power state and carry out the
+ * generic early resume procedure for it during system transition into the
+ * working state.
+ */
+int acpi_subsys_resume_early(struct device *dev)
+{
+ int ret = acpi_dev_resume_early(dev);
+ return ret ? ret : pm_generic_resume_early(dev);
+}
+EXPORT_SYMBOL_GPL(acpi_subsys_resume_early);
+#endif /* CONFIG_PM_SLEEP */
+
+static struct dev_pm_domain acpi_general_pm_domain = {
+ .ops = {
+#ifdef CONFIG_PM_RUNTIME
+ .runtime_suspend = acpi_subsys_runtime_suspend,
+ .runtime_resume = acpi_subsys_runtime_resume,
+ .runtime_idle = pm_generic_runtime_idle,
+#endif
+#ifdef CONFIG_PM_SLEEP
+ .prepare = acpi_subsys_prepare,
+ .suspend_late = acpi_subsys_suspend_late,
+ .resume_early = acpi_subsys_resume_early,
+ .poweroff_late = acpi_subsys_suspend_late,
+ .restore_early = acpi_subsys_resume_early,
+#endif
+ },
+};
+
+/**
+ * acpi_dev_pm_attach - Prepare device for ACPI power management.
+ * @dev: Device to prepare.
+ * @power_on: Whether or not to power on the device.
+ *
+ * If @dev has a valid ACPI handle that has a valid struct acpi_device object
+ * attached to it, install a wakeup notification handler for the device and
+ * add it to the general ACPI PM domain. If @power_on is set, the device will
+ * be put into the ACPI D0 state before the function returns.
+ *
+ * This assumes that the @dev's bus type uses generic power management callbacks
+ * (or doesn't use any power management callbacks at all).
+ *
+ * Callers must ensure proper synchronization of this function with power
+ * management callbacks.
+ */
+int acpi_dev_pm_attach(struct device *dev, bool power_on)
+{
+ struct acpi_device *adev = acpi_dev_pm_get_node(dev);
+
+ if (!adev)
+ return -ENODEV;
+
+ if (dev->pm_domain)
+ return -EEXIST;
+
+ acpi_add_pm_notifier(adev, acpi_wakeup_device, dev);
+ dev->pm_domain = &acpi_general_pm_domain;
+ if (power_on) {
+ acpi_dev_pm_full_power(adev);
+ __acpi_device_run_wake(adev, false);
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(acpi_dev_pm_attach);
+
+/**
+ * acpi_dev_pm_detach - Remove ACPI power management from the device.
+ * @dev: Device to take care of.
+ * @power_off: Whether or not to try to remove power from the device.
+ *
+ * Remove the device from the general ACPI PM domain and remove its wakeup
+ * notifier. If @power_off is set, additionally remove power from the device if
+ * possible.
+ *
+ * Callers must ensure proper synchronization of this function with power
+ * management callbacks.
+ */
+void acpi_dev_pm_detach(struct device *dev, bool power_off)
+{
+ struct acpi_device *adev = acpi_dev_pm_get_node(dev);
+
+ if (adev && dev->pm_domain == &acpi_general_pm_domain) {
+ dev->pm_domain = NULL;
+ acpi_remove_pm_notifier(adev, acpi_wakeup_device);
+ if (power_off) {
+ /*
+ * If the device's PM QoS resume latency limit or flags
+ * have been exposed to user space, they have to be
+ * hidden at this point, so that they don't affect the
+ * choice of the low-power state to put the device into.
+ */
+ dev_pm_qos_hide_latency_limit(dev);
+ dev_pm_qos_hide_flags(dev);
+ __acpi_device_run_wake(adev, false);
+ acpi_dev_pm_low_power(dev, adev, ACPI_STATE_S0);
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(acpi_dev_pm_detach);
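
To illustrate the intended pairing (names and driver details hypothetical), a platform driver for an ACPI-enumerated device could attach to and detach from the general ACPI PM domain around probe and remove:

#include <linux/acpi.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	/*
	 * Power the device up and hand its PM over to the ACPI domain;
	 * an error here simply means there is no ACPI companion or a PM
	 * domain is already set, so the driver carries on regardless.
	 */
	acpi_dev_pm_attach(&pdev->dev, true);

	/* ...normal device setup... */
	return 0;
}

static int foo_remove(struct platform_device *pdev)
{
	/* ...tear down the device... */

	/* Leave the ACPI PM domain and try to remove power. */
	acpi_dev_pm_detach(&pdev->dev, true);
	return 0;
}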
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index 88eb14304667..f32bd47b35e0 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -31,6 +31,7 @@
#include <linux/platform_device.h>
#include <linux/jiffies.h>
#include <linux/stddef.h>
+#include <linux/acpi.h>
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
@@ -460,12 +461,8 @@ static void handle_dock(struct dock_station *ds, int dock)
struct acpi_object_list arg_list;
union acpi_object arg;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
- struct acpi_buffer name_buffer = { ACPI_ALLOCATE_BUFFER, NULL };
- acpi_get_name(ds->handle, ACPI_FULL_PATHNAME, &name_buffer);
-
- printk(KERN_INFO PREFIX "%s - %s\n",
- (char *)name_buffer.pointer, dock ? "docking" : "undocking");
+ acpi_handle_info(ds->handle, "%s\n", dock ? "docking" : "undocking");
/* _DCK method has one argument */
arg_list.count = 1;
@@ -474,11 +471,10 @@ static void handle_dock(struct dock_station *ds, int dock)
arg.integer.value = dock;
status = acpi_evaluate_object(ds->handle, "_DCK", &arg_list, &buffer);
if (ACPI_FAILURE(status) && status != AE_NOT_FOUND)
- ACPI_EXCEPTION((AE_INFO, status, "%s - failed to execute"
- " _DCK\n", (char *)name_buffer.pointer));
+ acpi_handle_err(ds->handle, "Failed to execute _DCK (0x%x)\n",
+ status);
kfree(buffer.pointer);
- kfree(name_buffer.pointer);
}
static inline void dock(struct dock_station *ds)
@@ -525,9 +521,11 @@ static void dock_lock(struct dock_station *ds, int lock)
status = acpi_evaluate_object(ds->handle, "_LCK", &arg_list, NULL);
if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
if (lock)
- printk(KERN_WARNING PREFIX "Locking device failed\n");
+ acpi_handle_warn(ds->handle,
+ "Locking device failed (0x%x)\n", status);
else
- printk(KERN_WARNING PREFIX "Unlocking device failed\n");
+ acpi_handle_warn(ds->handle,
+ "Unlocking device failed (0x%x)\n", status);
}
}
@@ -667,7 +665,7 @@ static int handle_eject_request(struct dock_station *ds, u32 event)
dock_lock(ds, 0);
eject_dock(ds);
if (dock_present(ds)) {
- printk(KERN_ERR PREFIX "Unable to undock!\n");
+ acpi_handle_err(ds->handle, "Unable to undock!\n");
return -EBUSY;
}
complete_undock(ds);
@@ -715,7 +713,7 @@ static void dock_notify(acpi_handle handle, u32 event, void *data)
begin_dock(ds);
dock(ds);
if (!dock_present(ds)) {
- printk(KERN_ERR PREFIX "Unable to dock!\n");
+ acpi_handle_err(handle, "Unable to dock!\n");
complete_dock(ds);
break;
}
@@ -743,7 +741,7 @@ static void dock_notify(acpi_handle handle, u32 event, void *data)
dock_event(ds, event, UNDOCK_EVENT);
break;
default:
- printk(KERN_ERR PREFIX "Unknown dock event %d\n", event);
+ acpi_handle_err(handle, "Unknown dock event %d\n", event);
}
}
@@ -987,7 +985,7 @@ err_rmgroup:
sysfs_remove_group(&dd->dev.kobj, &dock_attribute_group);
err_unregister:
platform_device_unregister(dd);
- printk(KERN_ERR "%s encountered error %d\n", __func__, ret);
+ acpi_handle_err(handle, "%s encountered error %d\n", __func__, ret);
return ret;
}
@@ -1016,51 +1014,39 @@ static int dock_remove(struct dock_station *ds)
}
/**
- * find_dock - look for a dock station
+ * find_dock_and_bay - look for dock stations and bays
* @handle: acpi handle of a device
* @lvl: unused
- * @context: counter of dock stations found
+ * @context: unused
* @rv: unused
*
- * This is called by acpi_walk_namespace to look for dock stations.
+ * This is called by acpi_walk_namespace to look for dock stations and bays.
*/
static __init acpi_status
-find_dock(acpi_handle handle, u32 lvl, void *context, void **rv)
+find_dock_and_bay(acpi_handle handle, u32 lvl, void *context, void **rv)
{
- if (is_dock(handle))
+ if (is_dock(handle) || is_ejectable_bay(handle))
dock_add(handle);
return AE_OK;
}
-static __init acpi_status
-find_bay(acpi_handle handle, u32 lvl, void *context, void **rv)
-{
- /* If bay is a dock, it's already handled */
- if (is_ejectable_bay(handle) && !is_dock(handle))
- dock_add(handle);
- return AE_OK;
-}
-
static int __init dock_init(void)
{
if (acpi_disabled)
return 0;
- /* look for a dock station */
+ /* look for dock stations and bays */
acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
- ACPI_UINT32_MAX, find_dock, NULL, NULL, NULL);
+ ACPI_UINT32_MAX, find_dock_and_bay, NULL, NULL, NULL);
- /* look for bay */
- acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
- ACPI_UINT32_MAX, find_bay, NULL, NULL, NULL);
if (!dock_station_count) {
- printk(KERN_INFO PREFIX "No dock devices found.\n");
+ pr_info(PREFIX "No dock devices found.\n");
return 0;
}
register_acpi_bus_notifier(&dock_acpi_notifier);
- printk(KERN_INFO PREFIX "%s: %d docks/bays found\n",
+ pr_info(PREFIX "%s: %d docks/bays found\n",
ACPI_DOCK_DRIVER_DESCRIPTION, dock_station_count);
return 0;
}
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index a51df9681319..354007d490d1 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -158,10 +158,10 @@ static int ec_transaction_done(struct acpi_ec *ec)
{
unsigned long flags;
int ret = 0;
- spin_lock_irqsave(&ec->curr_lock, flags);
+ spin_lock_irqsave(&ec->lock, flags);
if (!ec->curr || ec->curr->done)
ret = 1;
- spin_unlock_irqrestore(&ec->curr_lock, flags);
+ spin_unlock_irqrestore(&ec->lock, flags);
return ret;
}
@@ -175,32 +175,38 @@ static void start_transaction(struct acpi_ec *ec)
static void advance_transaction(struct acpi_ec *ec, u8 status)
{
unsigned long flags;
- spin_lock_irqsave(&ec->curr_lock, flags);
- if (!ec->curr)
+ struct transaction *t = ec->curr;
+
+ spin_lock_irqsave(&ec->lock, flags);
+ if (!t)
goto unlock;
- if (ec->curr->wlen > ec->curr->wi) {
+ if (t->wlen > t->wi) {
if ((status & ACPI_EC_FLAG_IBF) == 0)
acpi_ec_write_data(ec,
- ec->curr->wdata[ec->curr->wi++]);
+ t->wdata[t->wi++]);
else
goto err;
- } else if (ec->curr->rlen > ec->curr->ri) {
+ } else if (t->rlen > t->ri) {
if ((status & ACPI_EC_FLAG_OBF) == 1) {
- ec->curr->rdata[ec->curr->ri++] = acpi_ec_read_data(ec);
- if (ec->curr->rlen == ec->curr->ri)
- ec->curr->done = true;
+ t->rdata[t->ri++] = acpi_ec_read_data(ec);
+ if (t->rlen == t->ri)
+ t->done = true;
} else
goto err;
- } else if (ec->curr->wlen == ec->curr->wi &&
+ } else if (t->wlen == t->wi &&
(status & ACPI_EC_FLAG_IBF) == 0)
- ec->curr->done = true;
+ t->done = true;
goto unlock;
err:
- /* false interrupt, state didn't change */
- if (in_interrupt())
- ++ec->curr->irq_count;
+ /*
+ * If the SCI bit is set, do not treat this as a false IRQ; otherwise
+ * an IRQ that was simply not handled would be counted as a false one.
+ */
+ if (in_interrupt() && !(status & ACPI_EC_FLAG_SCI))
+ ++t->irq_count;
+
unlock:
- spin_unlock_irqrestore(&ec->curr_lock, flags);
+ spin_unlock_irqrestore(&ec->lock, flags);
}
static int acpi_ec_sync_query(struct acpi_ec *ec);
@@ -238,9 +244,9 @@ static int ec_poll(struct acpi_ec *ec)
if (acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF)
break;
pr_debug(PREFIX "controller reset, restart transaction\n");
- spin_lock_irqsave(&ec->curr_lock, flags);
+ spin_lock_irqsave(&ec->lock, flags);
start_transaction(ec);
- spin_unlock_irqrestore(&ec->curr_lock, flags);
+ spin_unlock_irqrestore(&ec->lock, flags);
}
return -ETIME;
}
@@ -253,17 +259,17 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
if (EC_FLAGS_MSI)
udelay(ACPI_EC_MSI_UDELAY);
/* start transaction */
- spin_lock_irqsave(&ec->curr_lock, tmp);
+ spin_lock_irqsave(&ec->lock, tmp);
/* following two actions should be kept atomic */
ec->curr = t;
start_transaction(ec);
if (ec->curr->command == ACPI_EC_COMMAND_QUERY)
clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
- spin_unlock_irqrestore(&ec->curr_lock, tmp);
+ spin_unlock_irqrestore(&ec->lock, tmp);
ret = ec_poll(ec);
- spin_lock_irqsave(&ec->curr_lock, tmp);
+ spin_lock_irqsave(&ec->lock, tmp);
ec->curr = NULL;
- spin_unlock_irqrestore(&ec->curr_lock, tmp);
+ spin_unlock_irqrestore(&ec->lock, tmp);
return ret;
}
@@ -292,7 +298,7 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
return -EINVAL;
if (t->rdata)
memset(t->rdata, 0, t->rlen);
- mutex_lock(&ec->lock);
+ mutex_lock(&ec->mutex);
if (test_bit(EC_FLAGS_BLOCKED, &ec->flags)) {
status = -EINVAL;
goto unlock;
@@ -310,7 +316,8 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
status = -ETIME;
goto end;
}
- pr_debug(PREFIX "transaction start\n");
+ pr_debug(PREFIX "transaction start (cmd=0x%02x, addr=0x%02x)\n",
+ t->command, t->wdata ? t->wdata[0] : 0);
/* disable GPE during transaction if storm is detected */
if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) {
/* It has to be disabled, so that it doesn't trigger. */
@@ -326,8 +333,9 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
/* It is safe to enable the GPE outside of the transaction. */
acpi_enable_gpe(NULL, ec->gpe);
} else if (t->irq_count > ec_storm_threshold) {
- pr_info(PREFIX "GPE storm detected, "
- "transactions will use polling mode\n");
+ pr_info(PREFIX "GPE storm detected(%d GPEs), "
+ "transactions will use polling mode\n",
+ t->irq_count);
set_bit(EC_FLAGS_GPE_STORM, &ec->flags);
}
pr_debug(PREFIX "transaction end\n");
@@ -335,7 +343,7 @@ end:
if (ec->global_lock)
acpi_release_global_lock(glk);
unlock:
- mutex_unlock(&ec->lock);
+ mutex_unlock(&ec->mutex);
return status;
}
@@ -403,7 +411,7 @@ int ec_burst_disable(void)
EXPORT_SYMBOL(ec_burst_disable);
-int ec_read(u8 addr, u8 * val)
+int ec_read(u8 addr, u8 *val)
{
int err;
u8 temp_data;
@@ -468,10 +476,10 @@ void acpi_ec_block_transactions(void)
if (!ec)
return;
- mutex_lock(&ec->lock);
+ mutex_lock(&ec->mutex);
/* Prevent transactions from being carried out */
set_bit(EC_FLAGS_BLOCKED, &ec->flags);
- mutex_unlock(&ec->lock);
+ mutex_unlock(&ec->mutex);
}
void acpi_ec_unblock_transactions(void)
@@ -481,10 +489,10 @@ void acpi_ec_unblock_transactions(void)
if (!ec)
return;
- mutex_lock(&ec->lock);
+ mutex_lock(&ec->mutex);
/* Allow transactions to be carried out again */
clear_bit(EC_FLAGS_BLOCKED, &ec->flags);
- mutex_unlock(&ec->lock);
+ mutex_unlock(&ec->mutex);
}
void acpi_ec_unblock_transactions_early(void)
@@ -536,9 +544,9 @@ int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
handler->handle = handle;
handler->func = func;
handler->data = data;
- mutex_lock(&ec->lock);
+ mutex_lock(&ec->mutex);
list_add(&handler->node, &ec->list);
- mutex_unlock(&ec->lock);
+ mutex_unlock(&ec->mutex);
return 0;
}
@@ -547,14 +555,14 @@ EXPORT_SYMBOL_GPL(acpi_ec_add_query_handler);
void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit)
{
struct acpi_ec_query_handler *handler, *tmp;
- mutex_lock(&ec->lock);
+ mutex_lock(&ec->mutex);
list_for_each_entry_safe(handler, tmp, &ec->list, node) {
if (query_bit == handler->query_bit) {
list_del(&handler->node);
kfree(handler);
}
}
- mutex_unlock(&ec->lock);
+ mutex_unlock(&ec->mutex);
}
EXPORT_SYMBOL_GPL(acpi_ec_remove_query_handler);
@@ -601,9 +609,9 @@ static void acpi_ec_gpe_query(void *ec_cxt)
struct acpi_ec *ec = ec_cxt;
if (!ec)
return;
- mutex_lock(&ec->lock);
+ mutex_lock(&ec->mutex);
acpi_ec_sync_query(ec);
- mutex_unlock(&ec->lock);
+ mutex_unlock(&ec->mutex);
}
static int ec_check_sci(struct acpi_ec *ec, u8 state)
@@ -622,10 +630,11 @@ static u32 acpi_ec_gpe_handler(acpi_handle gpe_device,
u32 gpe_number, void *data)
{
struct acpi_ec *ec = data;
+ u8 status = acpi_ec_read_status(ec);
- pr_debug(PREFIX "~~~> interrupt\n");
+ pr_debug(PREFIX "~~~> interrupt, status:0x%02x\n", status);
- advance_transaction(ec, acpi_ec_read_status(ec));
+ advance_transaction(ec, status);
if (ec_transaction_done(ec) &&
(acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF) == 0) {
wake_up(&ec->wait);
@@ -691,10 +700,10 @@ static struct acpi_ec *make_acpi_ec(void)
if (!ec)
return NULL;
ec->flags = 1 << EC_FLAGS_QUERY_PENDING;
- mutex_init(&ec->lock);
+ mutex_init(&ec->mutex);
init_waitqueue_head(&ec->wait);
INIT_LIST_HEAD(&ec->list);
- spin_lock_init(&ec->curr_lock);
+ spin_lock_init(&ec->lock);
return ec;
}
@@ -853,12 +862,12 @@ static int acpi_ec_remove(struct acpi_device *device, int type)
ec = acpi_driver_data(device);
ec_remove_handlers(ec);
- mutex_lock(&ec->lock);
+ mutex_lock(&ec->mutex);
list_for_each_entry_safe(handler, tmp, &ec->list, node) {
list_del(&handler->node);
kfree(handler);
}
- mutex_unlock(&ec->lock);
+ mutex_unlock(&ec->mutex);
release_region(ec->data_addr, 1);
release_region(ec->command_addr, 1);
device->driver_data = NULL;
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
index 08373086cd7e..01551840d236 100644
--- a/drivers/acpi/glue.c
+++ b/drivers/acpi/glue.c
@@ -130,46 +130,59 @@ static int acpi_bind_one(struct device *dev, acpi_handle handle)
{
struct acpi_device *acpi_dev;
acpi_status status;
- struct acpi_device_physical_node *physical_node;
+ struct acpi_device_physical_node *physical_node, *pn;
char physical_node_name[sizeof(PHYSICAL_NODE_STRING) + 2];
int retval = -EINVAL;
- if (dev->archdata.acpi_handle) {
- dev_warn(dev, "Drivers changed 'acpi_handle'\n");
- return -EINVAL;
+ if (ACPI_HANDLE(dev)) {
+ if (handle) {
+ dev_warn(dev, "ACPI handle is already set\n");
+ return -EINVAL;
+ } else {
+ handle = ACPI_HANDLE(dev);
+ }
}
+ if (!handle)
+ return -EINVAL;
get_device(dev);
status = acpi_bus_get_device(handle, &acpi_dev);
if (ACPI_FAILURE(status))
goto err;
- physical_node = kzalloc(sizeof(struct acpi_device_physical_node),
- GFP_KERNEL);
+ physical_node = kzalloc(sizeof(*physical_node), GFP_KERNEL);
if (!physical_node) {
retval = -ENOMEM;
goto err;
}
mutex_lock(&acpi_dev->physical_node_lock);
+
+ /* Sanity check. */
+ list_for_each_entry(pn, &acpi_dev->physical_node_list, node)
+ if (pn->dev == dev) {
+ dev_warn(dev, "Already associated with ACPI node\n");
+ goto err_free;
+ }
+
/* allocate physical node id according to physical_node_id_bitmap */
physical_node->node_id =
find_first_zero_bit(acpi_dev->physical_node_id_bitmap,
ACPI_MAX_PHYSICAL_NODE);
if (physical_node->node_id >= ACPI_MAX_PHYSICAL_NODE) {
retval = -ENOSPC;
- mutex_unlock(&acpi_dev->physical_node_lock);
- kfree(physical_node);
- goto err;
+ goto err_free;
}
set_bit(physical_node->node_id, acpi_dev->physical_node_id_bitmap);
physical_node->dev = dev;
list_add_tail(&physical_node->node, &acpi_dev->physical_node_list);
acpi_dev->physical_node_count++;
+
mutex_unlock(&acpi_dev->physical_node_lock);
- dev->archdata.acpi_handle = handle;
+ if (!ACPI_HANDLE(dev))
+ ACPI_HANDLE_SET(dev, acpi_dev->handle);
if (!physical_node->node_id)
strcpy(physical_node_name, PHYSICAL_NODE_STRING);
@@ -187,8 +200,14 @@ static int acpi_bind_one(struct device *dev, acpi_handle handle)
return 0;
err:
+ ACPI_HANDLE_SET(dev, NULL);
put_device(dev);
return retval;
+
+ err_free:
+ mutex_unlock(&acpi_dev->physical_node_lock);
+ kfree(physical_node);
+ goto err;
}
static int acpi_unbind_one(struct device *dev)
@@ -198,11 +217,10 @@ static int acpi_unbind_one(struct device *dev)
acpi_status status;
struct list_head *node, *next;
- if (!dev->archdata.acpi_handle)
+ if (!ACPI_HANDLE(dev))
return 0;
- status = acpi_bus_get_device(dev->archdata.acpi_handle,
- &acpi_dev);
+ status = acpi_bus_get_device(ACPI_HANDLE(dev), &acpi_dev);
if (ACPI_FAILURE(status))
goto err;
@@ -228,7 +246,7 @@ static int acpi_unbind_one(struct device *dev)
sysfs_remove_link(&acpi_dev->dev.kobj, physical_node_name);
sysfs_remove_link(&dev->kobj, "firmware_node");
- dev->archdata.acpi_handle = NULL;
+ ACPI_HANDLE_SET(dev, NULL);
/* acpi_bind_one increase refcnt by one */
put_device(dev);
kfree(entry);
@@ -248,6 +266,10 @@ static int acpi_platform_notify(struct device *dev)
acpi_handle handle;
int ret = -EINVAL;
+ ret = acpi_bind_one(dev, NULL);
+ if (!ret)
+ goto out;
+
if (!dev->bus || !dev->parent) {
		/* bridge devices generally have no bus or parent */
ret = acpi_find_bridge_device(dev, &handle);
@@ -261,16 +283,16 @@ static int acpi_platform_notify(struct device *dev)
}
if ((ret = type->find_device(dev, &handle)) != 0)
DBG("Can't get handler for %s\n", dev_name(dev));
- end:
+ end:
if (!ret)
acpi_bind_one(dev, handle);
+ out:
#if ACPI_GLUE_DEBUG
if (!ret) {
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
- acpi_get_name(dev->archdata.acpi_handle,
- ACPI_FULL_PATHNAME, &buffer);
+ acpi_get_name(dev->acpi_handle, ACPI_FULL_PATHNAME, &buffer);
DBG("Device %s -> %s\n", dev_name(dev), (char *)buffer.pointer);
kfree(buffer.pointer);
} else
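With the reworked acpi_bind_one() above, a bound physical device carries its ACPI handle via ACPI_HANDLE(dev). A minimal sketch of how a driver could resolve its ACPI companion from that handle; demo_probe and the error handling are illustrative, not part of this patch.

#include <linux/acpi.h>
#include <linux/platform_device.h>

/* Sketch only: assumes the device was bound to an ACPI node by acpi_bind_one(). */
static int demo_probe(struct platform_device *pdev)
{
	acpi_handle handle = ACPI_HANDLE(&pdev->dev);	/* set during binding */
	struct acpi_device *adev;

	if (!handle || acpi_bus_get_device(handle, &adev))
		return -ENODEV;	/* no ACPI companion for this device */

	dev_info(&pdev->dev, "bound to ACPI node %s\n", dev_name(&adev->dev));
	return 0;
}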
diff --git a/drivers/acpi/hed.c b/drivers/acpi/hed.c
index 20a0f2c3ca3b..b514e81e8cfa 100644
--- a/drivers/acpi/hed.c
+++ b/drivers/acpi/hed.c
@@ -70,7 +70,7 @@ static int __devinit acpi_hed_add(struct acpi_device *device)
return 0;
}
-static int __devexit acpi_hed_remove(struct acpi_device *device, int type)
+static int acpi_hed_remove(struct acpi_device *device, int type)
{
hed_handle = NULL;
return 0;
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index ca75b9ce0489..3c407cdc1ec1 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -58,11 +58,11 @@ struct acpi_ec {
unsigned long data_addr;
unsigned long global_lock;
unsigned long flags;
- struct mutex lock;
+ struct mutex mutex;
wait_queue_head_t wait;
struct list_head list;
struct transaction *curr;
- spinlock_t curr_lock;
+ spinlock_t lock;
};
extern struct acpi_ec *first_ec;
@@ -93,4 +93,11 @@ static inline int suspend_nvs_save(void) { return 0; }
static inline void suspend_nvs_restore(void) {}
#endif
+/*--------------------------------------------------------------------------
+ Platform bus support
+ -------------------------------------------------------------------------- */
+struct platform_device;
+
+struct platform_device *acpi_create_platform_device(struct acpi_device *adev);
+
#endif /* _ACPI_INTERNAL_H_ */
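The field renames above encode the locking convention more clearly: the spinlock (formerly curr_lock) protects the in-flight transaction, including against the GPE handler, while the mutex (formerly lock) serializes the query-handler list. A hedged sketch of that convention; the helper names below are illustrative only.

/* Illustrative only: how the renamed acpi_ec locks are intended to be used. */
static void demo_touch_transaction(struct acpi_ec *ec)
{
	unsigned long flags;

	spin_lock_irqsave(&ec->lock, flags);	/* was ec->curr_lock */
	/* ... inspect or advance ec->curr; safe against the interrupt path ... */
	spin_unlock_irqrestore(&ec->lock, flags);
}

static void demo_walk_query_handlers(struct acpi_ec *ec)
{
	mutex_lock(&ec->mutex);			/* was ec->lock */
	/* ... walk ec->list and run query handlers in process context ... */
	mutex_unlock(&ec->mutex);
}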
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 9eaf708f5885..6dc4a2b1e956 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -932,7 +932,7 @@ static acpi_status __acpi_os_execute(acpi_execute_type type,
* having a static work_struct.
*/
- dpc = kmalloc(sizeof(struct acpi_os_dpc), GFP_ATOMIC);
+ dpc = kzalloc(sizeof(struct acpi_os_dpc), GFP_ATOMIC);
if (!dpc)
return AE_NO_MEMORY;
@@ -944,17 +944,22 @@ static acpi_status __acpi_os_execute(acpi_execute_type type,
* because the hotplug code may call driver .remove() functions,
* which invoke flush_scheduled_work/acpi_os_wait_events_complete
* to flush these workqueues.
+ *
+ * To prevent lockdep from complaining unnecessarily, make sure that
+ * there is a different static lockdep key for each workqueue by using
+ * INIT_WORK() for each of them separately.
*/
- queue = hp ? kacpi_hotplug_wq :
- (type == OSL_NOTIFY_HANDLER ? kacpi_notify_wq : kacpid_wq);
- dpc->wait = hp ? 1 : 0;
-
- if (queue == kacpi_hotplug_wq)
+ if (hp) {
+ queue = kacpi_hotplug_wq;
+ dpc->wait = 1;
INIT_WORK(&dpc->work, acpi_os_execute_deferred);
- else if (queue == kacpi_notify_wq)
+ } else if (type == OSL_NOTIFY_HANDLER) {
+ queue = kacpi_notify_wq;
INIT_WORK(&dpc->work, acpi_os_execute_deferred);
- else
+ } else {
+ queue = kacpid_wq;
INIT_WORK(&dpc->work, acpi_os_execute_deferred);
+ }
/*
* On some machines, a software-initiated SMI causes corruption unless
@@ -986,6 +991,7 @@ acpi_status acpi_os_hotplug_execute(acpi_osd_exec_callback function,
{
return __acpi_os_execute(0, function, context, 1);
}
+EXPORT_SYMBOL(acpi_os_hotplug_execute);
void acpi_os_wait_events_complete(void)
{
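The reason the osl.c hunk above calls INIT_WORK() separately in each branch is that INIT_WORK() creates a distinct static lockdep class per call site, so flushing one ACPI workqueue from work running on another no longer looks like self-flushing to lockdep. A minimal sketch of the same property; demo_fn and demo_queue are illustrative.

#include <linux/workqueue.h>

static void demo_fn(struct work_struct *work) { /* ... */ }

static void demo_queue(struct workqueue_struct *wq, struct work_struct *w, bool hotplug)
{
	if (hotplug)
		INIT_WORK(w, demo_fn);	/* call site A -> lockdep key A */
	else
		INIT_WORK(w, demo_fn);	/* call site B -> lockdep key B */
	queue_work(wq, w);
}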
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index 0eefa12e648c..23a032490130 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -459,19 +459,19 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
*/
if (gsi < 0) {
u32 dev_gsi;
- dev_warn(&dev->dev, "PCI INT %c: no GSI", pin_name(pin));
/* Interrupt Line values above 0xF are forbidden */
if (dev->irq > 0 && (dev->irq <= 0xF) &&
(acpi_isa_irq_to_gsi(dev->irq, &dev_gsi) == 0)) {
- printk(" - using ISA IRQ %d\n", dev->irq);
+ dev_warn(&dev->dev, "PCI INT %c: no GSI - using ISA IRQ %d\n",
+ pin_name(pin), dev->irq);
acpi_register_gsi(&dev->dev, dev_gsi,
ACPI_LEVEL_SENSITIVE,
ACPI_ACTIVE_LOW);
- return 0;
} else {
- printk("\n");
- return 0;
+ dev_warn(&dev->dev, "PCI INT %c: no GSI\n",
+ pin_name(pin));
}
+ return 0;
}
rc = acpi_register_gsi(&dev->dev, gsi, triggering, polarity);
@@ -495,11 +495,6 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
return 0;
}
-/* FIXME: implement x86/x86_64 version */
-void __attribute__ ((weak)) acpi_unregister_gsi(u32 i)
-{
-}
-
void acpi_pci_irq_disable(struct pci_dev *dev)
{
struct acpi_prt_entry *entry;
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index 40e38a06ba85..7db61b8fa11f 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -473,7 +473,7 @@ int acpi_power_resource_register_device(struct device *dev, acpi_handle handle)
return ret;
no_power_resource:
- printk(KERN_DEBUG PREFIX "Invalid Power Resource to register!");
+ printk(KERN_DEBUG PREFIX "Invalid Power Resource to register!\n");
return -ENODEV;
}
EXPORT_SYMBOL_GPL(acpi_power_resource_register_device);
diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
index 27adb090bb30..ef98796b3824 100644
--- a/drivers/acpi/proc.c
+++ b/drivers/acpi/proc.c
@@ -362,16 +362,13 @@ acpi_system_write_wakeup_device(struct file *file,
struct list_head *node, *next;
char strbuf[5];
char str[5] = "";
- unsigned int len = count;
- if (len > 4)
- len = 4;
- if (len < 0)
- return -EFAULT;
+ if (count > 4)
+ count = 4;
- if (copy_from_user(strbuf, buffer, len))
+ if (copy_from_user(strbuf, buffer, count))
return -EFAULT;
- strbuf[len] = '\0';
+ strbuf[count] = '\0';
sscanf(strbuf, "%s", str);
mutex_lock(&acpi_device_lock);
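The proc.c change above replaces a signed/unsigned length dance with the usual pattern for small procfs writes: clamp the user-supplied count to the buffer, copy, then NUL-terminate. A generic sketch of that pattern (demo_write is illustrative):

#include <linux/uaccess.h>

static ssize_t demo_write(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	char kbuf[5];

	if (count > sizeof(kbuf) - 1)
		count = sizeof(kbuf) - 1;	/* count is unsigned, only the upper bound needs clamping */
	if (copy_from_user(kbuf, buf, count))
		return -EFAULT;
	kbuf[count] = '\0';
	/* ... parse kbuf ... */
	return count;
}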
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index bd4e5dca3ff7..e83311bf1ebd 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -44,6 +44,7 @@
#include <linux/moduleparam.h>
#include <linux/cpuidle.h>
#include <linux/slab.h>
+#include <linux/acpi.h>
#include <asm/io.h>
#include <asm/cpu.h>
@@ -282,7 +283,9 @@ static int acpi_processor_get_info(struct acpi_device *device)
/* Declared with "Processor" statement; match ProcessorID */
status = acpi_evaluate_object(pr->handle, NULL, NULL, &buffer);
if (ACPI_FAILURE(status)) {
- printk(KERN_ERR PREFIX "Evaluating processor object\n");
+ dev_err(&device->dev,
+ "Failed to evaluate processor object (0x%x)\n",
+ status);
return -ENODEV;
}
@@ -301,8 +304,9 @@ static int acpi_processor_get_info(struct acpi_device *device)
status = acpi_evaluate_integer(pr->handle, METHOD_NAME__UID,
NULL, &value);
if (ACPI_FAILURE(status)) {
- printk(KERN_ERR PREFIX
- "Evaluating processor _UID [%#x]\n", status);
+ dev_err(&device->dev,
+ "Failed to evaluate processor _UID (0x%x)\n",
+ status);
return -ENODEV;
}
device_declaration = 1;
@@ -345,7 +349,7 @@ static int acpi_processor_get_info(struct acpi_device *device)
if (!object.processor.pblk_address)
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No PBLK (NULL address)\n"));
else if (object.processor.pblk_length != 6)
- printk(KERN_ERR PREFIX "Invalid PBLK length [%d]\n",
+ dev_err(&device->dev, "Invalid PBLK length [%d]\n",
object.processor.pblk_length);
else {
pr->throttling.address = object.processor.pblk_address;
@@ -430,8 +434,8 @@ static int acpi_cpu_soft_notify(struct notifier_block *nfb,
* Initialize missing things
*/
if (pr->flags.need_hotplug_init) {
- printk(KERN_INFO "Will online and init hotplugged "
- "CPU: %d\n", pr->id);
+ pr_info("Will online and init hotplugged CPU: %d\n",
+ pr->id);
WARN(acpi_processor_start(pr), "Failed to start CPU:"
" %d\n", pr->id);
pr->flags.need_hotplug_init = 0;
@@ -492,14 +496,16 @@ static __ref int acpi_processor_start(struct acpi_processor *pr)
&pr->cdev->device.kobj,
"thermal_cooling");
if (result) {
- printk(KERN_ERR PREFIX "Create sysfs link\n");
+ dev_err(&device->dev,
+ "Failed to create sysfs link 'thermal_cooling'\n");
goto err_thermal_unregister;
}
result = sysfs_create_link(&pr->cdev->device.kobj,
&device->dev.kobj,
"device");
if (result) {
- printk(KERN_ERR PREFIX "Create sysfs link\n");
+ dev_err(&pr->cdev->device,
+ "Failed to create sysfs link 'device'\n");
goto err_remove_sysfs_thermal;
}
@@ -561,8 +567,9 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
*/
if (per_cpu(processor_device_array, pr->id) != NULL &&
per_cpu(processor_device_array, pr->id) != device) {
- printk(KERN_WARNING "BIOS reported wrong ACPI id "
- "for the processor\n");
+ dev_warn(&device->dev,
+ "BIOS reported wrong ACPI id %d for the processor\n",
+ pr->id);
result = -ENODEV;
goto err_free_cpumask;
}
@@ -695,8 +702,8 @@ int acpi_processor_device_add(acpi_handle handle, struct acpi_device **device)
static void acpi_processor_hotplug_notify(acpi_handle handle,
u32 event, void *data)
{
- struct acpi_processor *pr;
struct acpi_device *device = NULL;
+ struct acpi_eject_event *ej_event = NULL;
u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE; /* default */
int result;
@@ -716,7 +723,7 @@ static void acpi_processor_hotplug_notify(acpi_handle handle,
result = acpi_processor_device_add(handle, &device);
if (result) {
- printk(KERN_ERR PREFIX "Unable to add the device\n");
+ acpi_handle_err(handle, "Unable to add the device\n");
break;
}
@@ -728,20 +735,29 @@ static void acpi_processor_hotplug_notify(acpi_handle handle,
"received ACPI_NOTIFY_EJECT_REQUEST\n"));
if (acpi_bus_get_device(handle, &device)) {
- printk(KERN_ERR PREFIX
- "Device don't exist, dropping EJECT\n");
+ acpi_handle_err(handle,
+			"Device does not exist, dropping EJECT\n");
break;
}
- pr = acpi_driver_data(device);
- if (!pr) {
- printk(KERN_ERR PREFIX
- "Driver data is NULL, dropping EJECT\n");
+ if (!acpi_driver_data(device)) {
+ acpi_handle_err(handle,
+ "Driver data is NULL, dropping EJECT\n");
break;
}
- /* REVISIT: update when eject is supported */
- ost_code = ACPI_OST_SC_EJECT_NOT_SUPPORTED;
- break;
+ ej_event = kmalloc(sizeof(*ej_event), GFP_KERNEL);
+ if (!ej_event) {
+ acpi_handle_err(handle, "No memory, dropping EJECT\n");
+ break;
+ }
+
+ ej_event->handle = handle;
+ ej_event->event = ACPI_NOTIFY_EJECT_REQUEST;
+ acpi_os_hotplug_execute(acpi_bus_hot_remove_device,
+ (void *)ej_event);
+
+ /* eject is performed asynchronously */
+ return;
default:
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
@@ -841,7 +857,7 @@ static acpi_status acpi_processor_hotadd_init(struct acpi_processor *pr)
* and do it when the CPU gets online the first time
* TBD: Cleanup above functions and try to do this more elegant.
*/
- printk(KERN_INFO "CPU %d got hotplugged\n", pr->id);
+ pr_info("CPU %d got hotplugged\n", pr->id);
pr->flags.need_hotplug_init = 1;
return AE_OK;
@@ -852,8 +868,22 @@ static int acpi_processor_handle_eject(struct acpi_processor *pr)
if (cpu_online(pr->id))
cpu_down(pr->id);
+ get_online_cpus();
+ /*
+	 * The CPU might have been brought back online by the time we get here.
+	 * Check again under get_online_cpus(): if it is online, someone wants
+	 * to keep using it, so acpi_processor_handle_eject() returns -EAGAIN
+	 * instead of removing it.
+ */
+ if (unlikely(cpu_online(pr->id))) {
+ put_online_cpus();
+		pr_warn("Failed to remove CPU %d, because another task "
+			"brought the CPU back online\n", pr->id);
+ return -EAGAIN;
+ }
arch_unregister_cpu(pr->id);
acpi_unmap_lsapic(pr->id);
+ put_online_cpus();
return (0);
}
#else
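The processor hotplug notify handler above no longer ejects synchronously; it queues acpi_bus_hot_remove_device() on the hotplug workqueue and returns, and the eject path re-checks cpu_online() under get_online_cpus() to catch a racing online. A hedged sketch of the dispatch step, using the acpi_eject_event structure from the patch (demo_request_eject is illustrative):

/* Sketch only: queue an asynchronous eject, as the notify handler above does. */
static void demo_request_eject(acpi_handle handle)
{
	struct acpi_eject_event *ej_event;

	ej_event = kmalloc(sizeof(*ej_event), GFP_KERNEL);
	if (!ej_event)
		return;			/* notify context: fail quietly, do not block */

	ej_event->handle = handle;
	ej_event->event = ACPI_NOTIFY_EJECT_REQUEST;
	/* Runs acpi_bus_hot_remove_device() on kacpi_hotplug_wq, outside notify context. */
	if (ACPI_FAILURE(acpi_os_hotplug_execute(acpi_bus_hot_remove_device, ej_event)))
		kfree(ej_event);
}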
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index e8086c725305..f1a5da44591d 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -735,31 +735,18 @@ static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
static int acpi_idle_enter_c1(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index)
{
- ktime_t kt1, kt2;
- s64 idle_time;
struct acpi_processor *pr;
struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage);
pr = __this_cpu_read(processors);
- dev->last_residency = 0;
if (unlikely(!pr))
return -EINVAL;
- local_irq_disable();
-
-
lapic_timer_state_broadcast(pr, cx, 1);
- kt1 = ktime_get_real();
acpi_idle_do_entry(cx);
- kt2 = ktime_get_real();
- idle_time = ktime_to_us(ktime_sub(kt2, kt1));
-
- /* Update device last_residency*/
- dev->last_residency = (int)idle_time;
- local_irq_enable();
lapic_timer_state_broadcast(pr, cx, 0);
return index;
@@ -806,19 +793,12 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
struct acpi_processor *pr;
struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage);
- ktime_t kt1, kt2;
- s64 idle_time_ns;
- s64 idle_time;
pr = __this_cpu_read(processors);
- dev->last_residency = 0;
if (unlikely(!pr))
return -EINVAL;
- local_irq_disable();
-
-
if (cx->entry_method != ACPI_CSTATE_FFH) {
current_thread_info()->status &= ~TS_POLLING;
/*
@@ -829,7 +809,6 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
if (unlikely(need_resched())) {
current_thread_info()->status |= TS_POLLING;
- local_irq_enable();
return -EINVAL;
}
}
@@ -843,22 +822,12 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
if (cx->type == ACPI_STATE_C3)
ACPI_FLUSH_CPU_CACHE();
- kt1 = ktime_get_real();
/* Tell the scheduler that we are going deep-idle: */
sched_clock_idle_sleep_event();
acpi_idle_do_entry(cx);
- kt2 = ktime_get_real();
- idle_time_ns = ktime_to_ns(ktime_sub(kt2, kt1));
- idle_time = idle_time_ns;
- do_div(idle_time, NSEC_PER_USEC);
- /* Update device last_residency*/
- dev->last_residency = (int)idle_time;
+ sched_clock_idle_wakeup_event(0);
- /* Tell the scheduler how much we idled: */
- sched_clock_idle_wakeup_event(idle_time_ns);
-
- local_irq_enable();
if (cx->entry_method != ACPI_CSTATE_FFH)
current_thread_info()->status |= TS_POLLING;
@@ -883,13 +852,8 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
struct acpi_processor *pr;
struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage);
- ktime_t kt1, kt2;
- s64 idle_time_ns;
- s64 idle_time;
-
pr = __this_cpu_read(processors);
- dev->last_residency = 0;
if (unlikely(!pr))
return -EINVAL;
@@ -899,16 +863,11 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
return drv->states[drv->safe_state_index].enter(dev,
drv, drv->safe_state_index);
} else {
- local_irq_disable();
acpi_safe_halt();
- local_irq_enable();
return -EBUSY;
}
}
- local_irq_disable();
-
-
if (cx->entry_method != ACPI_CSTATE_FFH) {
current_thread_info()->status &= ~TS_POLLING;
/*
@@ -919,7 +878,6 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
if (unlikely(need_resched())) {
current_thread_info()->status |= TS_POLLING;
- local_irq_enable();
return -EINVAL;
}
}
@@ -934,7 +892,6 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
*/
lapic_timer_state_broadcast(pr, cx, 1);
- kt1 = ktime_get_real();
/*
* disable bus master
* bm_check implies we need ARB_DIS
@@ -965,18 +922,9 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
c3_cpu_count--;
raw_spin_unlock(&c3_lock);
}
- kt2 = ktime_get_real();
- idle_time_ns = ktime_to_ns(ktime_sub(kt2, kt1));
- idle_time = idle_time_ns;
- do_div(idle_time, NSEC_PER_USEC);
-
- /* Update device last_residency*/
- dev->last_residency = (int)idle_time;
- /* Tell the scheduler how much we idled: */
- sched_clock_idle_wakeup_event(idle_time_ns);
+ sched_clock_idle_wakeup_event(0);
- local_irq_enable();
if (cx->entry_method != ACPI_CSTATE_FFH)
current_thread_info()->status |= TS_POLLING;
@@ -987,6 +935,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
struct cpuidle_driver acpi_idle_driver = {
.name = "acpi_idle",
.owner = THIS_MODULE,
+ .en_core_tk_irqen = 1,
};
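All of the ktime bookkeeping and local_irq_*() calls can go away above because .en_core_tk_irqen tells the cpuidle core to disable interrupts around ->enter() and to measure the residency itself. A minimal sketch of what a core-timekeeping driver looks like; demo_idle_enter and the safe_halt() entry method are illustrative assumptions:

/* Illustrative core-timekeeping cpuidle driver skeleton. */
static int demo_idle_enter(struct cpuidle_device *dev,
			   struct cpuidle_driver *drv, int index)
{
	/* IRQs are already off and residency is accounted by the cpuidle core. */
	safe_halt();		/* platform-specific idle entry */
	return index;		/* index of the state actually entered */
}

static struct cpuidle_driver demo_idle_driver = {
	.name			= "demo_idle",
	.owner			= THIS_MODULE,
	.en_core_tk_irqen	= 1,	/* core handles timekeeping and IRQ enable */
};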
/**
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
new file mode 100644
index 000000000000..a3868f6c222a
--- /dev/null
+++ b/drivers/acpi/resource.c
@@ -0,0 +1,526 @@
+/*
+ * drivers/acpi/resource.c - ACPI device resources interpretation.
+ *
+ * Copyright (C) 2012, Intel Corp.
+ * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+#include <linux/acpi.h>
+#include <linux/device.h>
+#include <linux/export.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+
+#ifdef CONFIG_X86
+#define valid_IRQ(i) (((i) != 0) && ((i) != 2))
+#else
+#define valid_IRQ(i) (true)
+#endif
+
+static unsigned long acpi_dev_memresource_flags(u64 len, u8 write_protect,
+ bool window)
+{
+ unsigned long flags = IORESOURCE_MEM;
+
+ if (len == 0)
+ flags |= IORESOURCE_DISABLED;
+
+ if (write_protect == ACPI_READ_WRITE_MEMORY)
+ flags |= IORESOURCE_MEM_WRITEABLE;
+
+ if (window)
+ flags |= IORESOURCE_WINDOW;
+
+ return flags;
+}
+
+static void acpi_dev_get_memresource(struct resource *res, u64 start, u64 len,
+ u8 write_protect)
+{
+ res->start = start;
+ res->end = start + len - 1;
+ res->flags = acpi_dev_memresource_flags(len, write_protect, false);
+}
+
+/**
+ * acpi_dev_resource_memory - Extract ACPI memory resource information.
+ * @ares: Input ACPI resource object.
+ * @res: Output generic resource object.
+ *
+ * Check if the given ACPI resource object represents a memory resource and
+ * if that's the case, use the information in it to populate the generic
+ * resource object pointed to by @res.
+ */
+bool acpi_dev_resource_memory(struct acpi_resource *ares, struct resource *res)
+{
+ struct acpi_resource_memory24 *memory24;
+ struct acpi_resource_memory32 *memory32;
+ struct acpi_resource_fixed_memory32 *fixed_memory32;
+
+ switch (ares->type) {
+ case ACPI_RESOURCE_TYPE_MEMORY24:
+ memory24 = &ares->data.memory24;
+ acpi_dev_get_memresource(res, memory24->minimum,
+ memory24->address_length,
+ memory24->write_protect);
+ break;
+ case ACPI_RESOURCE_TYPE_MEMORY32:
+ memory32 = &ares->data.memory32;
+ acpi_dev_get_memresource(res, memory32->minimum,
+ memory32->address_length,
+ memory32->write_protect);
+ break;
+ case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
+ fixed_memory32 = &ares->data.fixed_memory32;
+ acpi_dev_get_memresource(res, fixed_memory32->address,
+ fixed_memory32->address_length,
+ fixed_memory32->write_protect);
+ break;
+ default:
+ return false;
+ }
+ return true;
+}
+EXPORT_SYMBOL_GPL(acpi_dev_resource_memory);
+
+static unsigned int acpi_dev_ioresource_flags(u64 start, u64 end, u8 io_decode,
+ bool window)
+{
+ int flags = IORESOURCE_IO;
+
+ if (io_decode == ACPI_DECODE_16)
+ flags |= IORESOURCE_IO_16BIT_ADDR;
+
+ if (start > end || end >= 0x10003)
+ flags |= IORESOURCE_DISABLED;
+
+ if (window)
+ flags |= IORESOURCE_WINDOW;
+
+ return flags;
+}
+
+static void acpi_dev_get_ioresource(struct resource *res, u64 start, u64 len,
+ u8 io_decode)
+{
+ u64 end = start + len - 1;
+
+ res->start = start;
+ res->end = end;
+ res->flags = acpi_dev_ioresource_flags(start, end, io_decode, false);
+}
+
+/**
+ * acpi_dev_resource_io - Extract ACPI I/O resource information.
+ * @ares: Input ACPI resource object.
+ * @res: Output generic resource object.
+ *
+ * Check if the given ACPI resource object represents an I/O resource and
+ * if that's the case, use the information in it to populate the generic
+ * resource object pointed to by @res.
+ */
+bool acpi_dev_resource_io(struct acpi_resource *ares, struct resource *res)
+{
+ struct acpi_resource_io *io;
+ struct acpi_resource_fixed_io *fixed_io;
+
+ switch (ares->type) {
+ case ACPI_RESOURCE_TYPE_IO:
+ io = &ares->data.io;
+ acpi_dev_get_ioresource(res, io->minimum,
+ io->address_length,
+ io->io_decode);
+ break;
+ case ACPI_RESOURCE_TYPE_FIXED_IO:
+ fixed_io = &ares->data.fixed_io;
+ acpi_dev_get_ioresource(res, fixed_io->address,
+ fixed_io->address_length,
+ ACPI_DECODE_10);
+ break;
+ default:
+ return false;
+ }
+ return true;
+}
+EXPORT_SYMBOL_GPL(acpi_dev_resource_io);
+
+/**
+ * acpi_dev_resource_address_space - Extract ACPI address space information.
+ * @ares: Input ACPI resource object.
+ * @res: Output generic resource object.
+ *
+ * Check if the given ACPI resource object represents an address space resource
+ * and if that's the case, use the information in it to populate the generic
+ * resource object pointed to by @res.
+ */
+bool acpi_dev_resource_address_space(struct acpi_resource *ares,
+ struct resource *res)
+{
+ acpi_status status;
+ struct acpi_resource_address64 addr;
+ bool window;
+ u64 len;
+ u8 io_decode;
+
+ switch (ares->type) {
+ case ACPI_RESOURCE_TYPE_ADDRESS16:
+ case ACPI_RESOURCE_TYPE_ADDRESS32:
+ case ACPI_RESOURCE_TYPE_ADDRESS64:
+ break;
+ default:
+ return false;
+ }
+
+ status = acpi_resource_to_address64(ares, &addr);
+ if (ACPI_FAILURE(status))
+ return true;
+
+ res->start = addr.minimum;
+ res->end = addr.maximum;
+ window = addr.producer_consumer == ACPI_PRODUCER;
+
+ switch(addr.resource_type) {
+ case ACPI_MEMORY_RANGE:
+ len = addr.maximum - addr.minimum + 1;
+ res->flags = acpi_dev_memresource_flags(len,
+ addr.info.mem.write_protect,
+ window);
+ break;
+ case ACPI_IO_RANGE:
+ io_decode = addr.granularity == 0xfff ?
+ ACPI_DECODE_10 : ACPI_DECODE_16;
+ res->flags = acpi_dev_ioresource_flags(addr.minimum,
+ addr.maximum,
+ io_decode, window);
+ break;
+ case ACPI_BUS_NUMBER_RANGE:
+ res->flags = IORESOURCE_BUS;
+ break;
+ default:
+ res->flags = 0;
+ }
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(acpi_dev_resource_address_space);
+
+/**
+ * acpi_dev_resource_ext_address_space - Extract ACPI address space information.
+ * @ares: Input ACPI resource object.
+ * @res: Output generic resource object.
+ *
+ * Check if the given ACPI resource object represents an extended address space
+ * resource and if that's the case, use the information in it to populate the
+ * generic resource object pointed to by @res.
+ */
+bool acpi_dev_resource_ext_address_space(struct acpi_resource *ares,
+ struct resource *res)
+{
+ struct acpi_resource_extended_address64 *ext_addr;
+ bool window;
+ u64 len;
+ u8 io_decode;
+
+ if (ares->type != ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64)
+ return false;
+
+ ext_addr = &ares->data.ext_address64;
+
+ res->start = ext_addr->minimum;
+ res->end = ext_addr->maximum;
+ window = ext_addr->producer_consumer == ACPI_PRODUCER;
+
+ switch(ext_addr->resource_type) {
+ case ACPI_MEMORY_RANGE:
+ len = ext_addr->maximum - ext_addr->minimum + 1;
+ res->flags = acpi_dev_memresource_flags(len,
+ ext_addr->info.mem.write_protect,
+ window);
+ break;
+ case ACPI_IO_RANGE:
+ io_decode = ext_addr->granularity == 0xfff ?
+ ACPI_DECODE_10 : ACPI_DECODE_16;
+ res->flags = acpi_dev_ioresource_flags(ext_addr->minimum,
+ ext_addr->maximum,
+ io_decode, window);
+ break;
+ case ACPI_BUS_NUMBER_RANGE:
+ res->flags = IORESOURCE_BUS;
+ break;
+ default:
+ res->flags = 0;
+ }
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(acpi_dev_resource_ext_address_space);
+
+/**
+ * acpi_dev_irq_flags - Determine IRQ resource flags.
+ * @triggering: Triggering type as provided by ACPI.
+ * @polarity: Interrupt polarity as provided by ACPI.
+ * @shareable: Whether or not the interrupt is shareable.
+ */
+unsigned long acpi_dev_irq_flags(u8 triggering, u8 polarity, u8 shareable)
+{
+ unsigned long flags;
+
+ if (triggering == ACPI_LEVEL_SENSITIVE)
+ flags = polarity == ACPI_ACTIVE_LOW ?
+ IORESOURCE_IRQ_LOWLEVEL : IORESOURCE_IRQ_HIGHLEVEL;
+ else
+ flags = polarity == ACPI_ACTIVE_LOW ?
+ IORESOURCE_IRQ_LOWEDGE : IORESOURCE_IRQ_HIGHEDGE;
+
+ if (shareable == ACPI_SHARED)
+ flags |= IORESOURCE_IRQ_SHAREABLE;
+
+ return flags | IORESOURCE_IRQ;
+}
+EXPORT_SYMBOL_GPL(acpi_dev_irq_flags);
+
+static void acpi_dev_irqresource_disabled(struct resource *res, u32 gsi)
+{
+ res->start = gsi;
+ res->end = gsi;
+ res->flags = IORESOURCE_IRQ | IORESOURCE_DISABLED;
+}
+
+static void acpi_dev_get_irqresource(struct resource *res, u32 gsi,
+ u8 triggering, u8 polarity, u8 shareable)
+{
+ int irq, p, t;
+
+ if (!valid_IRQ(gsi)) {
+ acpi_dev_irqresource_disabled(res, gsi);
+ return;
+ }
+
+ /*
+	 * In IO-APIC mode, use the overridden attributes. Two reasons:
+ * 1. BIOS bug in DSDT
+ * 2. BIOS uses IO-APIC mode Interrupt Source Override
+ */
+ if (!acpi_get_override_irq(gsi, &t, &p)) {
+ u8 trig = t ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE;
+ u8 pol = p ? ACPI_ACTIVE_LOW : ACPI_ACTIVE_HIGH;
+
+ if (triggering != trig || polarity != pol) {
+ pr_warning("ACPI: IRQ %d override to %s, %s\n", gsi,
+ t ? "edge" : "level", p ? "low" : "high");
+ triggering = trig;
+ polarity = pol;
+ }
+ }
+
+ res->flags = acpi_dev_irq_flags(triggering, polarity, shareable);
+ irq = acpi_register_gsi(NULL, gsi, triggering, polarity);
+ if (irq >= 0) {
+ res->start = irq;
+ res->end = irq;
+ } else {
+ acpi_dev_irqresource_disabled(res, gsi);
+ }
+}
+
+/**
+ * acpi_dev_resource_interrupt - Extract ACPI interrupt resource information.
+ * @ares: Input ACPI resource object.
+ * @index: Index into the array of GSIs represented by the resource.
+ * @res: Output generic resource object.
+ *
+ * Check if the given ACPI resource object represents an interrupt resource
+ * and @index does not exceed the resource's interrupt count (true is returned
+ * in that case regardless of the results of the other checks). If that's the
+ * case, register the GSI corresponding to @index from the array of interrupts
+ * represented by the resource and populate the generic resource object pointed
+ * to by @res accordingly. If the registration of the GSI is not successful,
+ * IORESOURCE_DISABLED will be set in that object's flags.
+ */
+bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index,
+ struct resource *res)
+{
+ struct acpi_resource_irq *irq;
+ struct acpi_resource_extended_irq *ext_irq;
+
+ switch (ares->type) {
+ case ACPI_RESOURCE_TYPE_IRQ:
+ /*
+ * Per spec, only one interrupt per descriptor is allowed in
+ * _CRS, but some firmware violates this, so parse them all.
+ */
+ irq = &ares->data.irq;
+ if (index >= irq->interrupt_count) {
+ acpi_dev_irqresource_disabled(res, 0);
+ return false;
+ }
+ acpi_dev_get_irqresource(res, irq->interrupts[index],
+ irq->triggering, irq->polarity,
+ irq->sharable);
+ break;
+ case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
+ ext_irq = &ares->data.extended_irq;
+ if (index >= ext_irq->interrupt_count) {
+ acpi_dev_irqresource_disabled(res, 0);
+ return false;
+ }
+ acpi_dev_get_irqresource(res, ext_irq->interrupts[index],
+ ext_irq->triggering, ext_irq->polarity,
+ ext_irq->sharable);
+ break;
+ default:
+ return false;
+ }
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(acpi_dev_resource_interrupt);
+
+/**
+ * acpi_dev_free_resource_list - Free a resource list from %acpi_dev_get_resources().
+ * @list: The head of the resource list to free.
+ */
+void acpi_dev_free_resource_list(struct list_head *list)
+{
+ struct resource_list_entry *rentry, *re;
+
+ list_for_each_entry_safe(rentry, re, list, node) {
+ list_del(&rentry->node);
+ kfree(rentry);
+ }
+}
+EXPORT_SYMBOL_GPL(acpi_dev_free_resource_list);
+
+struct res_proc_context {
+ struct list_head *list;
+ int (*preproc)(struct acpi_resource *, void *);
+ void *preproc_data;
+ int count;
+ int error;
+};
+
+static acpi_status acpi_dev_new_resource_entry(struct resource *r,
+ struct res_proc_context *c)
+{
+ struct resource_list_entry *rentry;
+
+ rentry = kmalloc(sizeof(*rentry), GFP_KERNEL);
+ if (!rentry) {
+ c->error = -ENOMEM;
+ return AE_NO_MEMORY;
+ }
+ rentry->res = *r;
+ list_add_tail(&rentry->node, c->list);
+ c->count++;
+ return AE_OK;
+}
+
+static acpi_status acpi_dev_process_resource(struct acpi_resource *ares,
+ void *context)
+{
+ struct res_proc_context *c = context;
+ struct resource r;
+ int i;
+
+ if (c->preproc) {
+ int ret;
+
+ ret = c->preproc(ares, c->preproc_data);
+ if (ret < 0) {
+ c->error = ret;
+ return AE_CTRL_TERMINATE;
+ } else if (ret > 0) {
+ return AE_OK;
+ }
+ }
+
+ memset(&r, 0, sizeof(r));
+
+ if (acpi_dev_resource_memory(ares, &r)
+ || acpi_dev_resource_io(ares, &r)
+ || acpi_dev_resource_address_space(ares, &r)
+ || acpi_dev_resource_ext_address_space(ares, &r))
+ return acpi_dev_new_resource_entry(&r, c);
+
+ for (i = 0; acpi_dev_resource_interrupt(ares, i, &r); i++) {
+ acpi_status status;
+
+ status = acpi_dev_new_resource_entry(&r, c);
+ if (ACPI_FAILURE(status))
+ return status;
+ }
+
+ return AE_OK;
+}
+
+/**
+ * acpi_dev_get_resources - Get current resources of a device.
+ * @adev: ACPI device node to get the resources for.
+ * @list: Head of the resultant list of resources (must be empty).
+ * @preproc: The caller's preprocessing routine.
+ * @preproc_data: Pointer passed to the caller's preprocessing routine.
+ *
+ * Evaluate the _CRS method for the given device node and process its output by
+ * (1) executing the @preproc() routine provided by the caller, passing the
+ * resource pointer and @preproc_data to it as arguments, for each ACPI resource
+ * returned and (2) converting all of the returned ACPI resources into struct
+ * resource objects if possible. If the return value of @preproc() in step (1)
+ * is different from 0, step (2) is not applied to the given ACPI resource and
+ * if that value is negative, the whole processing is aborted and that value is
+ * returned as the final error code.
+ *
+ * The resultant struct resource objects are put on the list pointed to by
+ * @list, which must be empty initially, as members of struct resource_list_entry
+ * objects. Callers of this routine should use %acpi_dev_free_resource_list() to
+ * free that list.
+ *
+ * The number of resources in the output list is returned on success, an error
+ * code reflecting the error condition is returned otherwise.
+ */
+int acpi_dev_get_resources(struct acpi_device *adev, struct list_head *list,
+ int (*preproc)(struct acpi_resource *, void *),
+ void *preproc_data)
+{
+ struct res_proc_context c;
+ acpi_handle not_used;
+ acpi_status status;
+
+ if (!adev || !adev->handle || !list_empty(list))
+ return -EINVAL;
+
+ status = acpi_get_handle(adev->handle, METHOD_NAME__CRS, &not_used);
+ if (ACPI_FAILURE(status))
+ return 0;
+
+ c.list = list;
+ c.preproc = preproc;
+ c.preproc_data = preproc_data;
+ c.count = 0;
+ c.error = 0;
+ status = acpi_walk_resources(adev->handle, METHOD_NAME__CRS,
+ acpi_dev_process_resource, &c);
+ if (ACPI_FAILURE(status)) {
+ acpi_dev_free_resource_list(list);
+ return c.error ? c.error : -EIO;
+ }
+
+ return c.count;
+}
+EXPORT_SYMBOL_GPL(acpi_dev_get_resources);
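A typical consumer of the new resource.c helpers evaluates _CRS once through acpi_dev_get_resources(), walks the resulting resource_list_entry objects, and frees the list when done. A hedged sketch (demo_dump_resources is illustrative; no preprocessing callback is used):

/* Sketch only: list every resource of an ACPI device node. */
static int demo_dump_resources(struct acpi_device *adev)
{
	struct resource_list_entry *rentry;
	struct list_head res_list;
	int count;

	INIT_LIST_HEAD(&res_list);
	count = acpi_dev_get_resources(adev, &res_list, NULL, NULL);
	if (count < 0)
		return count;	/* a missing _CRS yields 0, real failures are negative */

	list_for_each_entry(rentry, &res_list, node)
		dev_info(&adev->dev, "resource: %pR\n", &rentry->res);

	acpi_dev_free_resource_list(&res_list);
	return 0;
}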
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 1fcb8678665c..53502d1bbf26 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -29,6 +29,27 @@ extern struct acpi_device *acpi_root;
static const char *dummy_hid = "device";
+/*
+ * The following ACPI IDs are known to be suitable for representing as
+ * platform devices.
+ */
+static const struct acpi_device_id acpi_platform_device_ids[] = {
+
+ { "PNP0D40" },
+
+ /* Haswell LPSS devices */
+ { "INT33C0", 0 },
+ { "INT33C1", 0 },
+ { "INT33C2", 0 },
+ { "INT33C3", 0 },
+ { "INT33C4", 0 },
+ { "INT33C5", 0 },
+ { "INT33C6", 0 },
+ { "INT33C7", 0 },
+
+ { }
+};
+
static LIST_HEAD(acpi_device_list);
static LIST_HEAD(acpi_bus_id_list);
DEFINE_MUTEX(acpi_device_lock);
@@ -97,6 +118,7 @@ void acpi_bus_hot_remove_device(void *context)
struct acpi_eject_event *ej_event = (struct acpi_eject_event *) context;
struct acpi_device *device;
acpi_handle handle = ej_event->handle;
+ acpi_handle temp;
struct acpi_object_list arg_list;
union acpi_object arg;
acpi_status status = AE_OK;
@@ -117,13 +139,16 @@ void acpi_bus_hot_remove_device(void *context)
goto err_out;
}
+ /* device has been freed */
+ device = NULL;
+
/* power off device */
status = acpi_evaluate_object(handle, "_PS3", NULL, NULL);
if (ACPI_FAILURE(status) && status != AE_NOT_FOUND)
printk(KERN_WARNING PREFIX
"Power-off device failed\n");
- if (device->flags.lockable) {
+ if (ACPI_SUCCESS(acpi_get_handle(handle, "_LCK", &temp))) {
arg_list.count = 1;
arg_list.pointer = &arg;
arg.type = ACPI_TYPE_INTEGER;
@@ -157,6 +182,7 @@ err_out:
kfree(context);
return;
}
+EXPORT_SYMBOL(acpi_bus_hot_remove_device);
static ssize_t
acpi_eject_store(struct device *d, struct device_attribute *attr,
@@ -216,6 +242,25 @@ acpi_device_hid_show(struct device *dev, struct device_attribute *attr, char *bu
}
static DEVICE_ATTR(hid, 0444, acpi_device_hid_show, NULL);
+static ssize_t acpi_device_uid_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct acpi_device *acpi_dev = to_acpi_device(dev);
+
+ return sprintf(buf, "%s\n", acpi_dev->pnp.unique_id);
+}
+static DEVICE_ATTR(uid, 0444, acpi_device_uid_show, NULL);
+
+static ssize_t acpi_device_adr_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct acpi_device *acpi_dev = to_acpi_device(dev);
+
+ return sprintf(buf, "0x%08x\n",
+ (unsigned int)(acpi_dev->pnp.bus_address));
+}
+static DEVICE_ATTR(adr, 0444, acpi_device_adr_show, NULL);
+
static ssize_t
acpi_device_path_show(struct device *dev, struct device_attribute *attr, char *buf) {
struct acpi_device *acpi_dev = to_acpi_device(dev);
@@ -259,11 +304,21 @@ static ssize_t description_show(struct device *dev,
}
static DEVICE_ATTR(description, 0444, description_show, NULL);
+static ssize_t
+acpi_device_sun_show(struct device *dev, struct device_attribute *attr,
+ char *buf) {
+ struct acpi_device *acpi_dev = to_acpi_device(dev);
+
+ return sprintf(buf, "%lu\n", acpi_dev->pnp.sun);
+}
+static DEVICE_ATTR(sun, 0444, acpi_device_sun_show, NULL);
+
static int acpi_device_setup_files(struct acpi_device *dev)
{
struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
acpi_status status;
acpi_handle temp;
+ unsigned long long sun;
int result = 0;
/*
@@ -300,6 +355,21 @@ static int acpi_device_setup_files(struct acpi_device *dev)
goto end;
}
+ if (dev->flags.bus_address)
+ result = device_create_file(&dev->dev, &dev_attr_adr);
+ if (dev->pnp.unique_id)
+ result = device_create_file(&dev->dev, &dev_attr_uid);
+
+ status = acpi_evaluate_integer(dev->handle, "_SUN", NULL, &sun);
+ if (ACPI_SUCCESS(status)) {
+ dev->pnp.sun = (unsigned long)sun;
+ result = device_create_file(&dev->dev, &dev_attr_sun);
+ if (result)
+ goto end;
+ } else {
+ dev->pnp.sun = (unsigned long)-1;
+ }
+
/*
* If device has _EJ0, 'eject' file is created that is used to trigger
* hot-removal function from userland.
@@ -331,6 +401,14 @@ static void acpi_device_remove_files(struct acpi_device *dev)
if (ACPI_SUCCESS(status))
device_remove_file(&dev->dev, &dev_attr_eject);
+ status = acpi_get_handle(dev->handle, "_SUN", &temp);
+ if (ACPI_SUCCESS(status))
+ device_remove_file(&dev->dev, &dev_attr_sun);
+
+ if (dev->pnp.unique_id)
+ device_remove_file(&dev->dev, &dev_attr_uid);
+ if (dev->flags.bus_address)
+ device_remove_file(&dev->dev, &dev_attr_adr);
device_remove_file(&dev->dev, &dev_attr_modalias);
device_remove_file(&dev->dev, &dev_attr_hid);
if (dev->handle)
@@ -340,8 +418,8 @@ static void acpi_device_remove_files(struct acpi_device *dev)
ACPI Bus operations
-------------------------------------------------------------------------- */
-int acpi_match_device_ids(struct acpi_device *device,
- const struct acpi_device_id *ids)
+static const struct acpi_device_id *__acpi_match_device(
+ struct acpi_device *device, const struct acpi_device_id *ids)
{
const struct acpi_device_id *id;
struct acpi_hardware_id *hwid;
@@ -351,14 +429,44 @@ int acpi_match_device_ids(struct acpi_device *device,
* driver for it.
*/
if (!device->status.present)
- return -ENODEV;
+ return NULL;
for (id = ids; id->id[0]; id++)
list_for_each_entry(hwid, &device->pnp.ids, list)
if (!strcmp((char *) id->id, hwid->id))
- return 0;
+ return id;
- return -ENOENT;
+ return NULL;
+}
+
+/**
+ * acpi_match_device - Match a struct device against a given list of ACPI IDs
+ * @ids: Array of struct acpi_device_id object to match against.
+ * @dev: The device structure to match.
+ *
+ * Check if @dev has a valid ACPI handle and if there is a struct acpi_device
+ * object for that handle and use that object to match against a given list of
+ * device IDs.
+ *
+ * Return a pointer to the first matching ID on success or %NULL on failure.
+ */
+const struct acpi_device_id *acpi_match_device(const struct acpi_device_id *ids,
+ const struct device *dev)
+{
+ struct acpi_device *adev;
+
+ if (!ids || !ACPI_HANDLE(dev)
+ || ACPI_FAILURE(acpi_bus_get_device(ACPI_HANDLE(dev), &adev)))
+ return NULL;
+
+ return __acpi_match_device(adev, ids);
+}
+EXPORT_SYMBOL_GPL(acpi_match_device);
+
+int acpi_match_device_ids(struct acpi_device *device,
+ const struct acpi_device_id *ids)
+{
+ return __acpi_match_device(device, ids) ? 0 : -ENOENT;
}
EXPORT_SYMBOL(acpi_match_device_ids);
@@ -377,6 +485,7 @@ static void acpi_device_release(struct device *dev)
struct acpi_device *acpi_dev = to_acpi_device(dev);
acpi_free_ids(acpi_dev);
+ kfree(acpi_dev->pnp.unique_id);
kfree(acpi_dev);
}
@@ -859,8 +968,8 @@ acpi_bus_extract_wakeup_device_power_package(acpi_handle handle,
static void acpi_bus_set_run_wake_flags(struct acpi_device *device)
{
struct acpi_device_id button_device_ids[] = {
- {"PNP0C0D", 0},
{"PNP0C0C", 0},
+ {"PNP0C0D", 0},
{"PNP0C0E", 0},
{"", 0},
};
@@ -872,6 +981,11 @@ static void acpi_bus_set_run_wake_flags(struct acpi_device *device)
/* Power button, Lid switch always enable wakeup */
if (!acpi_match_device_ids(device, button_device_ids)) {
device->wakeup.flags.run_wake = 1;
+ if (!acpi_match_device_ids(device, &button_device_ids[1])) {
+ /* Do not use Lid/sleep button for S5 wakeup */
+ if (device->wakeup.sleep_state == ACPI_STATE_S5)
+ device->wakeup.sleep_state = ACPI_STATE_S4;
+ }
device_set_wakeup_capable(&device->dev, true);
return;
}
@@ -965,8 +1079,10 @@ static int acpi_bus_get_power_flags(struct acpi_device *device)
* D3hot is only valid if _PR3 present.
*/
if (ps->resources.count ||
- (ps->flags.explicit_set && i < ACPI_STATE_D3_HOT))
+ (ps->flags.explicit_set && i < ACPI_STATE_D3_HOT)) {
ps->flags.valid = 1;
+ ps->flags.os_accessible = 1;
+ }
ps->power = -1; /* Unknown - driver assigned */
ps->latency = -1; /* Unknown - driver assigned */
@@ -982,6 +1098,11 @@ static int acpi_bus_get_power_flags(struct acpi_device *device)
if (device->power.states[ACPI_STATE_D3_HOT].flags.explicit_set)
device->power.states[ACPI_STATE_D3_COLD].flags.explicit_set = 1;
+ /* Presence of _PS3 or _PRx means we can put the device into D3 cold */
+ if (device->power.states[ACPI_STATE_D3_HOT].flags.explicit_set ||
+ device->power.flags.power_resources)
+ device->power.states[ACPI_STATE_D3_COLD].flags.os_accessible = 1;
+
acpi_bus_init_power(device);
return 0;
@@ -1013,11 +1134,6 @@ static int acpi_bus_get_flags(struct acpi_device *device)
device->flags.ejectable = 1;
}
- /* Presence of _LCK indicates 'lockable' */
- status = acpi_get_handle(device->handle, "_LCK", &temp);
- if (ACPI_SUCCESS(status))
- device->flags.lockable = 1;
-
/* Power resources cannot be power manageable. */
if (device->device_type == ACPI_BUS_TYPE_POWER)
return 0;
@@ -1185,7 +1301,7 @@ static void acpi_device_set_id(struct acpi_device *device)
{
acpi_status status;
struct acpi_device_info *info;
- struct acpica_device_id_list *cid_list;
+ struct acpi_pnp_device_id_list *cid_list;
int i;
switch (device->device_type) {
@@ -1212,6 +1328,9 @@ static void acpi_device_set_id(struct acpi_device *device)
device->pnp.bus_address = info->address;
device->flags.bus_address = 1;
}
+ if (info->valid & ACPI_VALID_UID)
+ device->pnp.unique_id = kstrdup(info->unique_id.string,
+ GFP_KERNEL);
kfree(info);
@@ -1483,8 +1602,13 @@ static acpi_status acpi_bus_check_add(acpi_handle handle, u32 lvl,
*/
device = NULL;
acpi_bus_get_device(handle, &device);
- if (ops->acpi_op_add && !device)
+ if (ops->acpi_op_add && !device) {
acpi_add_single_object(&device, handle, type, sta, ops);
+ /* Is the device a known good platform device? */
+ if (device
+ && !acpi_match_device_ids(device, acpi_platform_device_ids))
+ acpi_create_platform_device(device);
+ }
if (!device)
return AE_CTRL_DEPTH;
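The new acpi_match_device() exported above lets bus and driver code match a struct device directly against an acpi_device_id table, mirroring acpi_match_device_ids() on the acpi_device side. A hedged sketch of a probe-time check; the table contents and demo_acpi_probe are illustrative:

/* Illustrative use of acpi_match_device() in a driver probe path. */
static const struct acpi_device_id demo_acpi_ids[] = {
	{ "INT33C0", 0 },	/* example entry only */
	{ }
};

static int demo_acpi_probe(struct platform_device *pdev)
{
	const struct acpi_device_id *id;

	id = acpi_match_device(demo_acpi_ids, &pdev->dev);
	if (!id)
		return -ENODEV;		/* not ACPI-enumerated, or no matching ID */

	dev_info(&pdev->dev, "matched ACPI ID %s\n", id->id);
	return 0;
}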
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index fdcdbb652915..2fcc67d34b11 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -18,7 +18,6 @@
#include <linux/reboot.h>
#include <linux/acpi.h>
#include <linux/module.h>
-#include <linux/pm_runtime.h>
#include <asm/io.h>
@@ -81,6 +80,12 @@ static int acpi_sleep_prepare(u32 acpi_state)
#ifdef CONFIG_ACPI_SLEEP
static u32 acpi_target_sleep_state = ACPI_STATE_S0;
+
+u32 acpi_target_system_state(void)
+{
+ return acpi_target_sleep_state;
+}
+
static bool pwr_btn_event_pending;
/*
@@ -98,6 +103,21 @@ void __init acpi_nvs_nosave(void)
}
/*
+ * The ACPI specification wants us to save NVS memory regions during hibernation
+ * but says nothing about saving NVS during S3. Not all versions of Windows
+ * save NVS on S3 suspend either, and it is clear that not all systems need
+ * NVS to be saved at S3 time. To improve suspend/resume time, allow the
+ * user to disable saving NVS on S3 if their system does not require it, but
+ * continue to save/restore NVS for S4 as specified.
+ */
+static bool nvs_nosave_s3;
+
+void __init acpi_nvs_nosave_s3(void)
+{
+ nvs_nosave_s3 = true;
+}
+
+/*
* ACPI 1.0 wants us to execute _PTS before suspending devices, so we allow the
* user to request that behavior by using the 'acpi_old_suspend_ordering'
* kernel command line option that causes the following variable to be set.
@@ -109,6 +129,180 @@ void __init acpi_old_suspend_ordering(void)
old_suspend_ordering = true;
}
+static int __init init_old_suspend_ordering(const struct dmi_system_id *d)
+{
+ acpi_old_suspend_ordering();
+ return 0;
+}
+
+static int __init init_nvs_nosave(const struct dmi_system_id *d)
+{
+ acpi_nvs_nosave();
+ return 0;
+}
+
+static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
+ {
+ .callback = init_old_suspend_ordering,
+ .ident = "Abit KN9 (nForce4 variant)",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "http://www.abit.com.tw/"),
+ DMI_MATCH(DMI_BOARD_NAME, "KN9 Series(NF-CK804)"),
+ },
+ },
+ {
+ .callback = init_old_suspend_ordering,
+ .ident = "HP xw4600 Workstation",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP xw4600 Workstation"),
+ },
+ },
+ {
+ .callback = init_old_suspend_ordering,
+ .ident = "Asus Pundit P1-AH2 (M2N8L motherboard)",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTek Computer INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "M2N8L"),
+ },
+ },
+ {
+ .callback = init_old_suspend_ordering,
+ .ident = "Panasonic CF51-2L",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR,
+ "Matsushita Electric Industrial Co.,Ltd."),
+ DMI_MATCH(DMI_BOARD_NAME, "CF51-2L"),
+ },
+ },
+ {
+ .callback = init_nvs_nosave,
+ .ident = "Sony Vaio VGN-FW21E",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW21E"),
+ },
+ },
+ {
+ .callback = init_nvs_nosave,
+ .ident = "Sony Vaio VPCEB17FX",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB17FX"),
+ },
+ },
+ {
+ .callback = init_nvs_nosave,
+ .ident = "Sony Vaio VGN-SR11M",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR11M"),
+ },
+ },
+ {
+ .callback = init_nvs_nosave,
+ .ident = "Everex StepNote Series",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Everex Systems, Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Everex StepNote Series"),
+ },
+ },
+ {
+ .callback = init_nvs_nosave,
+ .ident = "Sony Vaio VPCEB1Z1E",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB1Z1E"),
+ },
+ },
+ {
+ .callback = init_nvs_nosave,
+ .ident = "Sony Vaio VGN-NW130D",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "VGN-NW130D"),
+ },
+ },
+ {
+ .callback = init_nvs_nosave,
+ .ident = "Sony Vaio VPCCW29FX",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "VPCCW29FX"),
+ },
+ },
+ {
+ .callback = init_nvs_nosave,
+ .ident = "Averatec AV1020-ED2",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "AVERATEC"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "1000 Series"),
+ },
+ },
+ {
+ .callback = init_old_suspend_ordering,
+ .ident = "Asus A8N-SLI DELUXE",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "A8N-SLI DELUXE"),
+ },
+ },
+ {
+ .callback = init_old_suspend_ordering,
+ .ident = "Asus A8N-SLI Premium",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "A8N-SLI Premium"),
+ },
+ },
+ {
+ .callback = init_nvs_nosave,
+ .ident = "Sony Vaio VGN-SR26GN_P",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR26GN_P"),
+ },
+ },
+ {
+ .callback = init_nvs_nosave,
+ .ident = "Sony Vaio VPCEB1S1E",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB1S1E"),
+ },
+ },
+ {
+ .callback = init_nvs_nosave,
+ .ident = "Sony Vaio VGN-FW520F",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW520F"),
+ },
+ },
+ {
+ .callback = init_nvs_nosave,
+ .ident = "Asus K54C",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "K54C"),
+ },
+ },
+ {
+ .callback = init_nvs_nosave,
+ .ident = "Asus K54HR",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "K54HR"),
+ },
+ },
+ {},
+};
+
+static void acpi_sleep_dmi_check(void)
+{
+ dmi_check_system(acpisleep_dmi_table);
+}
+
/**
* acpi_pm_freeze - Disable the GPEs and suspend EC transactions.
*/
@@ -224,6 +418,7 @@ static void acpi_pm_end(void)
}
#else /* !CONFIG_ACPI_SLEEP */
#define acpi_target_sleep_state ACPI_STATE_S0
+static inline void acpi_sleep_dmi_check(void) {}
#endif /* CONFIG_ACPI_SLEEP */
#ifdef CONFIG_SUSPEND
@@ -243,7 +438,7 @@ static int acpi_suspend_begin(suspend_state_t pm_state)
u32 acpi_state = acpi_suspend_states[pm_state];
int error = 0;
- error = nvs_nosave ? 0 : suspend_nvs_alloc();
+ error = (nvs_nosave || nvs_nosave_s3) ? 0 : suspend_nvs_alloc();
if (error)
return error;
@@ -382,167 +577,6 @@ static const struct platform_suspend_ops acpi_suspend_ops_old = {
.end = acpi_pm_end,
.recover = acpi_pm_finish,
};
-
-static int __init init_old_suspend_ordering(const struct dmi_system_id *d)
-{
- old_suspend_ordering = true;
- return 0;
-}
-
-static int __init init_nvs_nosave(const struct dmi_system_id *d)
-{
- acpi_nvs_nosave();
- return 0;
-}
-
-static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
- {
- .callback = init_old_suspend_ordering,
- .ident = "Abit KN9 (nForce4 variant)",
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "http://www.abit.com.tw/"),
- DMI_MATCH(DMI_BOARD_NAME, "KN9 Series(NF-CK804)"),
- },
- },
- {
- .callback = init_old_suspend_ordering,
- .ident = "HP xw4600 Workstation",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP xw4600 Workstation"),
- },
- },
- {
- .callback = init_old_suspend_ordering,
- .ident = "Asus Pundit P1-AH2 (M2N8L motherboard)",
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTek Computer INC."),
- DMI_MATCH(DMI_BOARD_NAME, "M2N8L"),
- },
- },
- {
- .callback = init_old_suspend_ordering,
- .ident = "Panasonic CF51-2L",
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR,
- "Matsushita Electric Industrial Co.,Ltd."),
- DMI_MATCH(DMI_BOARD_NAME, "CF51-2L"),
- },
- },
- {
- .callback = init_nvs_nosave,
- .ident = "Sony Vaio VGN-FW21E",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
- DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW21E"),
- },
- },
- {
- .callback = init_nvs_nosave,
- .ident = "Sony Vaio VPCEB17FX",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
- DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB17FX"),
- },
- },
- {
- .callback = init_nvs_nosave,
- .ident = "Sony Vaio VGN-SR11M",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
- DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR11M"),
- },
- },
- {
- .callback = init_nvs_nosave,
- .ident = "Everex StepNote Series",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Everex Systems, Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Everex StepNote Series"),
- },
- },
- {
- .callback = init_nvs_nosave,
- .ident = "Sony Vaio VPCEB1Z1E",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
- DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB1Z1E"),
- },
- },
- {
- .callback = init_nvs_nosave,
- .ident = "Sony Vaio VGN-NW130D",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
- DMI_MATCH(DMI_PRODUCT_NAME, "VGN-NW130D"),
- },
- },
- {
- .callback = init_nvs_nosave,
- .ident = "Sony Vaio VPCCW29FX",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
- DMI_MATCH(DMI_PRODUCT_NAME, "VPCCW29FX"),
- },
- },
- {
- .callback = init_nvs_nosave,
- .ident = "Averatec AV1020-ED2",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "AVERATEC"),
- DMI_MATCH(DMI_PRODUCT_NAME, "1000 Series"),
- },
- },
- {
- .callback = init_old_suspend_ordering,
- .ident = "Asus A8N-SLI DELUXE",
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
- DMI_MATCH(DMI_BOARD_NAME, "A8N-SLI DELUXE"),
- },
- },
- {
- .callback = init_old_suspend_ordering,
- .ident = "Asus A8N-SLI Premium",
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
- DMI_MATCH(DMI_BOARD_NAME, "A8N-SLI Premium"),
- },
- },
- {
- .callback = init_nvs_nosave,
- .ident = "Sony Vaio VGN-SR26GN_P",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
- DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR26GN_P"),
- },
- },
- {
- .callback = init_nvs_nosave,
- .ident = "Sony Vaio VGN-FW520F",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
- DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW520F"),
- },
- },
- {
- .callback = init_nvs_nosave,
- .ident = "Asus K54C",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "K54C"),
- },
- },
- {
- .callback = init_nvs_nosave,
- .ident = "Asus K54HR",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "K54HR"),
- },
- },
- {},
-};
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATION
@@ -681,177 +715,6 @@ int acpi_suspend(u32 acpi_state)
return -EINVAL;
}
-#ifdef CONFIG_PM
-/**
- * acpi_pm_device_sleep_state - return preferred power state of ACPI device
- * in the system sleep state given by %acpi_target_sleep_state
- * @dev: device to examine; its driver model wakeup flags control
- * whether it should be able to wake up the system
- * @d_min_p: used to store the upper limit of allowed states range
- * @d_max_in: specify the lowest allowed states
- * Return value: preferred power state of the device on success, -ENODEV
- * (ie. if there's no 'struct acpi_device' for @dev) or -EINVAL on failure
- *
- * Find the lowest power (highest number) ACPI device power state that
- * device @dev can be in while the system is in the sleep state represented
- * by %acpi_target_sleep_state. If @wake is nonzero, the device should be
- * able to wake up the system from this sleep state. If @d_min_p is set,
- * the highest power (lowest number) device power state of @dev allowed
- * in this system sleep state is stored at the location pointed to by it.
- *
- * The caller must ensure that @dev is valid before using this function.
- * The caller is also responsible for figuring out if the device is
- * supposed to be able to wake up the system and passing this information
- * via @wake.
- */
-
-int acpi_pm_device_sleep_state(struct device *dev, int *d_min_p, int d_max_in)
-{
- acpi_handle handle = DEVICE_ACPI_HANDLE(dev);
- struct acpi_device *adev;
- char acpi_method[] = "_SxD";
- unsigned long long d_min, d_max;
-
- if (d_max_in < ACPI_STATE_D0 || d_max_in > ACPI_STATE_D3)
- return -EINVAL;
- if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &adev))) {
- printk(KERN_DEBUG "ACPI handle has no context!\n");
- return -ENODEV;
- }
-
- acpi_method[2] = '0' + acpi_target_sleep_state;
- /*
- * If the sleep state is S0, the lowest limit from ACPI is D3,
- * but if the device has _S0W, we will use the value from _S0W
- * as the lowest limit from ACPI. Finally, we will constrain
- * the lowest limit with the specified one.
- */
- d_min = ACPI_STATE_D0;
- d_max = ACPI_STATE_D3;
-
- /*
- * If present, _SxD methods return the minimum D-state (highest power
- * state) we can use for the corresponding S-states. Otherwise, the
- * minimum D-state is D0 (ACPI 3.x).
- *
- * NOTE: We rely on acpi_evaluate_integer() not clobbering the integer
- * provided -- that's our fault recovery, we ignore retval.
- */
- if (acpi_target_sleep_state > ACPI_STATE_S0)
- acpi_evaluate_integer(handle, acpi_method, NULL, &d_min);
-
- /*
- * If _PRW says we can wake up the system from the target sleep state,
- * the D-state returned by _SxD is sufficient for that (we assume a
- * wakeup-aware driver if wake is set). Still, if _SxW exists
- * (ACPI 3.x), it should return the maximum (lowest power) D-state that
- * can wake the system. _S0W may be valid, too.
- */
- if (acpi_target_sleep_state == ACPI_STATE_S0 ||
- (device_may_wakeup(dev) && adev->wakeup.flags.valid &&
- adev->wakeup.sleep_state >= acpi_target_sleep_state)) {
- acpi_status status;
-
- acpi_method[3] = 'W';
- status = acpi_evaluate_integer(handle, acpi_method, NULL,
- &d_max);
- if (ACPI_FAILURE(status)) {
- if (acpi_target_sleep_state != ACPI_STATE_S0 ||
- status != AE_NOT_FOUND)
- d_max = d_min;
- } else if (d_max < d_min) {
- /* Warn the user of the broken DSDT */
- printk(KERN_WARNING "ACPI: Wrong value from %s\n",
- acpi_method);
- /* Sanitize it */
- d_min = d_max;
- }
- }
-
- if (d_max_in < d_min)
- return -EINVAL;
- if (d_min_p)
- *d_min_p = d_min;
- /* constrain d_max with specified lowest limit (max number) */
- if (d_max > d_max_in) {
- for (d_max = d_max_in; d_max > d_min; d_max--) {
- if (adev->power.states[d_max].flags.valid)
- break;
- }
- }
- return d_max;
-}
-EXPORT_SYMBOL(acpi_pm_device_sleep_state);
-#endif /* CONFIG_PM */
-
-#ifdef CONFIG_PM_SLEEP
-/**
- * acpi_pm_device_run_wake - Enable/disable wake-up for given device.
- * @phys_dev: Device to enable/disable the platform to wake-up the system for.
- * @enable: Whether enable or disable the wake-up functionality.
- *
- * Find the ACPI device object corresponding to @pci_dev and try to
- * enable/disable the GPE associated with it.
- */
-int acpi_pm_device_run_wake(struct device *phys_dev, bool enable)
-{
- struct acpi_device *dev;
- acpi_handle handle;
-
- if (!device_run_wake(phys_dev))
- return -EINVAL;
-
- handle = DEVICE_ACPI_HANDLE(phys_dev);
- if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &dev))) {
- dev_dbg(phys_dev, "ACPI handle has no context in %s!\n",
- __func__);
- return -ENODEV;
- }
-
- if (enable) {
- acpi_enable_wakeup_device_power(dev, ACPI_STATE_S0);
- acpi_enable_gpe(dev->wakeup.gpe_device, dev->wakeup.gpe_number);
- } else {
- acpi_disable_gpe(dev->wakeup.gpe_device, dev->wakeup.gpe_number);
- acpi_disable_wakeup_device_power(dev);
- }
-
- return 0;
-}
-EXPORT_SYMBOL(acpi_pm_device_run_wake);
-
-/**
- * acpi_pm_device_sleep_wake - enable or disable the system wake-up
- * capability of given device
- * @dev: device to handle
- * @enable: 'true' - enable, 'false' - disable the wake-up capability
- */
-int acpi_pm_device_sleep_wake(struct device *dev, bool enable)
-{
- acpi_handle handle;
- struct acpi_device *adev;
- int error;
-
- if (!device_can_wakeup(dev))
- return -EINVAL;
-
- handle = DEVICE_ACPI_HANDLE(dev);
- if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &adev))) {
- dev_dbg(dev, "ACPI handle has no context in %s!\n", __func__);
- return -ENODEV;
- }
-
- error = enable ?
- acpi_enable_wakeup_device_power(adev, acpi_target_sleep_state) :
- acpi_disable_wakeup_device_power(adev);
- if (!error)
- dev_info(dev, "wake-up capability %s by ACPI\n",
- enable ? "enabled" : "disabled");
-
- return error;
-}
-#endif /* CONFIG_PM_SLEEP */
-
static void acpi_power_off_prepare(void)
{
/* Prepare to power off the system */
@@ -873,13 +736,13 @@ int __init acpi_sleep_init(void)
u8 type_a, type_b;
#ifdef CONFIG_SUSPEND
int i = 0;
-
- dmi_check_system(acpisleep_dmi_table);
#endif
if (acpi_disabled)
return 0;
+ acpi_sleep_dmi_check();
+
sleep_states[ACPI_STATE_S0] = 1;
printk(KERN_INFO PREFIX "(supports S0");
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index 7c3f98ba4afe..ea61ca9129cd 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -476,7 +476,7 @@ static void fixed_event_count(u32 event_number)
return;
}
-static void acpi_gbl_event_handler(u32 event_type, acpi_handle device,
+static void acpi_global_event_handler(u32 event_type, acpi_handle device,
u32 event_number, void *context)
{
if (event_type == ACPI_EVENT_TYPE_GPE)
@@ -638,7 +638,7 @@ void acpi_irq_stats_init(void)
if (all_counters == NULL)
goto fail;
- status = acpi_install_global_event_handler(acpi_gbl_event_handler, NULL);
+ status = acpi_install_global_event_handler(acpi_global_event_handler, NULL);
if (ACPI_FAILURE(status))
goto fail;
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 804204d41999..6e8cc16b54c1 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -984,6 +984,38 @@ static void acpi_thermal_notify(struct acpi_device *device, u32 event)
}
}
+/*
+ * On some platforms, the AML code depends on the order in which
+ * _TMP and _CRT/_HOT/_PSV/_ACx are evaluated.
+ * 1. On HP Pavilion G4-1016tx, _TMP must be invoked after
+ *    _CRT/_HOT/_PSV/_ACx, or else the system powers off.
+ * 2. On HP Compaq 6715b/6715s, _PSV returns 0 if _TMP has
+ *    never been evaluated.
+ *
+ * As this dependency is totally transparent to the OS, evaluate
+ * all of them once, in the order _CRT/_HOT/_PSV/_ACx, _TMP,
+ * before they are actually used.
+ */
+static void acpi_thermal_aml_dependency_fix(struct acpi_thermal *tz)
+{
+ acpi_handle handle = tz->device->handle;
+ unsigned long long value;
+ int i;
+
+ acpi_evaluate_integer(handle, "_CRT", NULL, &value);
+ acpi_evaluate_integer(handle, "_HOT", NULL, &value);
+ acpi_evaluate_integer(handle, "_PSV", NULL, &value);
+ for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE; i++) {
+ char name[5] = { '_', 'A', 'C', ('0' + i), '\0' };
+ acpi_status status;
+
+ status = acpi_evaluate_integer(handle, name, NULL, &value);
+ if (status == AE_NOT_FOUND)
+ break;
+ }
+ acpi_evaluate_integer(handle, "_TMP", NULL, &value);
+}
+
static int acpi_thermal_get_info(struct acpi_thermal *tz)
{
int result = 0;
@@ -992,6 +1024,8 @@ static int acpi_thermal_get_info(struct acpi_thermal *tz)
if (!tz)
return -EINVAL;
+ acpi_thermal_aml_dependency_fix(tz);
+
/* Get trip points [_CRT, _PSV, etc.] (required) */
result = acpi_thermal_get_trip_points(tz);
if (result)
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index 462f7e300363..744371304313 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -28,6 +28,8 @@
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/types.h>
+#include <linux/hardirq.h>
+#include <linux/acpi.h>
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
@@ -457,3 +459,39 @@ acpi_evaluate_hotplug_ost(acpi_handle handle, u32 source_event,
#endif
}
EXPORT_SYMBOL(acpi_evaluate_hotplug_ost);
+
+/**
+ * acpi_handle_printk - Print a message with ACPI prefix and object path
+ *
+ * This function is called through acpi_handle_<level> macros and prints
+ * a message with ACPI prefix and object path. This function acquires
+ * the global namespace mutex to obtain an object path. In interrupt
+ * context, it shows the object path as <n/a>.
+ */
+void
+acpi_handle_printk(const char *level, acpi_handle handle, const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+ struct acpi_buffer buffer = {
+ .length = ACPI_ALLOCATE_BUFFER,
+ .pointer = NULL
+ };
+ const char *path;
+
+ va_start(args, fmt);
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ if (in_interrupt() ||
+ acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer) != AE_OK)
+ path = "<n/a>";
+ else
+ path = buffer.pointer;
+
+ printk("%sACPI: %s: %pV", level, path, &vaf);
+
+ va_end(args);
+ kfree(buffer.pointer);
+}
+EXPORT_SYMBOL(acpi_handle_printk);
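For reference, a minimal usage sketch of the acpi_handle_<level>() wrappers that end up in acpi_handle_printk(); the handle, message and function name below are illustrative only and not part of this patch:

#include <linux/acpi.h>

/* Sketch only: "handle" is assumed to come from e.g. DEVICE_ACPI_HANDLE(dev). */
static void example_report(acpi_handle handle, int node)
{
	/* Prints "<level>ACPI: \_SB_.XXXX: ..." with the object path resolved,
	 * or "<n/a>" when called from interrupt context. */
	acpi_handle_err(handle, "hot add of node %d failed\n", node);
	acpi_handle_info(handle, "device enumerated\n");
}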
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 0230cb6cbb3a..ac9a69cd45f5 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -389,6 +389,12 @@ static int __init video_set_bqc_offset(const struct dmi_system_id *d)
return 0;
}
+static int video_ignore_initial_backlight(const struct dmi_system_id *d)
+{
+ use_bios_initial_backlight = 0;
+ return 0;
+}
+
static struct dmi_system_id video_dmi_table[] __initdata = {
/*
* Broken _BQC workaround http://bugzilla.kernel.org/show_bug.cgi?id=13121
@@ -433,6 +439,14 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 7720"),
},
},
+ {
+ .callback = video_ignore_initial_backlight,
+ .ident = "HP Folio 13-2000",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Folio 13 - 2000 Notebook PC"),
+ },
+ },
{}
};
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index b728880ef10e..4ac2593234e7 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -156,6 +156,14 @@ static struct dmi_system_id video_detect_dmi_table[] = {
DMI_MATCH(DMI_BOARD_NAME, "X360"),
},
},
+ {
+ .callback = video_detect_force_vendor,
+ .ident = "Asus UL30VT",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "UL30VT"),
+ },
+ },
{ },
};
diff --git a/drivers/base/core.c b/drivers/base/core.c
index abea76c36a4b..150a41580fad 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -1180,7 +1180,6 @@ void device_del(struct device *dev)
if (dev->bus)
blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
BUS_NOTIFY_DEL_DEVICE, dev);
- device_pm_remove(dev);
dpm_sysfs_remove(dev);
if (parent)
klist_del(&dev->p->knode_parent);
@@ -1205,6 +1204,7 @@ void device_del(struct device *dev)
device_remove_file(dev, &uevent_attr);
device_remove_attrs(dev);
bus_remove_device(dev);
+ device_pm_remove(dev);
driver_deferred_probe_del(dev);
/* Notify the platform of the removal, in case they
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 72c776f2a1f5..b2ee3bcd5a41 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -21,6 +21,7 @@
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/idr.h>
+#include <linux/acpi.h>
#include "base.h"
#include "power/power.h"
@@ -436,6 +437,7 @@ struct platform_device *platform_device_register_full(
goto err_alloc;
pdev->dev.parent = pdevinfo->parent;
+ ACPI_HANDLE_SET(&pdev->dev, pdevinfo->acpi_node.handle);
if (pdevinfo->dma_mask) {
/*
@@ -466,6 +468,7 @@ struct platform_device *platform_device_register_full(
ret = platform_device_add(pdev);
if (ret) {
err:
+ ACPI_HANDLE_SET(&pdev->dev, NULL);
kfree(pdev->dev.dma_mask);
err_alloc:
@@ -481,8 +484,16 @@ static int platform_drv_probe(struct device *_dev)
{
struct platform_driver *drv = to_platform_driver(_dev->driver);
struct platform_device *dev = to_platform_device(_dev);
+ int ret;
- return drv->probe(dev);
+ if (ACPI_HANDLE(_dev))
+ acpi_dev_pm_attach(_dev, true);
+
+ ret = drv->probe(dev);
+ if (ret && ACPI_HANDLE(_dev))
+ acpi_dev_pm_detach(_dev, true);
+
+ return ret;
}
static int platform_drv_probe_fail(struct device *_dev)
@@ -494,8 +505,13 @@ static int platform_drv_remove(struct device *_dev)
{
struct platform_driver *drv = to_platform_driver(_dev->driver);
struct platform_device *dev = to_platform_device(_dev);
+ int ret;
+
+ ret = drv->remove(dev);
+ if (ACPI_HANDLE(_dev))
+ acpi_dev_pm_detach(_dev, true);
- return drv->remove(dev);
+ return ret;
}
static void platform_drv_shutdown(struct device *_dev)
@@ -504,6 +520,8 @@ static void platform_drv_shutdown(struct device *_dev)
struct platform_device *dev = to_platform_device(_dev);
drv->shutdown(dev);
+ if (ACPI_HANDLE(_dev))
+ acpi_dev_pm_detach(_dev, true);
}
/**
@@ -709,6 +727,10 @@ static int platform_match(struct device *dev, struct device_driver *drv)
if (of_driver_match_device(dev, drv))
return 1;
+ /* Then try ACPI style match */
+ if (acpi_driver_match_device(dev, drv))
+ return 1;
+
/* Then try to match against the id table */
if (pdrv->id_table)
return platform_match_id(pdrv->id_table, pdev) != NULL;
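With acpi_driver_match_device() wired into platform_match() above, an ACPI-enumerated platform device can now bind by its _HID. A hedged sketch of a driver using the new match path; the "ABCD0001" ID and all names are made up for illustration:

#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static const struct acpi_device_id foo_acpi_ids[] = {
	{ "ABCD0001", 0 },	/* hypothetical ACPI _HID */
	{ }
};
MODULE_DEVICE_TABLE(acpi, foo_acpi_ids);

static int foo_probe(struct platform_device *pdev)
{
	/* ACPI_HANDLE(&pdev->dev) is set by the ACPI enumeration code. */
	return 0;
}

static struct platform_driver foo_driver = {
	.probe = foo_probe,
	.driver = {
		.name = "foo",
		.owner = THIS_MODULE,
		.acpi_match_table = ACPI_PTR(foo_acpi_ids),
	},
};
module_platform_driver(foo_driver);
MODULE_LICENSE("GPL");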
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index eb78e9640c4a..9d8fde709390 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -99,7 +99,7 @@ static void __pm_clk_remove(struct pm_clock_entry *ce)
if (ce->status < PCE_STATUS_ERROR) {
if (ce->status == PCE_STATUS_ENABLED)
- clk_disable(ce->clk);
+ clk_disable_unprepare(ce->clk);
if (ce->status >= PCE_STATUS_ACQUIRED)
clk_put(ce->clk);
@@ -396,7 +396,7 @@ static void enable_clock(struct device *dev, const char *con_id)
clk = clk_get(dev, con_id);
if (!IS_ERR(clk)) {
- clk_enable(clk);
+ clk_prepare_enable(clk);
clk_put(clk);
dev_info(dev, "Runtime PM disabled, clock forced on.\n");
}
@@ -413,7 +413,7 @@ static void disable_clock(struct device *dev, const char *con_id)
clk = clk_get(dev, con_id);
if (!IS_ERR(clk)) {
- clk_disable(clk);
+ clk_disable_unprepare(clk);
clk_put(clk);
dev_info(dev, "Runtime PM disabled, clock forced off.\n");
}
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 96b71b6536d6..acc3a8ded29d 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -470,10 +470,19 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
return -EBUSY;
not_suspended = 0;
- list_for_each_entry(pdd, &genpd->dev_list, list_node)
+ list_for_each_entry(pdd, &genpd->dev_list, list_node) {
+ enum pm_qos_flags_status stat;
+
+ stat = dev_pm_qos_flags(pdd->dev,
+ PM_QOS_FLAG_NO_POWER_OFF
+ | PM_QOS_FLAG_REMOTE_WAKEUP);
+ if (stat > PM_QOS_FLAGS_NONE)
+ return -EBUSY;
+
if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev)
|| pdd->dev->power.irq_safe))
not_suspended++;
+ }
if (not_suspended > genpd->in_progress)
return -EBUSY;
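The check above is what gives PM_QOS_FLAG_NO_POWER_OFF its effect for generic PM domains. A minimal, hedged sketch of a kernel-side requester (names are illustrative); per the kerneldoc added later in this patch, the device should not be RPM_SUSPENDED when a DEV_PM_QOS_FLAGS request is added:

#include <linux/pm_qos.h>

static struct dev_pm_qos_request foo_no_poweroff_req;

/* While this request is active, pm_genpd_poweroff() above returns -EBUSY
 * for the domain containing "dev".  The caller is assumed to keep the
 * device runtime-active around the add, as the kerneldoc requires. */
static int foo_block_domain_poweroff(struct device *dev)
{
	int ret;

	ret = dev_pm_qos_add_request(dev, &foo_no_poweroff_req,
				     DEV_PM_QOS_FLAGS,
				     PM_QOS_FLAG_NO_POWER_OFF);
	return ret < 0 ? ret : 0;
}

static void foo_allow_domain_poweroff(void)
{
	dev_pm_qos_remove_request(&foo_no_poweroff_req);
}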
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
index d9468642fc41..50b2831e027d 100644
--- a/drivers/base/power/opp.c
+++ b/drivers/base/power/opp.c
@@ -23,6 +23,7 @@
#include <linux/rcupdate.h>
#include <linux/opp.h>
#include <linux/of.h>
+#include <linux/export.h>
/*
* Internal data structure organization with the OPP layer library is as
@@ -65,6 +66,7 @@ struct opp {
unsigned long u_volt;
struct device_opp *dev_opp;
+ struct rcu_head head;
};
/**
@@ -160,6 +162,7 @@ unsigned long opp_get_voltage(struct opp *opp)
return v;
}
+EXPORT_SYMBOL(opp_get_voltage);
/**
* opp_get_freq() - Gets the frequency corresponding to an available opp
@@ -189,6 +192,7 @@ unsigned long opp_get_freq(struct opp *opp)
return f;
}
+EXPORT_SYMBOL(opp_get_freq);
/**
* opp_get_opp_count() - Get number of opps available in the opp list
@@ -221,6 +225,7 @@ int opp_get_opp_count(struct device *dev)
return count;
}
+EXPORT_SYMBOL(opp_get_opp_count);
/**
* opp_find_freq_exact() - search for an exact frequency
@@ -230,7 +235,10 @@ int opp_get_opp_count(struct device *dev)
*
* Searches for exact match in the opp list and returns pointer to the matching
* opp if found, else returns ERR_PTR in case of error and should be handled
- * using IS_ERR.
+ * using IS_ERR. Error return values can be:
+ * EINVAL: for bad pointer
+ * ERANGE: no match found for search
+ * ENODEV: if device not found in list of registered devices
*
* Note: available is a modifier for the search. if available=true, then the
* match is for exact matching frequency and is available in the stored OPP
@@ -249,7 +257,7 @@ struct opp *opp_find_freq_exact(struct device *dev, unsigned long freq,
bool available)
{
struct device_opp *dev_opp;
- struct opp *temp_opp, *opp = ERR_PTR(-ENODEV);
+ struct opp *temp_opp, *opp = ERR_PTR(-ERANGE);
dev_opp = find_device_opp(dev);
if (IS_ERR(dev_opp)) {
@@ -268,6 +276,7 @@ struct opp *opp_find_freq_exact(struct device *dev, unsigned long freq,
return opp;
}
+EXPORT_SYMBOL(opp_find_freq_exact);
/**
* opp_find_freq_ceil() - Search for an rounded ceil freq
@@ -278,7 +287,11 @@ struct opp *opp_find_freq_exact(struct device *dev, unsigned long freq,
* for a device.
*
* Returns matching *opp and refreshes *freq accordingly, else returns
- * ERR_PTR in case of error and should be handled using IS_ERR.
+ * ERR_PTR in case of error and should be handled using IS_ERR. Error return
+ * values can be:
+ * EINVAL: for bad pointer
+ * ERANGE: no match found for search
+ * ENODEV: if device not found in list of registered devices
*
* Locking: This function must be called under rcu_read_lock(). opp is a rcu
* protected pointer. The reason for the same is that the opp pointer which is
@@ -289,7 +302,7 @@ struct opp *opp_find_freq_exact(struct device *dev, unsigned long freq,
struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq)
{
struct device_opp *dev_opp;
- struct opp *temp_opp, *opp = ERR_PTR(-ENODEV);
+ struct opp *temp_opp, *opp = ERR_PTR(-ERANGE);
if (!dev || !freq) {
dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
@@ -298,7 +311,7 @@ struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq)
dev_opp = find_device_opp(dev);
if (IS_ERR(dev_opp))
- return opp;
+ return ERR_CAST(dev_opp);
list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
if (temp_opp->available && temp_opp->rate >= *freq) {
@@ -310,6 +323,7 @@ struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq)
return opp;
}
+EXPORT_SYMBOL(opp_find_freq_ceil);
/**
* opp_find_freq_floor() - Search for a rounded floor freq
@@ -320,7 +334,11 @@ struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq)
* for a device.
*
* Returns matching *opp and refreshes *freq accordingly, else returns
- * ERR_PTR in case of error and should be handled using IS_ERR.
+ * ERR_PTR in case of error and should be handled using IS_ERR. Error return
+ * values can be:
+ * EINVAL: for bad pointer
+ * ERANGE: no match found for search
+ * ENODEV: if device not found in list of registered devices
*
* Locking: This function must be called under rcu_read_lock(). opp is a rcu
* protected pointer. The reason for the same is that the opp pointer which is
@@ -331,7 +349,7 @@ struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq)
struct opp *opp_find_freq_floor(struct device *dev, unsigned long *freq)
{
struct device_opp *dev_opp;
- struct opp *temp_opp, *opp = ERR_PTR(-ENODEV);
+ struct opp *temp_opp, *opp = ERR_PTR(-ERANGE);
if (!dev || !freq) {
dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
@@ -340,7 +358,7 @@ struct opp *opp_find_freq_floor(struct device *dev, unsigned long *freq)
dev_opp = find_device_opp(dev);
if (IS_ERR(dev_opp))
- return opp;
+ return ERR_CAST(dev_opp);
list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
if (temp_opp->available) {
@@ -356,6 +374,7 @@ struct opp *opp_find_freq_floor(struct device *dev, unsigned long *freq)
return opp;
}
+EXPORT_SYMBOL(opp_find_freq_floor);
/**
* opp_add() - Add an OPP table from a table definitions
@@ -512,7 +531,7 @@ static int opp_set_availability(struct device *dev, unsigned long freq,
list_replace_rcu(&opp->node, &new_opp->node);
mutex_unlock(&dev_opp_list_lock);
- synchronize_rcu();
+ kfree_rcu(opp, head);
/* Notify the change of the OPP availability */
if (availability_req)
@@ -522,13 +541,10 @@ static int opp_set_availability(struct device *dev, unsigned long freq,
srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_DISABLE,
new_opp);
- /* clean up old opp */
- new_opp = opp;
- goto out;
+ return 0;
unlock:
mutex_unlock(&dev_opp_list_lock);
-out:
kfree(new_opp);
return r;
}
@@ -552,6 +568,7 @@ int opp_enable(struct device *dev, unsigned long freq)
{
return opp_set_availability(dev, freq, true);
}
+EXPORT_SYMBOL(opp_enable);
/**
* opp_disable() - Disable a specific OPP
@@ -573,6 +590,7 @@ int opp_disable(struct device *dev, unsigned long freq)
{
return opp_set_availability(dev, freq, false);
}
+EXPORT_SYMBOL(opp_disable);
#ifdef CONFIG_CPU_FREQ
/**
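Since the OPP lookups exported above must run under rcu_read_lock() and the returned pointer is only valid inside that section, here is a minimal usage sketch; the device, frequency and function name are assumptions:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/opp.h>
#include <linux/rcupdate.h>

static int foo_pick_rate(struct device *dev, unsigned long *freq)
{
	struct opp *opp;
	unsigned long u_volt;

	rcu_read_lock();
	opp = opp_find_freq_ceil(dev, freq);	/* may round *freq up */
	if (IS_ERR(opp)) {
		rcu_read_unlock();
		return PTR_ERR(opp);	/* -EINVAL, -ERANGE or -ENODEV */
	}
	u_volt = opp_get_voltage(opp);	/* only valid under rcu_read_lock() */
	rcu_read_unlock();

	dev_dbg(dev, "selected %lu Hz at %lu uV\n", *freq, u_volt);
	return 0;
}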
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
index 0dbfdf4419af..b16686a0a5a2 100644
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -93,8 +93,10 @@ extern void dpm_sysfs_remove(struct device *dev);
extern void rpm_sysfs_remove(struct device *dev);
extern int wakeup_sysfs_add(struct device *dev);
extern void wakeup_sysfs_remove(struct device *dev);
-extern int pm_qos_sysfs_add(struct device *dev);
-extern void pm_qos_sysfs_remove(struct device *dev);
+extern int pm_qos_sysfs_add_latency(struct device *dev);
+extern void pm_qos_sysfs_remove_latency(struct device *dev);
+extern int pm_qos_sysfs_add_flags(struct device *dev);
+extern void pm_qos_sysfs_remove_flags(struct device *dev);
#else /* CONFIG_PM */
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index fbbd4ed2edf2..ff46387f5308 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -40,6 +40,7 @@
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/export.h>
+#include <linux/pm_runtime.h>
#include "power.h"
@@ -48,6 +49,50 @@ static DEFINE_MUTEX(dev_pm_qos_mtx);
static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers);
/**
+ * __dev_pm_qos_flags - Check PM QoS flags for a given device.
+ * @dev: Device to check the PM QoS flags for.
+ * @mask: Flags to check against.
+ *
+ * This routine must be called with dev->power.lock held.
+ */
+enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask)
+{
+ struct dev_pm_qos *qos = dev->power.qos;
+ struct pm_qos_flags *pqf;
+ s32 val;
+
+ if (!qos)
+ return PM_QOS_FLAGS_UNDEFINED;
+
+ pqf = &qos->flags;
+ if (list_empty(&pqf->list))
+ return PM_QOS_FLAGS_UNDEFINED;
+
+ val = pqf->effective_flags & mask;
+ if (val)
+ return (val == mask) ? PM_QOS_FLAGS_ALL : PM_QOS_FLAGS_SOME;
+
+ return PM_QOS_FLAGS_NONE;
+}
+
+/**
+ * dev_pm_qos_flags - Check PM QoS flags for a given device (locked).
+ * @dev: Device to check the PM QoS flags for.
+ * @mask: Flags to check against.
+ */
+enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask)
+{
+ unsigned long irqflags;
+ enum pm_qos_flags_status ret;
+
+ spin_lock_irqsave(&dev->power.lock, irqflags);
+ ret = __dev_pm_qos_flags(dev, mask);
+ spin_unlock_irqrestore(&dev->power.lock, irqflags);
+
+ return ret;
+}
+
+/**
* __dev_pm_qos_read_value - Get PM QoS constraint for a given device.
* @dev: Device to get the PM QoS constraint value for.
*
@@ -55,9 +100,7 @@ static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers);
*/
s32 __dev_pm_qos_read_value(struct device *dev)
{
- struct pm_qos_constraints *c = dev->power.constraints;
-
- return c ? pm_qos_read_value(c) : 0;
+ return dev->power.qos ? pm_qos_read_value(&dev->power.qos->latency) : 0;
}
/**
@@ -76,30 +119,39 @@ s32 dev_pm_qos_read_value(struct device *dev)
return ret;
}
-/*
- * apply_constraint
- * @req: constraint request to apply
- * @action: action to perform add/update/remove, of type enum pm_qos_req_action
- * @value: defines the qos request
+/**
+ * apply_constraint - Add/modify/remove device PM QoS request.
+ * @req: Constraint request to apply
+ * @action: Action to perform (add/update/remove).
+ * @value: Value to assign to the QoS request.
*
* Internal function to update the constraints list using the PM QoS core
* code and if needed call the per-device and the global notification
* callbacks
*/
static int apply_constraint(struct dev_pm_qos_request *req,
- enum pm_qos_req_action action, int value)
+ enum pm_qos_req_action action, s32 value)
{
- int ret, curr_value;
-
- ret = pm_qos_update_target(req->dev->power.constraints,
- &req->node, action, value);
+ struct dev_pm_qos *qos = req->dev->power.qos;
+ int ret;
- if (ret) {
- /* Call the global callbacks if needed */
- curr_value = pm_qos_read_value(req->dev->power.constraints);
- blocking_notifier_call_chain(&dev_pm_notifiers,
- (unsigned long)curr_value,
- req);
+ switch(req->type) {
+ case DEV_PM_QOS_LATENCY:
+ ret = pm_qos_update_target(&qos->latency, &req->data.pnode,
+ action, value);
+ if (ret) {
+ value = pm_qos_read_value(&qos->latency);
+ blocking_notifier_call_chain(&dev_pm_notifiers,
+ (unsigned long)value,
+ req);
+ }
+ break;
+ case DEV_PM_QOS_FLAGS:
+ ret = pm_qos_update_flags(&qos->flags, &req->data.flr,
+ action, value);
+ break;
+ default:
+ ret = -EINVAL;
}
return ret;
@@ -114,28 +166,32 @@ static int apply_constraint(struct dev_pm_qos_request *req,
*/
static int dev_pm_qos_constraints_allocate(struct device *dev)
{
+ struct dev_pm_qos *qos;
struct pm_qos_constraints *c;
struct blocking_notifier_head *n;
- c = kzalloc(sizeof(*c), GFP_KERNEL);
- if (!c)
+ qos = kzalloc(sizeof(*qos), GFP_KERNEL);
+ if (!qos)
return -ENOMEM;
n = kzalloc(sizeof(*n), GFP_KERNEL);
if (!n) {
- kfree(c);
+ kfree(qos);
return -ENOMEM;
}
BLOCKING_INIT_NOTIFIER_HEAD(n);
+ c = &qos->latency;
plist_head_init(&c->list);
c->target_value = PM_QOS_DEV_LAT_DEFAULT_VALUE;
c->default_value = PM_QOS_DEV_LAT_DEFAULT_VALUE;
c->type = PM_QOS_MIN;
c->notifiers = n;
+ INIT_LIST_HEAD(&qos->flags.list);
+
spin_lock_irq(&dev->power.lock);
- dev->power.constraints = c;
+ dev->power.qos = qos;
spin_unlock_irq(&dev->power.lock);
return 0;
@@ -151,7 +207,7 @@ static int dev_pm_qos_constraints_allocate(struct device *dev)
void dev_pm_qos_constraints_init(struct device *dev)
{
mutex_lock(&dev_pm_qos_mtx);
- dev->power.constraints = NULL;
+ dev->power.qos = NULL;
dev->power.power_state = PMSG_ON;
mutex_unlock(&dev_pm_qos_mtx);
}
@@ -164,24 +220,28 @@ void dev_pm_qos_constraints_init(struct device *dev)
*/
void dev_pm_qos_constraints_destroy(struct device *dev)
{
+ struct dev_pm_qos *qos;
struct dev_pm_qos_request *req, *tmp;
struct pm_qos_constraints *c;
+ struct pm_qos_flags *f;
/*
- * If the device's PM QoS resume latency limit has been exposed to user
- * space, it has to be hidden at this point.
+ * If the device's PM QoS resume latency limit or PM QoS flags have been
+ * exposed to user space, they have to be hidden at this point.
*/
dev_pm_qos_hide_latency_limit(dev);
+ dev_pm_qos_hide_flags(dev);
mutex_lock(&dev_pm_qos_mtx);
dev->power.power_state = PMSG_INVALID;
- c = dev->power.constraints;
- if (!c)
+ qos = dev->power.qos;
+ if (!qos)
goto out;
- /* Flush the constraints list for the device */
- plist_for_each_entry_safe(req, tmp, &c->list, node) {
+ /* Flush the constraints lists for the device. */
+ c = &qos->latency;
+ plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
/*
* Update constraints list and call the notification
* callbacks if needed
@@ -189,13 +249,18 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
memset(req, 0, sizeof(*req));
}
+ f = &qos->flags;
+ list_for_each_entry_safe(req, tmp, &f->list, data.flr.node) {
+ apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
+ memset(req, 0, sizeof(*req));
+ }
spin_lock_irq(&dev->power.lock);
- dev->power.constraints = NULL;
+ dev->power.qos = NULL;
spin_unlock_irq(&dev->power.lock);
kfree(c->notifiers);
- kfree(c);
+ kfree(qos);
out:
mutex_unlock(&dev_pm_qos_mtx);
@@ -205,6 +270,7 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
* dev_pm_qos_add_request - inserts new qos request into the list
* @dev: target device for the constraint
* @req: pointer to a preallocated handle
+ * @type: type of the request
* @value: defines the qos request
*
* This function inserts a new entry in the device constraints list of
@@ -218,9 +284,12 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
* -EINVAL in case of wrong parameters, -ENOMEM if there's not enough memory
* to allocate for data structures, -ENODEV if the device has just been removed
* from the system.
+ *
+ * Callers should ensure that the target device is not RPM_SUSPENDED before
+ * using this function for requests of type DEV_PM_QOS_FLAGS.
*/
int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
- s32 value)
+ enum dev_pm_qos_req_type type, s32 value)
{
int ret = 0;
@@ -235,7 +304,7 @@ int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
mutex_lock(&dev_pm_qos_mtx);
- if (!dev->power.constraints) {
+ if (!dev->power.qos) {
if (dev->power.power_state.event == PM_EVENT_INVALID) {
/* The device has been removed from the system. */
req->dev = NULL;
@@ -251,8 +320,10 @@ int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
}
}
- if (!ret)
+ if (!ret) {
+ req->type = type;
ret = apply_constraint(req, PM_QOS_ADD_REQ, value);
+ }
out:
mutex_unlock(&dev_pm_qos_mtx);
@@ -262,6 +333,37 @@ int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
EXPORT_SYMBOL_GPL(dev_pm_qos_add_request);
/**
+ * __dev_pm_qos_update_request - Modify an existing device PM QoS request.
+ * @req : PM QoS request to modify.
+ * @new_value: New value to request.
+ */
+static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req,
+ s32 new_value)
+{
+ s32 curr_value;
+ int ret = 0;
+
+ if (!req->dev->power.qos)
+ return -ENODEV;
+
+ switch(req->type) {
+ case DEV_PM_QOS_LATENCY:
+ curr_value = req->data.pnode.prio;
+ break;
+ case DEV_PM_QOS_FLAGS:
+ curr_value = req->data.flr.flags;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (curr_value != new_value)
+ ret = apply_constraint(req, PM_QOS_UPDATE_REQ, new_value);
+
+ return ret;
+}
+
+/**
* dev_pm_qos_update_request - modifies an existing qos request
* @req : handle to list element holding a dev_pm_qos request to use
* @new_value: defines the qos request
@@ -275,11 +377,13 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_add_request);
* 0 if the aggregated constraint value has not changed,
* -EINVAL in case of wrong parameters, -ENODEV if the device has been
* removed from the system
+ *
+ * Callers should ensure that the target device is not RPM_SUSPENDED before
+ * using this function for requests of type DEV_PM_QOS_FLAGS.
*/
-int dev_pm_qos_update_request(struct dev_pm_qos_request *req,
- s32 new_value)
+int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value)
{
- int ret = 0;
+ int ret;
if (!req) /*guard against callers passing in null */
return -EINVAL;
@@ -289,17 +393,9 @@ int dev_pm_qos_update_request(struct dev_pm_qos_request *req,
return -EINVAL;
mutex_lock(&dev_pm_qos_mtx);
-
- if (req->dev->power.constraints) {
- if (new_value != req->node.prio)
- ret = apply_constraint(req, PM_QOS_UPDATE_REQ,
- new_value);
- } else {
- /* Return if the device has been removed */
- ret = -ENODEV;
- }
-
+ ret = __dev_pm_qos_update_request(req, new_value);
mutex_unlock(&dev_pm_qos_mtx);
+
return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);
@@ -315,6 +411,9 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);
* 0 if the aggregated constraint value has not changed,
* -EINVAL in case of wrong parameters, -ENODEV if the device has been
* removed from the system
+ *
+ * Callers should ensure that the target device is not RPM_SUSPENDED before
+ * using this function for requests of type DEV_PM_QOS_FLAGS.
*/
int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
{
@@ -329,7 +428,7 @@ int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
mutex_lock(&dev_pm_qos_mtx);
- if (req->dev->power.constraints) {
+ if (req->dev->power.qos) {
ret = apply_constraint(req, PM_QOS_REMOVE_REQ,
PM_QOS_DEFAULT_VALUE);
memset(req, 0, sizeof(*req));
@@ -362,13 +461,13 @@ int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier)
mutex_lock(&dev_pm_qos_mtx);
- if (!dev->power.constraints)
+ if (!dev->power.qos)
ret = dev->power.power_state.event != PM_EVENT_INVALID ?
dev_pm_qos_constraints_allocate(dev) : -ENODEV;
if (!ret)
ret = blocking_notifier_chain_register(
- dev->power.constraints->notifiers, notifier);
+ dev->power.qos->latency.notifiers, notifier);
mutex_unlock(&dev_pm_qos_mtx);
return ret;
@@ -393,9 +492,9 @@ int dev_pm_qos_remove_notifier(struct device *dev,
mutex_lock(&dev_pm_qos_mtx);
/* Silently return if the constraints object is not present. */
- if (dev->power.constraints)
+ if (dev->power.qos)
retval = blocking_notifier_chain_unregister(
- dev->power.constraints->notifiers,
+ dev->power.qos->latency.notifiers,
notifier);
mutex_unlock(&dev_pm_qos_mtx);
@@ -449,7 +548,8 @@ int dev_pm_qos_add_ancestor_request(struct device *dev,
ancestor = ancestor->parent;
if (ancestor)
- error = dev_pm_qos_add_request(ancestor, req, value);
+ error = dev_pm_qos_add_request(ancestor, req,
+ DEV_PM_QOS_LATENCY, value);
if (error < 0)
req->dev = NULL;
@@ -459,10 +559,19 @@ int dev_pm_qos_add_ancestor_request(struct device *dev,
EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request);
#ifdef CONFIG_PM_RUNTIME
-static void __dev_pm_qos_drop_user_request(struct device *dev)
+static void __dev_pm_qos_drop_user_request(struct device *dev,
+ enum dev_pm_qos_req_type type)
{
- dev_pm_qos_remove_request(dev->power.pq_req);
- dev->power.pq_req = NULL;
+ switch(type) {
+ case DEV_PM_QOS_LATENCY:
+ dev_pm_qos_remove_request(dev->power.qos->latency_req);
+ dev->power.qos->latency_req = NULL;
+ break;
+ case DEV_PM_QOS_FLAGS:
+ dev_pm_qos_remove_request(dev->power.qos->flags_req);
+ dev->power.qos->flags_req = NULL;
+ break;
+ }
}
/**
@@ -478,21 +587,21 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
if (!device_is_registered(dev) || value < 0)
return -EINVAL;
- if (dev->power.pq_req)
+ if (dev->power.qos && dev->power.qos->latency_req)
return -EEXIST;
req = kzalloc(sizeof(*req), GFP_KERNEL);
if (!req)
return -ENOMEM;
- ret = dev_pm_qos_add_request(dev, req, value);
+ ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY, value);
if (ret < 0)
return ret;
- dev->power.pq_req = req;
- ret = pm_qos_sysfs_add(dev);
+ dev->power.qos->latency_req = req;
+ ret = pm_qos_sysfs_add_latency(dev);
if (ret)
- __dev_pm_qos_drop_user_request(dev);
+ __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
return ret;
}
@@ -504,10 +613,92 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit);
*/
void dev_pm_qos_hide_latency_limit(struct device *dev)
{
- if (dev->power.pq_req) {
- pm_qos_sysfs_remove(dev);
- __dev_pm_qos_drop_user_request(dev);
+ if (dev->power.qos && dev->power.qos->latency_req) {
+ pm_qos_sysfs_remove_latency(dev);
+ __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
}
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit);
+
+/**
+ * dev_pm_qos_expose_flags - Expose PM QoS flags of a device to user space.
+ * @dev: Device whose PM QoS flags are to be exposed to user space.
+ * @val: Initial values of the flags.
+ */
+int dev_pm_qos_expose_flags(struct device *dev, s32 val)
+{
+ struct dev_pm_qos_request *req;
+ int ret;
+
+ if (!device_is_registered(dev))
+ return -EINVAL;
+
+ if (dev->power.qos && dev->power.qos->flags_req)
+ return -EEXIST;
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ pm_runtime_get_sync(dev);
+ ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_FLAGS, val);
+ if (ret < 0)
+ goto fail;
+
+ dev->power.qos->flags_req = req;
+ ret = pm_qos_sysfs_add_flags(dev);
+ if (ret)
+ __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
+
+fail:
+ pm_runtime_put(dev);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_expose_flags);
+
+/**
+ * dev_pm_qos_hide_flags - Hide PM QoS flags of a device from user space.
+ * @dev: Device whose PM QoS flags are to be hidden from user space.
+ */
+void dev_pm_qos_hide_flags(struct device *dev)
+{
+ if (dev->power.qos && dev->power.qos->flags_req) {
+ pm_qos_sysfs_remove_flags(dev);
+ pm_runtime_get_sync(dev);
+ __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
+ pm_runtime_put(dev);
+ }
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_hide_flags);
+
+/**
+ * dev_pm_qos_update_flags - Update PM QoS flags request owned by user space.
+ * @dev: Device to update the PM QoS flags request for.
+ * @mask: Flags to set/clear.
+ * @set: Whether to set or clear the flags (true means set).
+ */
+int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set)
+{
+ s32 value;
+ int ret;
+
+ if (!dev->power.qos || !dev->power.qos->flags_req)
+ return -EINVAL;
+
+ pm_runtime_get_sync(dev);
+ mutex_lock(&dev_pm_qos_mtx);
+
+ value = dev_pm_qos_requested_flags(dev);
+ if (set)
+ value |= mask;
+ else
+ value &= ~mask;
+
+ ret = __dev_pm_qos_update_request(dev->power.qos->flags_req, value);
+
+ mutex_unlock(&dev_pm_qos_mtx);
+ pm_runtime_put(dev);
+
+ return ret;
+}
#endif /* CONFIG_PM_RUNTIME */
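Putting the new flag interfaces together, a hedged sketch of how a subsystem might expose the flags to user space and flip one from the kernel side (function names are illustrative):

#include <linux/pm_qos.h>

/* Expose both flags to user space with everything cleared initially; this
 * creates the power/pm_qos_no_power_off and power/pm_qos_remote_wakeup
 * attributes handled in the sysfs.c changes below. */
static int foo_setup_pm_qos(struct device *dev)
{
	return dev_pm_qos_expose_flags(dev, 0);
}

/* Later, the kernel side can flip a flag in the user-space-owned request;
 * a negative return value indicates an error. */
static int foo_request_remote_wakeup(struct device *dev)
{
	return dev_pm_qos_update_flags(dev, PM_QOS_FLAG_REMOTE_WAKEUP, true);
}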
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index b91dc6f1e914..50d16e3cb0a9 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -221,7 +221,7 @@ static DEVICE_ATTR(autosuspend_delay_ms, 0644, autosuspend_delay_ms_show,
static ssize_t pm_qos_latency_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "%d\n", dev->power.pq_req->node.prio);
+ return sprintf(buf, "%d\n", dev_pm_qos_requested_latency(dev));
}
static ssize_t pm_qos_latency_store(struct device *dev,
@@ -237,12 +237,66 @@ static ssize_t pm_qos_latency_store(struct device *dev,
if (value < 0)
return -EINVAL;
- ret = dev_pm_qos_update_request(dev->power.pq_req, value);
+ ret = dev_pm_qos_update_request(dev->power.qos->latency_req, value);
return ret < 0 ? ret : n;
}
static DEVICE_ATTR(pm_qos_resume_latency_us, 0644,
pm_qos_latency_show, pm_qos_latency_store);
+
+static ssize_t pm_qos_no_power_off_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%d\n", !!(dev_pm_qos_requested_flags(dev)
+ & PM_QOS_FLAG_NO_POWER_OFF));
+}
+
+static ssize_t pm_qos_no_power_off_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t n)
+{
+ int ret;
+
+ if (kstrtoint(buf, 0, &ret))
+ return -EINVAL;
+
+ if (ret != 0 && ret != 1)
+ return -EINVAL;
+
+ ret = dev_pm_qos_update_flags(dev, PM_QOS_FLAG_NO_POWER_OFF, ret);
+ return ret < 0 ? ret : n;
+}
+
+static DEVICE_ATTR(pm_qos_no_power_off, 0644,
+ pm_qos_no_power_off_show, pm_qos_no_power_off_store);
+
+static ssize_t pm_qos_remote_wakeup_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%d\n", !!(dev_pm_qos_requested_flags(dev)
+ & PM_QOS_FLAG_REMOTE_WAKEUP));
+}
+
+static ssize_t pm_qos_remote_wakeup_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t n)
+{
+ int ret;
+
+ if (kstrtoint(buf, 0, &ret))
+ return -EINVAL;
+
+ if (ret != 0 && ret != 1)
+ return -EINVAL;
+
+ ret = dev_pm_qos_update_flags(dev, PM_QOS_FLAG_REMOTE_WAKEUP, ret);
+ return ret < 0 ? ret : n;
+}
+
+static DEVICE_ATTR(pm_qos_remote_wakeup, 0644,
+ pm_qos_remote_wakeup_show, pm_qos_remote_wakeup_store);
#endif /* CONFIG_PM_RUNTIME */
#ifdef CONFIG_PM_SLEEP
@@ -564,15 +618,27 @@ static struct attribute_group pm_runtime_attr_group = {
.attrs = runtime_attrs,
};
-static struct attribute *pm_qos_attrs[] = {
+static struct attribute *pm_qos_latency_attrs[] = {
#ifdef CONFIG_PM_RUNTIME
&dev_attr_pm_qos_resume_latency_us.attr,
#endif /* CONFIG_PM_RUNTIME */
NULL,
};
-static struct attribute_group pm_qos_attr_group = {
+static struct attribute_group pm_qos_latency_attr_group = {
.name = power_group_name,
- .attrs = pm_qos_attrs,
+ .attrs = pm_qos_latency_attrs,
+};
+
+static struct attribute *pm_qos_flags_attrs[] = {
+#ifdef CONFIG_PM_RUNTIME
+ &dev_attr_pm_qos_no_power_off.attr,
+ &dev_attr_pm_qos_remote_wakeup.attr,
+#endif /* CONFIG_PM_RUNTIME */
+ NULL,
+};
+static struct attribute_group pm_qos_flags_attr_group = {
+ .name = power_group_name,
+ .attrs = pm_qos_flags_attrs,
};
int dpm_sysfs_add(struct device *dev)
@@ -615,14 +681,24 @@ void wakeup_sysfs_remove(struct device *dev)
sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
}
-int pm_qos_sysfs_add(struct device *dev)
+int pm_qos_sysfs_add_latency(struct device *dev)
+{
+ return sysfs_merge_group(&dev->kobj, &pm_qos_latency_attr_group);
+}
+
+void pm_qos_sysfs_remove_latency(struct device *dev)
+{
+ sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_attr_group);
+}
+
+int pm_qos_sysfs_add_flags(struct device *dev)
{
- return sysfs_merge_group(&dev->kobj, &pm_qos_attr_group);
+ return sysfs_merge_group(&dev->kobj, &pm_qos_flags_attr_group);
}
-void pm_qos_sysfs_remove(struct device *dev)
+void pm_qos_sysfs_remove_flags(struct device *dev)
{
- sysfs_unmerge_group(&dev->kobj, &pm_qos_attr_group);
+ sysfs_unmerge_group(&dev->kobj, &pm_qos_flags_attr_group);
}
void rpm_sysfs_remove(struct device *dev)
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 5961e6415f08..a0b3661d90b0 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -76,3 +76,10 @@ config ARM_EXYNOS5250_CPUFREQ
help
This adds the CPUFreq driver for Samsung EXYNOS5250
SoC.
+
+config ARM_SPEAR_CPUFREQ
+ bool "SPEAr CPUFreq support"
+ depends on PLAT_SPEAR
+ default y
+ help
+ This adds CPUFreq driver support for SPEAr SoCs.
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 1bc90e1306d8..1f254ec087c1 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -7,8 +7,8 @@ obj-$(CONFIG_CPU_FREQ_STAT) += cpufreq_stats.o
obj-$(CONFIG_CPU_FREQ_GOV_PERFORMANCE) += cpufreq_performance.o
obj-$(CONFIG_CPU_FREQ_GOV_POWERSAVE) += cpufreq_powersave.o
obj-$(CONFIG_CPU_FREQ_GOV_USERSPACE) += cpufreq_userspace.o
-obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND) += cpufreq_ondemand.o
-obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE) += cpufreq_conservative.o
+obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND) += cpufreq_ondemand.o cpufreq_governor.o
+obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE) += cpufreq_conservative.o cpufreq_governor.o
# CPUfreq cross-arch helpers
obj-$(CONFIG_CPU_FREQ_TABLE) += freq_table.o
@@ -50,6 +50,7 @@ obj-$(CONFIG_ARM_EXYNOS4210_CPUFREQ) += exynos4210-cpufreq.o
obj-$(CONFIG_ARM_EXYNOS4X12_CPUFREQ) += exynos4x12-cpufreq.o
obj-$(CONFIG_ARM_EXYNOS5250_CPUFREQ) += exynos5250-cpufreq.o
obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ) += omap-cpufreq.o
+obj-$(CONFIG_ARM_SPEAR_CPUFREQ) += spear-cpufreq.o
##################################################################################
# PowerPC platform drivers
diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c
index e9158278c71d..52bf36d599f5 100644
--- a/drivers/cpufreq/cpufreq-cpu0.c
+++ b/drivers/cpufreq/cpufreq-cpu0.c
@@ -174,7 +174,7 @@ static struct cpufreq_driver cpu0_cpufreq_driver = {
.attr = cpu0_cpufreq_attr,
};
-static int __devinit cpu0_cpufreq_driver_init(void)
+static int cpu0_cpufreq_driver_init(void)
{
struct device_node *np;
int ret;
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index fb8a5279c5d8..1f93dbd72355 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -15,6 +15,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -127,7 +129,7 @@ static int __init init_cpufreq_transition_notifier_list(void)
pure_initcall(init_cpufreq_transition_notifier_list);
static int off __read_mostly;
-int cpufreq_disabled(void)
+static int cpufreq_disabled(void)
{
return off;
}
@@ -402,7 +404,7 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data,
static ssize_t store_##file_name \
(struct cpufreq_policy *policy, const char *buf, size_t count) \
{ \
- unsigned int ret = -EINVAL; \
+ unsigned int ret; \
struct cpufreq_policy new_policy; \
\
ret = cpufreq_get_policy(&new_policy, policy->cpu); \
@@ -445,7 +447,7 @@ static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
return sprintf(buf, "performance\n");
else if (policy->governor)
- return scnprintf(buf, CPUFREQ_NAME_LEN, "%s\n",
+ return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
policy->governor->name);
return -EINVAL;
}
@@ -457,7 +459,7 @@ static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
const char *buf, size_t count)
{
- unsigned int ret = -EINVAL;
+ unsigned int ret;
char str_governor[16];
struct cpufreq_policy new_policy;
@@ -491,7 +493,7 @@ static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
*/
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
{
- return scnprintf(buf, CPUFREQ_NAME_LEN, "%s\n", cpufreq_driver->name);
+ return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
}
/**
@@ -512,7 +514,7 @@ static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
- (CPUFREQ_NAME_LEN + 2)))
goto out;
- i += scnprintf(&buf[i], CPUFREQ_NAME_LEN, "%s ", t->name);
+ i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
}
out:
i += sprintf(&buf[i], "\n");
@@ -581,7 +583,7 @@ static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
}
/**
- * show_scaling_driver - show the current cpufreq HW/BIOS limitation
+ * show_bios_limit - show the current cpufreq HW/BIOS limitation
*/
static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
{
@@ -1468,12 +1470,23 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
unsigned int relation)
{
int retval = -EINVAL;
+ unsigned int old_target_freq = target_freq;
if (cpufreq_disabled())
return -ENODEV;
- pr_debug("target for CPU %u: %u kHz, relation %u\n", policy->cpu,
- target_freq, relation);
+ /* Make sure that target_freq is within supported range */
+ if (target_freq > policy->max)
+ target_freq = policy->max;
+ if (target_freq < policy->min)
+ target_freq = policy->min;
+
+ pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
+ policy->cpu, target_freq, relation, old_target_freq);
+
+ if (target_freq == policy->cur)
+ return 0;
+
if (cpu_online(policy->cpu) && cpufreq_driver->target)
retval = cpufreq_driver->target(policy, target_freq, relation);
@@ -1509,12 +1522,14 @@ int __cpufreq_driver_getavg(struct cpufreq_policy *policy, unsigned int cpu)
{
int ret = 0;
+ if (!(cpu_online(cpu) && cpufreq_driver->getavg))
+ return 0;
+
policy = cpufreq_cpu_get(policy->cpu);
if (!policy)
return -EINVAL;
- if (cpu_online(cpu) && cpufreq_driver->getavg)
- ret = cpufreq_driver->getavg(policy, cpu);
+ ret = cpufreq_driver->getavg(policy, cpu);
cpufreq_cpu_put(policy);
return ret;
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index a152af7e1991..64ef737e7e72 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -11,83 +11,30 @@
* published by the Free Software Foundation.
*/
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
#include <linux/cpufreq.h>
-#include <linux/cpu.h>
-#include <linux/jiffies.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
#include <linux/kernel_stat.h>
+#include <linux/kobject.h>
+#include <linux/module.h>
#include <linux/mutex.h>
-#include <linux/hrtimer.h>
-#include <linux/tick.h>
-#include <linux/ktime.h>
-#include <linux/sched.h>
+#include <linux/notifier.h>
+#include <linux/percpu-defs.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
-/*
- * dbs is used in this file as a shortform for demandbased switching
- * It helps to keep variable names smaller, simpler
- */
+#include "cpufreq_governor.h"
+/* Conservative governor macros */
#define DEF_FREQUENCY_UP_THRESHOLD (80)
#define DEF_FREQUENCY_DOWN_THRESHOLD (20)
-
-/*
- * The polling frequency of this governor depends on the capability of
- * the processor. Default polling frequency is 1000 times the transition
- * latency of the processor. The governor will work on any processor with
- * transition latency <= 10mS, using appropriate sampling
- * rate.
- * For CPUs with transition latency > 10mS (mostly drivers with CPUFREQ_ETERNAL)
- * this governor will not work.
- * All times here are in uS.
- */
-#define MIN_SAMPLING_RATE_RATIO (2)
-
-static unsigned int min_sampling_rate;
-
-#define LATENCY_MULTIPLIER (1000)
-#define MIN_LATENCY_MULTIPLIER (100)
#define DEF_SAMPLING_DOWN_FACTOR (1)
#define MAX_SAMPLING_DOWN_FACTOR (10)
-#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000)
-
-static void do_dbs_timer(struct work_struct *work);
-
-struct cpu_dbs_info_s {
- cputime64_t prev_cpu_idle;
- cputime64_t prev_cpu_wall;
- cputime64_t prev_cpu_nice;
- struct cpufreq_policy *cur_policy;
- struct delayed_work work;
- unsigned int down_skip;
- unsigned int requested_freq;
- int cpu;
- unsigned int enable:1;
- /*
- * percpu mutex that serializes governor limit change with
- * do_dbs_timer invocation. We do not want do_dbs_timer to run
- * when user is changing the governor or limits.
- */
- struct mutex timer_mutex;
-};
-static DEFINE_PER_CPU(struct cpu_dbs_info_s, cs_cpu_dbs_info);
-static unsigned int dbs_enable; /* number of CPUs using this policy */
+static struct dbs_data cs_dbs_data;
+static DEFINE_PER_CPU(struct cs_cpu_dbs_info_s, cs_cpu_dbs_info);
-/*
- * dbs_mutex protects dbs_enable in governor start/stop.
- */
-static DEFINE_MUTEX(dbs_mutex);
-
-static struct dbs_tuners {
- unsigned int sampling_rate;
- unsigned int sampling_down_factor;
- unsigned int up_threshold;
- unsigned int down_threshold;
- unsigned int ignore_nice;
- unsigned int freq_step;
-} dbs_tuners_ins = {
+static struct cs_dbs_tuners cs_tuners = {
.up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
.down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD,
.sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
@@ -95,95 +42,121 @@ static struct dbs_tuners {
.freq_step = 5,
};
-static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
+/*
+ * Every sampling_rate, we check whether the current idle time is less than
+ * 20% (default); if it is, we try to increase the frequency. Every
+ * sampling_rate * sampling_down_factor, we check whether the current idle
+ * time is more than 80%; if it is, we try to decrease the frequency.
+ *
+ * Any frequency increase takes it to the maximum frequency. Frequency reduction
+ * happens at minimum steps of 5% (default) of maximum frequency
+ */
+static void cs_check_cpu(int cpu, unsigned int load)
{
- u64 idle_time;
- u64 cur_wall_time;
- u64 busy_time;
+ struct cs_cpu_dbs_info_s *dbs_info = &per_cpu(cs_cpu_dbs_info, cpu);
+ struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy;
+ unsigned int freq_target;
+
+ /*
+ * break out if we 'cannot' reduce the speed as the user might
+ * want freq_step to be zero
+ */
+ if (cs_tuners.freq_step == 0)
+ return;
- cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
+ /* Check for frequency increase */
+ if (load > cs_tuners.up_threshold) {
+ dbs_info->down_skip = 0;
- busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
- busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
- busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
- busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
- busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
- busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
+ /* if we are already at full speed then break out early */
+ if (dbs_info->requested_freq == policy->max)
+ return;
- idle_time = cur_wall_time - busy_time;
- if (wall)
- *wall = jiffies_to_usecs(cur_wall_time);
+ freq_target = (cs_tuners.freq_step * policy->max) / 100;
- return jiffies_to_usecs(idle_time);
+ /* max freq cannot be less than 100. But who knows.... */
+ if (unlikely(freq_target == 0))
+ freq_target = 5;
+
+ dbs_info->requested_freq += freq_target;
+ if (dbs_info->requested_freq > policy->max)
+ dbs_info->requested_freq = policy->max;
+
+ __cpufreq_driver_target(policy, dbs_info->requested_freq,
+ CPUFREQ_RELATION_H);
+ return;
+ }
+
+ /*
+ * The optimal frequency is the lowest frequency that can support the
+ * current CPU usage without triggering the up policy. To be safe, we
+ * keep a margin of 10 points under the threshold.
+ */
+ if (load < (cs_tuners.down_threshold - 10)) {
+ freq_target = (cs_tuners.freq_step * policy->max) / 100;
+
+ dbs_info->requested_freq -= freq_target;
+ if (dbs_info->requested_freq < policy->min)
+ dbs_info->requested_freq = policy->min;
+
+ /*
+ * if we cannot reduce the frequency anymore, break out early
+ */
+ if (policy->cur == policy->min)
+ return;
+
+ __cpufreq_driver_target(policy, dbs_info->requested_freq,
+ CPUFREQ_RELATION_H);
+ return;
+ }
}
-static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
+static void cs_dbs_timer(struct work_struct *work)
{
- u64 idle_time = get_cpu_idle_time_us(cpu, NULL);
+ struct cs_cpu_dbs_info_s *dbs_info = container_of(work,
+ struct cs_cpu_dbs_info_s, cdbs.work.work);
+ unsigned int cpu = dbs_info->cdbs.cpu;
+ int delay = delay_for_sampling_rate(cs_tuners.sampling_rate);
- if (idle_time == -1ULL)
- return get_cpu_idle_time_jiffy(cpu, wall);
- else
- idle_time += get_cpu_iowait_time_us(cpu, wall);
+ mutex_lock(&dbs_info->cdbs.timer_mutex);
- return idle_time;
+ dbs_check_cpu(&cs_dbs_data, cpu);
+
+ schedule_delayed_work_on(cpu, &dbs_info->cdbs.work, delay);
+ mutex_unlock(&dbs_info->cdbs.timer_mutex);
}
-/* keep track of frequency transitions */
-static int
-dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
- void *data)
+static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
+ void *data)
{
struct cpufreq_freqs *freq = data;
- struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cs_cpu_dbs_info,
- freq->cpu);
-
+ struct cs_cpu_dbs_info_s *dbs_info =
+ &per_cpu(cs_cpu_dbs_info, freq->cpu);
struct cpufreq_policy *policy;
- if (!this_dbs_info->enable)
+ if (!dbs_info->enable)
return 0;
- policy = this_dbs_info->cur_policy;
+ policy = dbs_info->cdbs.cur_policy;
/*
- * we only care if our internally tracked freq moves outside
- * the 'valid' ranges of freqency available to us otherwise
- * we do not change it
+ * we only care if our internally tracked freq moves outside the 'valid'
+ * ranges of frequency available to us; otherwise we do not change it
*/
- if (this_dbs_info->requested_freq > policy->max
- || this_dbs_info->requested_freq < policy->min)
- this_dbs_info->requested_freq = freq->new;
+ if (dbs_info->requested_freq > policy->max
+ || dbs_info->requested_freq < policy->min)
+ dbs_info->requested_freq = freq->new;
return 0;
}
-static struct notifier_block dbs_cpufreq_notifier_block = {
- .notifier_call = dbs_cpufreq_notifier
-};
-
/************************** sysfs interface ************************/
static ssize_t show_sampling_rate_min(struct kobject *kobj,
struct attribute *attr, char *buf)
{
- return sprintf(buf, "%u\n", min_sampling_rate);
+ return sprintf(buf, "%u\n", cs_dbs_data.min_sampling_rate);
}
-define_one_global_ro(sampling_rate_min);
-
-/* cpufreq_conservative Governor Tunables */
-#define show_one(file_name, object) \
-static ssize_t show_##file_name \
-(struct kobject *kobj, struct attribute *attr, char *buf) \
-{ \
- return sprintf(buf, "%u\n", dbs_tuners_ins.object); \
-}
-show_one(sampling_rate, sampling_rate);
-show_one(sampling_down_factor, sampling_down_factor);
-show_one(up_threshold, up_threshold);
-show_one(down_threshold, down_threshold);
-show_one(ignore_nice_load, ignore_nice);
-show_one(freq_step, freq_step);
-
static ssize_t store_sampling_down_factor(struct kobject *a,
struct attribute *b,
const char *buf, size_t count)
@@ -195,7 +168,7 @@ static ssize_t store_sampling_down_factor(struct kobject *a,
if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
return -EINVAL;
- dbs_tuners_ins.sampling_down_factor = input;
+ cs_tuners.sampling_down_factor = input;
return count;
}
@@ -209,7 +182,7 @@ static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b,
if (ret != 1)
return -EINVAL;
- dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate);
+ cs_tuners.sampling_rate = max(input, cs_dbs_data.min_sampling_rate);
return count;
}
@@ -220,11 +193,10 @@ static ssize_t store_up_threshold(struct kobject *a, struct attribute *b,
int ret;
ret = sscanf(buf, "%u", &input);
- if (ret != 1 || input > 100 ||
- input <= dbs_tuners_ins.down_threshold)
+ if (ret != 1 || input > 100 || input <= cs_tuners.down_threshold)
return -EINVAL;
- dbs_tuners_ins.up_threshold = input;
+ cs_tuners.up_threshold = input;
return count;
}
@@ -237,21 +209,19 @@ static ssize_t store_down_threshold(struct kobject *a, struct attribute *b,
/* cannot be lower than 11 otherwise freq will not fall */
if (ret != 1 || input < 11 || input > 100 ||
- input >= dbs_tuners_ins.up_threshold)
+ input >= cs_tuners.up_threshold)
return -EINVAL;
- dbs_tuners_ins.down_threshold = input;
+ cs_tuners.down_threshold = input;
return count;
}
static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
const char *buf, size_t count)
{
- unsigned int input;
+ unsigned int input, j;
int ret;
- unsigned int j;
-
ret = sscanf(buf, "%u", &input);
if (ret != 1)
return -EINVAL;
@@ -259,19 +229,20 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
if (input > 1)
input = 1;
- if (input == dbs_tuners_ins.ignore_nice) /* nothing to do */
+ if (input == cs_tuners.ignore_nice) /* nothing to do */
return count;
- dbs_tuners_ins.ignore_nice = input;
+ cs_tuners.ignore_nice = input;
/* we need to re-evaluate prev_cpu_idle */
for_each_online_cpu(j) {
- struct cpu_dbs_info_s *dbs_info;
+ struct cs_cpu_dbs_info_s *dbs_info;
dbs_info = &per_cpu(cs_cpu_dbs_info, j);
- dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
- &dbs_info->prev_cpu_wall);
- if (dbs_tuners_ins.ignore_nice)
- dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
+ dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
+ &dbs_info->cdbs.prev_cpu_wall);
+ if (cs_tuners.ignore_nice)
+ dbs_info->cdbs.prev_cpu_nice =
+ kcpustat_cpu(j).cpustat[CPUTIME_NICE];
}
return count;
}
@@ -289,18 +260,28 @@ static ssize_t store_freq_step(struct kobject *a, struct attribute *b,
if (input > 100)
input = 100;
- /* no need to test here if freq_step is zero as the user might actually
- * want this, they would be crazy though :) */
- dbs_tuners_ins.freq_step = input;
+ /*
+ * no need to test here if freq_step is zero as the user might actually
+ * want this, they would be crazy though :)
+ */
+ cs_tuners.freq_step = input;
return count;
}
+show_one(cs, sampling_rate, sampling_rate);
+show_one(cs, sampling_down_factor, sampling_down_factor);
+show_one(cs, up_threshold, up_threshold);
+show_one(cs, down_threshold, down_threshold);
+show_one(cs, ignore_nice_load, ignore_nice);
+show_one(cs, freq_step, freq_step);
+
define_one_global_rw(sampling_rate);
define_one_global_rw(sampling_down_factor);
define_one_global_rw(up_threshold);
define_one_global_rw(down_threshold);
define_one_global_rw(ignore_nice_load);
define_one_global_rw(freq_step);
+define_one_global_ro(sampling_rate_min);
static struct attribute *dbs_attributes[] = {
&sampling_rate_min.attr,
@@ -313,283 +294,38 @@ static struct attribute *dbs_attributes[] = {
NULL
};
-static struct attribute_group dbs_attr_group = {
+static struct attribute_group cs_attr_group = {
.attrs = dbs_attributes,
.name = "conservative",
};
/************************** sysfs end ************************/
-static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
-{
- unsigned int load = 0;
- unsigned int max_load = 0;
- unsigned int freq_target;
-
- struct cpufreq_policy *policy;
- unsigned int j;
-
- policy = this_dbs_info->cur_policy;
-
- /*
- * Every sampling_rate, we check, if current idle time is less
- * than 20% (default), then we try to increase frequency
- * Every sampling_rate*sampling_down_factor, we check, if current
- * idle time is more than 80%, then we try to decrease frequency
- *
- * Any frequency increase takes it to the maximum frequency.
- * Frequency reduction happens at minimum steps of
- * 5% (default) of maximum frequency
- */
-
- /* Get Absolute Load */
- for_each_cpu(j, policy->cpus) {
- struct cpu_dbs_info_s *j_dbs_info;
- cputime64_t cur_wall_time, cur_idle_time;
- unsigned int idle_time, wall_time;
-
- j_dbs_info = &per_cpu(cs_cpu_dbs_info, j);
-
- cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);
-
- wall_time = (unsigned int)
- (cur_wall_time - j_dbs_info->prev_cpu_wall);
- j_dbs_info->prev_cpu_wall = cur_wall_time;
-
- idle_time = (unsigned int)
- (cur_idle_time - j_dbs_info->prev_cpu_idle);
- j_dbs_info->prev_cpu_idle = cur_idle_time;
-
- if (dbs_tuners_ins.ignore_nice) {
- u64 cur_nice;
- unsigned long cur_nice_jiffies;
-
- cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] -
- j_dbs_info->prev_cpu_nice;
- /*
- * Assumption: nice time between sampling periods will
- * be less than 2^32 jiffies for 32 bit sys
- */
- cur_nice_jiffies = (unsigned long)
- cputime64_to_jiffies64(cur_nice);
-
- j_dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
- idle_time += jiffies_to_usecs(cur_nice_jiffies);
- }
+define_get_cpu_dbs_routines(cs_cpu_dbs_info);
- if (unlikely(!wall_time || wall_time < idle_time))
- continue;
-
- load = 100 * (wall_time - idle_time) / wall_time;
-
- if (load > max_load)
- max_load = load;
- }
-
- /*
- * break out if we 'cannot' reduce the speed as the user might
- * want freq_step to be zero
- */
- if (dbs_tuners_ins.freq_step == 0)
- return;
-
- /* Check for frequency increase */
- if (max_load > dbs_tuners_ins.up_threshold) {
- this_dbs_info->down_skip = 0;
-
- /* if we are already at full speed then break out early */
- if (this_dbs_info->requested_freq == policy->max)
- return;
-
- freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100;
-
- /* max freq cannot be less than 100. But who knows.... */
- if (unlikely(freq_target == 0))
- freq_target = 5;
-
- this_dbs_info->requested_freq += freq_target;
- if (this_dbs_info->requested_freq > policy->max)
- this_dbs_info->requested_freq = policy->max;
-
- __cpufreq_driver_target(policy, this_dbs_info->requested_freq,
- CPUFREQ_RELATION_H);
- return;
- }
-
- /*
- * The optimal frequency is the frequency that is the lowest that
- * can support the current CPU usage without triggering the up
- * policy. To be safe, we focus 10 points under the threshold.
- */
- if (max_load < (dbs_tuners_ins.down_threshold - 10)) {
- freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100;
-
- this_dbs_info->requested_freq -= freq_target;
- if (this_dbs_info->requested_freq < policy->min)
- this_dbs_info->requested_freq = policy->min;
-
- /*
- * if we cannot reduce the frequency anymore, break out early
- */
- if (policy->cur == policy->min)
- return;
-
- __cpufreq_driver_target(policy, this_dbs_info->requested_freq,
- CPUFREQ_RELATION_H);
- return;
- }
-}
-
-static void do_dbs_timer(struct work_struct *work)
-{
- struct cpu_dbs_info_s *dbs_info =
- container_of(work, struct cpu_dbs_info_s, work.work);
- unsigned int cpu = dbs_info->cpu;
-
- /* We want all CPUs to do sampling nearly on same jiffy */
- int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
-
- delay -= jiffies % delay;
-
- mutex_lock(&dbs_info->timer_mutex);
-
- dbs_check_cpu(dbs_info);
-
- schedule_delayed_work_on(cpu, &dbs_info->work, delay);
- mutex_unlock(&dbs_info->timer_mutex);
-}
-
-static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
-{
- /* We want all CPUs to do sampling nearly on same jiffy */
- int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
- delay -= jiffies % delay;
+static struct notifier_block cs_cpufreq_notifier_block = {
+ .notifier_call = dbs_cpufreq_notifier,
+};
- dbs_info->enable = 1;
- INIT_DEFERRABLE_WORK(&dbs_info->work, do_dbs_timer);
- schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work, delay);
-}
+static struct cs_ops cs_ops = {
+ .notifier_block = &cs_cpufreq_notifier_block,
+};
-static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
-{
- dbs_info->enable = 0;
- cancel_delayed_work_sync(&dbs_info->work);
-}
+static struct dbs_data cs_dbs_data = {
+ .governor = GOV_CONSERVATIVE,
+ .attr_group = &cs_attr_group,
+ .tuners = &cs_tuners,
+ .get_cpu_cdbs = get_cpu_cdbs,
+ .get_cpu_dbs_info_s = get_cpu_dbs_info_s,
+ .gov_dbs_timer = cs_dbs_timer,
+ .gov_check_cpu = cs_check_cpu,
+ .gov_ops = &cs_ops,
+};
-static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
+static int cs_cpufreq_governor_dbs(struct cpufreq_policy *policy,
unsigned int event)
{
- unsigned int cpu = policy->cpu;
- struct cpu_dbs_info_s *this_dbs_info;
- unsigned int j;
- int rc;
-
- this_dbs_info = &per_cpu(cs_cpu_dbs_info, cpu);
-
- switch (event) {
- case CPUFREQ_GOV_START:
- if ((!cpu_online(cpu)) || (!policy->cur))
- return -EINVAL;
-
- mutex_lock(&dbs_mutex);
-
- for_each_cpu(j, policy->cpus) {
- struct cpu_dbs_info_s *j_dbs_info;
- j_dbs_info = &per_cpu(cs_cpu_dbs_info, j);
- j_dbs_info->cur_policy = policy;
-
- j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
- &j_dbs_info->prev_cpu_wall);
- if (dbs_tuners_ins.ignore_nice)
- j_dbs_info->prev_cpu_nice =
- kcpustat_cpu(j).cpustat[CPUTIME_NICE];
- }
- this_dbs_info->cpu = cpu;
- this_dbs_info->down_skip = 0;
- this_dbs_info->requested_freq = policy->cur;
-
- mutex_init(&this_dbs_info->timer_mutex);
- dbs_enable++;
- /*
- * Start the timerschedule work, when this governor
- * is used for first time
- */
- if (dbs_enable == 1) {
- unsigned int latency;
- /* policy latency is in nS. Convert it to uS first */
- latency = policy->cpuinfo.transition_latency / 1000;
- if (latency == 0)
- latency = 1;
-
- rc = sysfs_create_group(cpufreq_global_kobject,
- &dbs_attr_group);
- if (rc) {
- mutex_unlock(&dbs_mutex);
- return rc;
- }
-
- /*
- * conservative does not implement micro like ondemand
- * governor, thus we are bound to jiffes/HZ
- */
- min_sampling_rate =
- MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10);
- /* Bring kernel and HW constraints together */
- min_sampling_rate = max(min_sampling_rate,
- MIN_LATENCY_MULTIPLIER * latency);
- dbs_tuners_ins.sampling_rate =
- max(min_sampling_rate,
- latency * LATENCY_MULTIPLIER);
-
- cpufreq_register_notifier(
- &dbs_cpufreq_notifier_block,
- CPUFREQ_TRANSITION_NOTIFIER);
- }
- mutex_unlock(&dbs_mutex);
-
- dbs_timer_init(this_dbs_info);
-
- break;
-
- case CPUFREQ_GOV_STOP:
- dbs_timer_exit(this_dbs_info);
-
- mutex_lock(&dbs_mutex);
- dbs_enable--;
- mutex_destroy(&this_dbs_info->timer_mutex);
-
- /*
- * Stop the timerschedule work, when this governor
- * is used for first time
- */
- if (dbs_enable == 0)
- cpufreq_unregister_notifier(
- &dbs_cpufreq_notifier_block,
- CPUFREQ_TRANSITION_NOTIFIER);
-
- mutex_unlock(&dbs_mutex);
- if (!dbs_enable)
- sysfs_remove_group(cpufreq_global_kobject,
- &dbs_attr_group);
-
- break;
-
- case CPUFREQ_GOV_LIMITS:
- mutex_lock(&this_dbs_info->timer_mutex);
- if (policy->max < this_dbs_info->cur_policy->cur)
- __cpufreq_driver_target(
- this_dbs_info->cur_policy,
- policy->max, CPUFREQ_RELATION_H);
- else if (policy->min > this_dbs_info->cur_policy->cur)
- __cpufreq_driver_target(
- this_dbs_info->cur_policy,
- policy->min, CPUFREQ_RELATION_L);
- dbs_check_cpu(this_dbs_info);
- mutex_unlock(&this_dbs_info->timer_mutex);
-
- break;
- }
- return 0;
+ return cpufreq_governor_dbs(&cs_dbs_data, policy, event);
}
#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
@@ -597,13 +333,14 @@ static
#endif
struct cpufreq_governor cpufreq_gov_conservative = {
.name = "conservative",
- .governor = cpufreq_governor_dbs,
+ .governor = cs_cpufreq_governor_dbs,
.max_transition_latency = TRANSITION_LATENCY_LIMIT,
.owner = THIS_MODULE,
};
static int __init cpufreq_gov_dbs_init(void)
{
+ mutex_init(&cs_dbs_data.mutex);
return cpufreq_register_governor(&cpufreq_gov_conservative);
}
@@ -612,7 +349,6 @@ static void __exit cpufreq_gov_dbs_exit(void)
cpufreq_unregister_governor(&cpufreq_gov_conservative);
}
-
MODULE_AUTHOR("Alexander Clouter <alex@digriz.org.uk>");
MODULE_DESCRIPTION("'cpufreq_conservative' - A dynamic cpufreq governor for "
"Low Latency Frequency Transition capable processors "
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
new file mode 100644
index 000000000000..6c5f1d383cdc
--- /dev/null
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -0,0 +1,318 @@
+/*
+ * drivers/cpufreq/cpufreq_governor.c
+ *
+ * CPUFREQ governors common code
+ *
+ * Copyright (C) 2001 Russell King
+ * (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
+ * (C) 2003 Jun Nakajima <jun.nakajima@intel.com>
+ * (C) 2009 Alexander Clouter <alex@digriz.org.uk>
+ * (c) 2012 Viresh Kumar <viresh.kumar@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <asm/cputime.h>
+#include <linux/cpufreq.h>
+#include <linux/cpumask.h>
+#include <linux/export.h>
+#include <linux/kernel_stat.h>
+#include <linux/mutex.h>
+#include <linux/tick.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#include "cpufreq_governor.h"
+
+static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
+{
+ u64 idle_time;
+ u64 cur_wall_time;
+ u64 busy_time;
+
+ cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
+
+ busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
+ busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
+ busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
+ busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
+ busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
+ busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
+
+ idle_time = cur_wall_time - busy_time;
+ if (wall)
+ *wall = cputime_to_usecs(cur_wall_time);
+
+ return cputime_to_usecs(idle_time);
+}
+
+u64 get_cpu_idle_time(unsigned int cpu, u64 *wall)
+{
+ u64 idle_time = get_cpu_idle_time_us(cpu, NULL);
+
+ if (idle_time == -1ULL)
+ return get_cpu_idle_time_jiffy(cpu, wall);
+ else
+ idle_time += get_cpu_iowait_time_us(cpu, wall);
+
+ return idle_time;
+}
+EXPORT_SYMBOL_GPL(get_cpu_idle_time);
+
+void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
+{
+ struct cpu_dbs_common_info *cdbs = dbs_data->get_cpu_cdbs(cpu);
+ struct od_dbs_tuners *od_tuners = dbs_data->tuners;
+ struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
+ struct cpufreq_policy *policy;
+ unsigned int max_load = 0;
+ unsigned int ignore_nice;
+ unsigned int j;
+
+ if (dbs_data->governor == GOV_ONDEMAND)
+ ignore_nice = od_tuners->ignore_nice;
+ else
+ ignore_nice = cs_tuners->ignore_nice;
+
+ policy = cdbs->cur_policy;
+
+ /* Get Absolute Load (in terms of freq for ondemand gov) */
+ for_each_cpu(j, policy->cpus) {
+ struct cpu_dbs_common_info *j_cdbs;
+ u64 cur_wall_time, cur_idle_time, cur_iowait_time;
+ unsigned int idle_time, wall_time, iowait_time;
+ unsigned int load;
+
+ j_cdbs = dbs_data->get_cpu_cdbs(j);
+
+ cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);
+
+ wall_time = (unsigned int)
+ (cur_wall_time - j_cdbs->prev_cpu_wall);
+ j_cdbs->prev_cpu_wall = cur_wall_time;
+
+ idle_time = (unsigned int)
+ (cur_idle_time - j_cdbs->prev_cpu_idle);
+ j_cdbs->prev_cpu_idle = cur_idle_time;
+
+ if (ignore_nice) {
+ u64 cur_nice;
+ unsigned long cur_nice_jiffies;
+
+ cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] -
+ cdbs->prev_cpu_nice;
+ /*
+ * Assumption: nice time between sampling periods will
+ * be less than 2^32 jiffies for 32 bit sys
+ */
+ cur_nice_jiffies = (unsigned long)
+ cputime64_to_jiffies64(cur_nice);
+
+ cdbs->prev_cpu_nice =
+ kcpustat_cpu(j).cpustat[CPUTIME_NICE];
+ idle_time += jiffies_to_usecs(cur_nice_jiffies);
+ }
+
+ if (dbs_data->governor == GOV_ONDEMAND) {
+ struct od_cpu_dbs_info_s *od_j_dbs_info =
+ dbs_data->get_cpu_dbs_info_s(cpu);
+
+ cur_iowait_time = get_cpu_iowait_time_us(j,
+ &cur_wall_time);
+ if (cur_iowait_time == -1ULL)
+ cur_iowait_time = 0;
+
+ iowait_time = (unsigned int) (cur_iowait_time -
+ od_j_dbs_info->prev_cpu_iowait);
+ od_j_dbs_info->prev_cpu_iowait = cur_iowait_time;
+
+ /*
+ * For the purpose of ondemand, waiting for disk IO is
+ * an indication that you're performance critical, and
+ * not that the system is actually idle. So subtract the
+ * iowait time from the cpu idle time.
+ */
+ if (od_tuners->io_is_busy && idle_time >= iowait_time)
+ idle_time -= iowait_time;
+ }
+
+ if (unlikely(!wall_time || wall_time < idle_time))
+ continue;
+
+ load = 100 * (wall_time - idle_time) / wall_time;
+
+ if (dbs_data->governor == GOV_ONDEMAND) {
+ int freq_avg = __cpufreq_driver_getavg(policy, j);
+ if (freq_avg <= 0)
+ freq_avg = policy->cur;
+
+ load *= freq_avg;
+ }
+
+ if (load > max_load)
+ max_load = load;
+ }
+
+ dbs_data->gov_check_cpu(cpu, max_load);
+}
+EXPORT_SYMBOL_GPL(dbs_check_cpu);
+
+static inline void dbs_timer_init(struct dbs_data *dbs_data,
+ struct cpu_dbs_common_info *cdbs, unsigned int sampling_rate)
+{
+ int delay = delay_for_sampling_rate(sampling_rate);
+
+ INIT_DEFERRABLE_WORK(&cdbs->work, dbs_data->gov_dbs_timer);
+ schedule_delayed_work_on(cdbs->cpu, &cdbs->work, delay);
+}
+
+static inline void dbs_timer_exit(struct cpu_dbs_common_info *cdbs)
+{
+ cancel_delayed_work_sync(&cdbs->work);
+}
+
+int cpufreq_governor_dbs(struct dbs_data *dbs_data,
+ struct cpufreq_policy *policy, unsigned int event)
+{
+ struct od_cpu_dbs_info_s *od_dbs_info = NULL;
+ struct cs_cpu_dbs_info_s *cs_dbs_info = NULL;
+ struct od_dbs_tuners *od_tuners = dbs_data->tuners;
+ struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
+ struct cpu_dbs_common_info *cpu_cdbs;
+ unsigned int *sampling_rate, latency, ignore_nice, j, cpu = policy->cpu;
+ int rc;
+
+ cpu_cdbs = dbs_data->get_cpu_cdbs(cpu);
+
+ if (dbs_data->governor == GOV_CONSERVATIVE) {
+ cs_dbs_info = dbs_data->get_cpu_dbs_info_s(cpu);
+ sampling_rate = &cs_tuners->sampling_rate;
+ ignore_nice = cs_tuners->ignore_nice;
+ } else {
+ od_dbs_info = dbs_data->get_cpu_dbs_info_s(cpu);
+ sampling_rate = &od_tuners->sampling_rate;
+ ignore_nice = od_tuners->ignore_nice;
+ }
+
+ switch (event) {
+ case CPUFREQ_GOV_START:
+ if ((!cpu_online(cpu)) || (!policy->cur))
+ return -EINVAL;
+
+ mutex_lock(&dbs_data->mutex);
+
+ dbs_data->enable++;
+ cpu_cdbs->cpu = cpu;
+ for_each_cpu(j, policy->cpus) {
+ struct cpu_dbs_common_info *j_cdbs;
+ j_cdbs = dbs_data->get_cpu_cdbs(j);
+
+ j_cdbs->cur_policy = policy;
+ j_cdbs->prev_cpu_idle = get_cpu_idle_time(j,
+ &j_cdbs->prev_cpu_wall);
+ if (ignore_nice)
+ j_cdbs->prev_cpu_nice =
+ kcpustat_cpu(j).cpustat[CPUTIME_NICE];
+ }
+
+ /*
+ * Start the timer-scheduled work when this governor is used for
+ * the first time

+ */
+ if (dbs_data->enable != 1)
+ goto second_time;
+
+ rc = sysfs_create_group(cpufreq_global_kobject,
+ dbs_data->attr_group);
+ if (rc) {
+ mutex_unlock(&dbs_data->mutex);
+ return rc;
+ }
+
+ /* policy latency is in nS. Convert it to uS first */
+ latency = policy->cpuinfo.transition_latency / 1000;
+ if (latency == 0)
+ latency = 1;
+
+ /*
+ * The conservative governor does not implement micro-idle
+ * accounting like the ondemand governor, thus we are bound to
+ * jiffies/HZ
+ */
+ if (dbs_data->governor == GOV_CONSERVATIVE) {
+ struct cs_ops *ops = dbs_data->gov_ops;
+
+ cpufreq_register_notifier(ops->notifier_block,
+ CPUFREQ_TRANSITION_NOTIFIER);
+
+ dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
+ jiffies_to_usecs(10);
+ } else {
+ struct od_ops *ops = dbs_data->gov_ops;
+
+ od_tuners->io_is_busy = ops->io_busy();
+ }
+
+ /* Bring kernel and HW constraints together */
+ dbs_data->min_sampling_rate = max(dbs_data->min_sampling_rate,
+ MIN_LATENCY_MULTIPLIER * latency);
+ *sampling_rate = max(dbs_data->min_sampling_rate, latency *
+ LATENCY_MULTIPLIER);
+
+second_time:
+ if (dbs_data->governor == GOV_CONSERVATIVE) {
+ cs_dbs_info->down_skip = 0;
+ cs_dbs_info->enable = 1;
+ cs_dbs_info->requested_freq = policy->cur;
+ } else {
+ struct od_ops *ops = dbs_data->gov_ops;
+ od_dbs_info->rate_mult = 1;
+ od_dbs_info->sample_type = OD_NORMAL_SAMPLE;
+ ops->powersave_bias_init_cpu(cpu);
+ }
+ mutex_unlock(&dbs_data->mutex);
+
+ mutex_init(&cpu_cdbs->timer_mutex);
+ dbs_timer_init(dbs_data, cpu_cdbs, *sampling_rate);
+ break;
+
+ case CPUFREQ_GOV_STOP:
+ if (dbs_data->governor == GOV_CONSERVATIVE)
+ cs_dbs_info->enable = 0;
+
+ dbs_timer_exit(cpu_cdbs);
+
+ mutex_lock(&dbs_data->mutex);
+ mutex_destroy(&cpu_cdbs->timer_mutex);
+ dbs_data->enable--;
+ if (!dbs_data->enable) {
+ struct cs_ops *ops = dbs_data->gov_ops;
+
+ sysfs_remove_group(cpufreq_global_kobject,
+ dbs_data->attr_group);
+ if (dbs_data->governor == GOV_CONSERVATIVE)
+ cpufreq_unregister_notifier(ops->notifier_block,
+ CPUFREQ_TRANSITION_NOTIFIER);
+ }
+ mutex_unlock(&dbs_data->mutex);
+
+ break;
+
+ case CPUFREQ_GOV_LIMITS:
+ mutex_lock(&cpu_cdbs->timer_mutex);
+ if (policy->max < cpu_cdbs->cur_policy->cur)
+ __cpufreq_driver_target(cpu_cdbs->cur_policy,
+ policy->max, CPUFREQ_RELATION_H);
+ else if (policy->min > cpu_cdbs->cur_policy->cur)
+ __cpufreq_driver_target(cpu_cdbs->cur_policy,
+ policy->min, CPUFREQ_RELATION_L);
+ dbs_check_cpu(dbs_data, cpu);
+ mutex_unlock(&cpu_cdbs->timer_mutex);
+ break;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cpufreq_governor_dbs);
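The per-CPU load estimate that dbs_check_cpu() now computes for both governors is the busy fraction of the elapsed wall time, with iowait optionally treated as busy time in the ondemand case. A self-contained user-space sketch of that arithmetic, using made-up sample values, looks like this:

    #include <stdio.h>

    /*
     * Compute load the way dbs_check_cpu() does: the busy fraction of the
     * elapsed wall time, discounting iowait when it should count as busy
     * time (the ondemand io_is_busy case).
     */
    static unsigned int compute_load(unsigned int wall_time,
                                     unsigned int idle_time,
                                     unsigned int iowait_time,
                                     int io_is_busy)
    {
        if (io_is_busy && idle_time >= iowait_time)
            idle_time -= iowait_time;

        if (!wall_time || wall_time < idle_time)
            return 0;   /* bogus sample; the kernel code skips it */

        return 100 * (wall_time - idle_time) / wall_time;
    }

    int main(void)
    {
        /* e.g. a 10 ms window: 10000 us wall, 4000 us idle, 1000 us iowait */
        printf("load = %u%%\n", compute_load(10000, 4000, 1000, 1));
        return 0;
    }
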
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
new file mode 100644
index 000000000000..f6616540c53d
--- /dev/null
+++ b/drivers/cpufreq/cpufreq_governor.h
@@ -0,0 +1,176 @@
+/*
+ * drivers/cpufreq/cpufreq_governor.h
+ *
+ * Header file for CPUFreq governors common code
+ *
+ * Copyright (C) 2001 Russell King
+ * (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
+ * (C) 2003 Jun Nakajima <jun.nakajima@intel.com>
+ * (C) 2009 Alexander Clouter <alex@digriz.org.uk>
+ * (c) 2012 Viresh Kumar <viresh.kumar@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _CPUFREQ_GOVERNER_H
+#define _CPUFREQ_GOVERNER_H
+
+#include <linux/cpufreq.h>
+#include <linux/kobject.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+#include <linux/sysfs.h>
+
+/*
+ * The polling frequency depends on the capability of the processor. Default
+ * polling frequency is 1000 times the transition latency of the processor. The
+ * governor will work on any processor with transition latency <= 10mS, using
+ * appropriate sampling rate.
+ *
+ * For CPUs with transition latency > 10mS (mostly drivers with CPUFREQ_ETERNAL)
+ * this governor will not work. All times here are in uS.
+ */
+#define MIN_SAMPLING_RATE_RATIO (2)
+#define LATENCY_MULTIPLIER (1000)
+#define MIN_LATENCY_MULTIPLIER (100)
+#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000)
+
+/* Ondemand Sampling types */
+enum {OD_NORMAL_SAMPLE, OD_SUB_SAMPLE};
+
+/* Macro creating sysfs show routines */
+#define show_one(_gov, file_name, object) \
+static ssize_t show_##file_name \
+(struct kobject *kobj, struct attribute *attr, char *buf) \
+{ \
+ return sprintf(buf, "%u\n", _gov##_tuners.object); \
+}
+
+#define define_get_cpu_dbs_routines(_dbs_info) \
+static struct cpu_dbs_common_info *get_cpu_cdbs(int cpu) \
+{ \
+ return &per_cpu(_dbs_info, cpu).cdbs; \
+} \
+ \
+static void *get_cpu_dbs_info_s(int cpu) \
+{ \
+ return &per_cpu(_dbs_info, cpu); \
+}
+
+/*
+ * Abbreviations:
+ * dbs: used as a short form for demand-based switching. It helps to keep
+ * variable names smaller and simpler
+ * cdbs: common dbs
+ * od_*: On-demand governor
+ * cs_*: Conservative governor
+ */
+
+/* Per cpu structures */
+struct cpu_dbs_common_info {
+ int cpu;
+ u64 prev_cpu_idle;
+ u64 prev_cpu_wall;
+ u64 prev_cpu_nice;
+ struct cpufreq_policy *cur_policy;
+ struct delayed_work work;
+ /*
+ * percpu mutex that serializes governor limit change with gov_dbs_timer
+ * invocation. We do not want gov_dbs_timer to run when the user is changing
+ * the governor or limits.
+ */
+ struct mutex timer_mutex;
+};
+
+struct od_cpu_dbs_info_s {
+ struct cpu_dbs_common_info cdbs;
+ u64 prev_cpu_iowait;
+ struct cpufreq_frequency_table *freq_table;
+ unsigned int freq_lo;
+ unsigned int freq_lo_jiffies;
+ unsigned int freq_hi_jiffies;
+ unsigned int rate_mult;
+ unsigned int sample_type:1;
+};
+
+struct cs_cpu_dbs_info_s {
+ struct cpu_dbs_common_info cdbs;
+ unsigned int down_skip;
+ unsigned int requested_freq;
+ unsigned int enable:1;
+};
+
+/* Governors' sysfs tunables */
+struct od_dbs_tuners {
+ unsigned int ignore_nice;
+ unsigned int sampling_rate;
+ unsigned int sampling_down_factor;
+ unsigned int up_threshold;
+ unsigned int down_differential;
+ unsigned int powersave_bias;
+ unsigned int io_is_busy;
+};
+
+struct cs_dbs_tuners {
+ unsigned int ignore_nice;
+ unsigned int sampling_rate;
+ unsigned int sampling_down_factor;
+ unsigned int up_threshold;
+ unsigned int down_threshold;
+ unsigned int freq_step;
+};
+
+/* Per-governor data */
+struct dbs_data {
+ /* Common across governors */
+ #define GOV_ONDEMAND 0
+ #define GOV_CONSERVATIVE 1
+ int governor;
+ unsigned int min_sampling_rate;
+ unsigned int enable; /* number of CPUs using this policy */
+ struct attribute_group *attr_group;
+ void *tuners;
+
+ /* dbs_mutex protects dbs_enable in governor start/stop */
+ struct mutex mutex;
+
+ struct cpu_dbs_common_info *(*get_cpu_cdbs)(int cpu);
+ void *(*get_cpu_dbs_info_s)(int cpu);
+ void (*gov_dbs_timer)(struct work_struct *work);
+ void (*gov_check_cpu)(int cpu, unsigned int load);
+
+ /* Governor specific ops, see below */
+ void *gov_ops;
+};
+
+/* Governor specific ops, will be passed to dbs_data->gov_ops */
+struct od_ops {
+ int (*io_busy)(void);
+ void (*powersave_bias_init_cpu)(int cpu);
+ unsigned int (*powersave_bias_target)(struct cpufreq_policy *policy,
+ unsigned int freq_next, unsigned int relation);
+ void (*freq_increase)(struct cpufreq_policy *p, unsigned int freq);
+};
+
+struct cs_ops {
+ struct notifier_block *notifier_block;
+};
+
+static inline int delay_for_sampling_rate(unsigned int sampling_rate)
+{
+ int delay = usecs_to_jiffies(sampling_rate);
+
+ /* We want all CPUs to do sampling nearly on same jiffy */
+ if (num_online_cpus() > 1)
+ delay -= jiffies % delay;
+
+ return delay;
+}
+
+u64 get_cpu_idle_time(unsigned int cpu, u64 *wall);
+void dbs_check_cpu(struct dbs_data *dbs_data, int cpu);
+int cpufreq_governor_dbs(struct dbs_data *dbs_data,
+ struct cpufreq_policy *policy, unsigned int event);
+#endif /* _CPUFREQ_GOVERNER_H */
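The delay_for_sampling_rate() helper above aligns the next sample to a jiffy boundary so that all CPUs end up sampling at roughly the same time. A user-space approximation of the same calculation (HZ and the jiffies counter are stand-ins chosen for the sketch, not kernel values):

    #include <stdio.h>

    #define HZ 100                          /* assumed tick rate for the sketch */

    static unsigned long jiffies;           /* stand-in for the kernel's jiffies */

    static int usecs_to_jiffies(unsigned int usecs)
    {
        return usecs / (1000000 / HZ);
    }

    /* mirrors delay_for_sampling_rate(): trim the partial jiffy already elapsed */
    static int delay_for_sampling_rate(unsigned int sampling_rate, int nr_cpus)
    {
        int delay = usecs_to_jiffies(sampling_rate);

        if (nr_cpus > 1)
            delay -= jiffies % delay;

        return delay;
    }

    int main(void)
    {
        jiffies = 1003;                     /* pretend 3 ticks past a sampling edge */
        /* 100 ms sampling rate on 4 CPUs -> 10 jiffies, trimmed to 7 */
        printf("delay = %d jiffies\n", delay_for_sampling_rate(100000, 4));
        return 0;
    }
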
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 396322f2a83f..7731f7c7e79a 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -10,24 +10,23 @@
* published by the Free Software Foundation.
*/
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/cpufreq.h>
-#include <linux/cpu.h>
-#include <linux/jiffies.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
#include <linux/kernel_stat.h>
+#include <linux/kobject.h>
+#include <linux/module.h>
#include <linux/mutex.h>
-#include <linux/hrtimer.h>
+#include <linux/percpu-defs.h>
+#include <linux/sysfs.h>
#include <linux/tick.h>
-#include <linux/ktime.h>
-#include <linux/sched.h>
+#include <linux/types.h>
-/*
- * dbs is used in this file as a shortform for demandbased switching
- * It helps to keep variable names smaller, simpler
- */
+#include "cpufreq_governor.h"
+/* On-demand governor macros */
#define DEF_FREQUENCY_DOWN_DIFFERENTIAL (10)
#define DEF_FREQUENCY_UP_THRESHOLD (80)
#define DEF_SAMPLING_DOWN_FACTOR (1)
@@ -38,80 +37,14 @@
#define MIN_FREQUENCY_UP_THRESHOLD (11)
#define MAX_FREQUENCY_UP_THRESHOLD (100)
-/*
- * The polling frequency of this governor depends on the capability of
- * the processor. Default polling frequency is 1000 times the transition
- * latency of the processor. The governor will work on any processor with
- * transition latency <= 10mS, using appropriate sampling
- * rate.
- * For CPUs with transition latency > 10mS (mostly drivers with CPUFREQ_ETERNAL)
- * this governor will not work.
- * All times here are in uS.
- */
-#define MIN_SAMPLING_RATE_RATIO (2)
-
-static unsigned int min_sampling_rate;
-
-#define LATENCY_MULTIPLIER (1000)
-#define MIN_LATENCY_MULTIPLIER (100)
-#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000)
-
-static void do_dbs_timer(struct work_struct *work);
-static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
- unsigned int event);
+static struct dbs_data od_dbs_data;
+static DEFINE_PER_CPU(struct od_cpu_dbs_info_s, od_cpu_dbs_info);
#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
-static
+static struct cpufreq_governor cpufreq_gov_ondemand;
#endif
-struct cpufreq_governor cpufreq_gov_ondemand = {
- .name = "ondemand",
- .governor = cpufreq_governor_dbs,
- .max_transition_latency = TRANSITION_LATENCY_LIMIT,
- .owner = THIS_MODULE,
-};
-/* Sampling types */
-enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE};
-
-struct cpu_dbs_info_s {
- cputime64_t prev_cpu_idle;
- cputime64_t prev_cpu_iowait;
- cputime64_t prev_cpu_wall;
- cputime64_t prev_cpu_nice;
- struct cpufreq_policy *cur_policy;
- struct delayed_work work;
- struct cpufreq_frequency_table *freq_table;
- unsigned int freq_lo;
- unsigned int freq_lo_jiffies;
- unsigned int freq_hi_jiffies;
- unsigned int rate_mult;
- int cpu;
- unsigned int sample_type:1;
- /*
- * percpu mutex that serializes governor limit change with
- * do_dbs_timer invocation. We do not want do_dbs_timer to run
- * when user is changing the governor or limits.
- */
- struct mutex timer_mutex;
-};
-static DEFINE_PER_CPU(struct cpu_dbs_info_s, od_cpu_dbs_info);
-
-static unsigned int dbs_enable; /* number of CPUs using this policy */
-
-/*
- * dbs_mutex protects dbs_enable in governor start/stop.
- */
-static DEFINE_MUTEX(dbs_mutex);
-
-static struct dbs_tuners {
- unsigned int sampling_rate;
- unsigned int up_threshold;
- unsigned int down_differential;
- unsigned int ignore_nice;
- unsigned int sampling_down_factor;
- unsigned int powersave_bias;
- unsigned int io_is_busy;
-} dbs_tuners_ins = {
+static struct od_dbs_tuners od_tuners = {
.up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
.sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
.down_differential = DEF_FREQUENCY_DOWN_DIFFERENTIAL,
@@ -119,48 +52,35 @@ static struct dbs_tuners {
.powersave_bias = 0,
};
-static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
-{
- u64 idle_time;
- u64 cur_wall_time;
- u64 busy_time;
-
- cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
-
- busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
- busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
- busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
- busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
- busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
- busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
-
- idle_time = cur_wall_time - busy_time;
- if (wall)
- *wall = jiffies_to_usecs(cur_wall_time);
-
- return jiffies_to_usecs(idle_time);
-}
-
-static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
+static void ondemand_powersave_bias_init_cpu(int cpu)
{
- u64 idle_time = get_cpu_idle_time_us(cpu, NULL);
-
- if (idle_time == -1ULL)
- return get_cpu_idle_time_jiffy(cpu, wall);
- else
- idle_time += get_cpu_iowait_time_us(cpu, wall);
+ struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
- return idle_time;
+ dbs_info->freq_table = cpufreq_frequency_get_table(cpu);
+ dbs_info->freq_lo = 0;
}
-static inline cputime64_t get_cpu_iowait_time(unsigned int cpu, cputime64_t *wall)
+/*
+ * Not all CPUs want IO time to be accounted as busy; this depends on how
+ * efficient idling at a higher frequency/voltage is.
+ * Pavel Machek says this is not so for various generations of AMD and old
+ * Intel systems.
+ * Mike Chan (androidlcom) claims this is also not true for ARM.
+ * Because of this, whitelist specific known series of CPUs by default, and
+ * leave all others up to the user.
+ */
+static int should_io_be_busy(void)
{
- u64 iowait_time = get_cpu_iowait_time_us(cpu, wall);
-
- if (iowait_time == -1ULL)
- return 0;
-
- return iowait_time;
+#if defined(CONFIG_X86)
+ /*
+ * For Intel, Core 2 (model 15) and later have an efficient idle.
+ */
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
+ boot_cpu_data.x86 == 6 &&
+ boot_cpu_data.x86_model >= 15)
+ return 1;
+#endif
+ return 0;
}
/*
@@ -169,14 +89,13 @@ static inline cputime64_t get_cpu_iowait_time(unsigned int cpu, cputime64_t *wal
* freq_lo, and freq_lo_jiffies in percpu area for averaging freqs.
*/
static unsigned int powersave_bias_target(struct cpufreq_policy *policy,
- unsigned int freq_next,
- unsigned int relation)
+ unsigned int freq_next, unsigned int relation)
{
unsigned int freq_req, freq_reduc, freq_avg;
unsigned int freq_hi, freq_lo;
unsigned int index = 0;
unsigned int jiffies_total, jiffies_hi, jiffies_lo;
- struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
+ struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
policy->cpu);
if (!dbs_info->freq_table) {
@@ -188,7 +107,7 @@ static unsigned int powersave_bias_target(struct cpufreq_policy *policy,
cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_next,
relation, &index);
freq_req = dbs_info->freq_table[index].frequency;
- freq_reduc = freq_req * dbs_tuners_ins.powersave_bias / 1000;
+ freq_reduc = freq_req * od_tuners.powersave_bias / 1000;
freq_avg = freq_req - freq_reduc;
/* Find freq bounds for freq_avg in freq_table */
@@ -207,7 +126,7 @@ static unsigned int powersave_bias_target(struct cpufreq_policy *policy,
dbs_info->freq_lo_jiffies = 0;
return freq_lo;
}
- jiffies_total = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
+ jiffies_total = usecs_to_jiffies(od_tuners.sampling_rate);
jiffies_hi = (freq_avg - freq_lo) * jiffies_total;
jiffies_hi += ((freq_hi - freq_lo) / 2);
jiffies_hi /= (freq_hi - freq_lo);
@@ -218,13 +137,6 @@ static unsigned int powersave_bias_target(struct cpufreq_policy *policy,
return freq_hi;
}
-static void ondemand_powersave_bias_init_cpu(int cpu)
-{
- struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
- dbs_info->freq_table = cpufreq_frequency_get_table(cpu);
- dbs_info->freq_lo = 0;
-}
-
static void ondemand_powersave_bias_init(void)
{
int i;
@@ -233,83 +145,173 @@ static void ondemand_powersave_bias_init(void)
}
}
-/************************** sysfs interface ************************/
+static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq)
+{
+ if (od_tuners.powersave_bias)
+ freq = powersave_bias_target(p, freq, CPUFREQ_RELATION_H);
+ else if (p->cur == p->max)
+ return;
-static ssize_t show_sampling_rate_min(struct kobject *kobj,
- struct attribute *attr, char *buf)
+ __cpufreq_driver_target(p, freq, od_tuners.powersave_bias ?
+ CPUFREQ_RELATION_L : CPUFREQ_RELATION_H);
+}
+
+/*
+ * Every sampling_rate, we check if the current idle time is less than 20%
+ * (default); if it is, we try to increase the frequency. Every sampling_rate,
+ * we also look for the lowest frequency which can sustain the load while
+ * keeping idle time over 30%. If such a frequency exists, we try to decrease
+ * to this frequency.
+ *
+ * Any frequency increase takes it to the maximum frequency. Frequency
+ * reduction happens at minimum steps of 5% (default) of the current frequency.
+ */
+static void od_check_cpu(int cpu, unsigned int load_freq)
{
- return sprintf(buf, "%u\n", min_sampling_rate);
+ struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
+ struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy;
+
+ dbs_info->freq_lo = 0;
+
+ /* Check for frequency increase */
+ if (load_freq > od_tuners.up_threshold * policy->cur) {
+ /* If switching to max speed, apply sampling_down_factor */
+ if (policy->cur < policy->max)
+ dbs_info->rate_mult =
+ od_tuners.sampling_down_factor;
+ dbs_freq_increase(policy, policy->max);
+ return;
+ }
+
+ /* Check for frequency decrease */
+ /* if we cannot reduce the frequency anymore, break out early */
+ if (policy->cur == policy->min)
+ return;
+
+ /*
+ * The optimal frequency is the frequency that is the lowest that can
+ * support the current CPU usage without triggering the up policy. To be
+ * safe, we focus 10 points under the threshold.
+ */
+ if (load_freq < (od_tuners.up_threshold - od_tuners.down_differential) *
+ policy->cur) {
+ unsigned int freq_next;
+ freq_next = load_freq / (od_tuners.up_threshold -
+ od_tuners.down_differential);
+
+ /* No longer fully busy, reset rate_mult */
+ dbs_info->rate_mult = 1;
+
+ if (freq_next < policy->min)
+ freq_next = policy->min;
+
+ if (!od_tuners.powersave_bias) {
+ __cpufreq_driver_target(policy, freq_next,
+ CPUFREQ_RELATION_L);
+ } else {
+ int freq = powersave_bias_target(policy, freq_next,
+ CPUFREQ_RELATION_L);
+ __cpufreq_driver_target(policy, freq,
+ CPUFREQ_RELATION_L);
+ }
+ }
}
-define_one_global_ro(sampling_rate_min);
+static void od_dbs_timer(struct work_struct *work)
+{
+ struct od_cpu_dbs_info_s *dbs_info =
+ container_of(work, struct od_cpu_dbs_info_s, cdbs.work.work);
+ unsigned int cpu = dbs_info->cdbs.cpu;
+ int delay, sample_type = dbs_info->sample_type;
+
+ mutex_lock(&dbs_info->cdbs.timer_mutex);
+
+ /* Common NORMAL_SAMPLE setup */
+ dbs_info->sample_type = OD_NORMAL_SAMPLE;
+ if (sample_type == OD_SUB_SAMPLE) {
+ delay = dbs_info->freq_lo_jiffies;
+ __cpufreq_driver_target(dbs_info->cdbs.cur_policy,
+ dbs_info->freq_lo, CPUFREQ_RELATION_H);
+ } else {
+ dbs_check_cpu(&od_dbs_data, cpu);
+ if (dbs_info->freq_lo) {
+ /* Setup timer for SUB_SAMPLE */
+ dbs_info->sample_type = OD_SUB_SAMPLE;
+ delay = dbs_info->freq_hi_jiffies;
+ } else {
+ delay = delay_for_sampling_rate(od_tuners.sampling_rate
+ * dbs_info->rate_mult);
+ }
+ }
+
+ schedule_delayed_work_on(cpu, &dbs_info->cdbs.work, delay);
+ mutex_unlock(&dbs_info->cdbs.timer_mutex);
+}
+
+/************************** sysfs interface ************************/
-/* cpufreq_ondemand Governor Tunables */
-#define show_one(file_name, object) \
-static ssize_t show_##file_name \
-(struct kobject *kobj, struct attribute *attr, char *buf) \
-{ \
- return sprintf(buf, "%u\n", dbs_tuners_ins.object); \
+static ssize_t show_sampling_rate_min(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ return sprintf(buf, "%u\n", od_dbs_data.min_sampling_rate);
}
-show_one(sampling_rate, sampling_rate);
-show_one(io_is_busy, io_is_busy);
-show_one(up_threshold, up_threshold);
-show_one(sampling_down_factor, sampling_down_factor);
-show_one(ignore_nice_load, ignore_nice);
-show_one(powersave_bias, powersave_bias);
/**
* update_sampling_rate - update sampling rate effective immediately if needed.
* @new_rate: new sampling rate
*
 * If new rate is smaller than the old, simply updating
- * dbs_tuners_int.sampling_rate might not be appropriate. For example,
- * if the original sampling_rate was 1 second and the requested new sampling
- * rate is 10 ms because the user needs immediate reaction from ondemand
- * governor, but not sure if higher frequency will be required or not,
- * then, the governor may change the sampling rate too late; up to 1 second
- * later. Thus, if we are reducing the sampling rate, we need to make the
- * new value effective immediately.
+ * od_tuners.sampling_rate might not be appropriate. For example, if the
+ * original sampling_rate was 1 second and the requested new sampling rate is 10
+ * ms because the user needs immediate reaction from ondemand governor, but not
+ * sure if higher frequency will be required or not, then, the governor may
+ * change the sampling rate too late; up to 1 second later. Thus, if we are
+ * reducing the sampling rate, we need to make the new value effective
+ * immediately.
*/
static void update_sampling_rate(unsigned int new_rate)
{
int cpu;
- dbs_tuners_ins.sampling_rate = new_rate
- = max(new_rate, min_sampling_rate);
+ od_tuners.sampling_rate = new_rate = max(new_rate,
+ od_dbs_data.min_sampling_rate);
for_each_online_cpu(cpu) {
struct cpufreq_policy *policy;
- struct cpu_dbs_info_s *dbs_info;
+ struct od_cpu_dbs_info_s *dbs_info;
unsigned long next_sampling, appointed_at;
policy = cpufreq_cpu_get(cpu);
if (!policy)
continue;
+ if (policy->governor != &cpufreq_gov_ondemand) {
+ cpufreq_cpu_put(policy);
+ continue;
+ }
dbs_info = &per_cpu(od_cpu_dbs_info, policy->cpu);
cpufreq_cpu_put(policy);
- mutex_lock(&dbs_info->timer_mutex);
+ mutex_lock(&dbs_info->cdbs.timer_mutex);
- if (!delayed_work_pending(&dbs_info->work)) {
- mutex_unlock(&dbs_info->timer_mutex);
+ if (!delayed_work_pending(&dbs_info->cdbs.work)) {
+ mutex_unlock(&dbs_info->cdbs.timer_mutex);
continue;
}
- next_sampling = jiffies + usecs_to_jiffies(new_rate);
- appointed_at = dbs_info->work.timer.expires;
-
+ next_sampling = jiffies + usecs_to_jiffies(new_rate);
+ appointed_at = dbs_info->cdbs.work.timer.expires;
if (time_before(next_sampling, appointed_at)) {
- mutex_unlock(&dbs_info->timer_mutex);
- cancel_delayed_work_sync(&dbs_info->work);
- mutex_lock(&dbs_info->timer_mutex);
+ mutex_unlock(&dbs_info->cdbs.timer_mutex);
+ cancel_delayed_work_sync(&dbs_info->cdbs.work);
+ mutex_lock(&dbs_info->cdbs.timer_mutex);
- schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work,
- usecs_to_jiffies(new_rate));
+ schedule_delayed_work_on(dbs_info->cdbs.cpu,
+ &dbs_info->cdbs.work,
+ usecs_to_jiffies(new_rate));
}
- mutex_unlock(&dbs_info->timer_mutex);
+ mutex_unlock(&dbs_info->cdbs.timer_mutex);
}
}
@@ -334,7 +336,7 @@ static ssize_t store_io_is_busy(struct kobject *a, struct attribute *b,
ret = sscanf(buf, "%u", &input);
if (ret != 1)
return -EINVAL;
- dbs_tuners_ins.io_is_busy = !!input;
+ od_tuners.io_is_busy = !!input;
return count;
}
@@ -349,7 +351,7 @@ static ssize_t store_up_threshold(struct kobject *a, struct attribute *b,
input < MIN_FREQUENCY_UP_THRESHOLD) {
return -EINVAL;
}
- dbs_tuners_ins.up_threshold = input;
+ od_tuners.up_threshold = input;
return count;
}
@@ -362,12 +364,12 @@ static ssize_t store_sampling_down_factor(struct kobject *a,
if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
return -EINVAL;
- dbs_tuners_ins.sampling_down_factor = input;
+ od_tuners.sampling_down_factor = input;
/* Reset down sampling multiplier in case it was active */
for_each_online_cpu(j) {
- struct cpu_dbs_info_s *dbs_info;
- dbs_info = &per_cpu(od_cpu_dbs_info, j);
+ struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
+ j);
dbs_info->rate_mult = 1;
}
return count;
@@ -388,19 +390,20 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
if (input > 1)
input = 1;
- if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
+ if (input == od_tuners.ignore_nice) { /* nothing to do */
return count;
}
- dbs_tuners_ins.ignore_nice = input;
+ od_tuners.ignore_nice = input;
/* we need to re-evaluate prev_cpu_idle */
for_each_online_cpu(j) {
- struct cpu_dbs_info_s *dbs_info;
+ struct od_cpu_dbs_info_s *dbs_info;
dbs_info = &per_cpu(od_cpu_dbs_info, j);
- dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
- &dbs_info->prev_cpu_wall);
- if (dbs_tuners_ins.ignore_nice)
- dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
+ dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
+ &dbs_info->cdbs.prev_cpu_wall);
+ if (od_tuners.ignore_nice)
+ dbs_info->cdbs.prev_cpu_nice =
+ kcpustat_cpu(j).cpustat[CPUTIME_NICE];
}
return count;
@@ -419,17 +422,25 @@ static ssize_t store_powersave_bias(struct kobject *a, struct attribute *b,
if (input > 1000)
input = 1000;
- dbs_tuners_ins.powersave_bias = input;
+ od_tuners.powersave_bias = input;
ondemand_powersave_bias_init();
return count;
}
+show_one(od, sampling_rate, sampling_rate);
+show_one(od, io_is_busy, io_is_busy);
+show_one(od, up_threshold, up_threshold);
+show_one(od, sampling_down_factor, sampling_down_factor);
+show_one(od, ignore_nice_load, ignore_nice);
+show_one(od, powersave_bias, powersave_bias);
+
define_one_global_rw(sampling_rate);
define_one_global_rw(io_is_busy);
define_one_global_rw(up_threshold);
define_one_global_rw(sampling_down_factor);
define_one_global_rw(ignore_nice_load);
define_one_global_rw(powersave_bias);
+define_one_global_ro(sampling_rate_min);
static struct attribute *dbs_attributes[] = {
&sampling_rate_min.attr,
@@ -442,354 +453,71 @@ static struct attribute *dbs_attributes[] = {
NULL
};
-static struct attribute_group dbs_attr_group = {
+static struct attribute_group od_attr_group = {
.attrs = dbs_attributes,
.name = "ondemand",
};
/************************** sysfs end ************************/
-static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq)
-{
- if (dbs_tuners_ins.powersave_bias)
- freq = powersave_bias_target(p, freq, CPUFREQ_RELATION_H);
- else if (p->cur == p->max)
- return;
-
- __cpufreq_driver_target(p, freq, dbs_tuners_ins.powersave_bias ?
- CPUFREQ_RELATION_L : CPUFREQ_RELATION_H);
-}
-
-static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
-{
- unsigned int max_load_freq;
-
- struct cpufreq_policy *policy;
- unsigned int j;
-
- this_dbs_info->freq_lo = 0;
- policy = this_dbs_info->cur_policy;
-
- /*
- * Every sampling_rate, we check, if current idle time is less
- * than 20% (default), then we try to increase frequency
- * Every sampling_rate, we look for a the lowest
- * frequency which can sustain the load while keeping idle time over
- * 30%. If such a frequency exist, we try to decrease to this frequency.
- *
- * Any frequency increase takes it to the maximum frequency.
- * Frequency reduction happens at minimum steps of
- * 5% (default) of current frequency
- */
-
- /* Get Absolute Load - in terms of freq */
- max_load_freq = 0;
-
- for_each_cpu(j, policy->cpus) {
- struct cpu_dbs_info_s *j_dbs_info;
- cputime64_t cur_wall_time, cur_idle_time, cur_iowait_time;
- unsigned int idle_time, wall_time, iowait_time;
- unsigned int load, load_freq;
- int freq_avg;
-
- j_dbs_info = &per_cpu(od_cpu_dbs_info, j);
-
- cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);
- cur_iowait_time = get_cpu_iowait_time(j, &cur_wall_time);
-
- wall_time = (unsigned int)
- (cur_wall_time - j_dbs_info->prev_cpu_wall);
- j_dbs_info->prev_cpu_wall = cur_wall_time;
-
- idle_time = (unsigned int)
- (cur_idle_time - j_dbs_info->prev_cpu_idle);
- j_dbs_info->prev_cpu_idle = cur_idle_time;
-
- iowait_time = (unsigned int)
- (cur_iowait_time - j_dbs_info->prev_cpu_iowait);
- j_dbs_info->prev_cpu_iowait = cur_iowait_time;
-
- if (dbs_tuners_ins.ignore_nice) {
- u64 cur_nice;
- unsigned long cur_nice_jiffies;
-
- cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] -
- j_dbs_info->prev_cpu_nice;
- /*
- * Assumption: nice time between sampling periods will
- * be less than 2^32 jiffies for 32 bit sys
- */
- cur_nice_jiffies = (unsigned long)
- cputime64_to_jiffies64(cur_nice);
-
- j_dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
- idle_time += jiffies_to_usecs(cur_nice_jiffies);
- }
-
- /*
- * For the purpose of ondemand, waiting for disk IO is an
- * indication that you're performance critical, and not that
- * the system is actually idle. So subtract the iowait time
- * from the cpu idle time.
- */
-
- if (dbs_tuners_ins.io_is_busy && idle_time >= iowait_time)
- idle_time -= iowait_time;
-
- if (unlikely(!wall_time || wall_time < idle_time))
- continue;
-
- load = 100 * (wall_time - idle_time) / wall_time;
-
- freq_avg = __cpufreq_driver_getavg(policy, j);
- if (freq_avg <= 0)
- freq_avg = policy->cur;
-
- load_freq = load * freq_avg;
- if (load_freq > max_load_freq)
- max_load_freq = load_freq;
- }
+define_get_cpu_dbs_routines(od_cpu_dbs_info);
- /* Check for frequency increase */
- if (max_load_freq > dbs_tuners_ins.up_threshold * policy->cur) {
- /* If switching to max speed, apply sampling_down_factor */
- if (policy->cur < policy->max)
- this_dbs_info->rate_mult =
- dbs_tuners_ins.sampling_down_factor;
- dbs_freq_increase(policy, policy->max);
- return;
- }
-
- /* Check for frequency decrease */
- /* if we cannot reduce the frequency anymore, break out early */
- if (policy->cur == policy->min)
- return;
-
- /*
- * The optimal frequency is the frequency that is the lowest that
- * can support the current CPU usage without triggering the up
- * policy. To be safe, we focus 10 points under the threshold.
- */
- if (max_load_freq <
- (dbs_tuners_ins.up_threshold - dbs_tuners_ins.down_differential) *
- policy->cur) {
- unsigned int freq_next;
- freq_next = max_load_freq /
- (dbs_tuners_ins.up_threshold -
- dbs_tuners_ins.down_differential);
-
- /* No longer fully busy, reset rate_mult */
- this_dbs_info->rate_mult = 1;
-
- if (freq_next < policy->min)
- freq_next = policy->min;
-
- if (!dbs_tuners_ins.powersave_bias) {
- __cpufreq_driver_target(policy, freq_next,
- CPUFREQ_RELATION_L);
- } else {
- int freq = powersave_bias_target(policy, freq_next,
- CPUFREQ_RELATION_L);
- __cpufreq_driver_target(policy, freq,
- CPUFREQ_RELATION_L);
- }
- }
-}
-
-static void do_dbs_timer(struct work_struct *work)
-{
- struct cpu_dbs_info_s *dbs_info =
- container_of(work, struct cpu_dbs_info_s, work.work);
- unsigned int cpu = dbs_info->cpu;
- int sample_type = dbs_info->sample_type;
-
- int delay;
-
- mutex_lock(&dbs_info->timer_mutex);
-
- /* Common NORMAL_SAMPLE setup */
- dbs_info->sample_type = DBS_NORMAL_SAMPLE;
- if (!dbs_tuners_ins.powersave_bias ||
- sample_type == DBS_NORMAL_SAMPLE) {
- dbs_check_cpu(dbs_info);
- if (dbs_info->freq_lo) {
- /* Setup timer for SUB_SAMPLE */
- dbs_info->sample_type = DBS_SUB_SAMPLE;
- delay = dbs_info->freq_hi_jiffies;
- } else {
- /* We want all CPUs to do sampling nearly on
- * same jiffy
- */
- delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate
- * dbs_info->rate_mult);
-
- if (num_online_cpus() > 1)
- delay -= jiffies % delay;
- }
- } else {
- __cpufreq_driver_target(dbs_info->cur_policy,
- dbs_info->freq_lo, CPUFREQ_RELATION_H);
- delay = dbs_info->freq_lo_jiffies;
- }
- schedule_delayed_work_on(cpu, &dbs_info->work, delay);
- mutex_unlock(&dbs_info->timer_mutex);
-}
-
-static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
-{
- /* We want all CPUs to do sampling nearly on same jiffy */
- int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
-
- if (num_online_cpus() > 1)
- delay -= jiffies % delay;
+static struct od_ops od_ops = {
+ .io_busy = should_io_be_busy,
+ .powersave_bias_init_cpu = ondemand_powersave_bias_init_cpu,
+ .powersave_bias_target = powersave_bias_target,
+ .freq_increase = dbs_freq_increase,
+};
- dbs_info->sample_type = DBS_NORMAL_SAMPLE;
- INIT_DEFERRABLE_WORK(&dbs_info->work, do_dbs_timer);
- schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work, delay);
-}
+static struct dbs_data od_dbs_data = {
+ .governor = GOV_ONDEMAND,
+ .attr_group = &od_attr_group,
+ .tuners = &od_tuners,
+ .get_cpu_cdbs = get_cpu_cdbs,
+ .get_cpu_dbs_info_s = get_cpu_dbs_info_s,
+ .gov_dbs_timer = od_dbs_timer,
+ .gov_check_cpu = od_check_cpu,
+ .gov_ops = &od_ops,
+};
-static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
+static int od_cpufreq_governor_dbs(struct cpufreq_policy *policy,
+ unsigned int event)
{
- cancel_delayed_work_sync(&dbs_info->work);
+ return cpufreq_governor_dbs(&od_dbs_data, policy, event);
}
-/*
- * Not all CPUs want IO time to be accounted as busy; this dependson how
- * efficient idling at a higher frequency/voltage is.
- * Pavel Machek says this is not so for various generations of AMD and old
- * Intel systems.
- * Mike Chan (androidlcom) calis this is also not true for ARM.
- * Because of this, whitelist specific known (series) of CPUs by default, and
- * leave all others up to the user.
- */
-static int should_io_be_busy(void)
-{
-#if defined(CONFIG_X86)
- /*
- * For Intel, Core 2 (model 15) andl later have an efficient idle.
- */
- if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
- boot_cpu_data.x86 == 6 &&
- boot_cpu_data.x86_model >= 15)
- return 1;
+#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
+static
#endif
- return 0;
-}
-
-static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
- unsigned int event)
-{
- unsigned int cpu = policy->cpu;
- struct cpu_dbs_info_s *this_dbs_info;
- unsigned int j;
- int rc;
-
- this_dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
-
- switch (event) {
- case CPUFREQ_GOV_START:
- if ((!cpu_online(cpu)) || (!policy->cur))
- return -EINVAL;
-
- mutex_lock(&dbs_mutex);
-
- dbs_enable++;
- for_each_cpu(j, policy->cpus) {
- struct cpu_dbs_info_s *j_dbs_info;
- j_dbs_info = &per_cpu(od_cpu_dbs_info, j);
- j_dbs_info->cur_policy = policy;
-
- j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
- &j_dbs_info->prev_cpu_wall);
- if (dbs_tuners_ins.ignore_nice)
- j_dbs_info->prev_cpu_nice =
- kcpustat_cpu(j).cpustat[CPUTIME_NICE];
- }
- this_dbs_info->cpu = cpu;
- this_dbs_info->rate_mult = 1;
- ondemand_powersave_bias_init_cpu(cpu);
- /*
- * Start the timerschedule work, when this governor
- * is used for first time
- */
- if (dbs_enable == 1) {
- unsigned int latency;
-
- rc = sysfs_create_group(cpufreq_global_kobject,
- &dbs_attr_group);
- if (rc) {
- mutex_unlock(&dbs_mutex);
- return rc;
- }
-
- /* policy latency is in nS. Convert it to uS first */
- latency = policy->cpuinfo.transition_latency / 1000;
- if (latency == 0)
- latency = 1;
- /* Bring kernel and HW constraints together */
- min_sampling_rate = max(min_sampling_rate,
- MIN_LATENCY_MULTIPLIER * latency);
- dbs_tuners_ins.sampling_rate =
- max(min_sampling_rate,
- latency * LATENCY_MULTIPLIER);
- dbs_tuners_ins.io_is_busy = should_io_be_busy();
- }
- mutex_unlock(&dbs_mutex);
-
- mutex_init(&this_dbs_info->timer_mutex);
- dbs_timer_init(this_dbs_info);
- break;
-
- case CPUFREQ_GOV_STOP:
- dbs_timer_exit(this_dbs_info);
-
- mutex_lock(&dbs_mutex);
- mutex_destroy(&this_dbs_info->timer_mutex);
- dbs_enable--;
- mutex_unlock(&dbs_mutex);
- if (!dbs_enable)
- sysfs_remove_group(cpufreq_global_kobject,
- &dbs_attr_group);
-
- break;
-
- case CPUFREQ_GOV_LIMITS:
- mutex_lock(&this_dbs_info->timer_mutex);
- if (policy->max < this_dbs_info->cur_policy->cur)
- __cpufreq_driver_target(this_dbs_info->cur_policy,
- policy->max, CPUFREQ_RELATION_H);
- else if (policy->min > this_dbs_info->cur_policy->cur)
- __cpufreq_driver_target(this_dbs_info->cur_policy,
- policy->min, CPUFREQ_RELATION_L);
- dbs_check_cpu(this_dbs_info);
- mutex_unlock(&this_dbs_info->timer_mutex);
- break;
- }
- return 0;
-}
+struct cpufreq_governor cpufreq_gov_ondemand = {
+ .name = "ondemand",
+ .governor = od_cpufreq_governor_dbs,
+ .max_transition_latency = TRANSITION_LATENCY_LIMIT,
+ .owner = THIS_MODULE,
+};
static int __init cpufreq_gov_dbs_init(void)
{
u64 idle_time;
int cpu = get_cpu();
+ mutex_init(&od_dbs_data.mutex);
idle_time = get_cpu_idle_time_us(cpu, NULL);
put_cpu();
if (idle_time != -1ULL) {
/* Idle micro accounting is supported. Use finer thresholds */
- dbs_tuners_ins.up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
- dbs_tuners_ins.down_differential =
- MICRO_FREQUENCY_DOWN_DIFFERENTIAL;
+ od_tuners.up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
+ od_tuners.down_differential = MICRO_FREQUENCY_DOWN_DIFFERENTIAL;
/*
* In nohz/micro accounting case we set the minimum frequency
* not depending on HZ, but fixed (very low). The deferred
* timer might skip some samples if idle/sleeping as needed.
*/
- min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE;
+ od_dbs_data.min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE;
} else {
/* For correct statistics, we need 10 ticks for each measure */
- min_sampling_rate =
- MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10);
+ od_dbs_data.min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
+ jiffies_to_usecs(10);
}
return cpufreq_register_governor(&cpufreq_gov_ondemand);
@@ -800,7 +528,6 @@ static void __exit cpufreq_gov_dbs_exit(void)
cpufreq_unregister_governor(&cpufreq_gov_ondemand);
}
-
MODULE_AUTHOR("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>");
MODULE_AUTHOR("Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>");
MODULE_DESCRIPTION("'cpufreq_ondemand' - A dynamic cpufreq governor for "
diff --git a/drivers/cpufreq/cpufreq_performance.c b/drivers/cpufreq/cpufreq_performance.c
index f13a8a9af6a1..ceee06849b91 100644
--- a/drivers/cpufreq/cpufreq_performance.c
+++ b/drivers/cpufreq/cpufreq_performance.c
@@ -10,6 +10,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/cpufreq.h>
diff --git a/drivers/cpufreq/cpufreq_powersave.c b/drivers/cpufreq/cpufreq_powersave.c
index 4c2eb512f2bc..2d948a171155 100644
--- a/drivers/cpufreq/cpufreq_powersave.c
+++ b/drivers/cpufreq/cpufreq_powersave.c
@@ -10,6 +10,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/cpufreq.h>
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index 399831690fed..e40e50809644 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -37,7 +37,7 @@ struct cpufreq_stats {
unsigned int max_state;
unsigned int state_num;
unsigned int last_index;
- cputime64_t *time_in_state;
+ u64 *time_in_state;
unsigned int *freq_table;
#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
unsigned int *trans_table;
@@ -223,7 +223,7 @@ static int cpufreq_stats_create_table(struct cpufreq_policy *policy,
count++;
}
- alloc_size = count * sizeof(int) + count * sizeof(cputime64_t);
+ alloc_size = count * sizeof(int) + count * sizeof(u64);
#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
alloc_size += count * count * sizeof(int);
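The alloc_size computed above still packs time_in_state (now u64 per state) and freq_table into a single allocation. A minimal user-space sketch of carving one block that way (the state count and values are invented):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        unsigned int count = 4;     /* number of frequency states, made up */
        size_t alloc_size = count * sizeof(unsigned int) + count * sizeof(uint64_t);
        void *block = calloc(1, alloc_size);
        uint64_t *time_in_state;
        unsigned int *freq_table;

        if (!block)
            return 1;

        time_in_state = block;                                  /* u64 array first */
        freq_table = (unsigned int *)(time_in_state + count);   /* then the freqs */

        freq_table[0] = 1600000;
        time_in_state[0] = 12345;
        printf("freq %u kHz, time %llu\n", freq_table[0],
               (unsigned long long)time_in_state[0]);
        free(block);
        return 0;
    }
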
diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c
index bedac1aa9be3..c8c3d293cc57 100644
--- a/drivers/cpufreq/cpufreq_userspace.c
+++ b/drivers/cpufreq/cpufreq_userspace.c
@@ -11,6 +11,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/smp.h>
diff --git a/drivers/cpufreq/exynos-cpufreq.c b/drivers/cpufreq/exynos-cpufreq.c
index af2d81e10f71..7012ea8bf1e7 100644
--- a/drivers/cpufreq/exynos-cpufreq.c
+++ b/drivers/cpufreq/exynos-cpufreq.c
@@ -31,13 +31,13 @@ static unsigned int locking_frequency;
static bool frequency_locked;
static DEFINE_MUTEX(cpufreq_lock);
-int exynos_verify_speed(struct cpufreq_policy *policy)
+static int exynos_verify_speed(struct cpufreq_policy *policy)
{
return cpufreq_frequency_table_verify(policy,
exynos_info->freq_table);
}
-unsigned int exynos_getspeed(unsigned int cpu)
+static unsigned int exynos_getspeed(unsigned int cpu)
{
return clk_get_rate(exynos_info->cpu_clk) / 1000;
}
@@ -100,7 +100,8 @@ static int exynos_target(struct cpufreq_policy *policy,
}
arm_volt = volt_table[index];
- cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+ for_each_cpu(freqs.cpu, policy->cpus)
+ cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
/* When the new frequency is higher than current frequency */
if ((freqs.new > freqs.old) && !safe_arm_volt) {
@@ -115,7 +116,8 @@ static int exynos_target(struct cpufreq_policy *policy,
if (freqs.new != freqs.old)
exynos_info->set_freq(old_index, index);
- cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+ for_each_cpu(freqs.cpu, policy->cpus)
+ cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
/* When the new frequency is lower than current frequency */
if ((freqs.new < freqs.old) ||
@@ -235,6 +237,7 @@ static int exynos_cpufreq_cpu_init(struct cpufreq_policy *policy)
cpumask_copy(policy->related_cpus, cpu_possible_mask);
cpumask_copy(policy->cpus, cpu_online_mask);
} else {
+ policy->shared_type = CPUFREQ_SHARED_TYPE_ANY;
cpumask_setall(policy->cpus);
}
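Because all Exynos cores share one clock, the driver now raises the PRECHANGE/POSTCHANGE notification once per CPU in the policy rather than once in total. A rough user-space sketch of that broadcast pattern, with invented CPU numbers and frequencies:

    #include <stdio.h>

    #define PRECHANGE  0
    #define POSTCHANGE 1

    struct freqs { unsigned int cpu, old_khz, new_khz; };

    static void notify_transition(struct freqs *f, int phase)
    {
        printf("cpu%u: %s %u -> %u kHz\n", f->cpu,
               phase == PRECHANGE ? "pre " : "post", f->old_khz, f->new_khz);
    }

    int main(void)
    {
        int policy_cpus[] = { 0, 1, 2, 3 };     /* all CPUs sharing one clock */
        struct freqs freqs = { 0, 1000000, 1400000 };
        unsigned int i;

        /* mirrors for_each_cpu(freqs.cpu, policy->cpus) before the switch */
        for (i = 0; i < 4; i++) {
            freqs.cpu = policy_cpus[i];
            notify_transition(&freqs, PRECHANGE);
        }
        /* ... the actual clock/voltage change would happen here ... */
        for (i = 0; i < 4; i++) {
            freqs.cpu = policy_cpus[i];
            notify_transition(&freqs, POSTCHANGE);
        }
        return 0;
    }
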
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
index 90431cb92804..49cda256efb2 100644
--- a/drivers/cpufreq/freq_table.c
+++ b/drivers/cpufreq/freq_table.c
@@ -9,6 +9,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c
index 53ddbc760af7..f1fa500ac105 100644
--- a/drivers/cpufreq/longhaul.c
+++ b/drivers/cpufreq/longhaul.c
@@ -930,7 +930,7 @@ static int __cpuinit longhaul_cpu_init(struct cpufreq_policy *policy)
return 0;
}
-static int __devexit longhaul_cpu_exit(struct cpufreq_policy *policy)
+static int longhaul_cpu_exit(struct cpufreq_policy *policy)
{
cpufreq_frequency_table_put_attr(policy->cpu);
return 0;
@@ -946,7 +946,7 @@ static struct cpufreq_driver longhaul_driver = {
.target = longhaul_target,
.get = longhaul_get,
.init = longhaul_cpu_init,
- .exit = __devexit_p(longhaul_cpu_exit),
+ .exit = longhaul_cpu_exit,
.name = "longhaul",
.owner = THIS_MODULE,
.attr = longhaul_attr,
diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
index e3ebb4fa2c3e..056faf6af1a9 100644
--- a/drivers/cpufreq/powernow-k8.c
+++ b/drivers/cpufreq/powernow-k8.c
@@ -1186,7 +1186,7 @@ err_out:
return -ENODEV;
}
-static int __devexit powernowk8_cpu_exit(struct cpufreq_policy *pol)
+static int powernowk8_cpu_exit(struct cpufreq_policy *pol)
{
struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
@@ -1242,7 +1242,7 @@ static struct cpufreq_driver cpufreq_amd64_driver = {
.target = powernowk8_target,
.bios_limit = acpi_processor_get_bios_limit,
.init = powernowk8_cpu_init,
- .exit = __devexit_p(powernowk8_cpu_exit),
+ .exit = powernowk8_cpu_exit,
.get = powernowk8_get,
.name = "powernow-k8",
.owner = THIS_MODULE,
diff --git a/drivers/cpufreq/spear-cpufreq.c b/drivers/cpufreq/spear-cpufreq.c
new file mode 100644
index 000000000000..4575cfe41755
--- /dev/null
+++ b/drivers/cpufreq/spear-cpufreq.c
@@ -0,0 +1,291 @@
+/*
+ * drivers/cpufreq/spear-cpufreq.c
+ *
+ * CPU Frequency Scaling for SPEAr platform
+ *
+ * Copyright (C) 2012 ST Microelectronics
+ * Deepak Sikri <deepak.sikri@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/clk.h>
+#include <linux/cpufreq.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+/* SPEAr CPUFreq driver data structure */
+static struct {
+ struct clk *clk;
+ unsigned int transition_latency;
+ struct cpufreq_frequency_table *freq_tbl;
+ u32 cnt;
+} spear_cpufreq;
+
+static int spear_cpufreq_verify(struct cpufreq_policy *policy)
+{
+ return cpufreq_frequency_table_verify(policy, spear_cpufreq.freq_tbl);
+}
+
+static unsigned int spear_cpufreq_get(unsigned int cpu)
+{
+ return clk_get_rate(spear_cpufreq.clk) / 1000;
+}
+
+static struct clk *spear1340_cpu_get_possible_parent(unsigned long newfreq)
+{
+ struct clk *sys_pclk;
+ int pclk;
+ /*
+	 * In SPEAr1340, the cpu clk's parent sys clk can take input from
+	 * the following sources
+ */
+ const char *sys_clk_src[] = {
+ "sys_syn_clk",
+ "pll1_clk",
+ "pll2_clk",
+ "pll3_clk",
+ };
+
+ /*
+	 * As sys clk can have multiple sources, each with its own range
+	 * limitation, we choose the possible source accordingly
+ */
+ if (newfreq <= 300000000)
+ pclk = 0; /* src is sys_syn_clk */
+ else if (newfreq > 300000000 && newfreq <= 500000000)
+ pclk = 3; /* src is pll3_clk */
+ else if (newfreq == 600000000)
+ pclk = 1; /* src is pll1_clk */
+ else
+ return ERR_PTR(-EINVAL);
+
+ /* Get parent to sys clock */
+ sys_pclk = clk_get(NULL, sys_clk_src[pclk]);
+ if (IS_ERR(sys_pclk))
+ pr_err("Failed to get %s clock\n", sys_clk_src[pclk]);
+
+ return sys_pclk;
+}
+
+/*
+ * In SPEAr1340, we cannot use newfreq directly because we need to actually
+ * access a source clock (clk) which might not be an ancestor of the cpu
+ * clock at present. Hence, in SPEAr1340 we operate on the source clock
+ * directly before switching the cpu clock to it.
+ */
+static int spear1340_set_cpu_rate(struct clk *sys_pclk, unsigned long newfreq)
+{
+ struct clk *sys_clk;
+ int ret = 0;
+
+ sys_clk = clk_get_parent(spear_cpufreq.clk);
+ if (IS_ERR(sys_clk)) {
+ pr_err("failed to get cpu's parent (sys) clock\n");
+ return PTR_ERR(sys_clk);
+ }
+
+ /* Set the rate of the source clock before changing the parent */
+ ret = clk_set_rate(sys_pclk, newfreq);
+ if (ret) {
+ pr_err("Failed to set sys clk rate to %lu\n", newfreq);
+ return ret;
+ }
+
+ ret = clk_set_parent(sys_clk, sys_pclk);
+ if (ret) {
+ pr_err("Failed to set sys clk parent\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int spear_cpufreq_target(struct cpufreq_policy *policy,
+ unsigned int target_freq, unsigned int relation)
+{
+ struct cpufreq_freqs freqs;
+	long newfreq;	/* clk_round_rate() may return a negative errno */
+ struct clk *srcclk;
+ int index, ret, mult = 1;
+
+ if (cpufreq_frequency_table_target(policy, spear_cpufreq.freq_tbl,
+ target_freq, relation, &index))
+ return -EINVAL;
+
+ freqs.cpu = policy->cpu;
+ freqs.old = spear_cpufreq_get(0);
+
+ newfreq = spear_cpufreq.freq_tbl[index].frequency * 1000;
+ if (of_machine_is_compatible("st,spear1340")) {
+ /*
+		 * SPEAr1340 is special: the cpu clk's parent can have
+		 * multiple clock sources, so a different source may be
+		 * used for different cpu clk frequencies. Hence we need
+		 * to choose one from amongst these possible clock
+		 * sources.
+ */
+ srcclk = spear1340_cpu_get_possible_parent(newfreq);
+ if (IS_ERR(srcclk)) {
+ pr_err("Failed to get src clk\n");
+ return PTR_ERR(srcclk);
+ }
+
+ /* SPEAr1340: src clk is always 2 * intended cpu clk */
+ mult = 2;
+ } else {
+ /*
+		 * The src clock to be altered is an ancestor of the cpu
+		 * clock, so we can work on the cpu clk directly.
+ */
+ srcclk = spear_cpufreq.clk;
+ }
+
+ newfreq = clk_round_rate(srcclk, newfreq * mult);
+ if (newfreq < 0) {
+ pr_err("clk_round_rate failed for cpu src clock\n");
+ return newfreq;
+ }
+
+ freqs.new = newfreq / 1000;
+ freqs.new /= mult;
+ cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+
+ if (mult == 2)
+ ret = spear1340_set_cpu_rate(srcclk, newfreq);
+ else
+ ret = clk_set_rate(spear_cpufreq.clk, newfreq);
+
+ /* Get current rate after clk_set_rate, in case of failure */
+ if (ret) {
+ pr_err("CPU Freq: cpu clk_set_rate failed: %d\n", ret);
+ freqs.new = clk_get_rate(spear_cpufreq.clk) / 1000;
+ }
+
+ cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+ return ret;
+}
+
+static int spear_cpufreq_init(struct cpufreq_policy *policy)
+{
+ int ret;
+
+ ret = cpufreq_frequency_table_cpuinfo(policy, spear_cpufreq.freq_tbl);
+ if (ret) {
+ pr_err("cpufreq_frequency_table_cpuinfo() failed");
+ return ret;
+ }
+
+ cpufreq_frequency_table_get_attr(spear_cpufreq.freq_tbl, policy->cpu);
+ policy->cpuinfo.transition_latency = spear_cpufreq.transition_latency;
+ policy->cur = spear_cpufreq_get(0);
+
+ cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu));
+ cpumask_copy(policy->related_cpus, policy->cpus);
+
+ return 0;
+}
+
+static int spear_cpufreq_exit(struct cpufreq_policy *policy)
+{
+ cpufreq_frequency_table_put_attr(policy->cpu);
+ return 0;
+}
+
+static struct freq_attr *spear_cpufreq_attr[] = {
+ &cpufreq_freq_attr_scaling_available_freqs,
+ NULL,
+};
+
+static struct cpufreq_driver spear_cpufreq_driver = {
+ .name = "cpufreq-spear",
+ .flags = CPUFREQ_STICKY,
+ .verify = spear_cpufreq_verify,
+ .target = spear_cpufreq_target,
+ .get = spear_cpufreq_get,
+ .init = spear_cpufreq_init,
+ .exit = spear_cpufreq_exit,
+ .attr = spear_cpufreq_attr,
+};
+
+static int spear_cpufreq_driver_init(void)
+{
+ struct device_node *np;
+ const struct property *prop;
+ struct cpufreq_frequency_table *freq_tbl;
+ const __be32 *val;
+ int cnt, i, ret;
+
+ np = of_find_node_by_path("/cpus/cpu@0");
+ if (!np) {
+ pr_err("No cpu node found");
+ return -ENODEV;
+ }
+
+ if (of_property_read_u32(np, "clock-latency",
+ &spear_cpufreq.transition_latency))
+ spear_cpufreq.transition_latency = CPUFREQ_ETERNAL;
+
+ prop = of_find_property(np, "cpufreq_tbl", NULL);
+ if (!prop || !prop->value) {
+ pr_err("Invalid cpufreq_tbl");
+ ret = -ENODEV;
+ goto out_put_node;
+ }
+
+ cnt = prop->length / sizeof(u32);
+ val = prop->value;
+
+ freq_tbl = kmalloc(sizeof(*freq_tbl) * (cnt + 1), GFP_KERNEL);
+ if (!freq_tbl) {
+ ret = -ENOMEM;
+ goto out_put_node;
+ }
+
+ for (i = 0; i < cnt; i++) {
+ freq_tbl[i].index = i;
+ freq_tbl[i].frequency = be32_to_cpup(val++);
+ }
+
+ freq_tbl[i].index = i;
+ freq_tbl[i].frequency = CPUFREQ_TABLE_END;
+
+ spear_cpufreq.freq_tbl = freq_tbl;
+
+ of_node_put(np);
+
+ spear_cpufreq.clk = clk_get(NULL, "cpu_clk");
+ if (IS_ERR(spear_cpufreq.clk)) {
+ pr_err("Unable to get CPU clock\n");
+ ret = PTR_ERR(spear_cpufreq.clk);
+ goto out_put_mem;
+ }
+
+ ret = cpufreq_register_driver(&spear_cpufreq_driver);
+ if (!ret)
+ return 0;
+
+ pr_err("failed register driver: %d\n", ret);
+ clk_put(spear_cpufreq.clk);
+
+out_put_mem:
+ kfree(freq_tbl);
+ return ret;
+
+out_put_node:
+ of_node_put(np);
+ return ret;
+}
+late_initcall(spear_cpufreq_driver_init);
+
+MODULE_AUTHOR("Deepak Sikri <deepak.sikri@st.com>");
+MODULE_DESCRIPTION("SPEAr CPUFreq driver");
+MODULE_LICENSE("GPL");
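
For reference, a sketch of the kind of table spear_cpufreq_driver_init() builds from the "cpufreq_tbl" device-tree property; the frequency values below are purely illustrative:

#include <linux/cpufreq.h>

/* Entries are in kHz; the driver multiplies by 1000 before touching clocks. */
static struct cpufreq_frequency_table example_spear_tbl[] = {
	{ .index = 0, .frequency = 200000 },
	{ .index = 1, .frequency = 332000 },
	{ .index = 2, .frequency = 500000 },
	{ .index = 3, .frequency = CPUFREQ_TABLE_END },
};
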
diff --git a/drivers/cpuidle/Kconfig b/drivers/cpuidle/Kconfig
index a76b689e553b..234ae651b38f 100644
--- a/drivers/cpuidle/Kconfig
+++ b/drivers/cpuidle/Kconfig
@@ -9,6 +9,15 @@ config CPU_IDLE
If you're using an ACPI-enabled platform, you should say Y here.
+config CPU_IDLE_MULTIPLE_DRIVERS
+ bool "Support multiple cpuidle drivers"
+ depends on CPU_IDLE
+ default n
+ help
+ Allows the cpuidle framework to use different drivers for each CPU.
+ This is useful if you have a system with different CPU latencies and
+ states. If unsure say N.
+
config CPU_IDLE_GOV_LADDER
bool
depends on CPU_IDLE
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 7f15b8514a18..8df53dd8dbe1 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -68,7 +68,7 @@ static cpuidle_enter_t cpuidle_enter_ops;
int cpuidle_play_dead(void)
{
struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
- struct cpuidle_driver *drv = cpuidle_get_driver();
+ struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
int i, dead_state = -1;
int power_usage = -1;
@@ -109,8 +109,7 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
/* This can be moved to within driver enter routine
* but that results in multiple copies of same code.
*/
- dev->states_usage[entered_state].time +=
- (unsigned long long)dev->last_residency;
+ dev->states_usage[entered_state].time += dev->last_residency;
dev->states_usage[entered_state].usage++;
} else {
dev->last_residency = 0;
@@ -128,7 +127,7 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
int cpuidle_idle_call(void)
{
struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
- struct cpuidle_driver *drv = cpuidle_get_driver();
+ struct cpuidle_driver *drv;
int next_state, entered_state;
if (off)
@@ -141,9 +140,15 @@ int cpuidle_idle_call(void)
if (!dev || !dev->enabled)
return -EBUSY;
+ drv = cpuidle_get_cpu_driver(dev);
+
/* ask the governor for the next state */
next_state = cpuidle_curr_governor->select(drv, dev);
if (need_resched()) {
+ dev->last_residency = 0;
+ /* give the governor an opportunity to reflect on the outcome */
+ if (cpuidle_curr_governor->reflect)
+ cpuidle_curr_governor->reflect(dev, next_state);
local_irq_enable();
return 0;
}
@@ -308,15 +313,19 @@ static void poll_idle_init(struct cpuidle_driver *drv) {}
int cpuidle_enable_device(struct cpuidle_device *dev)
{
int ret, i;
- struct cpuidle_driver *drv = cpuidle_get_driver();
+ struct cpuidle_driver *drv;
if (!dev)
return -EINVAL;
if (dev->enabled)
return 0;
+
+ drv = cpuidle_get_cpu_driver(dev);
+
if (!drv || !cpuidle_curr_governor)
return -EIO;
+
if (!dev->state_count)
dev->state_count = drv->state_count;
@@ -331,7 +340,8 @@ int cpuidle_enable_device(struct cpuidle_device *dev)
poll_idle_init(drv);
- if ((ret = cpuidle_add_state_sysfs(dev)))
+ ret = cpuidle_add_device_sysfs(dev);
+ if (ret)
return ret;
if (cpuidle_curr_governor->enable &&
@@ -352,7 +362,7 @@ int cpuidle_enable_device(struct cpuidle_device *dev)
return 0;
fail_sysfs:
- cpuidle_remove_state_sysfs(dev);
+ cpuidle_remove_device_sysfs(dev);
return ret;
}
@@ -368,17 +378,20 @@ EXPORT_SYMBOL_GPL(cpuidle_enable_device);
*/
void cpuidle_disable_device(struct cpuidle_device *dev)
{
+ struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
+
if (!dev || !dev->enabled)
return;
- if (!cpuidle_get_driver() || !cpuidle_curr_governor)
+
+ if (!drv || !cpuidle_curr_governor)
return;
dev->enabled = 0;
if (cpuidle_curr_governor->disable)
- cpuidle_curr_governor->disable(cpuidle_get_driver(), dev);
+ cpuidle_curr_governor->disable(drv, dev);
- cpuidle_remove_state_sysfs(dev);
+ cpuidle_remove_device_sysfs(dev);
enabled_devices--;
}
@@ -394,17 +407,14 @@ EXPORT_SYMBOL_GPL(cpuidle_disable_device);
static int __cpuidle_register_device(struct cpuidle_device *dev)
{
int ret;
- struct device *cpu_dev = get_cpu_device((unsigned long)dev->cpu);
- struct cpuidle_driver *cpuidle_driver = cpuidle_get_driver();
+ struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
- if (!try_module_get(cpuidle_driver->owner))
+ if (!try_module_get(drv->owner))
return -EINVAL;
- init_completion(&dev->kobj_unregister);
-
per_cpu(cpuidle_devices, dev->cpu) = dev;
list_add(&dev->device_list, &cpuidle_detected_devices);
- ret = cpuidle_add_sysfs(cpu_dev);
+ ret = cpuidle_add_sysfs(dev);
if (ret)
goto err_sysfs;
@@ -416,12 +426,11 @@ static int __cpuidle_register_device(struct cpuidle_device *dev)
return 0;
err_coupled:
- cpuidle_remove_sysfs(cpu_dev);
- wait_for_completion(&dev->kobj_unregister);
+ cpuidle_remove_sysfs(dev);
err_sysfs:
list_del(&dev->device_list);
per_cpu(cpuidle_devices, dev->cpu) = NULL;
- module_put(cpuidle_driver->owner);
+ module_put(drv->owner);
return ret;
}
@@ -460,8 +469,7 @@ EXPORT_SYMBOL_GPL(cpuidle_register_device);
*/
void cpuidle_unregister_device(struct cpuidle_device *dev)
{
- struct device *cpu_dev = get_cpu_device((unsigned long)dev->cpu);
- struct cpuidle_driver *cpuidle_driver = cpuidle_get_driver();
+ struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
if (dev->registered == 0)
return;
@@ -470,16 +478,15 @@ void cpuidle_unregister_device(struct cpuidle_device *dev)
cpuidle_disable_device(dev);
- cpuidle_remove_sysfs(cpu_dev);
+ cpuidle_remove_sysfs(dev);
list_del(&dev->device_list);
- wait_for_completion(&dev->kobj_unregister);
per_cpu(cpuidle_devices, dev->cpu) = NULL;
cpuidle_coupled_unregister_device(dev);
cpuidle_resume_and_unlock();
- module_put(cpuidle_driver->owner);
+ module_put(drv->owner);
}
EXPORT_SYMBOL_GPL(cpuidle_unregister_device);
diff --git a/drivers/cpuidle/cpuidle.h b/drivers/cpuidle/cpuidle.h
index 76e7f696ad8c..ee97e9672ecf 100644
--- a/drivers/cpuidle/cpuidle.h
+++ b/drivers/cpuidle/cpuidle.h
@@ -5,8 +5,6 @@
#ifndef __DRIVER_CPUIDLE_H
#define __DRIVER_CPUIDLE_H
-#include <linux/device.h>
-
/* For internal use only */
extern struct cpuidle_governor *cpuidle_curr_governor;
extern struct list_head cpuidle_governors;
@@ -25,12 +23,15 @@ extern void cpuidle_uninstall_idle_handler(void);
extern int cpuidle_switch_governor(struct cpuidle_governor *gov);
/* sysfs */
+
+struct device;
+
extern int cpuidle_add_interface(struct device *dev);
extern void cpuidle_remove_interface(struct device *dev);
-extern int cpuidle_add_state_sysfs(struct cpuidle_device *device);
-extern void cpuidle_remove_state_sysfs(struct cpuidle_device *device);
-extern int cpuidle_add_sysfs(struct device *dev);
-extern void cpuidle_remove_sysfs(struct device *dev);
+extern int cpuidle_add_device_sysfs(struct cpuidle_device *device);
+extern void cpuidle_remove_device_sysfs(struct cpuidle_device *device);
+extern int cpuidle_add_sysfs(struct cpuidle_device *dev);
+extern void cpuidle_remove_sysfs(struct cpuidle_device *dev);
#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
bool cpuidle_state_is_coupled(struct cpuidle_device *dev,
diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
index 87db3877fead..3af841fb397a 100644
--- a/drivers/cpuidle/driver.c
+++ b/drivers/cpuidle/driver.c
@@ -14,9 +14,10 @@
#include "cpuidle.h"
-static struct cpuidle_driver *cpuidle_curr_driver;
DEFINE_SPINLOCK(cpuidle_driver_lock);
-int cpuidle_driver_refcount;
+
+static void __cpuidle_set_cpu_driver(struct cpuidle_driver *drv, int cpu);
+static struct cpuidle_driver * __cpuidle_get_cpu_driver(int cpu);
static void set_power_states(struct cpuidle_driver *drv)
{
@@ -40,11 +41,15 @@ static void set_power_states(struct cpuidle_driver *drv)
drv->states[i].power_usage = -1 - i;
}
-/**
- * cpuidle_register_driver - registers a driver
- * @drv: the driver
- */
-int cpuidle_register_driver(struct cpuidle_driver *drv)
+static void __cpuidle_driver_init(struct cpuidle_driver *drv)
+{
+ drv->refcnt = 0;
+
+ if (!drv->power_specified)
+ set_power_states(drv);
+}
+
+static int __cpuidle_register_driver(struct cpuidle_driver *drv, int cpu)
{
if (!drv || !drv->state_count)
return -EINVAL;
@@ -52,31 +57,145 @@ int cpuidle_register_driver(struct cpuidle_driver *drv)
if (cpuidle_disabled())
return -ENODEV;
- spin_lock(&cpuidle_driver_lock);
- if (cpuidle_curr_driver) {
- spin_unlock(&cpuidle_driver_lock);
+ if (__cpuidle_get_cpu_driver(cpu))
return -EBUSY;
+
+ __cpuidle_driver_init(drv);
+
+ __cpuidle_set_cpu_driver(drv, cpu);
+
+ return 0;
+}
+
+static void __cpuidle_unregister_driver(struct cpuidle_driver *drv, int cpu)
+{
+ if (drv != __cpuidle_get_cpu_driver(cpu))
+ return;
+
+ if (!WARN_ON(drv->refcnt > 0))
+ __cpuidle_set_cpu_driver(NULL, cpu);
+}
+
+#ifdef CONFIG_CPU_IDLE_MULTIPLE_DRIVERS
+
+static DEFINE_PER_CPU(struct cpuidle_driver *, cpuidle_drivers);
+
+static void __cpuidle_set_cpu_driver(struct cpuidle_driver *drv, int cpu)
+{
+ per_cpu(cpuidle_drivers, cpu) = drv;
+}
+
+static struct cpuidle_driver *__cpuidle_get_cpu_driver(int cpu)
+{
+ return per_cpu(cpuidle_drivers, cpu);
+}
+
+static void __cpuidle_unregister_all_cpu_driver(struct cpuidle_driver *drv)
+{
+ int cpu;
+ for_each_present_cpu(cpu)
+ __cpuidle_unregister_driver(drv, cpu);
+}
+
+static int __cpuidle_register_all_cpu_driver(struct cpuidle_driver *drv)
+{
+ int ret = 0;
+ int i, cpu;
+
+ for_each_present_cpu(cpu) {
+ ret = __cpuidle_register_driver(drv, cpu);
+ if (ret)
+ break;
}
- if (!drv->power_specified)
- set_power_states(drv);
+ if (ret)
+ for_each_present_cpu(i) {
+ if (i == cpu)
+ break;
+ __cpuidle_unregister_driver(drv, i);
+ }
- cpuidle_curr_driver = drv;
+ return ret;
+}
+
+int cpuidle_register_cpu_driver(struct cpuidle_driver *drv, int cpu)
+{
+ int ret;
+
+ spin_lock(&cpuidle_driver_lock);
+ ret = __cpuidle_register_driver(drv, cpu);
spin_unlock(&cpuidle_driver_lock);
- return 0;
+ return ret;
+}
+
+void cpuidle_unregister_cpu_driver(struct cpuidle_driver *drv, int cpu)
+{
+ spin_lock(&cpuidle_driver_lock);
+ __cpuidle_unregister_driver(drv, cpu);
+ spin_unlock(&cpuidle_driver_lock);
+}
+
+/**
+ * cpuidle_register_driver - registers a driver
+ * @drv: the driver
+ */
+int cpuidle_register_driver(struct cpuidle_driver *drv)
+{
+ int ret;
+
+ spin_lock(&cpuidle_driver_lock);
+ ret = __cpuidle_register_all_cpu_driver(drv);
+ spin_unlock(&cpuidle_driver_lock);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(cpuidle_register_driver);
/**
- * cpuidle_get_driver - return the current driver
+ * cpuidle_unregister_driver - unregisters a driver
+ * @drv: the driver
*/
-struct cpuidle_driver *cpuidle_get_driver(void)
+void cpuidle_unregister_driver(struct cpuidle_driver *drv)
+{
+ spin_lock(&cpuidle_driver_lock);
+ __cpuidle_unregister_all_cpu_driver(drv);
+ spin_unlock(&cpuidle_driver_lock);
+}
+EXPORT_SYMBOL_GPL(cpuidle_unregister_driver);
+
+#else
+
+static struct cpuidle_driver *cpuidle_curr_driver;
+
+static inline void __cpuidle_set_cpu_driver(struct cpuidle_driver *drv, int cpu)
+{
+ cpuidle_curr_driver = drv;
+}
+
+static inline struct cpuidle_driver *__cpuidle_get_cpu_driver(int cpu)
{
return cpuidle_curr_driver;
}
-EXPORT_SYMBOL_GPL(cpuidle_get_driver);
+
+/**
+ * cpuidle_register_driver - registers a driver
+ * @drv: the driver
+ */
+int cpuidle_register_driver(struct cpuidle_driver *drv)
+{
+ int ret, cpu;
+
+ cpu = get_cpu();
+ spin_lock(&cpuidle_driver_lock);
+ ret = __cpuidle_register_driver(drv, cpu);
+ spin_unlock(&cpuidle_driver_lock);
+ put_cpu();
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cpuidle_register_driver);
/**
* cpuidle_unregister_driver - unregisters a driver
@@ -84,20 +203,50 @@ EXPORT_SYMBOL_GPL(cpuidle_get_driver);
*/
void cpuidle_unregister_driver(struct cpuidle_driver *drv)
{
- if (drv != cpuidle_curr_driver) {
- WARN(1, "invalid cpuidle_unregister_driver(%s)\n",
- drv->name);
- return;
- }
+ int cpu;
+ cpu = get_cpu();
spin_lock(&cpuidle_driver_lock);
+ __cpuidle_unregister_driver(drv, cpu);
+ spin_unlock(&cpuidle_driver_lock);
+ put_cpu();
+}
+EXPORT_SYMBOL_GPL(cpuidle_unregister_driver);
+#endif
+
+/**
+ * cpuidle_get_driver - return the current driver
+ */
+struct cpuidle_driver *cpuidle_get_driver(void)
+{
+ struct cpuidle_driver *drv;
+ int cpu;
- if (!WARN_ON(cpuidle_driver_refcount > 0))
- cpuidle_curr_driver = NULL;
+ cpu = get_cpu();
+ drv = __cpuidle_get_cpu_driver(cpu);
+ put_cpu();
+ return drv;
+}
+EXPORT_SYMBOL_GPL(cpuidle_get_driver);
+
+/**
+ * cpuidle_get_cpu_driver - return the driver tied with a cpu
+ */
+struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev)
+{
+ struct cpuidle_driver *drv;
+
+ if (!dev)
+ return NULL;
+
+ spin_lock(&cpuidle_driver_lock);
+ drv = __cpuidle_get_cpu_driver(dev->cpu);
spin_unlock(&cpuidle_driver_lock);
+
+ return drv;
}
-EXPORT_SYMBOL_GPL(cpuidle_unregister_driver);
+EXPORT_SYMBOL_GPL(cpuidle_get_cpu_driver);
struct cpuidle_driver *cpuidle_driver_ref(void)
{
@@ -105,8 +254,8 @@ struct cpuidle_driver *cpuidle_driver_ref(void)
spin_lock(&cpuidle_driver_lock);
- drv = cpuidle_curr_driver;
- cpuidle_driver_refcount++;
+ drv = cpuidle_get_driver();
+ drv->refcnt++;
spin_unlock(&cpuidle_driver_lock);
return drv;
@@ -114,10 +263,12 @@ struct cpuidle_driver *cpuidle_driver_ref(void)
void cpuidle_driver_unref(void)
{
+ struct cpuidle_driver *drv = cpuidle_get_driver();
+
spin_lock(&cpuidle_driver_lock);
- if (!WARN_ON(cpuidle_driver_refcount <= 0))
- cpuidle_driver_refcount--;
+ if (drv && !WARN_ON(drv->refcnt <= 0))
+ drv->refcnt--;
spin_unlock(&cpuidle_driver_lock);
}
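
With CONFIG_CPU_IDLE_MULTIPLE_DRIVERS, cpuidle_register_cpu_driver() added above lets platform code bind a different driver to each CPU. A minimal sketch under assumed conditions: the two cluster drivers and the hard-coded CPU split are hypothetical, and a real driver must populate states[] and state_count or registration fails with -EINVAL:

#include <linux/cpuidle.h>
#include <linux/cpumask.h>
#include <linux/init.h>

/* Hypothetical per-cluster drivers; state tables omitted for brevity. */
static struct cpuidle_driver fast_idle_driver = { .name = "fast-idle" };
static struct cpuidle_driver slow_idle_driver = { .name = "slow-idle" };

static int __init example_register_idle_drivers(void)
{
	int cpu, ret;

	for_each_possible_cpu(cpu) {
		struct cpuidle_driver *drv =
			cpu < 2 ? &fast_idle_driver : &slow_idle_driver;

		ret = cpuidle_register_cpu_driver(drv, cpu);
		if (ret)
			return ret;
	}
	return 0;
}
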
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index 5b1f2c372c1f..bd40b943b6db 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -28,6 +28,13 @@
#define MAX_INTERESTING 50000
#define STDDEV_THRESH 400
+/* 60 * 60 > STDDEV_THRESH * INTERVALS = 400 * 8 */
+#define MAX_DEVIATION 60
+
+static DEFINE_PER_CPU(struct hrtimer, menu_hrtimer);
+static DEFINE_PER_CPU(int, hrtimer_status);
+/* menu hrtimer mode */
+enum {MENU_HRTIMER_STOP, MENU_HRTIMER_REPEAT, MENU_HRTIMER_GENERAL};
/*
* Concepts and ideas behind the menu governor
@@ -109,6 +116,13 @@
*
*/
+/*
+ * The C-state residency is so long that it is worthwhile to exit
+ * from the shallow C-state and re-enter into a deeper C-state.
+ */
+static unsigned int perfect_cstate_ms __read_mostly = 30;
+module_param(perfect_cstate_ms, uint, 0000);
+
struct menu_device {
int last_state_idx;
int needs_update;
@@ -191,40 +205,102 @@ static u64 div_round64(u64 dividend, u32 divisor)
return div_u64(dividend + (divisor / 2), divisor);
}
+/* Cancel the hrtimer if it is not triggered yet */
+void menu_hrtimer_cancel(void)
+{
+ int cpu = smp_processor_id();
+ struct hrtimer *hrtmr = &per_cpu(menu_hrtimer, cpu);
+
+	/* The timer has not timed out yet */
+ if (per_cpu(hrtimer_status, cpu)) {
+ hrtimer_cancel(hrtmr);
+ per_cpu(hrtimer_status, cpu) = MENU_HRTIMER_STOP;
+ }
+}
+EXPORT_SYMBOL_GPL(menu_hrtimer_cancel);
+
+/* Callback invoked when the hrtimer fires */
+static enum hrtimer_restart menu_hrtimer_notify(struct hrtimer *hrtimer)
+{
+ int cpu = smp_processor_id();
+ struct menu_device *data = &per_cpu(menu_devices, cpu);
+
+	/* In the general case the expected residency is much larger than
+	 * the deepest C-state's target residency, but the prediction logic
+	 * still predicts a small residency.  If this timer fires, the
+	 * prediction history is clearly broken, so reset the correction
+	 * factor.
+ */
+ if (per_cpu(hrtimer_status, cpu) == MENU_HRTIMER_GENERAL)
+ data->correction_factor[data->bucket] = RESOLUTION * DECAY;
+
+ per_cpu(hrtimer_status, cpu) = MENU_HRTIMER_STOP;
+
+ return HRTIMER_NORESTART;
+}
+
/*
* Try detecting repeating patterns by keeping track of the last 8
* intervals, and checking if the standard deviation of that set
* of points is below a threshold. If it is... then use the
* average of these 8 points as the estimated value.
*/
-static void detect_repeating_patterns(struct menu_device *data)
+static u32 get_typical_interval(struct menu_device *data)
{
- int i;
- uint64_t avg = 0;
- uint64_t stddev = 0; /* contains the square of the std deviation */
-
- /* first calculate average and standard deviation of the past */
- for (i = 0; i < INTERVALS; i++)
- avg += data->intervals[i];
- avg = avg / INTERVALS;
+ int i = 0, divisor = 0;
+ uint64_t max = 0, avg = 0, stddev = 0;
+ int64_t thresh = LLONG_MAX; /* Discard outliers above this value. */
+ unsigned int ret = 0;
- /* if the avg is beyond the known next tick, it's worthless */
- if (avg > data->expected_us)
- return;
+again:
- for (i = 0; i < INTERVALS; i++)
- stddev += (data->intervals[i] - avg) *
- (data->intervals[i] - avg);
-
- stddev = stddev / INTERVALS;
+ /* first calculate average and standard deviation of the past */
+ max = avg = divisor = stddev = 0;
+ for (i = 0; i < INTERVALS; i++) {
+ int64_t value = data->intervals[i];
+ if (value <= thresh) {
+ avg += value;
+ divisor++;
+ if (value > max)
+ max = value;
+ }
+ }
+ do_div(avg, divisor);
+ for (i = 0; i < INTERVALS; i++) {
+ int64_t value = data->intervals[i];
+ if (value <= thresh) {
+ int64_t diff = value - avg;
+ stddev += diff * diff;
+ }
+ }
+ do_div(stddev, divisor);
+ stddev = int_sqrt(stddev);
/*
- * now.. if stddev is small.. then assume we have a
- * repeating pattern and predict we keep doing this.
+ * If we have outliers to the upside in our distribution, discard
+ * those by setting the threshold to exclude these outliers, then
+ * calculate the average and standard deviation again. Once we get
+ * down to the bottom 3/4 of our samples, stop excluding samples.
+ *
+ * This can deal with workloads that have long pauses interspersed
+ * with sporadic activity with a bunch of short pauses.
+ *
+ * The typical interval is obtained when standard deviation is small
+ * or standard deviation is small compared to the average interval.
*/
-
- if (avg && stddev < STDDEV_THRESH)
+ if (((avg > stddev * 6) && (divisor * 4 >= INTERVALS * 3))
+ || stddev <= 20) {
data->predicted_us = avg;
+ ret = 1;
+ return ret;
+
+ } else if ((divisor * 4) > INTERVALS * 3) {
+ /* Exclude the max interval */
+ thresh = max - 1;
+ goto again;
+ }
+
+ return ret;
}
/**
@@ -240,6 +316,9 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
int i;
int multiplier;
struct timespec t;
+ int repeat = 0, low_predicted = 0;
+ int cpu = smp_processor_id();
+ struct hrtimer *hrtmr = &per_cpu(menu_hrtimer, cpu);
if (data->needs_update) {
menu_update(drv, dev);
@@ -274,7 +353,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
data->predicted_us = div_round64(data->expected_us * data->correction_factor[data->bucket],
RESOLUTION * DECAY);
- detect_repeating_patterns(data);
+ repeat = get_typical_interval(data);
/*
* We want to default to C1 (hlt), not to busy polling
@@ -295,8 +374,10 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
if (s->disabled || su->disable)
continue;
- if (s->target_residency > data->predicted_us)
+ if (s->target_residency > data->predicted_us) {
+ low_predicted = 1;
continue;
+ }
if (s->exit_latency > latency_req)
continue;
if (s->exit_latency * multiplier > data->predicted_us)
@@ -309,6 +390,44 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
}
}
+	/* The deepest C-state was not chosen: predicted residency is too low */
+ if (low_predicted) {
+ unsigned int timer_us = 0;
+ unsigned int perfect_us = 0;
+
+ /*
+ * Set a timer to detect whether this sleep is much
+ * longer than repeat mode predicted. If the timer
+ * triggers, the code will evaluate whether to put
+ * the CPU into a deeper C-state.
+ * The timer is cancelled on CPU wakeup.
+ */
+ timer_us = 2 * (data->predicted_us + MAX_DEVIATION);
+
+ perfect_us = perfect_cstate_ms * 1000;
+
+ if (repeat && (4 * timer_us < data->expected_us)) {
+ RCU_NONIDLE(hrtimer_start(hrtmr,
+ ns_to_ktime(1000 * timer_us),
+ HRTIMER_MODE_REL_PINNED));
+			/* In the repeat case, the menu hrtimer is started */
+ per_cpu(hrtimer_status, cpu) = MENU_HRTIMER_REPEAT;
+ } else if (perfect_us < data->expected_us) {
+ /*
+ * The next timer is long. This could be because
+ * we did not make a useful prediction.
+ * In that case, it makes sense to re-enter
+ * into a deeper C-state after some time.
+ */
+ RCU_NONIDLE(hrtimer_start(hrtmr,
+ ns_to_ktime(1000 * timer_us),
+ HRTIMER_MODE_REL_PINNED));
+			/* In the general case, the menu hrtimer is started */
+ per_cpu(hrtimer_status, cpu) = MENU_HRTIMER_GENERAL;
+ }
+
+ }
+
return data->last_state_idx;
}
@@ -399,6 +518,9 @@ static int menu_enable_device(struct cpuidle_driver *drv,
struct cpuidle_device *dev)
{
struct menu_device *data = &per_cpu(menu_devices, dev->cpu);
+ struct hrtimer *t = &per_cpu(menu_hrtimer, dev->cpu);
+ hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ t->function = menu_hrtimer_notify;
memset(data, 0, sizeof(struct menu_device));
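
A worked numeric example of the outlier rejection in get_typical_interval() above may help; the interval values are hypothetical but follow the function's arithmetic:

/*
 * intervals[] = { 500, 480, 520, 510, 490, 505, 495, 20000 }   (microseconds)
 *
 * Pass 1: avg ~= 2937, stddev ~= 6450 -> neither acceptance test holds
 *         (avg is not > 6 * stddev, stddev is not <= 20).  All 8 of 8
 *         samples are still included (more than 3/4), so the maximum
 *         (20000) is excluded by lowering the threshold to 19999.
 * Pass 2: avg = 500, stddev ~= 12 -> stddev <= 20, so 500 us is taken
 *         as the typical interval and data->predicted_us is set to it.
 */
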
diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
index 5f809e337b89..340942946106 100644
--- a/drivers/cpuidle/sysfs.c
+++ b/drivers/cpuidle/sysfs.c
@@ -12,6 +12,7 @@
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/capability.h>
+#include <linux/device.h>
#include "cpuidle.h"
@@ -297,6 +298,13 @@ static struct attribute *cpuidle_state_default_attrs[] = {
NULL
};
+struct cpuidle_state_kobj {
+ struct cpuidle_state *state;
+ struct cpuidle_state_usage *state_usage;
+ struct completion kobj_unregister;
+ struct kobject kobj;
+};
+
#define kobj_to_state_obj(k) container_of(k, struct cpuidle_state_kobj, kobj)
#define kobj_to_state(k) (kobj_to_state_obj(k)->state)
#define kobj_to_state_usage(k) (kobj_to_state_obj(k)->state_usage)
@@ -356,17 +364,17 @@ static inline void cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
}
/**
- * cpuidle_add_driver_sysfs - adds driver-specific sysfs attributes
+ * cpuidle_add_state_sysfs - adds cpuidle states sysfs attributes
* @device: the target device
*/
-int cpuidle_add_state_sysfs(struct cpuidle_device *device)
+static int cpuidle_add_state_sysfs(struct cpuidle_device *device)
{
int i, ret = -ENOMEM;
struct cpuidle_state_kobj *kobj;
- struct cpuidle_driver *drv = cpuidle_get_driver();
+ struct cpuidle_driver *drv = cpuidle_get_cpu_driver(device);
/* state statistics */
- for (i = 0; i < device->state_count; i++) {
+ for (i = 0; i < drv->state_count; i++) {
kobj = kzalloc(sizeof(struct cpuidle_state_kobj), GFP_KERNEL);
if (!kobj)
goto error_state;
@@ -374,8 +382,8 @@ int cpuidle_add_state_sysfs(struct cpuidle_device *device)
kobj->state_usage = &device->states_usage[i];
init_completion(&kobj->kobj_unregister);
- ret = kobject_init_and_add(&kobj->kobj, &ktype_state_cpuidle, &device->kobj,
- "state%d", i);
+ ret = kobject_init_and_add(&kobj->kobj, &ktype_state_cpuidle,
+ &device->kobj, "state%d", i);
if (ret) {
kfree(kobj);
goto error_state;
@@ -393,10 +401,10 @@ error_state:
}
/**
- * cpuidle_remove_driver_sysfs - removes driver-specific sysfs attributes
+ * cpuidle_remove_state_sysfs - removes the cpuidle states sysfs attributes
* @device: the target device
*/
-void cpuidle_remove_state_sysfs(struct cpuidle_device *device)
+static void cpuidle_remove_state_sysfs(struct cpuidle_device *device)
{
int i;
@@ -404,17 +412,179 @@ void cpuidle_remove_state_sysfs(struct cpuidle_device *device)
cpuidle_free_state_kobj(device, i);
}
+#ifdef CONFIG_CPU_IDLE_MULTIPLE_DRIVERS
+#define kobj_to_driver_kobj(k) container_of(k, struct cpuidle_driver_kobj, kobj)
+#define attr_to_driver_attr(a) container_of(a, struct cpuidle_driver_attr, attr)
+
+#define define_one_driver_ro(_name, show) \
+ static struct cpuidle_driver_attr attr_driver_##_name = \
+	__ATTR(_name, 0444, show, NULL)
+
+struct cpuidle_driver_kobj {
+ struct cpuidle_driver *drv;
+ struct completion kobj_unregister;
+ struct kobject kobj;
+};
+
+struct cpuidle_driver_attr {
+ struct attribute attr;
+ ssize_t (*show)(struct cpuidle_driver *, char *);
+ ssize_t (*store)(struct cpuidle_driver *, const char *, size_t);
+};
+
+static ssize_t show_driver_name(struct cpuidle_driver *drv, char *buf)
+{
+ ssize_t ret;
+
+ spin_lock(&cpuidle_driver_lock);
+ ret = sprintf(buf, "%s\n", drv ? drv->name : "none");
+ spin_unlock(&cpuidle_driver_lock);
+
+ return ret;
+}
+
+static void cpuidle_driver_sysfs_release(struct kobject *kobj)
+{
+ struct cpuidle_driver_kobj *driver_kobj = kobj_to_driver_kobj(kobj);
+ complete(&driver_kobj->kobj_unregister);
+}
+
+static ssize_t cpuidle_driver_show(struct kobject *kobj, struct attribute *attr,
+				   char *buf)
+{
+ int ret = -EIO;
+ struct cpuidle_driver_kobj *driver_kobj = kobj_to_driver_kobj(kobj);
+ struct cpuidle_driver_attr *dattr = attr_to_driver_attr(attr);
+
+ if (dattr->show)
+ ret = dattr->show(driver_kobj->drv, buf);
+
+ return ret;
+}
+
+static ssize_t cpuidle_driver_store(struct kobject *kobj, struct attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret = -EIO;
+ struct cpuidle_driver_kobj *driver_kobj = kobj_to_driver_kobj(kobj);
+ struct cpuidle_driver_attr *dattr = attr_to_driver_attr(attr);
+
+ if (dattr->store)
+ ret = dattr->store(driver_kobj->drv, buf, size);
+
+ return ret;
+}
+
+define_one_driver_ro(name, show_driver_name);
+
+static const struct sysfs_ops cpuidle_driver_sysfs_ops = {
+ .show = cpuidle_driver_show,
+ .store = cpuidle_driver_store,
+};
+
+static struct attribute *cpuidle_driver_default_attrs[] = {
+ &attr_driver_name.attr,
+ NULL
+};
+
+static struct kobj_type ktype_driver_cpuidle = {
+ .sysfs_ops = &cpuidle_driver_sysfs_ops,
+ .default_attrs = cpuidle_driver_default_attrs,
+ .release = cpuidle_driver_sysfs_release,
+};
+
+/**
+ * cpuidle_add_driver_sysfs - adds the driver name sysfs attribute
+ * @dev: the target device
+ */
+static int cpuidle_add_driver_sysfs(struct cpuidle_device *dev)
+{
+ struct cpuidle_driver_kobj *kdrv;
+ struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
+ int ret;
+
+ kdrv = kzalloc(sizeof(*kdrv), GFP_KERNEL);
+ if (!kdrv)
+ return -ENOMEM;
+
+ kdrv->drv = drv;
+ init_completion(&kdrv->kobj_unregister);
+
+ ret = kobject_init_and_add(&kdrv->kobj, &ktype_driver_cpuidle,
+ &dev->kobj, "driver");
+ if (ret) {
+ kfree(kdrv);
+ return ret;
+ }
+
+ kobject_uevent(&kdrv->kobj, KOBJ_ADD);
+ dev->kobj_driver = kdrv;
+
+ return ret;
+}
+
+/**
+ * cpuidle_remove_driver_sysfs - removes the driver name sysfs attribute
+ * @dev: the target device
+ */
+static void cpuidle_remove_driver_sysfs(struct cpuidle_device *dev)
+{
+ struct cpuidle_driver_kobj *kdrv = dev->kobj_driver;
+ kobject_put(&kdrv->kobj);
+ wait_for_completion(&kdrv->kobj_unregister);
+ kfree(kdrv);
+}
+#else
+static inline int cpuidle_add_driver_sysfs(struct cpuidle_device *dev)
+{
+ return 0;
+}
+
+static inline void cpuidle_remove_driver_sysfs(struct cpuidle_device *dev)
+{
+ ;
+}
+#endif
+
+/**
+ * cpuidle_add_device_sysfs - adds device specific sysfs attributes
+ * @device: the target device
+ */
+int cpuidle_add_device_sysfs(struct cpuidle_device *device)
+{
+ int ret;
+
+ ret = cpuidle_add_state_sysfs(device);
+ if (ret)
+ return ret;
+
+ ret = cpuidle_add_driver_sysfs(device);
+ if (ret)
+ cpuidle_remove_state_sysfs(device);
+ return ret;
+}
+
+/**
+ * cpuidle_remove_device_sysfs - removes device specific sysfs attributes
+ * @device: the target device
+ */
+void cpuidle_remove_device_sysfs(struct cpuidle_device *device)
+{
+ cpuidle_remove_driver_sysfs(device);
+ cpuidle_remove_state_sysfs(device);
+}
+
/**
* cpuidle_add_sysfs - creates a sysfs instance for the target device
* @dev: the target device
*/
-int cpuidle_add_sysfs(struct device *cpu_dev)
+int cpuidle_add_sysfs(struct cpuidle_device *dev)
{
- int cpu = cpu_dev->id;
- struct cpuidle_device *dev;
+ struct device *cpu_dev = get_cpu_device((unsigned long)dev->cpu);
int error;
- dev = per_cpu(cpuidle_devices, cpu);
+ init_completion(&dev->kobj_unregister);
+
error = kobject_init_and_add(&dev->kobj, &ktype_cpuidle, &cpu_dev->kobj,
"cpuidle");
if (!error)
@@ -426,11 +596,8 @@ int cpuidle_add_sysfs(struct device *cpu_dev)
* cpuidle_remove_sysfs - deletes a sysfs instance on the target device
* @dev: the target device
*/
-void cpuidle_remove_sysfs(struct device *cpu_dev)
+void cpuidle_remove_sysfs(struct cpuidle_device *dev)
{
- int cpu = cpu_dev->id;
- struct cpuidle_device *dev;
-
- dev = per_cpu(cpuidle_devices, cpu);
kobject_put(&dev->kobj);
+ wait_for_completion(&dev->kobj_unregister);
}
diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig
index f6b0a6e2ea50..0f079be13305 100644
--- a/drivers/devfreq/Kconfig
+++ b/drivers/devfreq/Kconfig
@@ -30,7 +30,7 @@ if PM_DEVFREQ
comment "DEVFREQ Governors"
config DEVFREQ_GOV_SIMPLE_ONDEMAND
- bool "Simple Ondemand"
+ tristate "Simple Ondemand"
help
Chooses frequency based on the recent load on the device. Works
similar as ONDEMAND governor of CPUFREQ does. A device with
@@ -39,7 +39,7 @@ config DEVFREQ_GOV_SIMPLE_ONDEMAND
values to the governor with data field at devfreq_add_device().
config DEVFREQ_GOV_PERFORMANCE
- bool "Performance"
+ tristate "Performance"
help
Sets the frequency at the maximum available frequency.
This governor always returns UINT_MAX as frequency so that
@@ -47,7 +47,7 @@ config DEVFREQ_GOV_PERFORMANCE
at any time.
config DEVFREQ_GOV_POWERSAVE
- bool "Powersave"
+ tristate "Powersave"
help
Sets the frequency at the minimum available frequency.
This governor always returns 0 as frequency so that
@@ -55,7 +55,7 @@ config DEVFREQ_GOV_POWERSAVE
at any time.
config DEVFREQ_GOV_USERSPACE
- bool "Userspace"
+ tristate "Userspace"
help
Sets the frequency at the user specified one.
This governor returns the user configured frequency if there
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index b146d76f04cf..53766f39aadd 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -27,21 +27,17 @@
#include <linux/hrtimer.h>
#include "governor.h"
-struct class *devfreq_class;
+static struct class *devfreq_class;
/*
- * devfreq_work periodically monitors every registered device.
- * The minimum polling interval is one jiffy. The polling interval is
- * determined by the minimum polling period among all polling devfreq
- * devices. The resolution of polling interval is one jiffy.
+ * devfreq core provides delayed work based load monitoring helper
+ * functions. Governors can use these or can implement their own
+ * monitoring mechanism.
*/
-static bool polling;
static struct workqueue_struct *devfreq_wq;
-static struct delayed_work devfreq_work;
-
-/* wait removing if this is to be removed */
-static struct devfreq *wait_remove_device;
+/* The list of all device-devfreq governors */
+static LIST_HEAD(devfreq_governor_list);
/* The list of all device-devfreq */
static LIST_HEAD(devfreq_list);
static DEFINE_MUTEX(devfreq_list_lock);
@@ -73,6 +69,79 @@ static struct devfreq *find_device_devfreq(struct device *dev)
}
/**
+ * devfreq_get_freq_level() - Lookup freq_table for the frequency
+ * @devfreq: the devfreq instance
+ * @freq: the target frequency
+ */
+static int devfreq_get_freq_level(struct devfreq *devfreq, unsigned long freq)
+{
+ int lev;
+
+ for (lev = 0; lev < devfreq->profile->max_state; lev++)
+ if (freq == devfreq->profile->freq_table[lev])
+ return lev;
+
+ return -EINVAL;
+}
+
+/**
+ * devfreq_update_status() - Update statistics of devfreq behavior
+ * @devfreq: the devfreq instance
+ * @freq: the update target frequency
+ */
+static int devfreq_update_status(struct devfreq *devfreq, unsigned long freq)
+{
+ int lev, prev_lev;
+ unsigned long cur_time;
+
+ lev = devfreq_get_freq_level(devfreq, freq);
+ if (lev < 0)
+ return lev;
+
+ cur_time = jiffies;
+ devfreq->time_in_state[lev] +=
+ cur_time - devfreq->last_stat_updated;
+ if (freq != devfreq->previous_freq) {
+ prev_lev = devfreq_get_freq_level(devfreq,
+ devfreq->previous_freq);
+ devfreq->trans_table[(prev_lev *
+ devfreq->profile->max_state) + lev]++;
+ devfreq->total_trans++;
+ }
+ devfreq->last_stat_updated = cur_time;
+
+ return 0;
+}
+
+/**
+ * find_devfreq_governor() - find devfreq governor from name
+ * @name: name of the governor
+ *
+ * Search the list of devfreq governors and return the matched
+ * governor's pointer. devfreq_list_lock should be held by the caller.
+ */
+static struct devfreq_governor *find_devfreq_governor(const char *name)
+{
+ struct devfreq_governor *tmp_governor;
+
+ if (unlikely(IS_ERR_OR_NULL(name))) {
+ pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
+ return ERR_PTR(-EINVAL);
+ }
+ WARN(!mutex_is_locked(&devfreq_list_lock),
+ "devfreq_list_lock must be locked.");
+
+ list_for_each_entry(tmp_governor, &devfreq_governor_list, node) {
+ if (!strncmp(tmp_governor->name, name, DEVFREQ_NAME_LEN))
+ return tmp_governor;
+ }
+
+ return ERR_PTR(-ENODEV);
+}
+
+/* Load monitoring helper functions for governors use */
+
+/**
* update_devfreq() - Reevaluate the device and configure frequency.
* @devfreq: the devfreq instance.
*
@@ -90,6 +159,9 @@ int update_devfreq(struct devfreq *devfreq)
return -EINVAL;
}
+ if (!devfreq->governor)
+ return -EINVAL;
+
/* Reevaluate the proper frequency */
err = devfreq->governor->get_target_freq(devfreq, &freq);
if (err)
@@ -116,16 +188,173 @@ int update_devfreq(struct devfreq *devfreq)
if (err)
return err;
+ if (devfreq->profile->freq_table)
+ if (devfreq_update_status(devfreq, freq))
+ dev_err(&devfreq->dev,
+ "Couldn't update frequency transition information.\n");
+
devfreq->previous_freq = freq;
return err;
}
+EXPORT_SYMBOL(update_devfreq);
+
+/**
+ * devfreq_monitor() - Periodically poll devfreq objects.
+ * @work: the work struct used to run devfreq_monitor periodically.
+ *
+ */
+static void devfreq_monitor(struct work_struct *work)
+{
+ int err;
+ struct devfreq *devfreq = container_of(work,
+ struct devfreq, work.work);
+
+ mutex_lock(&devfreq->lock);
+ err = update_devfreq(devfreq);
+ if (err)
+ dev_err(&devfreq->dev, "dvfs failed with (%d) error\n", err);
+
+ queue_delayed_work(devfreq_wq, &devfreq->work,
+ msecs_to_jiffies(devfreq->profile->polling_ms));
+ mutex_unlock(&devfreq->lock);
+}
+
+/**
+ * devfreq_monitor_start() - Start load monitoring of devfreq instance
+ * @devfreq: the devfreq instance.
+ *
+ * Helper function for starting devfreq device load monitoring. By
+ * default delayed work based monitoring is supported. Function
+ * to be called from governor in response to DEVFREQ_GOV_START
+ * event when device is added to devfreq framework.
+ */
+void devfreq_monitor_start(struct devfreq *devfreq)
+{
+ INIT_DEFERRABLE_WORK(&devfreq->work, devfreq_monitor);
+ if (devfreq->profile->polling_ms)
+ queue_delayed_work(devfreq_wq, &devfreq->work,
+ msecs_to_jiffies(devfreq->profile->polling_ms));
+}
+EXPORT_SYMBOL(devfreq_monitor_start);
+
+/**
+ * devfreq_monitor_stop() - Stop load monitoring of a devfreq instance
+ * @devfreq: the devfreq instance.
+ *
+ * Helper function to stop devfreq device load monitoring. Function
+ * to be called from governor in response to DEVFREQ_GOV_STOP
+ * event when device is removed from devfreq framework.
+ */
+void devfreq_monitor_stop(struct devfreq *devfreq)
+{
+ cancel_delayed_work_sync(&devfreq->work);
+}
+EXPORT_SYMBOL(devfreq_monitor_stop);
+
+/**
+ * devfreq_monitor_suspend() - Suspend load monitoring of a devfreq instance
+ * @devfreq: the devfreq instance.
+ *
+ * Helper function to suspend devfreq device load monitoring. Function
+ * to be called from governor in response to DEVFREQ_GOV_SUSPEND
+ * event or when polling interval is set to zero.
+ *
+ * Note: Though this function is the same as devfreq_monitor_stop(),
+ * intentionally kept separate to provide hooks for collecting
+ * transition statistics.
+ */
+void devfreq_monitor_suspend(struct devfreq *devfreq)
+{
+ mutex_lock(&devfreq->lock);
+ if (devfreq->stop_polling) {
+ mutex_unlock(&devfreq->lock);
+ return;
+ }
+
+ devfreq->stop_polling = true;
+ mutex_unlock(&devfreq->lock);
+ cancel_delayed_work_sync(&devfreq->work);
+}
+EXPORT_SYMBOL(devfreq_monitor_suspend);
+
+/**
+ * devfreq_monitor_resume() - Resume load monitoring of a devfreq instance
+ * @devfreq: the devfreq instance.
+ *
+ * Helper function to resume devfreq device load monitoring. Function
+ * to be called from governor in response to DEVFREQ_GOV_RESUME
+ * event or when polling interval is set to non-zero.
+ */
+void devfreq_monitor_resume(struct devfreq *devfreq)
+{
+ mutex_lock(&devfreq->lock);
+ if (!devfreq->stop_polling)
+ goto out;
+
+ if (!delayed_work_pending(&devfreq->work) &&
+ devfreq->profile->polling_ms)
+ queue_delayed_work(devfreq_wq, &devfreq->work,
+ msecs_to_jiffies(devfreq->profile->polling_ms));
+ devfreq->stop_polling = false;
+
+out:
+ mutex_unlock(&devfreq->lock);
+}
+EXPORT_SYMBOL(devfreq_monitor_resume);
+
+/**
+ * devfreq_interval_update() - Update device devfreq monitoring interval
+ * @devfreq: the devfreq instance.
+ * @delay: new polling interval to be set.
+ *
+ * Helper function to set new load monitoring polling interval. Function
+ * to be called from governor in response to DEVFREQ_GOV_INTERVAL event.
+ */
+void devfreq_interval_update(struct devfreq *devfreq, unsigned int *delay)
+{
+ unsigned int cur_delay = devfreq->profile->polling_ms;
+ unsigned int new_delay = *delay;
+
+ mutex_lock(&devfreq->lock);
+ devfreq->profile->polling_ms = new_delay;
+
+ if (devfreq->stop_polling)
+ goto out;
+
+ /* if new delay is zero, stop polling */
+ if (!new_delay) {
+ mutex_unlock(&devfreq->lock);
+ cancel_delayed_work_sync(&devfreq->work);
+ return;
+ }
+
+ /* if current delay is zero, start polling with new delay */
+ if (!cur_delay) {
+ queue_delayed_work(devfreq_wq, &devfreq->work,
+ msecs_to_jiffies(devfreq->profile->polling_ms));
+ goto out;
+ }
+
+ /* if current delay is greater than new delay, restart polling */
+ if (cur_delay > new_delay) {
+ mutex_unlock(&devfreq->lock);
+ cancel_delayed_work_sync(&devfreq->work);
+ mutex_lock(&devfreq->lock);
+ if (!devfreq->stop_polling)
+ queue_delayed_work(devfreq_wq, &devfreq->work,
+ msecs_to_jiffies(devfreq->profile->polling_ms));
+ }
+out:
+ mutex_unlock(&devfreq->lock);
+}
+EXPORT_SYMBOL(devfreq_interval_update);
/**
* devfreq_notifier_call() - Notify that the device frequency requirements
* has been changed out of devfreq framework.
- * @nb the notifier_block (supposed to be devfreq->nb)
- * @type not used
- * @devp not used
+ * @nb: the notifier_block (supposed to be devfreq->nb)
+ * @type: not used
+ * @devp: not used
*
* Called by a notifier that uses devfreq->nb.
*/
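
A sketch of the governor shape the monitoring helpers above are meant for, not an in-tree governor: it assumes the helper prototypes and the DEVFREQ_GOV_* event constants are available through the framework's local governor.h header, that event_handler takes (devfreq, event, data) as the calls below in this patch suggest, and the target-frequency policy shown is a placeholder.

#include <linux/devfreq.h>
#include "governor.h"	/* devfreq_monitor_*() helpers, DEVFREQ_GOV_* events */

/* Placeholder policy: always ask for the profile's initial frequency. */
static int example_get_target_freq(struct devfreq *df, unsigned long *freq)
{
	*freq = df->profile->initial_freq;
	return 0;
}

static int example_event_handler(struct devfreq *devfreq,
				 unsigned int event, void *data)
{
	switch (event) {
	case DEVFREQ_GOV_START:
		devfreq_monitor_start(devfreq);
		break;
	case DEVFREQ_GOV_STOP:
		devfreq_monitor_stop(devfreq);
		break;
	case DEVFREQ_GOV_INTERVAL:
		devfreq_interval_update(devfreq, (unsigned int *)data);
		break;
	case DEVFREQ_GOV_SUSPEND:
		devfreq_monitor_suspend(devfreq);
		break;
	case DEVFREQ_GOV_RESUME:
		devfreq_monitor_resume(devfreq);
		break;
	}
	return 0;
}

static struct devfreq_governor example_governor = {
	.name			= "example",
	.get_target_freq	= example_get_target_freq,
	.event_handler		= example_event_handler,
};

Such a governor would be handed to devfreq_add_governor() at module init and devfreq_remove_governor() at exit, matching the registration code added later in this file.
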
@@ -143,59 +372,34 @@ static int devfreq_notifier_call(struct notifier_block *nb, unsigned long type,
}
/**
- * _remove_devfreq() - Remove devfreq from the device.
+ * _remove_devfreq() - Remove devfreq from the list and release its resources.
* @devfreq: the devfreq struct
* @skip: skip calling device_unregister().
- *
- * Note that the caller should lock devfreq->lock before calling
- * this. _remove_devfreq() will unlock it and free devfreq
- * internally. devfreq_list_lock should be locked by the caller
- * as well (not relased at return)
- *
- * Lock usage:
- * devfreq->lock: locked before call.
- * unlocked at return (and freed)
- * devfreq_list_lock: locked before call.
- * kept locked at return.
- * if devfreq is centrally polled.
- *
- * Freed memory:
- * devfreq
*/
static void _remove_devfreq(struct devfreq *devfreq, bool skip)
{
- if (!mutex_is_locked(&devfreq->lock)) {
- WARN(true, "devfreq->lock must be locked by the caller.\n");
- return;
- }
- if (!devfreq->governor->no_central_polling &&
- !mutex_is_locked(&devfreq_list_lock)) {
- WARN(true, "devfreq_list_lock must be locked by the caller.\n");
+ mutex_lock(&devfreq_list_lock);
+ if (IS_ERR(find_device_devfreq(devfreq->dev.parent))) {
+ mutex_unlock(&devfreq_list_lock);
+ dev_warn(&devfreq->dev, "releasing devfreq which doesn't exist\n");
return;
}
+ list_del(&devfreq->node);
+ mutex_unlock(&devfreq_list_lock);
- if (devfreq->being_removed)
- return;
-
- devfreq->being_removed = true;
+ if (devfreq->governor)
+ devfreq->governor->event_handler(devfreq,
+ DEVFREQ_GOV_STOP, NULL);
if (devfreq->profile->exit)
devfreq->profile->exit(devfreq->dev.parent);
- if (devfreq->governor->exit)
- devfreq->governor->exit(devfreq);
-
if (!skip && get_device(&devfreq->dev)) {
device_unregister(&devfreq->dev);
put_device(&devfreq->dev);
}
- if (!devfreq->governor->no_central_polling)
- list_del(&devfreq->node);
-
- mutex_unlock(&devfreq->lock);
mutex_destroy(&devfreq->lock);
-
kfree(devfreq);
}
@@ -210,163 +414,39 @@ static void _remove_devfreq(struct devfreq *devfreq, bool skip)
static void devfreq_dev_release(struct device *dev)
{
struct devfreq *devfreq = to_devfreq(dev);
- bool central_polling = !devfreq->governor->no_central_polling;
-
- /*
- * If devfreq_dev_release() was called by device_unregister() of
- * _remove_devfreq(), we cannot mutex_lock(&devfreq->lock) and
- * being_removed is already set. This also partially checks the case
- * where devfreq_dev_release() is called from a thread other than
- * the one called _remove_devfreq(); however, this case is
- * dealt completely with another following being_removed check.
- *
- * Because being_removed is never being
- * unset, we do not need to worry about race conditions on
- * being_removed.
- */
- if (devfreq->being_removed)
- return;
-
- if (central_polling)
- mutex_lock(&devfreq_list_lock);
-
- mutex_lock(&devfreq->lock);
- /*
- * Check being_removed flag again for the case where
- * devfreq_dev_release() was called in a thread other than the one
- * possibly called _remove_devfreq().
- */
- if (devfreq->being_removed) {
- mutex_unlock(&devfreq->lock);
- goto out;
- }
-
- /* devfreq->lock is unlocked and removed in _removed_devfreq() */
_remove_devfreq(devfreq, true);
-
-out:
- if (central_polling)
- mutex_unlock(&devfreq_list_lock);
-}
-
-/**
- * devfreq_monitor() - Periodically poll devfreq objects.
- * @work: the work struct used to run devfreq_monitor periodically.
- *
- */
-static void devfreq_monitor(struct work_struct *work)
-{
- static unsigned long last_polled_at;
- struct devfreq *devfreq, *tmp;
- int error;
- unsigned long jiffies_passed;
- unsigned long next_jiffies = ULONG_MAX, now = jiffies;
- struct device *dev;
-
- /* Initially last_polled_at = 0, polling every device at bootup */
- jiffies_passed = now - last_polled_at;
- last_polled_at = now;
- if (jiffies_passed == 0)
- jiffies_passed = 1;
-
- mutex_lock(&devfreq_list_lock);
- list_for_each_entry_safe(devfreq, tmp, &devfreq_list, node) {
- mutex_lock(&devfreq->lock);
- dev = devfreq->dev.parent;
-
- /* Do not remove tmp for a while */
- wait_remove_device = tmp;
-
- if (devfreq->governor->no_central_polling ||
- devfreq->next_polling == 0) {
- mutex_unlock(&devfreq->lock);
- continue;
- }
- mutex_unlock(&devfreq_list_lock);
-
- /*
- * Reduce more next_polling if devfreq_wq took an extra
- * delay. (i.e., CPU has been idled.)
- */
- if (devfreq->next_polling <= jiffies_passed) {
- error = update_devfreq(devfreq);
-
- /* Remove a devfreq with an error. */
- if (error && error != -EAGAIN) {
-
- dev_err(dev, "Due to update_devfreq error(%d), devfreq(%s) is removed from the device\n",
- error, devfreq->governor->name);
-
- /*
- * Unlock devfreq before locking the list
- * in order to avoid deadlock with
- * find_device_devfreq or others
- */
- mutex_unlock(&devfreq->lock);
- mutex_lock(&devfreq_list_lock);
- /* Check if devfreq is already removed */
- if (IS_ERR(find_device_devfreq(dev)))
- continue;
- mutex_lock(&devfreq->lock);
- /* This unlocks devfreq->lock and free it */
- _remove_devfreq(devfreq, false);
- continue;
- }
- devfreq->next_polling = devfreq->polling_jiffies;
- } else {
- devfreq->next_polling -= jiffies_passed;
- }
-
- if (devfreq->next_polling)
- next_jiffies = (next_jiffies > devfreq->next_polling) ?
- devfreq->next_polling : next_jiffies;
-
- mutex_unlock(&devfreq->lock);
- mutex_lock(&devfreq_list_lock);
- }
- wait_remove_device = NULL;
- mutex_unlock(&devfreq_list_lock);
-
- if (next_jiffies > 0 && next_jiffies < ULONG_MAX) {
- polling = true;
- queue_delayed_work(devfreq_wq, &devfreq_work, next_jiffies);
- } else {
- polling = false;
- }
}
/**
* devfreq_add_device() - Add devfreq feature to the device
* @dev: the device to add devfreq feature.
* @profile: device-specific profile to run devfreq.
- * @governor: the policy to choose frequency.
+ * @governor_name: name of the policy to choose frequency.
* @data: private data for the governor. The devfreq framework does not
* touch this value.
*/
struct devfreq *devfreq_add_device(struct device *dev,
struct devfreq_dev_profile *profile,
- const struct devfreq_governor *governor,
+ const char *governor_name,
void *data)
{
struct devfreq *devfreq;
+ struct devfreq_governor *governor;
int err = 0;
- if (!dev || !profile || !governor) {
+ if (!dev || !profile || !governor_name) {
dev_err(dev, "%s: Invalid parameters.\n", __func__);
return ERR_PTR(-EINVAL);
}
-
- if (!governor->no_central_polling) {
- mutex_lock(&devfreq_list_lock);
- devfreq = find_device_devfreq(dev);
- mutex_unlock(&devfreq_list_lock);
- if (!IS_ERR(devfreq)) {
- dev_err(dev, "%s: Unable to create devfreq for the device. It already has one.\n", __func__);
- err = -EINVAL;
- goto err_out;
- }
+ mutex_lock(&devfreq_list_lock);
+ devfreq = find_device_devfreq(dev);
+ mutex_unlock(&devfreq_list_lock);
+ if (!IS_ERR(devfreq)) {
+ dev_err(dev, "%s: Unable to create devfreq for the device. It already has one.\n", __func__);
+ err = -EINVAL;
+ goto err_out;
}
devfreq = kzalloc(sizeof(struct devfreq), GFP_KERNEL);
@@ -383,92 +463,316 @@ struct devfreq *devfreq_add_device(struct device *dev,
devfreq->dev.class = devfreq_class;
devfreq->dev.release = devfreq_dev_release;
devfreq->profile = profile;
- devfreq->governor = governor;
+ strncpy(devfreq->governor_name, governor_name, DEVFREQ_NAME_LEN);
devfreq->previous_freq = profile->initial_freq;
devfreq->data = data;
- devfreq->next_polling = devfreq->polling_jiffies
- = msecs_to_jiffies(devfreq->profile->polling_ms);
devfreq->nb.notifier_call = devfreq_notifier_call;
+ devfreq->trans_table = devm_kzalloc(dev, sizeof(unsigned int) *
+ devfreq->profile->max_state *
+ devfreq->profile->max_state,
+ GFP_KERNEL);
+ devfreq->time_in_state = devm_kzalloc(dev, sizeof(unsigned int) *
+ devfreq->profile->max_state,
+ GFP_KERNEL);
+ devfreq->last_stat_updated = jiffies;
+
dev_set_name(&devfreq->dev, dev_name(dev));
err = device_register(&devfreq->dev);
if (err) {
put_device(&devfreq->dev);
+ mutex_unlock(&devfreq->lock);
goto err_dev;
}
- if (governor->init)
- err = governor->init(devfreq);
- if (err)
- goto err_init;
-
mutex_unlock(&devfreq->lock);
- if (governor->no_central_polling)
- goto out;
-
mutex_lock(&devfreq_list_lock);
-
list_add(&devfreq->node, &devfreq_list);
- if (devfreq_wq && devfreq->next_polling && !polling) {
- polling = true;
- queue_delayed_work(devfreq_wq, &devfreq_work,
- devfreq->next_polling);
- }
+ governor = find_devfreq_governor(devfreq->governor_name);
+ if (!IS_ERR(governor))
+ devfreq->governor = governor;
+ if (devfreq->governor)
+ err = devfreq->governor->event_handler(devfreq,
+ DEVFREQ_GOV_START, NULL);
mutex_unlock(&devfreq_list_lock);
-out:
+ if (err) {
+ dev_err(dev, "%s: Unable to start governor for the device\n",
+ __func__);
+ goto err_init;
+ }
+
return devfreq;
err_init:
+ list_del(&devfreq->node);
device_unregister(&devfreq->dev);
err_dev:
- mutex_unlock(&devfreq->lock);
kfree(devfreq);
err_out:
return ERR_PTR(err);
}
+EXPORT_SYMBOL(devfreq_add_device);
/**
* devfreq_remove_device() - Remove devfreq feature from a device.
- * @devfreq the devfreq instance to be removed
+ * @devfreq: the devfreq instance to be removed
*/
int devfreq_remove_device(struct devfreq *devfreq)
{
- bool central_polling;
+ if (!devfreq)
+ return -EINVAL;
+
+ _remove_devfreq(devfreq, false);
+ return 0;
+}
+EXPORT_SYMBOL(devfreq_remove_device);
+
+/**
+ * devfreq_suspend_device() - Suspend devfreq of a device.
+ * @devfreq: the devfreq instance to be suspended
+ */
+int devfreq_suspend_device(struct devfreq *devfreq)
+{
if (!devfreq)
return -EINVAL;
- central_polling = !devfreq->governor->no_central_polling;
+ if (!devfreq->governor)
+ return 0;
+
+ return devfreq->governor->event_handler(devfreq,
+ DEVFREQ_GOV_SUSPEND, NULL);
+}
+EXPORT_SYMBOL(devfreq_suspend_device);
+
+/**
+ * devfreq_resume_device() - Resume devfreq of a device.
+ * @devfreq: the devfreq instance to be resumed
+ */
+int devfreq_resume_device(struct devfreq *devfreq)
+{
+ if (!devfreq)
+ return -EINVAL;
+
+ if (!devfreq->governor)
+ return 0;
+
+ return devfreq->governor->event_handler(devfreq,
+ DEVFREQ_GOV_RESUME, NULL);
+}
+EXPORT_SYMBOL(devfreq_resume_device);
+
+/**
+ * devfreq_add_governor() - Add devfreq governor
+ * @governor: the devfreq governor to be added
+ */
+int devfreq_add_governor(struct devfreq_governor *governor)
+{
+ struct devfreq_governor *g;
+ struct devfreq *devfreq;
+ int err = 0;
+
+ if (!governor) {
+ pr_err("%s: Invalid parameters.\n", __func__);
+ return -EINVAL;
+ }
+
+ mutex_lock(&devfreq_list_lock);
+ g = find_devfreq_governor(governor->name);
+ if (!IS_ERR(g)) {
+ pr_err("%s: governor %s already registered\n", __func__,
+ g->name);
+ err = -EINVAL;
+ goto err_out;
+ }
- if (central_polling) {
- mutex_lock(&devfreq_list_lock);
- while (wait_remove_device == devfreq) {
- mutex_unlock(&devfreq_list_lock);
- schedule();
- mutex_lock(&devfreq_list_lock);
+ list_add(&governor->node, &devfreq_governor_list);
+
+ list_for_each_entry(devfreq, &devfreq_list, node) {
+ int ret = 0;
+ struct device *dev = devfreq->dev.parent;
+
+ if (!strncmp(devfreq->governor_name, governor->name,
+ DEVFREQ_NAME_LEN)) {
+ /* The following should never occur */
+ if (devfreq->governor) {
+ dev_warn(dev,
+ "%s: Governor %s already present\n",
+ __func__, devfreq->governor->name);
+ ret = devfreq->governor->event_handler(devfreq,
+ DEVFREQ_GOV_STOP, NULL);
+ if (ret) {
+ dev_warn(dev,
+ "%s: Governor %s stop = %d\n",
+ __func__,
+ devfreq->governor->name, ret);
+ }
+ /* Fall through */
+ }
+ devfreq->governor = governor;
+ ret = devfreq->governor->event_handler(devfreq,
+ DEVFREQ_GOV_START, NULL);
+ if (ret) {
+ dev_warn(dev, "%s: Governor %s start=%d\n",
+ __func__, devfreq->governor->name,
+ ret);
+ }
}
}
- mutex_lock(&devfreq->lock);
- _remove_devfreq(devfreq, false); /* it unlocks devfreq->lock */
+err_out:
+ mutex_unlock(&devfreq_list_lock);
- if (central_polling)
- mutex_unlock(&devfreq_list_lock);
+ return err;
+}
+EXPORT_SYMBOL(devfreq_add_governor);
- return 0;
+/**
+ * devfreq_remove_governor() - Remove devfreq governor
+ * @governor: the devfreq governor to be removed
+ */
+int devfreq_remove_governor(struct devfreq_governor *governor)
+{
+ struct devfreq_governor *g;
+ struct devfreq *devfreq;
+ int err = 0;
+
+ if (!governor) {
+ pr_err("%s: Invalid parameters.\n", __func__);
+ return -EINVAL;
+ }
+
+ mutex_lock(&devfreq_list_lock);
+ g = find_devfreq_governor(governor->name);
+ if (IS_ERR(g)) {
+ pr_err("%s: governor %s not registered\n", __func__,
+ governor->name);
+ err = PTR_ERR(g);
+ goto err_out;
+ }
+ list_for_each_entry(devfreq, &devfreq_list, node) {
+ int ret;
+ struct device *dev = devfreq->dev.parent;
+
+ if (!strncmp(devfreq->governor_name, governor->name,
+ DEVFREQ_NAME_LEN)) {
+ /* we should have a devfreq governor! */
+ if (!devfreq->governor) {
+ dev_warn(dev, "%s: Governor %s NOT present\n",
+ __func__, governor->name);
+ continue;
+ }
+ ret = devfreq->governor->event_handler(devfreq,
+ DEVFREQ_GOV_STOP, NULL);
+ if (ret) {
+ dev_warn(dev, "%s: Governor %s stop=%d\n",
+ __func__, devfreq->governor->name,
+ ret);
+ }
+ devfreq->governor = NULL;
+ }
+ }
+
+ list_del(&governor->node);
+err_out:
+ mutex_unlock(&devfreq_list_lock);
+
+ return err;
}
+EXPORT_SYMBOL(devfreq_remove_governor);
static ssize_t show_governor(struct device *dev,
struct device_attribute *attr, char *buf)
{
+ if (!to_devfreq(dev)->governor)
+ return -EINVAL;
+
return sprintf(buf, "%s\n", to_devfreq(dev)->governor->name);
}
+static ssize_t store_governor(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct devfreq *df = to_devfreq(dev);
+ int ret;
+ char str_governor[DEVFREQ_NAME_LEN + 1];
+ struct devfreq_governor *governor;
+
+ ret = sscanf(buf, "%" __stringify(DEVFREQ_NAME_LEN) "s", str_governor);
+ if (ret != 1)
+ return -EINVAL;
+
+ mutex_lock(&devfreq_list_lock);
+ governor = find_devfreq_governor(str_governor);
+ if (IS_ERR(governor)) {
+ ret = PTR_ERR(governor);
+ goto out;
+ }
+ if (df->governor == governor)
+ goto out;
+
+ if (df->governor) {
+ ret = df->governor->event_handler(df, DEVFREQ_GOV_STOP, NULL);
+ if (ret) {
+ dev_warn(dev, "%s: Governor %s not stopped(%d)\n",
+ __func__, df->governor->name, ret);
+ goto out;
+ }
+ }
+ df->governor = governor;
+ strncpy(df->governor_name, governor->name, DEVFREQ_NAME_LEN);
+ ret = df->governor->event_handler(df, DEVFREQ_GOV_START, NULL);
+ if (ret)
+ dev_warn(dev, "%s: Governor %s not started(%d)\n",
+ __func__, df->governor->name, ret);
+out:
+ mutex_unlock(&devfreq_list_lock);
+
+ if (!ret)
+ ret = count;
+ return ret;
+}
+static ssize_t show_available_governors(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct devfreq_governor *tmp_governor;
+ ssize_t count = 0;
+
+ mutex_lock(&devfreq_list_lock);
+ list_for_each_entry(tmp_governor, &devfreq_governor_list, node)
+ count += scnprintf(&buf[count], (PAGE_SIZE - count - 2),
+ "%s ", tmp_governor->name);
+ mutex_unlock(&devfreq_list_lock);
+
+ /* Truncate the trailing space */
+ if (count)
+ count--;
+
+ count += sprintf(&buf[count], "\n");
+
+ return count;
+}
+
static ssize_t show_freq(struct device *dev,
struct device_attribute *attr, char *buf)
{
+ unsigned long freq;
+ struct devfreq *devfreq = to_devfreq(dev);
+
+ if (devfreq->profile->get_cur_freq &&
+ !devfreq->profile->get_cur_freq(devfreq->dev.parent, &freq))
+ return sprintf(buf, "%lu\n", freq);
+
+ return sprintf(buf, "%lu\n", devfreq->previous_freq);
+}
+
+static ssize_t show_target_freq(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
return sprintf(buf, "%lu\n", to_devfreq(dev)->previous_freq);
}
@@ -486,39 +790,19 @@ static ssize_t store_polling_interval(struct device *dev,
unsigned int value;
int ret;
+ if (!df->governor)
+ return -EINVAL;
+
ret = sscanf(buf, "%u", &value);
if (ret != 1)
- goto out;
-
- mutex_lock(&df->lock);
- df->profile->polling_ms = value;
- df->next_polling = df->polling_jiffies
- = msecs_to_jiffies(value);
- mutex_unlock(&df->lock);
+ return -EINVAL;
+ df->governor->event_handler(df, DEVFREQ_GOV_INTERVAL, &value);
ret = count;
- if (df->governor->no_central_polling)
- goto out;
-
- mutex_lock(&devfreq_list_lock);
- if (df->next_polling > 0 && !polling) {
- polling = true;
- queue_delayed_work(devfreq_wq, &devfreq_work,
- df->next_polling);
- }
- mutex_unlock(&devfreq_list_lock);
-out:
return ret;
}
-static ssize_t show_central_polling(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n",
- !to_devfreq(dev)->governor->no_central_polling);
-}
-
static ssize_t store_min_freq(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
@@ -529,7 +813,7 @@ static ssize_t store_min_freq(struct device *dev, struct device_attribute *attr,
ret = sscanf(buf, "%lu", &value);
if (ret != 1)
- goto out;
+ return -EINVAL;
mutex_lock(&df->lock);
max = df->max_freq;
@@ -543,7 +827,6 @@ static ssize_t store_min_freq(struct device *dev, struct device_attribute *attr,
ret = count;
unlock:
mutex_unlock(&df->lock);
-out:
return ret;
}
@@ -563,7 +846,7 @@ static ssize_t store_max_freq(struct device *dev, struct device_attribute *attr,
ret = sscanf(buf, "%lu", &value);
if (ret != 1)
- goto out;
+ return -EINVAL;
mutex_lock(&df->lock);
min = df->min_freq;
@@ -577,7 +860,6 @@ static ssize_t store_max_freq(struct device *dev, struct device_attribute *attr,
ret = count;
unlock:
mutex_unlock(&df->lock);
-out:
return ret;
}
@@ -587,34 +869,92 @@ static ssize_t show_max_freq(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%lu\n", to_devfreq(dev)->max_freq);
}
+static ssize_t show_available_freqs(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct devfreq *df = to_devfreq(d);
+ struct device *dev = df->dev.parent;
+ struct opp *opp;
+ ssize_t count = 0;
+ unsigned long freq = 0;
+
+ rcu_read_lock();
+ do {
+ opp = opp_find_freq_ceil(dev, &freq);
+ if (IS_ERR(opp))
+ break;
+
+ count += scnprintf(&buf[count], (PAGE_SIZE - count - 2),
+ "%lu ", freq);
+ freq++;
+ } while (1);
+ rcu_read_unlock();
+
+ /* Truncate the trailing space */
+ if (count)
+ count--;
+
+ count += sprintf(&buf[count], "\n");
+
+ return count;
+}
+
+static ssize_t show_trans_table(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct devfreq *devfreq = to_devfreq(dev);
+ ssize_t len;
+ int i, j, err;
+ unsigned int max_state = devfreq->profile->max_state;
+
+ err = devfreq_update_status(devfreq, devfreq->previous_freq);
+ if (err)
+ return 0;
+
+ len = sprintf(buf, " From : To\n");
+ len += sprintf(buf + len, " :");
+ for (i = 0; i < max_state; i++)
+ len += sprintf(buf + len, "%8u",
+ devfreq->profile->freq_table[i]);
+
+ len += sprintf(buf + len, " time(ms)\n");
+
+ for (i = 0; i < max_state; i++) {
+ if (devfreq->profile->freq_table[i]
+ == devfreq->previous_freq) {
+ len += sprintf(buf + len, "*");
+ } else {
+ len += sprintf(buf + len, " ");
+ }
+ len += sprintf(buf + len, "%8u:",
+ devfreq->profile->freq_table[i]);
+ for (j = 0; j < max_state; j++)
+ len += sprintf(buf + len, "%8u",
+ devfreq->trans_table[(i * max_state) + j]);
+ len += sprintf(buf + len, "%10u\n",
+ jiffies_to_msecs(devfreq->time_in_state[i]));
+ }
+
+ len += sprintf(buf + len, "Total transition : %u\n",
+ devfreq->total_trans);
+ return len;
+}
+
static struct device_attribute devfreq_attrs[] = {
- __ATTR(governor, S_IRUGO, show_governor, NULL),
+ __ATTR(governor, S_IRUGO | S_IWUSR, show_governor, store_governor),
+ __ATTR(available_governors, S_IRUGO, show_available_governors, NULL),
__ATTR(cur_freq, S_IRUGO, show_freq, NULL),
- __ATTR(central_polling, S_IRUGO, show_central_polling, NULL),
+ __ATTR(available_frequencies, S_IRUGO, show_available_freqs, NULL),
+ __ATTR(target_freq, S_IRUGO, show_target_freq, NULL),
__ATTR(polling_interval, S_IRUGO | S_IWUSR, show_polling_interval,
store_polling_interval),
__ATTR(min_freq, S_IRUGO | S_IWUSR, show_min_freq, store_min_freq),
__ATTR(max_freq, S_IRUGO | S_IWUSR, show_max_freq, store_max_freq),
+ __ATTR(trans_stat, S_IRUGO, show_trans_table, NULL),
{ },
};
-/**
- * devfreq_start_polling() - Initialize data structure for devfreq framework and
- * start polling registered devfreq devices.
- */
-static int __init devfreq_start_polling(void)
-{
- mutex_lock(&devfreq_list_lock);
- polling = false;
- devfreq_wq = create_freezable_workqueue("devfreq_wq");
- INIT_DEFERRABLE_WORK(&devfreq_work, devfreq_monitor);
- mutex_unlock(&devfreq_list_lock);
-
- devfreq_monitor(&devfreq_work.work);
- return 0;
-}
-late_initcall(devfreq_start_polling);
-
static int __init devfreq_init(void)
{
devfreq_class = class_create(THIS_MODULE, "devfreq");
@@ -622,7 +962,15 @@ static int __init devfreq_init(void)
pr_err("%s: couldn't create class\n", __FILE__);
return PTR_ERR(devfreq_class);
}
+
+ devfreq_wq = create_freezable_workqueue("devfreq_wq");
+ if (IS_ERR(devfreq_wq)) {
+ class_destroy(devfreq_class);
+ pr_err("%s: couldn't create workqueue\n", __FILE__);
+ return PTR_ERR(devfreq_wq);
+ }
devfreq_class->dev_attrs = devfreq_attrs;
+
return 0;
}
subsys_initcall(devfreq_init);
@@ -630,6 +978,7 @@ subsys_initcall(devfreq_init);
static void __exit devfreq_exit(void)
{
class_destroy(devfreq_class);
+ destroy_workqueue(devfreq_wq);
}
module_exit(devfreq_exit);
@@ -641,9 +990,9 @@ module_exit(devfreq_exit);
/**
* devfreq_recommended_opp() - Helper function to get proper OPP for the
* freq value given to target callback.
- * @dev The devfreq user device. (parent of devfreq)
- * @freq The frequency given to target function
- * @flags Flags handed from devfreq framework.
+ * @dev: The devfreq user device. (parent of devfreq)
+ * @freq: The frequency given to target function
+ * @flags: Flags handed from devfreq framework.
*
*/
struct opp *devfreq_recommended_opp(struct device *dev, unsigned long *freq,
@@ -656,14 +1005,14 @@ struct opp *devfreq_recommended_opp(struct device *dev, unsigned long *freq,
opp = opp_find_freq_floor(dev, freq);
/* If not available, use the closest opp */
- if (opp == ERR_PTR(-ENODEV))
+ if (opp == ERR_PTR(-ERANGE))
opp = opp_find_freq_ceil(dev, freq);
} else {
/* The freq is an lower bound. opp should be higher */
opp = opp_find_freq_ceil(dev, freq);
/* If not available, use the closest opp */
- if (opp == ERR_PTR(-ENODEV))
+ if (opp == ERR_PTR(-ERANGE))
opp = opp_find_freq_floor(dev, freq);
}
@@ -674,35 +1023,49 @@ struct opp *devfreq_recommended_opp(struct device *dev, unsigned long *freq,
* devfreq_register_opp_notifier() - Helper function to get devfreq notified
* for any changes in the OPP availability
* changes
- * @dev The devfreq user device. (parent of devfreq)
- * @devfreq The devfreq object.
+ * @dev: The devfreq user device. (parent of devfreq)
+ * @devfreq: The devfreq object.
*/
int devfreq_register_opp_notifier(struct device *dev, struct devfreq *devfreq)
{
- struct srcu_notifier_head *nh = opp_get_notifier(dev);
+ struct srcu_notifier_head *nh;
+ int ret = 0;
+ rcu_read_lock();
+ nh = opp_get_notifier(dev);
if (IS_ERR(nh))
- return PTR_ERR(nh);
- return srcu_notifier_chain_register(nh, &devfreq->nb);
+ ret = PTR_ERR(nh);
+ rcu_read_unlock();
+ if (!ret)
+ ret = srcu_notifier_chain_register(nh, &devfreq->nb);
+
+ return ret;
}
/**
* devfreq_unregister_opp_notifier() - Helper function to stop getting devfreq
* notified for any changes in the OPP
* availability changes anymore.
- * @dev The devfreq user device. (parent of devfreq)
- * @devfreq The devfreq object.
+ * @dev: The devfreq user device. (parent of devfreq)
+ * @devfreq: The devfreq object.
*
* At exit() callback of devfreq_dev_profile, this must be included if
* devfreq_recommended_opp is used.
*/
int devfreq_unregister_opp_notifier(struct device *dev, struct devfreq *devfreq)
{
- struct srcu_notifier_head *nh = opp_get_notifier(dev);
+ struct srcu_notifier_head *nh;
+ int ret = 0;
+ rcu_read_lock();
+ nh = opp_get_notifier(dev);
if (IS_ERR(nh))
- return PTR_ERR(nh);
- return srcu_notifier_chain_unregister(nh, &devfreq->nb);
+ ret = PTR_ERR(nh);
+ rcu_read_unlock();
+ if (!ret)
+ ret = srcu_notifier_chain_unregister(nh, &devfreq->nb);
+
+ return ret;
}
MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
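For reference, a minimal sketch of how a consumer driver uses the reworked interface after this change: the governor is now selected by name rather than by a struct pointer, and system suspend/resume are forwarded through the new helpers. The foo_* names and the empty profile are hypothetical; only the devfreq_add_device(), devfreq_suspend_device() and devfreq_resume_device() calls shown in this patch are assumed.

#include <linux/device.h>
#include <linux/devfreq.h>
#include <linux/err.h>

/* Hypothetical consumer driver state; only the devfreq calls come from this patch. */
struct foo_drvdata {
	struct devfreq *devfreq;
	struct devfreq_dev_profile profile;	/* .target()/.get_dev_status() set up elsewhere */
};

static int foo_devfreq_init(struct device *dev, struct foo_drvdata *d)
{
	/* The governor is requested by name and may be built as a module. */
	d->devfreq = devfreq_add_device(dev, &d->profile, "simple_ondemand", NULL);
	if (IS_ERR(d->devfreq))
		return PTR_ERR(d->devfreq);

	return 0;
}

static int foo_suspend(struct device *dev)
{
	struct foo_drvdata *d = dev_get_drvdata(dev);

	/* Forwards DEVFREQ_GOV_SUSPEND to the active governor. */
	return devfreq_suspend_device(d->devfreq);
}

static int foo_resume(struct device *dev)
{
	struct foo_drvdata *d = dev_get_drvdata(dev);

	return devfreq_resume_device(d->devfreq);
}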
diff --git a/drivers/devfreq/exynos4_bus.c b/drivers/devfreq/exynos4_bus.c
index 88ddc77a9bb1..741837208716 100644
--- a/drivers/devfreq/exynos4_bus.c
+++ b/drivers/devfreq/exynos4_bus.c
@@ -987,7 +987,7 @@ static __devinit int exynos4_busfreq_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
int err = 0;
- data = kzalloc(sizeof(struct busfreq_data), GFP_KERNEL);
+ data = devm_kzalloc(&pdev->dev, sizeof(struct busfreq_data), GFP_KERNEL);
if (data == NULL) {
dev_err(dev, "Cannot allocate memory.\n");
return -ENOMEM;
@@ -1012,31 +1012,26 @@ static __devinit int exynos4_busfreq_probe(struct platform_device *pdev)
err = -EINVAL;
}
if (err)
- goto err_regulator;
+ return err;
- data->vdd_int = regulator_get(dev, "vdd_int");
+ data->vdd_int = devm_regulator_get(dev, "vdd_int");
if (IS_ERR(data->vdd_int)) {
dev_err(dev, "Cannot get the regulator \"vdd_int\"\n");
- err = PTR_ERR(data->vdd_int);
- goto err_regulator;
+ return PTR_ERR(data->vdd_int);
}
if (data->type == TYPE_BUSF_EXYNOS4x12) {
- data->vdd_mif = regulator_get(dev, "vdd_mif");
+ data->vdd_mif = devm_regulator_get(dev, "vdd_mif");
if (IS_ERR(data->vdd_mif)) {
dev_err(dev, "Cannot get the regulator \"vdd_mif\"\n");
- err = PTR_ERR(data->vdd_mif);
- regulator_put(data->vdd_int);
- goto err_regulator;
-
+ return PTR_ERR(data->vdd_mif);
}
}
opp = opp_find_freq_floor(dev, &exynos4_devfreq_profile.initial_freq);
if (IS_ERR(opp)) {
dev_err(dev, "Invalid initial frequency %lu kHz.\n",
- exynos4_devfreq_profile.initial_freq);
- err = PTR_ERR(opp);
- goto err_opp_add;
+ exynos4_devfreq_profile.initial_freq);
+ return PTR_ERR(opp);
}
data->curr_opp = opp;
@@ -1045,30 +1040,20 @@ static __devinit int exynos4_busfreq_probe(struct platform_device *pdev)
busfreq_mon_reset(data);
data->devfreq = devfreq_add_device(dev, &exynos4_devfreq_profile,
- &devfreq_simple_ondemand, NULL);
- if (IS_ERR(data->devfreq)) {
- err = PTR_ERR(data->devfreq);
- goto err_opp_add;
- }
+ "simple_ondemand", NULL);
+ if (IS_ERR(data->devfreq))
+ return PTR_ERR(data->devfreq);
devfreq_register_opp_notifier(dev, data->devfreq);
err = register_pm_notifier(&data->pm_notifier);
if (err) {
dev_err(dev, "Failed to setup pm notifier\n");
- goto err_devfreq_add;
+ devfreq_remove_device(data->devfreq);
+ return err;
}
return 0;
-err_devfreq_add:
- devfreq_remove_device(data->devfreq);
-err_opp_add:
- if (data->vdd_mif)
- regulator_put(data->vdd_mif);
- regulator_put(data->vdd_int);
-err_regulator:
- kfree(data);
- return err;
}
static __devexit int exynos4_busfreq_remove(struct platform_device *pdev)
@@ -1077,10 +1062,6 @@ static __devexit int exynos4_busfreq_remove(struct platform_device *pdev)
unregister_pm_notifier(&data->pm_notifier);
devfreq_remove_device(data->devfreq);
- regulator_put(data->vdd_int);
- if (data->vdd_mif)
- regulator_put(data->vdd_mif);
- kfree(data);
return 0;
}
diff --git a/drivers/devfreq/governor.h b/drivers/devfreq/governor.h
index ea7f13c58ded..fad7d6321978 100644
--- a/drivers/devfreq/governor.h
+++ b/drivers/devfreq/governor.h
@@ -18,7 +18,24 @@
#define to_devfreq(DEV) container_of((DEV), struct devfreq, dev)
+/* Devfreq events */
+#define DEVFREQ_GOV_START 0x1
+#define DEVFREQ_GOV_STOP 0x2
+#define DEVFREQ_GOV_INTERVAL 0x3
+#define DEVFREQ_GOV_SUSPEND 0x4
+#define DEVFREQ_GOV_RESUME 0x5
+
/* Caution: devfreq->lock must be locked before calling update_devfreq */
extern int update_devfreq(struct devfreq *devfreq);
+extern void devfreq_monitor_start(struct devfreq *devfreq);
+extern void devfreq_monitor_stop(struct devfreq *devfreq);
+extern void devfreq_monitor_suspend(struct devfreq *devfreq);
+extern void devfreq_monitor_resume(struct devfreq *devfreq);
+extern void devfreq_interval_update(struct devfreq *devfreq,
+ unsigned int *delay);
+
+extern int devfreq_add_governor(struct devfreq_governor *governor);
+extern int devfreq_remove_governor(struct devfreq_governor *governor);
+
#endif /* _GOVERNOR_H */
diff --git a/drivers/devfreq/governor_performance.c b/drivers/devfreq/governor_performance.c
index af75ddd4f158..c72f942f30a8 100644
--- a/drivers/devfreq/governor_performance.c
+++ b/drivers/devfreq/governor_performance.c
@@ -10,6 +10,7 @@
*/
#include <linux/devfreq.h>
+#include <linux/module.h>
#include "governor.h"
static int devfreq_performance_func(struct devfreq *df,
@@ -26,14 +27,41 @@ static int devfreq_performance_func(struct devfreq *df,
return 0;
}
-static int performance_init(struct devfreq *devfreq)
+static int devfreq_performance_handler(struct devfreq *devfreq,
+ unsigned int event, void *data)
{
- return update_devfreq(devfreq);
+ int ret = 0;
+
+ if (event == DEVFREQ_GOV_START) {
+ mutex_lock(&devfreq->lock);
+ ret = update_devfreq(devfreq);
+ mutex_unlock(&devfreq->lock);
+ }
+
+ return ret;
}
-const struct devfreq_governor devfreq_performance = {
+static struct devfreq_governor devfreq_performance = {
.name = "performance",
- .init = performance_init,
.get_target_freq = devfreq_performance_func,
- .no_central_polling = true,
+ .event_handler = devfreq_performance_handler,
};
+
+static int __init devfreq_performance_init(void)
+{
+ return devfreq_add_governor(&devfreq_performance);
+}
+subsys_initcall(devfreq_performance_init);
+
+static void __exit devfreq_performance_exit(void)
+{
+ int ret;
+
+ ret = devfreq_remove_governor(&devfreq_performance);
+ if (ret)
+		pr_err("%s: failed to remove governor %d\n", __func__, ret);
+
+ return;
+}
+module_exit(devfreq_performance_exit);
+MODULE_LICENSE("GPL");
diff --git a/drivers/devfreq/governor_powersave.c b/drivers/devfreq/governor_powersave.c
index fec0cdbd2477..0c6bed567e6d 100644
--- a/drivers/devfreq/governor_powersave.c
+++ b/drivers/devfreq/governor_powersave.c
@@ -10,6 +10,7 @@
*/
#include <linux/devfreq.h>
+#include <linux/module.h>
#include "governor.h"
static int devfreq_powersave_func(struct devfreq *df,
@@ -23,14 +24,41 @@ static int devfreq_powersave_func(struct devfreq *df,
return 0;
}
-static int powersave_init(struct devfreq *devfreq)
+static int devfreq_powersave_handler(struct devfreq *devfreq,
+ unsigned int event, void *data)
{
- return update_devfreq(devfreq);
+ int ret = 0;
+
+ if (event == DEVFREQ_GOV_START) {
+ mutex_lock(&devfreq->lock);
+ ret = update_devfreq(devfreq);
+ mutex_unlock(&devfreq->lock);
+ }
+
+ return ret;
}
-const struct devfreq_governor devfreq_powersave = {
+static struct devfreq_governor devfreq_powersave = {
.name = "powersave",
- .init = powersave_init,
.get_target_freq = devfreq_powersave_func,
- .no_central_polling = true,
+ .event_handler = devfreq_powersave_handler,
};
+
+static int __init devfreq_powersave_init(void)
+{
+ return devfreq_add_governor(&devfreq_powersave);
+}
+subsys_initcall(devfreq_powersave_init);
+
+static void __exit devfreq_powersave_exit(void)
+{
+ int ret;
+
+ ret = devfreq_remove_governor(&devfreq_powersave);
+ if (ret)
+		pr_err("%s: failed to remove governor %d\n", __func__, ret);
+
+ return;
+}
+module_exit(devfreq_powersave_exit);
+MODULE_LICENSE("GPL");
diff --git a/drivers/devfreq/governor_simpleondemand.c b/drivers/devfreq/governor_simpleondemand.c
index a2e3eae79011..0720ba84ca92 100644
--- a/drivers/devfreq/governor_simpleondemand.c
+++ b/drivers/devfreq/governor_simpleondemand.c
@@ -10,8 +10,10 @@
*/
#include <linux/errno.h>
+#include <linux/module.h>
#include <linux/devfreq.h>
#include <linux/math64.h>
+#include "governor.h"
/* Default constants for DevFreq-Simple-Ondemand (DFSO) */
#define DFSO_UPTHRESHOLD (90)
@@ -88,7 +90,58 @@ static int devfreq_simple_ondemand_func(struct devfreq *df,
return 0;
}
-const struct devfreq_governor devfreq_simple_ondemand = {
+static int devfreq_simple_ondemand_handler(struct devfreq *devfreq,
+ unsigned int event, void *data)
+{
+ switch (event) {
+ case DEVFREQ_GOV_START:
+ devfreq_monitor_start(devfreq);
+ break;
+
+ case DEVFREQ_GOV_STOP:
+ devfreq_monitor_stop(devfreq);
+ break;
+
+ case DEVFREQ_GOV_INTERVAL:
+ devfreq_interval_update(devfreq, (unsigned int *)data);
+ break;
+
+ case DEVFREQ_GOV_SUSPEND:
+ devfreq_monitor_suspend(devfreq);
+ break;
+
+ case DEVFREQ_GOV_RESUME:
+ devfreq_monitor_resume(devfreq);
+ break;
+
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static struct devfreq_governor devfreq_simple_ondemand = {
.name = "simple_ondemand",
.get_target_freq = devfreq_simple_ondemand_func,
+ .event_handler = devfreq_simple_ondemand_handler,
};
+
+static int __init devfreq_simple_ondemand_init(void)
+{
+ return devfreq_add_governor(&devfreq_simple_ondemand);
+}
+subsys_initcall(devfreq_simple_ondemand_init);
+
+static void __exit devfreq_simple_ondemand_exit(void)
+{
+ int ret;
+
+ ret = devfreq_remove_governor(&devfreq_simple_ondemand);
+ if (ret)
+		pr_err("%s: failed to remove governor %d\n", __func__, ret);
+
+ return;
+}
+module_exit(devfreq_simple_ondemand_exit);
+MODULE_LICENSE("GPL");
diff --git a/drivers/devfreq/governor_userspace.c b/drivers/devfreq/governor_userspace.c
index 0681246fc89d..35de6e83c1fe 100644
--- a/drivers/devfreq/governor_userspace.c
+++ b/drivers/devfreq/governor_userspace.c
@@ -14,6 +14,7 @@
#include <linux/devfreq.h>
#include <linux/pm.h>
#include <linux/mutex.h>
+#include <linux/module.h>
#include "governor.h"
struct userspace_data {
@@ -116,10 +117,46 @@ static void userspace_exit(struct devfreq *devfreq)
devfreq->data = NULL;
}
-const struct devfreq_governor devfreq_userspace = {
+static int devfreq_userspace_handler(struct devfreq *devfreq,
+ unsigned int event, void *data)
+{
+ int ret = 0;
+
+ switch (event) {
+ case DEVFREQ_GOV_START:
+ ret = userspace_init(devfreq);
+ break;
+ case DEVFREQ_GOV_STOP:
+ userspace_exit(devfreq);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static struct devfreq_governor devfreq_userspace = {
.name = "userspace",
.get_target_freq = devfreq_userspace_func,
- .init = userspace_init,
- .exit = userspace_exit,
- .no_central_polling = true,
+ .event_handler = devfreq_userspace_handler,
};
+
+static int __init devfreq_userspace_init(void)
+{
+ return devfreq_add_governor(&devfreq_userspace);
+}
+subsys_initcall(devfreq_userspace_init);
+
+static void __exit devfreq_userspace_exit(void)
+{
+ int ret;
+
+ ret = devfreq_remove_governor(&devfreq_userspace);
+ if (ret)
+		pr_err("%s: failed to remove governor %d\n", __func__, ret);
+
+ return;
+}
+module_exit(devfreq_userspace_exit);
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 47150f5ded04..f16557690cfd 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -49,6 +49,10 @@ config OF_GPIO
def_bool y
depends on OF
+config GPIO_ACPI
+ def_bool y
+ depends on ACPI
+
config DEBUG_GPIO
bool "Debug GPIO calls"
depends on DEBUG_KERNEL
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 9aeed6707326..420dbaca05f1 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -4,6 +4,7 @@ ccflags-$(CONFIG_DEBUG_GPIO) += -DDEBUG
obj-$(CONFIG_GPIOLIB) += gpiolib.o devres.o
obj-$(CONFIG_OF_GPIO) += gpiolib-of.o
+obj-$(CONFIG_GPIO_ACPI) += gpiolib-acpi.o
# Device drivers. Generally keep list sorted alphabetically
obj-$(CONFIG_GPIO_GENERIC) += gpio-generic.o
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
new file mode 100644
index 000000000000..cbad6e908d30
--- /dev/null
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -0,0 +1,54 @@
+/*
+ * ACPI helpers for GPIO API
+ *
+ * Copyright (C) 2012, Intel Corporation
+ * Authors: Mathias Nyman <mathias.nyman@linux.intel.com>
+ * Mika Westerberg <mika.westerberg@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/errno.h>
+#include <linux/gpio.h>
+#include <linux/export.h>
+#include <linux/acpi_gpio.h>
+#include <linux/acpi.h>
+
+static int acpi_gpiochip_find(struct gpio_chip *gc, void *data)
+{
+ if (!gc->dev)
+ return false;
+
+ return ACPI_HANDLE(gc->dev) == data;
+}
+
+/**
+ * acpi_get_gpio() - Translate ACPI GPIO pin to GPIO number usable with GPIO API
+ * @path:	ACPI GPIO controller full path name (e.g. "\\_SB.GPO1")
+ * @pin: ACPI GPIO pin number (0-based, controller-relative)
+ *
+ * Returns GPIO number to use with the Linux generic GPIO API, or a negative errno on failure
+ */
+int acpi_get_gpio(char *path, int pin)
+{
+ struct gpio_chip *chip;
+ acpi_handle handle;
+ acpi_status status;
+
+ status = acpi_get_handle(NULL, path, &handle);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ chip = gpiochip_find(handle, acpi_gpiochip_find);
+ if (!chip)
+ return -ENODEV;
+
+ if (!gpio_is_valid(chip->base + pin))
+ return -EINVAL;
+
+ return chip->base + pin;
+}
+EXPORT_SYMBOL_GPL(acpi_get_gpio);
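A short usage illustration of the helper above; the controller path and pin number are made up, and gpio_request_one() is just one way a driver might then claim the translated GPIO.

#include <linux/acpi_gpio.h>
#include <linux/gpio.h>

static int example_claim_reset_gpio(void)
{
	int gpio;

	/* "\\_SB.GPO1" and pin 4 are hypothetical; use the values from the platform's DSDT. */
	gpio = acpi_get_gpio("\\_SB.GPO1", 4);
	if (gpio < 0)
		return gpio;

	return gpio_request_one(gpio, GPIOF_OUT_INIT_LOW, "example-reset");
}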
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index a7edf987a339..e388590b44ab 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -39,6 +39,7 @@
#include <linux/irqflags.h>
#include <linux/rwsem.h>
#include <linux/pm_runtime.h>
+#include <linux/acpi.h>
#include <asm/uaccess.h>
#include "i2c-core.h"
@@ -78,6 +79,10 @@ static int i2c_device_match(struct device *dev, struct device_driver *drv)
if (of_driver_match_device(dev, drv))
return 1;
+ /* Then ACPI style match */
+ if (acpi_driver_match_device(dev, drv))
+ return 1;
+
driver = to_i2c_driver(drv);
/* match on an id table if there is one */
if (driver->id_table)
@@ -539,6 +544,7 @@ i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info)
client->dev.bus = &i2c_bus_type;
client->dev.type = &i2c_client_type;
client->dev.of_node = info->of_node;
+ ACPI_HANDLE_SET(&client->dev, info->acpi_node.handle);
/* For 10-bit clients, add an arbitrary offset to avoid collisions */
dev_set_name(&client->dev, "%d-%04x", i2c_adapter_id(adap),
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index b0f6b4c8ee14..c49c04d9c2b0 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -56,7 +56,6 @@
#include <linux/kernel.h>
#include <linux/cpuidle.h>
#include <linux/clockchips.h>
-#include <linux/hrtimer.h> /* ktime_get_real() */
#include <trace/events/power.h>
#include <linux/sched.h>
#include <linux/notifier.h>
@@ -72,6 +71,7 @@
static struct cpuidle_driver intel_idle_driver = {
.name = "intel_idle",
.owner = THIS_MODULE,
+ .en_core_tk_irqen = 1,
};
/* intel_idle.max_cstate=0 disables driver */
static int max_cstate = MWAIT_MAX_NUM_CSTATES - 1;
@@ -281,8 +281,6 @@ static int intel_idle(struct cpuidle_device *dev,
struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
unsigned long eax = (unsigned long)cpuidle_get_statedata(state_usage);
unsigned int cstate;
- ktime_t kt_before, kt_after;
- s64 usec_delta;
int cpu = smp_processor_id();
cstate = (((eax) >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) + 1;
@@ -297,8 +295,6 @@ static int intel_idle(struct cpuidle_device *dev,
if (!(lapic_timer_reliable_states & (1 << (cstate))))
clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
- kt_before = ktime_get_real();
-
stop_critical_timings();
if (!need_resched()) {
@@ -310,17 +306,9 @@ static int intel_idle(struct cpuidle_device *dev,
start_critical_timings();
- kt_after = ktime_get_real();
- usec_delta = ktime_to_us(ktime_sub(kt_after, kt_before));
-
- local_irq_enable();
-
if (!(lapic_timer_reliable_states & (1 << (cstate))))
clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
- /* Update cpuidle counters */
- dev->last_residency = (int)usec_delta;
-
return index;
}
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 83eb1e06ff76..bebbe167fd89 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -81,6 +81,18 @@ config MMC_RICOH_MMC
If unsure, say Y.
+config MMC_SDHCI_ACPI
+ tristate "SDHCI support for ACPI enumerated SDHCI controllers"
+ depends on MMC_SDHCI && ACPI
+ help
+ This selects support for ACPI enumerated SDHCI controllers,
+ identified by ACPI Compatibility ID PNP0D40 or specific
+ ACPI Hardware IDs.
+
+ If you have a controller with this interface, say Y or M here.
+
+ If unsure, say N.
+
config MMC_SDHCI_PLTFM
tristate "SDHCI platform and OF driver helper"
depends on MMC_SDHCI
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index 39d5e1234709..c5eddc1b4833 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_MMC_MXS) += mxs-mmc.o
obj-$(CONFIG_MMC_SDHCI) += sdhci.o
obj-$(CONFIG_MMC_SDHCI_PCI) += sdhci-pci.o
obj-$(subst m,y,$(CONFIG_MMC_SDHCI_PCI)) += sdhci-pci-data.o
+obj-$(CONFIG_MMC_SDHCI_ACPI) += sdhci-acpi.o
obj-$(CONFIG_MMC_SDHCI_PXAV3) += sdhci-pxav3.o
obj-$(CONFIG_MMC_SDHCI_PXAV2) += sdhci-pxav2.o
obj-$(CONFIG_MMC_SDHCI_S3C) += sdhci-s3c.o
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
new file mode 100644
index 000000000000..12b0a78497f6
--- /dev/null
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -0,0 +1,312 @@
+/*
+ * Secure Digital Host Controller Interface ACPI driver.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/export.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/ioport.h>
+#include <linux/io.h>
+#include <linux/dma-mapping.h>
+#include <linux/compiler.h>
+#include <linux/stddef.h>
+#include <linux/bitops.h>
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/acpi.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+
+#include <linux/mmc/host.h>
+#include <linux/mmc/pm.h>
+#include <linux/mmc/sdhci.h>
+
+#include "sdhci.h"
+
+enum {
+ SDHCI_ACPI_SD_CD = BIT(0),
+ SDHCI_ACPI_RUNTIME_PM = BIT(1),
+};
+
+struct sdhci_acpi_chip {
+ const struct sdhci_ops *ops;
+ unsigned int quirks;
+ unsigned int quirks2;
+ unsigned long caps;
+ unsigned int caps2;
+ mmc_pm_flag_t pm_caps;
+};
+
+struct sdhci_acpi_slot {
+ const struct sdhci_acpi_chip *chip;
+ unsigned int quirks;
+ unsigned int quirks2;
+ unsigned long caps;
+ unsigned int caps2;
+ mmc_pm_flag_t pm_caps;
+ unsigned int flags;
+};
+
+struct sdhci_acpi_host {
+ struct sdhci_host *host;
+ const struct sdhci_acpi_slot *slot;
+ struct platform_device *pdev;
+ bool use_runtime_pm;
+};
+
+static inline bool sdhci_acpi_flag(struct sdhci_acpi_host *c, unsigned int flag)
+{
+ return c->slot && (c->slot->flags & flag);
+}
+
+static int sdhci_acpi_enable_dma(struct sdhci_host *host)
+{
+ return 0;
+}
+
+static const struct sdhci_ops sdhci_acpi_ops_dflt = {
+ .enable_dma = sdhci_acpi_enable_dma,
+};
+
+static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sdio = {
+ .quirks2 = SDHCI_QUIRK2_HOST_OFF_CARD_ON,
+ .caps = MMC_CAP_NONREMOVABLE | MMC_CAP_POWER_OFF_CARD,
+ .flags = SDHCI_ACPI_RUNTIME_PM,
+ .pm_caps = MMC_PM_KEEP_POWER,
+};
+
+static const struct acpi_device_id sdhci_acpi_ids[] = {
+ { "INT33C6", (kernel_ulong_t)&sdhci_acpi_slot_int_sdio },
+ { "PNP0D40" },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, sdhci_acpi_ids);
+
+static const struct sdhci_acpi_slot *sdhci_acpi_get_slot(const char *hid)
+{
+ const struct acpi_device_id *id;
+
+ for (id = sdhci_acpi_ids; id->id[0]; id++)
+ if (!strcmp(id->id, hid))
+ return (const struct sdhci_acpi_slot *)id->driver_data;
+ return NULL;
+}
+
+static int __devinit sdhci_acpi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ acpi_handle handle = ACPI_HANDLE(dev);
+ struct acpi_device *device;
+ struct sdhci_acpi_host *c;
+ struct sdhci_host *host;
+ struct resource *iomem;
+ resource_size_t len;
+ const char *hid;
+ int err;
+
+ if (acpi_bus_get_device(handle, &device))
+ return -ENODEV;
+
+ if (acpi_bus_get_status(device) || !device->status.present)
+ return -ENODEV;
+
+ hid = acpi_device_hid(device);
+
+ iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!iomem)
+ return -ENOMEM;
+
+ len = resource_size(iomem);
+ if (len < 0x100)
+ dev_err(dev, "Invalid iomem size!\n");
+
+ if (!devm_request_mem_region(dev, iomem->start, len, dev_name(dev)))
+ return -ENOMEM;
+
+ host = sdhci_alloc_host(dev, sizeof(struct sdhci_acpi_host));
+ if (IS_ERR(host))
+ return PTR_ERR(host);
+
+ c = sdhci_priv(host);
+ c->host = host;
+ c->slot = sdhci_acpi_get_slot(hid);
+ c->pdev = pdev;
+ c->use_runtime_pm = sdhci_acpi_flag(c, SDHCI_ACPI_RUNTIME_PM);
+
+ platform_set_drvdata(pdev, c);
+
+ host->hw_name = "ACPI";
+ host->ops = &sdhci_acpi_ops_dflt;
+ host->irq = platform_get_irq(pdev, 0);
+
+ host->ioaddr = devm_ioremap_nocache(dev, iomem->start,
+ resource_size(iomem));
+ if (host->ioaddr == NULL) {
+ err = -ENOMEM;
+ goto err_free;
+ }
+
+ if (!dev->dma_mask) {
+ u64 dma_mask;
+
+ if (sdhci_readl(host, SDHCI_CAPABILITIES) & SDHCI_CAN_64BIT) {
+ /* 64-bit DMA is not supported at present */
+ dma_mask = DMA_BIT_MASK(32);
+ } else {
+ dma_mask = DMA_BIT_MASK(32);
+ }
+
+ dev->dma_mask = &dev->coherent_dma_mask;
+ dev->coherent_dma_mask = dma_mask;
+ }
+
+ if (c->slot) {
+ if (c->slot->chip) {
+ host->ops = c->slot->chip->ops;
+ host->quirks |= c->slot->chip->quirks;
+ host->quirks2 |= c->slot->chip->quirks2;
+ host->mmc->caps |= c->slot->chip->caps;
+ host->mmc->caps2 |= c->slot->chip->caps2;
+ host->mmc->pm_caps |= c->slot->chip->pm_caps;
+ }
+ host->quirks |= c->slot->quirks;
+ host->quirks2 |= c->slot->quirks2;
+ host->mmc->caps |= c->slot->caps;
+ host->mmc->caps2 |= c->slot->caps2;
+ host->mmc->pm_caps |= c->slot->pm_caps;
+ }
+
+ err = sdhci_add_host(host);
+ if (err)
+ goto err_free;
+
+ if (c->use_runtime_pm) {
+ pm_suspend_ignore_children(dev, 1);
+ pm_runtime_set_autosuspend_delay(dev, 50);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_enable(dev);
+ }
+
+ return 0;
+
+err_free:
+ platform_set_drvdata(pdev, NULL);
+ sdhci_free_host(c->host);
+ return err;
+}
+
+static int __devexit sdhci_acpi_remove(struct platform_device *pdev)
+{
+ struct sdhci_acpi_host *c = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+ int dead;
+
+ if (c->use_runtime_pm) {
+ pm_runtime_get_sync(dev);
+ pm_runtime_disable(dev);
+ pm_runtime_put_noidle(dev);
+ }
+
+ dead = (sdhci_readl(c->host, SDHCI_INT_STATUS) == ~0);
+ sdhci_remove_host(c->host, dead);
+ platform_set_drvdata(pdev, NULL);
+ sdhci_free_host(c->host);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+
+static int sdhci_acpi_suspend(struct device *dev)
+{
+ struct sdhci_acpi_host *c = dev_get_drvdata(dev);
+
+ return sdhci_suspend_host(c->host);
+}
+
+static int sdhci_acpi_resume(struct device *dev)
+{
+ struct sdhci_acpi_host *c = dev_get_drvdata(dev);
+
+ return sdhci_resume_host(c->host);
+}
+
+#else
+
+#define sdhci_acpi_suspend NULL
+#define sdhci_acpi_resume NULL
+
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+
+static int sdhci_acpi_runtime_suspend(struct device *dev)
+{
+ struct sdhci_acpi_host *c = dev_get_drvdata(dev);
+
+ return sdhci_runtime_suspend_host(c->host);
+}
+
+static int sdhci_acpi_runtime_resume(struct device *dev)
+{
+ struct sdhci_acpi_host *c = dev_get_drvdata(dev);
+
+ return sdhci_runtime_resume_host(c->host);
+}
+
+static int sdhci_acpi_runtime_idle(struct device *dev)
+{
+ return 0;
+}
+
+#else
+
+#define sdhci_acpi_runtime_suspend NULL
+#define sdhci_acpi_runtime_resume NULL
+#define sdhci_acpi_runtime_idle NULL
+
+#endif
+
+static const struct dev_pm_ops sdhci_acpi_pm_ops = {
+ .suspend = sdhci_acpi_suspend,
+ .resume = sdhci_acpi_resume,
+ .runtime_suspend = sdhci_acpi_runtime_suspend,
+ .runtime_resume = sdhci_acpi_runtime_resume,
+ .runtime_idle = sdhci_acpi_runtime_idle,
+};
+
+static struct platform_driver sdhci_acpi_driver = {
+ .driver = {
+ .name = "sdhci-acpi",
+ .owner = THIS_MODULE,
+ .acpi_match_table = sdhci_acpi_ids,
+ .pm = &sdhci_acpi_pm_ops,
+ },
+ .probe = sdhci_acpi_probe,
+ .remove = __devexit_p(sdhci_acpi_remove),
+};
+
+module_platform_driver(sdhci_acpi_driver);
+
+MODULE_DESCRIPTION("Secure Digital Host Controller Interface ACPI driver");
+MODULE_AUTHOR("Adrian Hunter");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/nand/sh_flctl.c b/drivers/mtd/nand/sh_flctl.c
index 4fbfe96e37a1..f48ac5d80bbf 100644
--- a/drivers/mtd/nand/sh_flctl.c
+++ b/drivers/mtd/nand/sh_flctl.c
@@ -727,7 +727,9 @@ static void flctl_select_chip(struct mtd_info *mtd, int chipnr)
if (!flctl->qos_request) {
ret = dev_pm_qos_add_request(&flctl->pdev->dev,
- &flctl->pm_qos, 100);
+ &flctl->pm_qos,
+ DEV_PM_QOS_LATENCY,
+ 100);
if (ret < 0)
dev_err(&flctl->pdev->dev,
"PM QoS request failed: %d\n", ret);
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index c5792d622dc4..1af4008182fd 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -17,10 +17,9 @@
#include <linux/pci-acpi.h>
#include <linux/pm_runtime.h>
+#include <linux/pm_qos.h>
#include "pci.h"
-static DEFINE_MUTEX(pci_acpi_pm_notify_mtx);
-
/**
* pci_acpi_wake_bus - Wake-up notification handler for root buses.
* @handle: ACPI handle of a device the notification is for.
@@ -68,67 +67,6 @@ static void pci_acpi_wake_dev(acpi_handle handle, u32 event, void *context)
}
/**
- * add_pm_notifier - Register PM notifier for given ACPI device.
- * @dev: ACPI device to add the notifier for.
- * @context: PCI device or bus to check for PME status if an event is signaled.
- *
- * NOTE: @dev need not be a run-wake or wake-up device to be a valid source of
- * PM wake-up events. For example, wake-up events may be generated for bridges
- * if one of the devices below the bridge is signaling PME, even if the bridge
- * itself doesn't have a wake-up GPE associated with it.
- */
-static acpi_status add_pm_notifier(struct acpi_device *dev,
- acpi_notify_handler handler,
- void *context)
-{
- acpi_status status = AE_ALREADY_EXISTS;
-
- mutex_lock(&pci_acpi_pm_notify_mtx);
-
- if (dev->wakeup.flags.notifier_present)
- goto out;
-
- status = acpi_install_notify_handler(dev->handle,
- ACPI_SYSTEM_NOTIFY,
- handler, context);
- if (ACPI_FAILURE(status))
- goto out;
-
- dev->wakeup.flags.notifier_present = true;
-
- out:
- mutex_unlock(&pci_acpi_pm_notify_mtx);
- return status;
-}
-
-/**
- * remove_pm_notifier - Unregister PM notifier from given ACPI device.
- * @dev: ACPI device to remove the notifier from.
- */
-static acpi_status remove_pm_notifier(struct acpi_device *dev,
- acpi_notify_handler handler)
-{
- acpi_status status = AE_BAD_PARAMETER;
-
- mutex_lock(&pci_acpi_pm_notify_mtx);
-
- if (!dev->wakeup.flags.notifier_present)
- goto out;
-
- status = acpi_remove_notify_handler(dev->handle,
- ACPI_SYSTEM_NOTIFY,
- handler);
- if (ACPI_FAILURE(status))
- goto out;
-
- dev->wakeup.flags.notifier_present = false;
-
- out:
- mutex_unlock(&pci_acpi_pm_notify_mtx);
- return status;
-}
-
-/**
* pci_acpi_add_bus_pm_notifier - Register PM notifier for given PCI bus.
* @dev: ACPI device to add the notifier for.
* @pci_bus: PCI bus to walk checking for PME status if an event is signaled.
@@ -136,7 +74,7 @@ static acpi_status remove_pm_notifier(struct acpi_device *dev,
acpi_status pci_acpi_add_bus_pm_notifier(struct acpi_device *dev,
struct pci_bus *pci_bus)
{
- return add_pm_notifier(dev, pci_acpi_wake_bus, pci_bus);
+ return acpi_add_pm_notifier(dev, pci_acpi_wake_bus, pci_bus);
}
/**
@@ -145,7 +83,7 @@ acpi_status pci_acpi_add_bus_pm_notifier(struct acpi_device *dev,
*/
acpi_status pci_acpi_remove_bus_pm_notifier(struct acpi_device *dev)
{
- return remove_pm_notifier(dev, pci_acpi_wake_bus);
+ return acpi_remove_pm_notifier(dev, pci_acpi_wake_bus);
}
/**
@@ -156,7 +94,7 @@ acpi_status pci_acpi_remove_bus_pm_notifier(struct acpi_device *dev)
acpi_status pci_acpi_add_pm_notifier(struct acpi_device *dev,
struct pci_dev *pci_dev)
{
- return add_pm_notifier(dev, pci_acpi_wake_dev, pci_dev);
+ return acpi_add_pm_notifier(dev, pci_acpi_wake_dev, pci_dev);
}
/**
@@ -165,7 +103,7 @@ acpi_status pci_acpi_add_pm_notifier(struct acpi_device *dev,
*/
acpi_status pci_acpi_remove_pm_notifier(struct acpi_device *dev)
{
- return remove_pm_notifier(dev, pci_acpi_wake_dev);
+ return acpi_remove_pm_notifier(dev, pci_acpi_wake_dev);
}
phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle)
@@ -257,11 +195,16 @@ static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
return -ENODEV;
switch (state) {
+ case PCI_D3cold:
+ if (dev_pm_qos_flags(&dev->dev, PM_QOS_FLAG_NO_POWER_OFF) ==
+ PM_QOS_FLAGS_ALL) {
+ error = -EBUSY;
+ break;
+ }
case PCI_D0:
case PCI_D1:
case PCI_D2:
case PCI_D3hot:
- case PCI_D3cold:
error = acpi_bus_set_power(handle, state_conv[state]);
}
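The PCI_D3cold branch above is the pattern the new device PM QoS flags are meant for: before removing power, a subsystem asks dev_pm_qos_flags() whether PM_QOS_FLAG_NO_POWER_OFF is in force. A generic sketch of the same check (example_maybe_power_off() is hypothetical):

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/pm_qos.h>

static int example_maybe_power_off(struct device *dev)
{
	/* Mirror the PCI check above: veto power removal when the flag is in force. */
	if (dev_pm_qos_flags(dev, PM_QOS_FLAG_NO_POWER_OFF) == PM_QOS_FLAGS_ALL)
		return -EBUSY;

	/* ... platform-specific code that actually removes power goes here ... */
	return 0;
}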
diff --git a/drivers/pnp/base.h b/drivers/pnp/base.h
index fa4e0a5db3f8..ffd53e3eb92f 100644
--- a/drivers/pnp/base.h
+++ b/drivers/pnp/base.h
@@ -159,6 +159,8 @@ struct pnp_resource {
void pnp_free_resource(struct pnp_resource *pnp_res);
+struct pnp_resource *pnp_add_resource(struct pnp_dev *dev,
+ struct resource *res);
struct pnp_resource *pnp_add_irq_resource(struct pnp_dev *dev, int irq,
int flags);
struct pnp_resource *pnp_add_dma_resource(struct pnp_dev *dev, int dma,
diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c
index 26b5d4b18dd7..72e822e17d47 100644
--- a/drivers/pnp/pnpacpi/core.c
+++ b/drivers/pnp/pnpacpi/core.c
@@ -58,7 +58,7 @@ static inline int __init is_exclusive_device(struct acpi_device *dev)
if (!(('0' <= (c) && (c) <= '9') || ('A' <= (c) && (c) <= 'F'))) \
return 0
#define TEST_ALPHA(c) \
- if (!('@' <= (c) || (c) <= 'Z')) \
+ if (!('A' <= (c) && (c) <= 'Z')) \
return 0
static int __init ispnpidacpi(const char *id)
{
@@ -95,6 +95,9 @@ static int pnpacpi_set_resources(struct pnp_dev *dev)
return -ENODEV;
}
+ if (WARN_ON_ONCE(acpi_dev != dev->data))
+ dev->data = acpi_dev;
+
ret = pnpacpi_build_resource_template(dev, &buffer);
if (ret)
return ret;
@@ -242,6 +245,10 @@ static int __init pnpacpi_add_device(struct acpi_device *device)
char *pnpid;
struct acpi_hardware_id *id;
+ /* Skip devices that are already bound */
+ if (device->physical_node_count)
+ return 0;
+
/*
* If a PnPacpi device is not present , the device
* driver should not be loaded.
diff --git a/drivers/pnp/pnpacpi/rsparser.c b/drivers/pnp/pnpacpi/rsparser.c
index 5be4a392a3ae..b8f4ea7b27fc 100644
--- a/drivers/pnp/pnpacpi/rsparser.c
+++ b/drivers/pnp/pnpacpi/rsparser.c
@@ -28,37 +28,6 @@
#include "../base.h"
#include "pnpacpi.h"
-#ifdef CONFIG_IA64
-#define valid_IRQ(i) (1)
-#else
-#define valid_IRQ(i) (((i) != 0) && ((i) != 2))
-#endif
-
-/*
- * Allocated Resources
- */
-static int irq_flags(int triggering, int polarity, int shareable)
-{
- int flags;
-
- if (triggering == ACPI_LEVEL_SENSITIVE) {
- if (polarity == ACPI_ACTIVE_LOW)
- flags = IORESOURCE_IRQ_LOWLEVEL;
- else
- flags = IORESOURCE_IRQ_HIGHLEVEL;
- } else {
- if (polarity == ACPI_ACTIVE_LOW)
- flags = IORESOURCE_IRQ_LOWEDGE;
- else
- flags = IORESOURCE_IRQ_HIGHEDGE;
- }
-
- if (shareable == ACPI_SHARED)
- flags |= IORESOURCE_IRQ_SHAREABLE;
-
- return flags;
-}
-
static void decode_irq_flags(struct pnp_dev *dev, int flags, int *triggering,
int *polarity, int *shareable)
{
@@ -94,45 +63,6 @@ static void decode_irq_flags(struct pnp_dev *dev, int flags, int *triggering,
*shareable = ACPI_EXCLUSIVE;
}
-static void pnpacpi_parse_allocated_irqresource(struct pnp_dev *dev,
- u32 gsi, int triggering,
- int polarity, int shareable)
-{
- int irq, flags;
- int p, t;
-
- if (!valid_IRQ(gsi)) {
- pnp_add_irq_resource(dev, gsi, IORESOURCE_DISABLED);
- return;
- }
-
- /*
- * in IO-APIC mode, use overrided attribute. Two reasons:
- * 1. BIOS bug in DSDT
- * 2. BIOS uses IO-APIC mode Interrupt Source Override
- */
- if (!acpi_get_override_irq(gsi, &t, &p)) {
- t = t ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE;
- p = p ? ACPI_ACTIVE_LOW : ACPI_ACTIVE_HIGH;
-
- if (triggering != t || polarity != p) {
- dev_warn(&dev->dev, "IRQ %d override to %s, %s\n",
- gsi, t ? "edge":"level", p ? "low":"high");
- triggering = t;
- polarity = p;
- }
- }
-
- flags = irq_flags(triggering, polarity, shareable);
- irq = acpi_register_gsi(&dev->dev, gsi, triggering, polarity);
- if (irq >= 0)
- pcibios_penalize_isa_irq(irq, 1);
- else
- flags |= IORESOURCE_DISABLED;
-
- pnp_add_irq_resource(dev, irq, flags);
-}
-
static int dma_flags(struct pnp_dev *dev, int type, int bus_master,
int transfer)
{
@@ -177,21 +107,16 @@ static int dma_flags(struct pnp_dev *dev, int type, int bus_master,
return flags;
}
-static void pnpacpi_parse_allocated_ioresource(struct pnp_dev *dev, u64 start,
- u64 len, int io_decode,
- int window)
-{
- int flags = 0;
- u64 end = start + len - 1;
+/*
+ * Allocated Resources
+ */
- if (io_decode == ACPI_DECODE_16)
- flags |= IORESOURCE_IO_16BIT_ADDR;
- if (len == 0 || end >= 0x10003)
- flags |= IORESOURCE_DISABLED;
- if (window)
- flags |= IORESOURCE_WINDOW;
+static void pnpacpi_add_irqresource(struct pnp_dev *dev, struct resource *r)
+{
+ if (!(r->flags & IORESOURCE_DISABLED))
+ pcibios_penalize_isa_irq(r->start, 1);
- pnp_add_io_resource(dev, start, end, flags);
+ pnp_add_resource(dev, r);
}
/*
@@ -249,130 +174,49 @@ static void pnpacpi_parse_allocated_vendor(struct pnp_dev *dev,
}
}
-static void pnpacpi_parse_allocated_memresource(struct pnp_dev *dev,
- u64 start, u64 len,
- int write_protect, int window)
-{
- int flags = 0;
- u64 end = start + len - 1;
-
- if (len == 0)
- flags |= IORESOURCE_DISABLED;
- if (write_protect == ACPI_READ_WRITE_MEMORY)
- flags |= IORESOURCE_MEM_WRITEABLE;
- if (window)
- flags |= IORESOURCE_WINDOW;
-
- pnp_add_mem_resource(dev, start, end, flags);
-}
-
-static void pnpacpi_parse_allocated_busresource(struct pnp_dev *dev,
- u64 start, u64 len)
-{
- u64 end = start + len - 1;
-
- pnp_add_bus_resource(dev, start, end);
-}
-
-static void pnpacpi_parse_allocated_address_space(struct pnp_dev *dev,
- struct acpi_resource *res)
-{
- struct acpi_resource_address64 addr, *p = &addr;
- acpi_status status;
- int window;
- u64 len;
-
- status = acpi_resource_to_address64(res, p);
- if (!ACPI_SUCCESS(status)) {
- dev_warn(&dev->dev, "failed to convert resource type %d\n",
- res->type);
- return;
- }
-
- /* Windows apparently computes length rather than using _LEN */
- len = p->maximum - p->minimum + 1;
- window = (p->producer_consumer == ACPI_PRODUCER) ? 1 : 0;
-
- if (p->resource_type == ACPI_MEMORY_RANGE)
- pnpacpi_parse_allocated_memresource(dev, p->minimum, len,
- p->info.mem.write_protect, window);
- else if (p->resource_type == ACPI_IO_RANGE)
- pnpacpi_parse_allocated_ioresource(dev, p->minimum, len,
- p->granularity == 0xfff ? ACPI_DECODE_10 :
- ACPI_DECODE_16, window);
- else if (p->resource_type == ACPI_BUS_NUMBER_RANGE)
- pnpacpi_parse_allocated_busresource(dev, p->minimum, len);
-}
-
-static void pnpacpi_parse_allocated_ext_address_space(struct pnp_dev *dev,
- struct acpi_resource *res)
-{
- struct acpi_resource_extended_address64 *p = &res->data.ext_address64;
- int window;
- u64 len;
-
- /* Windows apparently computes length rather than using _LEN */
- len = p->maximum - p->minimum + 1;
- window = (p->producer_consumer == ACPI_PRODUCER) ? 1 : 0;
-
- if (p->resource_type == ACPI_MEMORY_RANGE)
- pnpacpi_parse_allocated_memresource(dev, p->minimum, len,
- p->info.mem.write_protect, window);
- else if (p->resource_type == ACPI_IO_RANGE)
- pnpacpi_parse_allocated_ioresource(dev, p->minimum, len,
- p->granularity == 0xfff ? ACPI_DECODE_10 :
- ACPI_DECODE_16, window);
- else if (p->resource_type == ACPI_BUS_NUMBER_RANGE)
- pnpacpi_parse_allocated_busresource(dev, p->minimum, len);
-}
-
static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res,
void *data)
{
struct pnp_dev *dev = data;
- struct acpi_resource_irq *irq;
struct acpi_resource_dma *dma;
- struct acpi_resource_io *io;
- struct acpi_resource_fixed_io *fixed_io;
struct acpi_resource_vendor_typed *vendor_typed;
- struct acpi_resource_memory24 *memory24;
- struct acpi_resource_memory32 *memory32;
- struct acpi_resource_fixed_memory32 *fixed_memory32;
- struct acpi_resource_extended_irq *extended_irq;
+ struct resource r;
int i, flags;
- switch (res->type) {
- case ACPI_RESOURCE_TYPE_IRQ:
- /*
- * Per spec, only one interrupt per descriptor is allowed in
- * _CRS, but some firmware violates this, so parse them all.
- */
- irq = &res->data.irq;
- if (irq->interrupt_count == 0)
- pnp_add_irq_resource(dev, 0, IORESOURCE_DISABLED);
- else {
- for (i = 0; i < irq->interrupt_count; i++) {
- pnpacpi_parse_allocated_irqresource(dev,
- irq->interrupts[i],
- irq->triggering,
- irq->polarity,
- irq->sharable);
- }
+ if (acpi_dev_resource_memory(res, &r)
+ || acpi_dev_resource_io(res, &r)
+ || acpi_dev_resource_address_space(res, &r)
+ || acpi_dev_resource_ext_address_space(res, &r)) {
+ pnp_add_resource(dev, &r);
+ return AE_OK;
+ }
+ r.flags = 0;
+ if (acpi_dev_resource_interrupt(res, 0, &r)) {
+ pnpacpi_add_irqresource(dev, &r);
+ for (i = 1; acpi_dev_resource_interrupt(res, i, &r); i++)
+ pnpacpi_add_irqresource(dev, &r);
+
+ if (i > 1) {
/*
* The IRQ encoder puts a single interrupt in each
* descriptor, so if a _CRS descriptor has more than
* one interrupt, we won't be able to re-encode it.
*/
- if (pnp_can_write(dev) && irq->interrupt_count > 1) {
+ if (pnp_can_write(dev)) {
dev_warn(&dev->dev, "multiple interrupts in "
"_CRS descriptor; configuration can't "
"be changed\n");
dev->capabilities &= ~PNP_WRITE;
}
}
- break;
+ return AE_OK;
+ } else if (r.flags & IORESOURCE_DISABLED) {
+ pnp_add_irq_resource(dev, 0, IORESOURCE_DISABLED);
+ return AE_OK;
+ }
+ switch (res->type) {
case ACPI_RESOURCE_TYPE_DMA:
dma = &res->data.dma;
if (dma->channel_count > 0 && dma->channels[0] != (u8) -1)
@@ -383,26 +227,10 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res,
pnp_add_dma_resource(dev, dma->channels[0], flags);
break;
- case ACPI_RESOURCE_TYPE_IO:
- io = &res->data.io;
- pnpacpi_parse_allocated_ioresource(dev,
- io->minimum,
- io->address_length,
- io->io_decode, 0);
- break;
-
case ACPI_RESOURCE_TYPE_START_DEPENDENT:
case ACPI_RESOURCE_TYPE_END_DEPENDENT:
break;
- case ACPI_RESOURCE_TYPE_FIXED_IO:
- fixed_io = &res->data.fixed_io;
- pnpacpi_parse_allocated_ioresource(dev,
- fixed_io->address,
- fixed_io->address_length,
- ACPI_DECODE_10, 0);
- break;
-
case ACPI_RESOURCE_TYPE_VENDOR:
vendor_typed = &res->data.vendor_typed;
pnpacpi_parse_allocated_vendor(dev, vendor_typed);
@@ -411,66 +239,6 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res,
case ACPI_RESOURCE_TYPE_END_TAG:
break;
- case ACPI_RESOURCE_TYPE_MEMORY24:
- memory24 = &res->data.memory24;
- pnpacpi_parse_allocated_memresource(dev,
- memory24->minimum,
- memory24->address_length,
- memory24->write_protect, 0);
- break;
- case ACPI_RESOURCE_TYPE_MEMORY32:
- memory32 = &res->data.memory32;
- pnpacpi_parse_allocated_memresource(dev,
- memory32->minimum,
- memory32->address_length,
- memory32->write_protect, 0);
- break;
- case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
- fixed_memory32 = &res->data.fixed_memory32;
- pnpacpi_parse_allocated_memresource(dev,
- fixed_memory32->address,
- fixed_memory32->address_length,
- fixed_memory32->write_protect, 0);
- break;
- case ACPI_RESOURCE_TYPE_ADDRESS16:
- case ACPI_RESOURCE_TYPE_ADDRESS32:
- case ACPI_RESOURCE_TYPE_ADDRESS64:
- pnpacpi_parse_allocated_address_space(dev, res);
- break;
-
- case ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64:
- pnpacpi_parse_allocated_ext_address_space(dev, res);
- break;
-
- case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
- extended_irq = &res->data.extended_irq;
-
- if (extended_irq->interrupt_count == 0)
- pnp_add_irq_resource(dev, 0, IORESOURCE_DISABLED);
- else {
- for (i = 0; i < extended_irq->interrupt_count; i++) {
- pnpacpi_parse_allocated_irqresource(dev,
- extended_irq->interrupts[i],
- extended_irq->triggering,
- extended_irq->polarity,
- extended_irq->sharable);
- }
-
- /*
- * The IRQ encoder puts a single interrupt in each
- * descriptor, so if a _CRS descriptor has more than
- * one interrupt, we won't be able to re-encode it.
- */
- if (pnp_can_write(dev) &&
- extended_irq->interrupt_count > 1) {
- dev_warn(&dev->dev, "multiple interrupts in "
- "_CRS descriptor; configuration can't "
- "be changed\n");
- dev->capabilities &= ~PNP_WRITE;
- }
- }
- break;
-
case ACPI_RESOURCE_TYPE_GENERIC_REGISTER:
break;
@@ -531,7 +299,7 @@ static __init void pnpacpi_parse_irq_option(struct pnp_dev *dev,
if (p->interrupts[i])
__set_bit(p->interrupts[i], map.bits);
- flags = irq_flags(p->triggering, p->polarity, p->sharable);
+ flags = acpi_dev_irq_flags(p->triggering, p->polarity, p->sharable);
pnp_register_irq_resource(dev, option_flags, &map, flags);
}
@@ -555,7 +323,7 @@ static __init void pnpacpi_parse_ext_irq_option(struct pnp_dev *dev,
}
}
- flags = irq_flags(p->triggering, p->polarity, p->sharable);
+ flags = acpi_dev_irq_flags(p->triggering, p->polarity, p->sharable);
pnp_register_irq_resource(dev, option_flags, &map, flags);
}
diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
index b0ecacbe53b1..3e6db1c1dc29 100644
--- a/drivers/pnp/resource.c
+++ b/drivers/pnp/resource.c
@@ -503,6 +503,22 @@ static struct pnp_resource *pnp_new_resource(struct pnp_dev *dev)
return pnp_res;
}
+struct pnp_resource *pnp_add_resource(struct pnp_dev *dev,
+ struct resource *res)
+{
+ struct pnp_resource *pnp_res;
+
+ pnp_res = pnp_new_resource(dev);
+ if (!pnp_res) {
+ dev_err(&dev->dev, "can't add resource %pR\n", res);
+ return NULL;
+ }
+
+ pnp_res->res = *res;
+ dev_dbg(&dev->dev, "%pR\n", res);
+ return pnp_res;
+}
+
struct pnp_resource *pnp_add_irq_resource(struct pnp_dev *dev, int irq,
int flags)
{
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index d3e64080c409..718cc1f49230 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -35,6 +35,8 @@
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/kthread.h>
+#include <linux/ioport.h>
+#include <linux/acpi.h>
static void spidev_release(struct device *dev)
{
@@ -93,6 +95,10 @@ static int spi_match_device(struct device *dev, struct device_driver *drv)
if (of_driver_match_device(dev, drv))
return 1;
+ /* Then try ACPI */
+ if (acpi_driver_match_device(dev, drv))
+ return 1;
+
if (sdrv->id_table)
return !!spi_match_id(sdrv->id_table, spi);
@@ -888,6 +894,100 @@ static void of_register_spi_devices(struct spi_master *master)
static void of_register_spi_devices(struct spi_master *master) { }
#endif
+#ifdef CONFIG_ACPI
+static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
+{
+ struct spi_device *spi = data;
+
+ if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
+ struct acpi_resource_spi_serialbus *sb;
+
+ sb = &ares->data.spi_serial_bus;
+ if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
+ spi->chip_select = sb->device_selection;
+ spi->max_speed_hz = sb->connection_speed;
+
+ if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
+ spi->mode |= SPI_CPHA;
+ if (sb->clock_polarity == ACPI_SPI_START_HIGH)
+ spi->mode |= SPI_CPOL;
+ if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
+ spi->mode |= SPI_CS_HIGH;
+ }
+ } else if (spi->irq < 0) {
+ struct resource r;
+
+ if (acpi_dev_resource_interrupt(ares, 0, &r))
+ spi->irq = r.start;
+ }
+
+ /* Always tell the ACPI core to skip this resource */
+ return 1;
+}
+
+static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
+ void *data, void **return_value)
+{
+ struct spi_master *master = data;
+ struct list_head resource_list;
+ struct acpi_device *adev;
+ struct spi_device *spi;
+ int ret;
+
+ if (acpi_bus_get_device(handle, &adev))
+ return AE_OK;
+ if (acpi_bus_get_status(adev) || !adev->status.present)
+ return AE_OK;
+
+ spi = spi_alloc_device(master);
+ if (!spi) {
+ dev_err(&master->dev, "failed to allocate SPI device for %s\n",
+ dev_name(&adev->dev));
+ return AE_NO_MEMORY;
+ }
+
+ ACPI_HANDLE_SET(&spi->dev, handle);
+ spi->irq = -1;
+
+ INIT_LIST_HEAD(&resource_list);
+ ret = acpi_dev_get_resources(adev, &resource_list,
+ acpi_spi_add_resource, spi);
+ acpi_dev_free_resource_list(&resource_list);
+
+ if (ret < 0 || !spi->max_speed_hz) {
+ spi_dev_put(spi);
+ return AE_OK;
+ }
+
+ strlcpy(spi->modalias, dev_name(&adev->dev), sizeof(spi->modalias));
+ if (spi_add_device(spi)) {
+ dev_err(&master->dev, "failed to add SPI device %s from ACPI\n",
+ dev_name(&adev->dev));
+ spi_dev_put(spi);
+ }
+
+ return AE_OK;
+}
+
+static void acpi_register_spi_devices(struct spi_master *master)
+{
+ acpi_status status;
+ acpi_handle handle;
+
+ handle = ACPI_HANDLE(&master->dev);
+ if (!handle)
+ return;
+
+ status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
+ acpi_spi_add_device, NULL,
+ master, NULL);
+ if (ACPI_FAILURE(status))
+ dev_warn(&master->dev, "failed to enumerate SPI slaves\n");
+}
+#else
+static inline void acpi_register_spi_devices(struct spi_master *master) {}
+#endif /* CONFIG_ACPI */
+
static void spi_master_release(struct device *dev)
{
struct spi_master *master;
@@ -1023,8 +1123,9 @@ int spi_register_master(struct spi_master *master)
spi_match_master_to_boardinfo(master, &bi->board_info);
mutex_unlock(&board_lock);
- /* Register devices from the device tree */
+ /* Register devices from the device tree and ACPI */
of_register_spi_devices(master);
+ acpi_register_spi_devices(master);
done:
return status;
}
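With spi_match_device() now consulting acpi_driver_match_device(), an SPI slave driver opts into ACPI enumeration by pointing its driver at an acpi_device_id table. A hedged sketch: "ABCD0001" is a made-up HID, and the .acpi_match_table field of struct device_driver is assumed from the ACPI enumeration work merged here.

#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/spi/spi.h>

/* "ABCD0001" is fictitious; a real driver lists the HIDs from its platform's DSDT. */
static const struct acpi_device_id example_spi_acpi_ids[] = {
	{ "ABCD0001" },
	{ }
};
MODULE_DEVICE_TABLE(acpi, example_spi_acpi_ids);

static int example_spi_probe(struct spi_device *spi)
{
	/*
	 * chip_select, max_speed_hz and mode were filled in from the ACPI
	 * SpiSerialBus resource by acpi_register_spi_devices() above.
	 */
	return 0;
}

static struct spi_driver example_spi_driver = {
	.driver = {
		.name			= "example-spi",
		.owner			= THIS_MODULE,
		.acpi_match_table	= ACPI_PTR(example_spi_acpi_ids),
	},
	.probe	= example_spi_probe,
};
module_spi_driver(example_spi_driver);

MODULE_LICENSE("GPL");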