author     Dmitry Torokhov <dmitry.torokhov@gmail.com>  2013-03-17 19:40:50 -0700
committer  Dmitry Torokhov <dmitry.torokhov@gmail.com>  2013-03-17 19:40:50 -0700
commit     688d794c4c3f8b08c814381ee2edd3ede5856056 (patch)
tree       ef680add71e2a9588d07d8b594edbc1b5cd127d7 /drivers/base
parent     16142655269aaf580488e074eabfdcf0fb4e3687 (diff)
parent     a937536b868b8369b98967929045f1df54234323 (diff)
Merge tag 'v3.9-rc3' into next
Merge with mainline to bring in module_platform_driver_probe() and devm_ioremap_resource().
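
For context, the two helpers named above pair naturally in simple memory-mapped platform drivers. A minimal sketch follows (the "foo" names are hypothetical, not part of this merge): devm_ioremap_resource() validates the resource, requests and maps it with managed cleanup, returning an ERR_PTR on failure; module_platform_driver_probe() registers a driver that is probed exactly once, for non-hotpluggable devices.

    #include <linux/module.h>
    #include <linux/platform_device.h>
    #include <linux/io.h>
    #include <linux/err.h>

    static int foo_probe(struct platform_device *pdev)
    {
    	struct resource *res;
    	void __iomem *base;

    	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    	base = devm_ioremap_resource(&pdev->dev, res);
    	if (IS_ERR(base))
    		return PTR_ERR(base);	/* -EINVAL, -EBUSY or -ENOMEM */
    	/* ... program the hardware through 'base' ... */
    	return 0;
    }

    static struct platform_driver foo_driver = {
    	.driver = {
    		.name	= "foo",
    		.owner	= THIS_MODULE,
    	},
    };

    /* probed once at init time; the probe routine may live in __init memory */
    module_platform_driver_probe(foo_driver, foo_probe);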
Diffstat (limited to 'drivers/base')
-rw-r--r--  drivers/base/Kconfig                  |  18
-rw-r--r--  drivers/base/Makefile                 |   1
-rw-r--r--  drivers/base/attribute_container.c    |   2
-rw-r--r--  drivers/base/bus.c                    |  20
-rw-r--r--  drivers/base/class.c                  |   4
-rw-r--r--  drivers/base/core.c                   |  39
-rw-r--r--  drivers/base/cpu.c                    |   2
-rw-r--r--  drivers/base/dd.c                     |   9
-rw-r--r--  drivers/base/devres.c                 |   4
-rw-r--r--  drivers/base/devtmpfs.c               |   5
-rw-r--r--  drivers/base/dma-buf.c                |  77
-rw-r--r--  drivers/base/dma-contiguous.c         |  24
-rw-r--r--  drivers/base/dma-mapping.c            |   4
-rw-r--r--  drivers/base/firmware_class.c         | 598
-rw-r--r--  drivers/base/memory.c                 |  52
-rw-r--r--  drivers/base/node.c                   |  86
-rw-r--r--  drivers/base/pinctrl.c                |  69
-rw-r--r--  drivers/base/platform.c               |  37
-rw-r--r--  drivers/base/power/clock_ops.c        |   6
-rw-r--r--  drivers/base/power/domain.c           |  14
-rw-r--r--  drivers/base/power/main.c             |  11
-rw-r--r--  drivers/base/power/opp.c              |  47
-rw-r--r--  drivers/base/power/power.h            |  14
-rw-r--r--  drivers/base/power/qos.c              | 475
-rw-r--r--  drivers/base/power/runtime.c          |  89
-rw-r--r--  drivers/base/power/sysfs.c            |  95
-rw-r--r--  drivers/base/power/wakeup.c           |   6
-rw-r--r--  drivers/base/regmap/Makefile          |   2
-rw-r--r--  drivers/base/regmap/internal.h        |  44
-rw-r--r--  drivers/base/regmap/regcache-flat.c   |  72
-rw-r--r--  drivers/base/regmap/regcache.c        |   1
-rw-r--r--  drivers/base/regmap/regmap-debugfs.c  | 191
-rw-r--r--  drivers/base/regmap/regmap-irq.c      | 145
-rw-r--r--  drivers/base/regmap/regmap-mmio.c     |  79
-rw-r--r--  drivers/base/regmap/regmap-spi.c      |  54
-rw-r--r--  drivers/base/regmap/regmap.c          | 604
36 files changed, 2271 insertions, 729 deletions
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index b34b5cda5ae1..07abd9d76f7f 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -57,7 +57,7 @@ config DEVTMPFS_MOUNT
on the rootfs is completely empty.
config STANDALONE
- bool "Select only drivers that don't need compile-time external firmware" if EXPERIMENTAL
+ bool "Select only drivers that don't need compile-time external firmware"
default y
help
Select this option if you don't have magic firmware for drivers that
@@ -145,6 +145,17 @@ config EXTRA_FIRMWARE_DIR
this option you can point it elsewhere, such as /lib/firmware/ or
some other directory containing the firmware files.
+config FW_LOADER_USER_HELPER
+ bool "Fallback user-helper invocation for firmware loading"
+ depends on FW_LOADER
+ default y
+ help
+ This option enables / disables the invocation of user-helper
+ (e.g. udev) for loading firmware files as a fallback after the
+ direct file loading in kernel fails. The user-mode helper is
+ no longer required unless you have a special firmware file that
+ resides in a non-standard path.
+
config DEBUG_DRIVER
bool "Driver Core verbose debug messages"
depends on DEBUG_KERNEL
@@ -185,7 +196,6 @@ config DMA_SHARED_BUFFER
bool
default n
select ANON_INODES
- depends on EXPERIMENTAL
help
This option enables the framework for buffer-sharing between
multiple drivers. A buffer is associated with a file using driver
@@ -193,8 +203,8 @@ config DMA_SHARED_BUFFER
driver.
config CMA
- bool "Contiguous Memory Allocator (EXPERIMENTAL)"
- depends on HAVE_DMA_CONTIGUOUS && HAVE_MEMBLOCK && EXPERIMENTAL
+ bool "Contiguous Memory Allocator"
+ depends on HAVE_DMA_CONTIGUOUS && HAVE_MEMBLOCK
select MIGRATION
select MEMORY_ISOLATION
help
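
For the FW_LOADER_USER_HELPER entry above, the consumer side is the same either way; a minimal sketch (hypothetical firmware name) of the request_firmware() call that first tries direct loading from the fw_path directories and only then falls back to the user-mode helper:

    #include <linux/firmware.h>

    static int foo_load_firmware(struct device *dev)
    {
    	const struct firmware *fw;
    	int ret;

    	/* tries /lib/firmware/... directly; udev fallback only if enabled */
    	ret = request_firmware(&fw, "foo/foo-v1.bin", dev);
    	if (ret)
    		return ret;

    	/* ... download fw->data (fw->size bytes) to the device ... */

    	release_firmware(fw);
    	return 0;
    }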
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index 5aa2d703d19f..4e22ce3ed73d 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -21,6 +21,7 @@ endif
obj-$(CONFIG_SYS_HYPERVISOR) += hypervisor.o
obj-$(CONFIG_REGMAP) += regmap/
obj-$(CONFIG_SOC_BUS) += soc.o
+obj-$(CONFIG_PINCTRL) += pinctrl.o
ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
diff --git a/drivers/base/attribute_container.c b/drivers/base/attribute_container.c
index 8fc200b2e2c0..d78b204e65c1 100644
--- a/drivers/base/attribute_container.c
+++ b/drivers/base/attribute_container.c
@@ -158,7 +158,7 @@ attribute_container_add_device(struct device *dev,
ic = kzalloc(sizeof(*ic), GFP_KERNEL);
if (!ic) {
- dev_printk(KERN_ERR, dev, "failed to allocate class container\n");
+ dev_err(dev, "failed to allocate class container\n");
continue;
}
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index 181ed2660b33..519865b53f76 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -164,8 +164,6 @@ static const struct kset_uevent_ops bus_uevent_ops = {
static struct kset *bus_kset;
-
-#ifdef CONFIG_HOTPLUG
/* Manually detach a device from its associated driver. */
static ssize_t driver_unbind(struct device_driver *drv,
const char *buf, size_t count)
@@ -252,7 +250,6 @@ static ssize_t store_drivers_probe(struct bus_type *bus,
return -EINVAL;
return count;
}
-#endif
static struct device *next_device(struct klist_iter *i)
{
@@ -293,7 +290,7 @@ int bus_for_each_dev(struct bus_type *bus, struct device *start,
struct device *dev;
int error = 0;
- if (!bus)
+ if (!bus || !bus->p)
return -EINVAL;
klist_iter_init_node(&bus->p->klist_devices, &i,
@@ -327,7 +324,7 @@ struct device *bus_find_device(struct bus_type *bus,
struct klist_iter i;
struct device *dev;
- if (!bus)
+ if (!bus || !bus->p)
return NULL;
klist_iter_init_node(&bus->p->klist_devices, &i,
@@ -618,11 +615,6 @@ static void driver_remove_attrs(struct bus_type *bus,
}
}
-#ifdef CONFIG_HOTPLUG
-/*
- * Thanks to drivers making their tables __devinit, we can't allow manual
- * bind and unbind from userspace unless CONFIG_HOTPLUG is enabled.
- */
static int __must_check add_bind_files(struct device_driver *drv)
{
int ret;
@@ -666,12 +658,6 @@ static void remove_probe_files(struct bus_type *bus)
bus_remove_file(bus, &bus_attr_drivers_autoprobe);
bus_remove_file(bus, &bus_attr_drivers_probe);
}
-#else
-static inline int add_bind_files(struct device_driver *drv) { return 0; }
-static inline void remove_bind_files(struct device_driver *drv) {}
-static inline int add_probe_files(struct bus_type *bus) { return 0; }
-static inline void remove_probe_files(struct bus_type *bus) {}
-#endif
static ssize_t driver_uevent_store(struct device_driver *drv,
const char *buf, size_t count)
@@ -714,12 +700,12 @@ int bus_add_driver(struct device_driver *drv)
if (error)
goto out_unregister;
+ klist_add_tail(&priv->knode_bus, &bus->p->klist_drivers);
if (drv->bus->p->drivers_autoprobe) {
error = driver_attach(drv);
if (error)
goto out_unregister;
}
- klist_add_tail(&priv->knode_bus, &bus->p->klist_drivers);
module_add_driver(drv->owner, drv);
error = driver_create_file(drv, &driver_attr_uevent);
diff --git a/drivers/base/class.c b/drivers/base/class.c
index 03243d4002fd..3ce845471327 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -420,8 +420,8 @@ EXPORT_SYMBOL_GPL(class_for_each_device);
* code. There's no locking restriction.
*/
struct device *class_find_device(struct class *class, struct device *start,
- void *data,
- int (*match)(struct device *, void *))
+ const void *data,
+ int (*match)(struct device *, const void *))
{
struct class_dev_iter iter;
struct device *dev;
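
A matching callback for the constified prototype now takes const void * as well; a minimal sketch (hypothetical lookup by device name):

    static int foo_match_name(struct device *dev, const void *data)
    {
    	return !strcmp(dev_name(dev), data);
    }

    /* dev = class_find_device(&foo_class, NULL, "foo0", foo_match_name); */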
diff --git a/drivers/base/core.c b/drivers/base/core.c
index abea76c36a4b..56536f4b0f6b 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -171,6 +171,27 @@ ssize_t device_show_int(struct device *dev,
}
EXPORT_SYMBOL_GPL(device_show_int);
+ssize_t device_store_bool(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct dev_ext_attribute *ea = to_ext_attr(attr);
+
+ if (strtobool(buf, ea->var) < 0)
+ return -EINVAL;
+
+ return size;
+}
+EXPORT_SYMBOL_GPL(device_store_bool);
+
+ssize_t device_show_bool(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct dev_ext_attribute *ea = to_ext_attr(attr);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", *(bool *)(ea->var));
+}
+EXPORT_SYMBOL_GPL(device_show_bool);
+
/**
* device_release - free device structure.
* @kobj: device's kobject.
@@ -1180,7 +1201,6 @@ void device_del(struct device *dev)
if (dev->bus)
blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
BUS_NOTIFY_DEL_DEVICE, dev);
- device_pm_remove(dev);
dpm_sysfs_remove(dev);
if (parent)
klist_del(&dev->p->knode_parent);
@@ -1205,6 +1225,7 @@ void device_del(struct device *dev)
device_remove_file(dev, &uevent_attr);
device_remove_attrs(dev);
bus_remove_device(dev);
+ device_pm_remove(dev);
driver_deferred_probe_del(dev);
/* Notify the platform of the removal, in case they
@@ -1399,7 +1420,7 @@ struct root_device {
struct module *owner;
};
-inline struct root_device *to_root_device(struct device *d)
+static inline struct root_device *to_root_device(struct device *d)
{
return container_of(d, struct root_device, dev);
}
@@ -1596,9 +1617,9 @@ struct device *device_create(struct class *class, struct device *parent,
}
EXPORT_SYMBOL_GPL(device_create);
-static int __match_devt(struct device *dev, void *data)
+static int __match_devt(struct device *dev, const void *data)
{
- dev_t *devt = data;
+ const dev_t *devt = data;
return dev->devt == *devt;
}
@@ -1664,8 +1685,6 @@ EXPORT_SYMBOL_GPL(device_destroy);
*/
int device_rename(struct device *dev, const char *new_name)
{
- char *old_class_name = NULL;
- char *new_class_name = NULL;
char *old_device_name = NULL;
int error;
@@ -1696,8 +1715,6 @@ int device_rename(struct device *dev, const char *new_name)
out:
put_device(dev);
- kfree(new_class_name);
- kfree(old_class_name);
kfree(old_device_name);
return error;
@@ -1840,10 +1857,12 @@ void device_shutdown(void)
pm_runtime_barrier(dev);
if (dev->bus && dev->bus->shutdown) {
- dev_dbg(dev, "shutdown\n");
+ if (initcall_debug)
+ dev_info(dev, "shutdown\n");
dev->bus->shutdown(dev);
} else if (dev->driver && dev->driver->shutdown) {
- dev_dbg(dev, "shutdown\n");
+ if (initcall_debug)
+ dev_info(dev, "shutdown\n");
dev->driver->shutdown(dev);
}
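
The new bool helpers are meant to be used through a struct dev_ext_attribute whose ->var points at the flag; a minimal sketch (hypothetical "enabled" attribute):

    static bool foo_enabled;

    static struct dev_ext_attribute dev_attr_foo_enabled = {
    	.attr	= __ATTR(enabled, 0644, device_show_bool, device_store_bool),
    	.var	= &foo_enabled,
    };

    /* in probe: device_create_file(dev, &dev_attr_foo_enabled.attr); */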
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 63452943abd1..fb10728f6372 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -224,7 +224,7 @@ static void cpu_device_release(struct device *dev)
* by the cpu device.
*
* Never copy this way of doing things, or you too will be made fun of
- * on the linux-kerenl list, you have been warned.
+ * on the linux-kernel list, you have been warned.
*/
}
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index e3bbed8a617c..bb5645ea0282 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -24,6 +24,7 @@
#include <linux/wait.h>
#include <linux/async.h>
#include <linux/pm_runtime.h>
+#include <linux/pinctrl/devinfo.h>
#include "base.h"
#include "power/power.h"
@@ -172,6 +173,8 @@ static int deferred_probe_initcall(void)
driver_deferred_probe_enable = true;
driver_deferred_probe_trigger();
+ /* Sort as many dependencies as possible before exiting initcalls */
+ flush_workqueue(deferred_wq);
return 0;
}
late_initcall(deferred_probe_initcall);
@@ -269,6 +272,12 @@ static int really_probe(struct device *dev, struct device_driver *drv)
WARN_ON(!list_empty(&dev->devres_head));
dev->driver = drv;
+
+ /* If using pinctrl, bind pins now before probing */
+ ret = pinctrl_bind_pins(dev);
+ if (ret)
+ goto probe_failed;
+
if (driver_sysfs_add(dev)) {
printk(KERN_ERR "%s: driver_sysfs_add(%s) failed\n",
__func__, dev_name(dev));
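
The flush added above matters because probes can bounce through the deferred list several times; a sketch (hypothetical driver) of the usual pattern that feeds that list, returning -EPROBE_DEFER when a dependency is not bound yet:

    static int foo_probe(struct platform_device *pdev)
    {
    	struct clk *clk;

    	clk = devm_clk_get(&pdev->dev, NULL);
    	if (IS_ERR(clk))
    		return PTR_ERR(clk);	/* may be -EPROBE_DEFER; core retries later */

    	/* ... */
    	return 0;
    }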
diff --git a/drivers/base/devres.c b/drivers/base/devres.c
index 724957a13d48..507379e7b763 100644
--- a/drivers/base/devres.c
+++ b/drivers/base/devres.c
@@ -50,8 +50,8 @@ static void devres_log(struct device *dev, struct devres_node *node,
const char *op)
{
if (unlikely(log_devres))
- dev_printk(KERN_ERR, dev, "DEVRES %3s %p %s (%lu bytes)\n",
- op, node, node->name, (unsigned long)node->size);
+ dev_err(dev, "DEVRES %3s %p %s (%lu bytes)\n",
+ op, node, node->name, (unsigned long)node->size);
}
#else /* CONFIG_DEBUG_DEVRES */
#define set_node_dbginfo(node, n, s) do {} while (0)
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
index 147d1a4dd269..01fc5b07f951 100644
--- a/drivers/base/devtmpfs.c
+++ b/drivers/base/devtmpfs.c
@@ -148,7 +148,7 @@ static int dev_mkdir(const char *name, umode_t mode)
struct path path;
int err;
- dentry = kern_path_create(AT_FDCWD, name, &path, 1);
+ dentry = kern_path_create(AT_FDCWD, name, &path, LOOKUP_DIRECTORY);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
@@ -302,7 +302,8 @@ static int handle_remove(const char *nodename, struct device *dev)
if (dentry->d_inode) {
struct kstat stat;
- err = vfs_getattr(parent.mnt, dentry, &stat);
+ struct path p = {.mnt = parent.mnt, .dentry = dentry};
+ err = vfs_getattr(&p, &stat);
if (!err && dev_mynode(dev, dentry->d_inode, &stat)) {
struct iattr newattrs;
/*
diff --git a/drivers/base/dma-buf.c b/drivers/base/dma-buf.c
index 460e22dee36d..2a7cb0df176b 100644
--- a/drivers/base/dma-buf.c
+++ b/drivers/base/dma-buf.c
@@ -39,6 +39,8 @@ static int dma_buf_release(struct inode *inode, struct file *file)
dmabuf = file->private_data;
+ BUG_ON(dmabuf->vmapping_counter);
+
dmabuf->ops->release(dmabuf);
kfree(dmabuf);
return 0;
@@ -134,15 +136,14 @@ EXPORT_SYMBOL_GPL(dma_buf_export);
*/
int dma_buf_fd(struct dma_buf *dmabuf, int flags)
{
- int error, fd;
+ int fd;
if (!dmabuf || !dmabuf->file)
return -EINVAL;
- error = get_unused_fd_flags(flags);
- if (error < 0)
- return error;
- fd = error;
+ fd = get_unused_fd_flags(flags);
+ if (fd < 0)
+ return fd;
fd_install(fd, dmabuf->file);
@@ -298,6 +299,8 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
struct sg_table *sg_table,
enum dma_data_direction direction)
{
+ might_sleep();
+
if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
return;
@@ -444,6 +447,9 @@ EXPORT_SYMBOL_GPL(dma_buf_kunmap);
int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
unsigned long pgoff)
{
+ struct file *oldfile;
+ int ret;
+
if (WARN_ON(!dmabuf || !vma))
return -EINVAL;
@@ -457,14 +463,22 @@ int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
return -EINVAL;
/* readjust the vma */
- if (vma->vm_file)
- fput(vma->vm_file);
-
- vma->vm_file = get_file(dmabuf->file);
-
+ get_file(dmabuf->file);
+ oldfile = vma->vm_file;
+ vma->vm_file = dmabuf->file;
vma->vm_pgoff = pgoff;
- return dmabuf->ops->mmap(dmabuf, vma);
+ ret = dmabuf->ops->mmap(dmabuf, vma);
+ if (ret) {
+ /* restore old parameters on failure */
+ vma->vm_file = oldfile;
+ fput(dmabuf->file);
+ } else {
+ if (oldfile)
+ fput(oldfile);
+ }
+ return ret;
+
}
EXPORT_SYMBOL_GPL(dma_buf_mmap);
@@ -480,12 +494,34 @@ EXPORT_SYMBOL_GPL(dma_buf_mmap);
*/
void *dma_buf_vmap(struct dma_buf *dmabuf)
{
+ void *ptr;
+
if (WARN_ON(!dmabuf))
return NULL;
- if (dmabuf->ops->vmap)
- return dmabuf->ops->vmap(dmabuf);
- return NULL;
+ if (!dmabuf->ops->vmap)
+ return NULL;
+
+ mutex_lock(&dmabuf->lock);
+ if (dmabuf->vmapping_counter) {
+ dmabuf->vmapping_counter++;
+ BUG_ON(!dmabuf->vmap_ptr);
+ ptr = dmabuf->vmap_ptr;
+ goto out_unlock;
+ }
+
+ BUG_ON(dmabuf->vmap_ptr);
+
+ ptr = dmabuf->ops->vmap(dmabuf);
+ if (IS_ERR_OR_NULL(ptr))
+ goto out_unlock;
+
+ dmabuf->vmap_ptr = ptr;
+ dmabuf->vmapping_counter = 1;
+
+out_unlock:
+ mutex_unlock(&dmabuf->lock);
+ return ptr;
}
EXPORT_SYMBOL_GPL(dma_buf_vmap);
@@ -499,7 +535,16 @@ void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
if (WARN_ON(!dmabuf))
return;
- if (dmabuf->ops->vunmap)
- dmabuf->ops->vunmap(dmabuf, vaddr);
+ BUG_ON(!dmabuf->vmap_ptr);
+ BUG_ON(dmabuf->vmapping_counter == 0);
+ BUG_ON(dmabuf->vmap_ptr != vaddr);
+
+ mutex_lock(&dmabuf->lock);
+ if (--dmabuf->vmapping_counter == 0) {
+ if (dmabuf->ops->vunmap)
+ dmabuf->ops->vunmap(dmabuf, vaddr);
+ dmabuf->vmap_ptr = NULL;
+ }
+ mutex_unlock(&dmabuf->lock);
}
EXPORT_SYMBOL_GPL(dma_buf_vunmap);
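
With the counter above, nested mappings become cheap and legal, but every dma_buf_vmap() must still be paired with a dma_buf_vunmap(). Minimal usage sketch:

    void *vaddr = dma_buf_vmap(dmabuf);

    if (vaddr) {
    	/* CPU access to the whole buffer */
    	memset(vaddr, 0, dmabuf->size);
    	dma_buf_vunmap(dmabuf, vaddr);
    }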
diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
index 612afcc5a938..0ca54421ce97 100644
--- a/drivers/base/dma-contiguous.c
+++ b/drivers/base/dma-contiguous.c
@@ -57,8 +57,8 @@ struct cma *dma_contiguous_default_area;
* Users, who want to set the size of global CMA area for their system
* should use cma= kernel parameter.
*/
-static const unsigned long size_bytes = CMA_SIZE_MBYTES * SZ_1M;
-static long size_cmdline = -1;
+static const phys_addr_t size_bytes = CMA_SIZE_MBYTES * SZ_1M;
+static phys_addr_t size_cmdline = -1;
static int __init early_cma(char *p)
{
@@ -70,7 +70,7 @@ early_param("cma", early_cma);
#ifdef CONFIG_CMA_SIZE_PERCENTAGE
-static unsigned long __init __maybe_unused cma_early_percent_memory(void)
+static phys_addr_t __init __maybe_unused cma_early_percent_memory(void)
{
struct memblock_region *reg;
unsigned long total_pages = 0;
@@ -88,7 +88,7 @@ static unsigned long __init __maybe_unused cma_early_percent_memory(void)
#else
-static inline __maybe_unused unsigned long cma_early_percent_memory(void)
+static inline __maybe_unused phys_addr_t cma_early_percent_memory(void)
{
return 0;
}
@@ -106,7 +106,7 @@ static inline __maybe_unused unsigned long cma_early_percent_memory(void)
*/
void __init dma_contiguous_reserve(phys_addr_t limit)
{
- unsigned long selected_size = 0;
+ phys_addr_t selected_size = 0;
pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit);
@@ -126,7 +126,7 @@ void __init dma_contiguous_reserve(phys_addr_t limit)
if (selected_size) {
pr_debug("%s: reserving %ld MiB for global area\n", __func__,
- selected_size / SZ_1M);
+ (unsigned long)selected_size / SZ_1M);
dma_declare_contiguous(NULL, selected_size, 0, limit);
}
@@ -227,11 +227,11 @@ core_initcall(cma_init_reserved_areas);
* called by board specific code when early allocator (memblock or bootmem)
* is still activate.
*/
-int __init dma_declare_contiguous(struct device *dev, unsigned long size,
+int __init dma_declare_contiguous(struct device *dev, phys_addr_t size,
phys_addr_t base, phys_addr_t limit)
{
struct cma_reserved *r = &cma_reserved[cma_reserved_count];
- unsigned long alignment;
+ phys_addr_t alignment;
pr_debug("%s(size %lx, base %08lx, limit %08lx)\n", __func__,
(unsigned long)size, (unsigned long)base,
@@ -268,10 +268,6 @@ int __init dma_declare_contiguous(struct device *dev, unsigned long size,
if (!addr) {
base = -ENOMEM;
goto err;
- } else if (addr + size > ~(unsigned long)0) {
- memblock_free(addr, size);
- base = -EINVAL;
- goto err;
} else {
base = addr;
}
@@ -285,14 +281,14 @@ int __init dma_declare_contiguous(struct device *dev, unsigned long size,
r->size = size;
r->dev = dev;
cma_reserved_count++;
- pr_info("CMA: reserved %ld MiB at %08lx\n", size / SZ_1M,
+ pr_info("CMA: reserved %ld MiB at %08lx\n", (unsigned long)size / SZ_1M,
(unsigned long)base);
/* Architecture specific contiguous memory fixup. */
dma_contiguous_early_fixup(base, size);
return 0;
err:
- pr_err("CMA: failed to reserve %ld MiB\n", size / SZ_1M);
+ pr_err("CMA: failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
return base;
}
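
With the phys_addr_t conversion above, board code can place per-device CMA regions above the 32-bit boundary on LPAE systems. A sketch (hypothetical device; passing 0 for base and limit is taken here to mean "let memblock pick any accessible range"):

    /* early, board-specific init code, before the page allocator is up */
    err = dma_declare_contiguous(&foo_device.dev, SZ_16M, 0, 0);
    if (err)
    	pr_warn("foo: no contiguous region reserved (%d)\n", err);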
diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c
index 3fbedc75e7c5..0ce39a33b3c2 100644
--- a/drivers/base/dma-mapping.c
+++ b/drivers/base/dma-mapping.c
@@ -218,6 +218,8 @@ void dmam_release_declared_memory(struct device *dev)
}
EXPORT_SYMBOL(dmam_release_declared_memory);
+#endif
+
/*
* Create scatter-list for the already allocated DMA buffer.
*/
@@ -236,8 +238,6 @@ int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
}
EXPORT_SYMBOL(dma_common_get_sgtable);
-#endif
-
/*
* Create userspace mapping for the DMA-coherent memory.
*/
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 8945f4e489ed..4b1f9265887f 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -88,11 +88,6 @@ enum {
FW_STATUS_ABORT,
};
-enum fw_buf_fmt {
- VMALLOC_BUF, /* used in direct loading */
- PAGE_BUF, /* used in loading via userspace */
-};
-
static int loading_timeout = 60; /* In seconds */
static inline long firmware_loading_timeout(void)
@@ -128,12 +123,14 @@ struct firmware_buf {
struct completion completion;
struct firmware_cache *fwc;
unsigned long status;
- enum fw_buf_fmt fmt;
void *data;
size_t size;
+#ifdef CONFIG_FW_LOADER_USER_HELPER
+ bool is_paged_buf;
struct page **pages;
int nr_pages;
int page_array_size;
+#endif
char fw_id[];
};
@@ -142,14 +139,6 @@ struct fw_cache_entry {
char name[];
};
-struct firmware_priv {
- struct timer_list timeout;
- bool nowait;
- struct device dev;
- struct firmware_buf *buf;
- struct firmware *fw;
-};
-
struct fw_name_devm {
unsigned long magic;
char name[];
@@ -182,7 +171,6 @@ static struct firmware_buf *__allocate_fw_buf(const char *fw_name,
strcpy(buf->fw_id, fw_name);
buf->fwc = fwc;
init_completion(&buf->completion);
- buf->fmt = VMALLOC_BUF;
pr_debug("%s: fw-%s buf=%p\n", __func__, fw_name, buf);
@@ -240,45 +228,58 @@ static void __fw_free_buf(struct kref *ref)
{
struct firmware_buf *buf = to_fwbuf(ref);
struct firmware_cache *fwc = buf->fwc;
- int i;
pr_debug("%s: fw-%s buf=%p data=%p size=%u\n",
__func__, buf->fw_id, buf, buf->data,
(unsigned int)buf->size);
- spin_lock(&fwc->lock);
list_del(&buf->list);
spin_unlock(&fwc->lock);
-
- if (buf->fmt == PAGE_BUF) {
+#ifdef CONFIG_FW_LOADER_USER_HELPER
+ if (buf->is_paged_buf) {
+ int i;
vunmap(buf->data);
for (i = 0; i < buf->nr_pages; i++)
__free_page(buf->pages[i]);
kfree(buf->pages);
} else
+#endif
vfree(buf->data);
kfree(buf);
}
static void fw_free_buf(struct firmware_buf *buf)
{
- kref_put(&buf->ref, __fw_free_buf);
+ struct firmware_cache *fwc = buf->fwc;
+ spin_lock(&fwc->lock);
+ if (!kref_put(&buf->ref, __fw_free_buf))
+ spin_unlock(&fwc->lock);
}
/* direct firmware loading support */
-static const char *fw_path[] = {
+static char fw_path_para[256];
+static const char * const fw_path[] = {
+ fw_path_para,
"/lib/firmware/updates/" UTS_RELEASE,
"/lib/firmware/updates",
"/lib/firmware/" UTS_RELEASE,
"/lib/firmware"
};
+/*
+ * Typical usage is that passing 'firmware_class.path=$CUSTOMIZED_PATH'
+ * from kernel command line because firmware_class is generally built in
+ * kernel instead of module.
+ */
+module_param_string(path, fw_path_para, sizeof(fw_path_para), 0644);
+MODULE_PARM_DESC(path, "customized firmware image search path with a higher priority than default path");
+
/* Don't inline this: 'struct kstat' is biggish */
-static noinline long fw_file_size(struct file *file)
+static noinline_for_stack long fw_file_size(struct file *file)
{
struct kstat st;
- if (vfs_getattr(file->f_path.mnt, file->f_path.dentry, &st))
+ if (vfs_getattr(&file->f_path, &st))
return -1;
if (!S_ISREG(st.mode))
return -1;
@@ -293,7 +294,7 @@ static bool fw_read_file_contents(struct file *file, struct firmware_buf *fw_buf
char *buf;
size = fw_file_size(file);
- if (size < 0)
+ if (size <= 0)
return false;
buf = vmalloc(size);
if (!buf)
@@ -307,7 +308,8 @@ static bool fw_read_file_contents(struct file *file, struct firmware_buf *fw_buf
return true;
}
-static bool fw_get_filesystem_firmware(struct firmware_buf *buf)
+static bool fw_get_filesystem_firmware(struct device *device,
+ struct firmware_buf *buf)
{
int i;
bool success = false;
@@ -315,6 +317,11 @@ static bool fw_get_filesystem_firmware(struct firmware_buf *buf)
for (i = 0; i < ARRAY_SIZE(fw_path); i++) {
struct file *file;
+
+ /* skip the unset customized path */
+ if (!fw_path[i][0])
+ continue;
+
snprintf(path, PATH_MAX, "%s/%s", fw_path[i], buf->fw_id);
file = filp_open(path, O_RDONLY, 0);
@@ -326,9 +333,114 @@ static bool fw_get_filesystem_firmware(struct firmware_buf *buf)
break;
}
__putname(path);
+
+ if (success) {
+ dev_dbg(device, "firmware: direct-loading firmware %s\n",
+ buf->fw_id);
+ mutex_lock(&fw_lock);
+ set_bit(FW_STATUS_DONE, &buf->status);
+ complete_all(&buf->completion);
+ mutex_unlock(&fw_lock);
+ }
+
return success;
}
+/* firmware holds the ownership of pages */
+static void firmware_free_data(const struct firmware *fw)
+{
+ /* Loaded directly? */
+ if (!fw->priv) {
+ vfree(fw->data);
+ return;
+ }
+ fw_free_buf(fw->priv);
+}
+
+/* store the pages buffer info firmware from buf */
+static void fw_set_page_data(struct firmware_buf *buf, struct firmware *fw)
+{
+ fw->priv = buf;
+#ifdef CONFIG_FW_LOADER_USER_HELPER
+ fw->pages = buf->pages;
+#endif
+ fw->size = buf->size;
+ fw->data = buf->data;
+
+ pr_debug("%s: fw-%s buf=%p data=%p size=%u\n",
+ __func__, buf->fw_id, buf, buf->data,
+ (unsigned int)buf->size);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static void fw_name_devm_release(struct device *dev, void *res)
+{
+ struct fw_name_devm *fwn = res;
+
+ if (fwn->magic == (unsigned long)&fw_cache)
+ pr_debug("%s: fw_name-%s devm-%p released\n",
+ __func__, fwn->name, res);
+}
+
+static int fw_devm_match(struct device *dev, void *res,
+ void *match_data)
+{
+ struct fw_name_devm *fwn = res;
+
+ return (fwn->magic == (unsigned long)&fw_cache) &&
+ !strcmp(fwn->name, match_data);
+}
+
+static struct fw_name_devm *fw_find_devm_name(struct device *dev,
+ const char *name)
+{
+ struct fw_name_devm *fwn;
+
+ fwn = devres_find(dev, fw_name_devm_release,
+ fw_devm_match, (void *)name);
+ return fwn;
+}
+
+/* add firmware name into devres list */
+static int fw_add_devm_name(struct device *dev, const char *name)
+{
+ struct fw_name_devm *fwn;
+
+ fwn = fw_find_devm_name(dev, name);
+ if (fwn)
+ return 1;
+
+ fwn = devres_alloc(fw_name_devm_release, sizeof(struct fw_name_devm) +
+ strlen(name) + 1, GFP_KERNEL);
+ if (!fwn)
+ return -ENOMEM;
+
+ fwn->magic = (unsigned long)&fw_cache;
+ strcpy(fwn->name, name);
+ devres_add(dev, fwn);
+
+ return 0;
+}
+#else
+static int fw_add_devm_name(struct device *dev, const char *name)
+{
+ return 0;
+}
+#endif
+
+
+/*
+ * user-mode helper code
+ */
+#ifdef CONFIG_FW_LOADER_USER_HELPER
+struct firmware_priv {
+ struct delayed_work timeout_work;
+ bool nowait;
+ struct device dev;
+ struct firmware_buf *buf;
+ struct firmware *fw;
+};
+
static struct firmware_priv *to_firmware_priv(struct device *dev)
{
return container_of(dev, struct firmware_priv, dev);
@@ -342,6 +454,9 @@ static void fw_load_abort(struct firmware_priv *fw_priv)
complete_all(&buf->completion);
}
+#define is_fw_load_aborted(buf) \
+ test_bit(FW_STATUS_ABORT, &(buf)->status)
+
static ssize_t firmware_timeout_show(struct class *class,
struct class_attribute *attr,
char *buf)
@@ -418,17 +533,6 @@ static ssize_t firmware_loading_show(struct device *dev,
return sprintf(buf, "%d\n", loading);
}
-/* firmware holds the ownership of pages */
-static void firmware_free_data(const struct firmware *fw)
-{
- /* Loaded directly? */
- if (!fw->priv) {
- vfree(fw->data);
- return;
- }
- fw_free_buf(fw->priv);
-}
-
/* Some architectures don't have PAGE_KERNEL_RO */
#ifndef PAGE_KERNEL_RO
#define PAGE_KERNEL_RO PAGE_KERNEL
@@ -437,7 +541,7 @@ static void firmware_free_data(const struct firmware *fw)
/* one pages buffer should be mapped/unmapped only once */
static int fw_map_pages_buf(struct firmware_buf *buf)
{
- if (buf->fmt != PAGE_BUF)
+ if (!buf->is_paged_buf)
return 0;
if (buf->data)
@@ -667,11 +771,18 @@ static struct bin_attribute firmware_attr_data = {
.write = firmware_data_write,
};
-static void firmware_class_timeout(u_long data)
+static void firmware_class_timeout_work(struct work_struct *work)
{
- struct firmware_priv *fw_priv = (struct firmware_priv *) data;
+ struct firmware_priv *fw_priv = container_of(work,
+ struct firmware_priv, timeout_work.work);
+ mutex_lock(&fw_lock);
+ if (test_bit(FW_STATUS_DONE, &(fw_priv->buf->status))) {
+ mutex_unlock(&fw_lock);
+ return;
+ }
fw_load_abort(fw_priv);
+ mutex_unlock(&fw_lock);
}
static struct firmware_priv *
@@ -690,8 +801,8 @@ fw_create_instance(struct firmware *firmware, const char *fw_name,
fw_priv->nowait = nowait;
fw_priv->fw = firmware;
- setup_timer(&fw_priv->timeout,
- firmware_class_timeout, (u_long) fw_priv);
+ INIT_DELAYED_WORK(&fw_priv->timeout_work,
+ firmware_class_timeout_work);
f_dev = &fw_priv->dev;
@@ -703,248 +814,243 @@ exit:
return fw_priv;
}
-/* store the pages buffer info firmware from buf */
-static void fw_set_page_data(struct firmware_buf *buf, struct firmware *fw)
+/* load a firmware via user helper */
+static int _request_firmware_load(struct firmware_priv *fw_priv, bool uevent,
+ long timeout)
{
- fw->priv = buf;
- fw->pages = buf->pages;
- fw->size = buf->size;
- fw->data = buf->data;
+ int retval = 0;
+ struct device *f_dev = &fw_priv->dev;
+ struct firmware_buf *buf = fw_priv->buf;
- pr_debug("%s: fw-%s buf=%p data=%p size=%u\n",
- __func__, buf->fw_id, buf, buf->data,
- (unsigned int)buf->size);
-}
+ /* fall back on userspace loading */
+ buf->is_paged_buf = true;
-#ifdef CONFIG_PM_SLEEP
-static void fw_name_devm_release(struct device *dev, void *res)
-{
- struct fw_name_devm *fwn = res;
+ dev_set_uevent_suppress(f_dev, true);
- if (fwn->magic == (unsigned long)&fw_cache)
- pr_debug("%s: fw_name-%s devm-%p released\n",
- __func__, fwn->name, res);
-}
+ /* Need to pin this module until class device is destroyed */
+ __module_get(THIS_MODULE);
-static int fw_devm_match(struct device *dev, void *res,
- void *match_data)
-{
- struct fw_name_devm *fwn = res;
+ retval = device_add(f_dev);
+ if (retval) {
+ dev_err(f_dev, "%s: device_register failed\n", __func__);
+ goto err_put_dev;
+ }
- return (fwn->magic == (unsigned long)&fw_cache) &&
- !strcmp(fwn->name, match_data);
-}
+ retval = device_create_bin_file(f_dev, &firmware_attr_data);
+ if (retval) {
+ dev_err(f_dev, "%s: sysfs_create_bin_file failed\n", __func__);
+ goto err_del_dev;
+ }
-static struct fw_name_devm *fw_find_devm_name(struct device *dev,
- const char *name)
-{
- struct fw_name_devm *fwn;
+ retval = device_create_file(f_dev, &dev_attr_loading);
+ if (retval) {
+ dev_err(f_dev, "%s: device_create_file failed\n", __func__);
+ goto err_del_bin_attr;
+ }
- fwn = devres_find(dev, fw_name_devm_release,
- fw_devm_match, (void *)name);
- return fwn;
-}
+ if (uevent) {
+ dev_set_uevent_suppress(f_dev, false);
+ dev_dbg(f_dev, "firmware: requesting %s\n", buf->fw_id);
+ if (timeout != MAX_SCHEDULE_TIMEOUT)
+ schedule_delayed_work(&fw_priv->timeout_work, timeout);
-/* add firmware name into devres list */
-static int fw_add_devm_name(struct device *dev, const char *name)
-{
- struct fw_name_devm *fwn;
+ kobject_uevent(&fw_priv->dev.kobj, KOBJ_ADD);
+ }
- fwn = fw_find_devm_name(dev, name);
- if (fwn)
- return 1;
+ wait_for_completion(&buf->completion);
- fwn = devres_alloc(fw_name_devm_release, sizeof(struct fw_name_devm) +
- strlen(name) + 1, GFP_KERNEL);
- if (!fwn)
- return -ENOMEM;
+ cancel_delayed_work_sync(&fw_priv->timeout_work);
- fwn->magic = (unsigned long)&fw_cache;
- strcpy(fwn->name, name);
- devres_add(dev, fwn);
+ fw_priv->buf = NULL;
- return 0;
+ device_remove_file(f_dev, &dev_attr_loading);
+err_del_bin_attr:
+ device_remove_bin_file(f_dev, &firmware_attr_data);
+err_del_dev:
+ device_del(f_dev);
+err_put_dev:
+ put_device(f_dev);
+ return retval;
}
-#else
-static int fw_add_devm_name(struct device *dev, const char *name)
+
+static int fw_load_from_user_helper(struct firmware *firmware,
+ const char *name, struct device *device,
+ bool uevent, bool nowait, long timeout)
{
- return 0;
+ struct firmware_priv *fw_priv;
+
+ fw_priv = fw_create_instance(firmware, name, device, uevent, nowait);
+ if (IS_ERR(fw_priv))
+ return PTR_ERR(fw_priv);
+
+ fw_priv->buf = firmware->priv;
+ return _request_firmware_load(fw_priv, uevent, timeout);
}
-#endif
+#else /* CONFIG_FW_LOADER_USER_HELPER */
+static inline int
+fw_load_from_user_helper(struct firmware *firmware, const char *name,
+ struct device *device, bool uevent, bool nowait,
+ long timeout)
+{
+ return -ENOENT;
+}
+
+/* No abort during direct loading */
+#define is_fw_load_aborted(buf) false
-static void _request_firmware_cleanup(const struct firmware **firmware_p)
+#endif /* CONFIG_FW_LOADER_USER_HELPER */
+
+
+/* wait until the shared firmware_buf becomes ready (or error) */
+static int sync_cached_firmware_buf(struct firmware_buf *buf)
{
- release_firmware(*firmware_p);
- *firmware_p = NULL;
+ int ret = 0;
+
+ mutex_lock(&fw_lock);
+ while (!test_bit(FW_STATUS_DONE, &buf->status)) {
+ if (is_fw_load_aborted(buf)) {
+ ret = -ENOENT;
+ break;
+ }
+ mutex_unlock(&fw_lock);
+ wait_for_completion(&buf->completion);
+ mutex_lock(&fw_lock);
+ }
+ mutex_unlock(&fw_lock);
+ return ret;
}
-static struct firmware_priv *
-_request_firmware_prepare(const struct firmware **firmware_p, const char *name,
- struct device *device, bool uevent, bool nowait)
+/* prepare firmware and firmware_buf structs;
+ * return 0 if a firmware is already assigned, 1 if need to load one,
+ * or a negative error code
+ */
+static int
+_request_firmware_prepare(struct firmware **firmware_p, const char *name,
+ struct device *device)
{
struct firmware *firmware;
- struct firmware_priv *fw_priv = NULL;
struct firmware_buf *buf;
int ret;
- if (!firmware_p)
- return ERR_PTR(-EINVAL);
-
*firmware_p = firmware = kzalloc(sizeof(*firmware), GFP_KERNEL);
if (!firmware) {
dev_err(device, "%s: kmalloc(struct firmware) failed\n",
__func__);
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
}
if (fw_get_builtin_firmware(firmware, name)) {
dev_dbg(device, "firmware: using built-in firmware %s\n", name);
- return NULL;
+ return 0; /* assigned */
}
ret = fw_lookup_and_allocate_buf(name, &fw_cache, &buf);
- if (!ret)
- fw_priv = fw_create_instance(firmware, name, device,
- uevent, nowait);
- if (IS_ERR(fw_priv) || ret < 0) {
- kfree(firmware);
- *firmware_p = NULL;
- return ERR_PTR(-ENOMEM);
- } else if (fw_priv) {
- fw_priv->buf = buf;
-
- /*
- * bind with 'buf' now to avoid warning in failure path
- * of requesting firmware.
- */
- firmware->priv = buf;
- return fw_priv;
- }
+ /*
+ * bind with 'buf' now to avoid warning in failure path
+ * of requesting firmware.
+ */
+ firmware->priv = buf;
- /* share the cached buf, which is inprogessing or completed */
- check_status:
- mutex_lock(&fw_lock);
- if (test_bit(FW_STATUS_ABORT, &buf->status)) {
- fw_priv = ERR_PTR(-ENOENT);
- firmware->priv = buf;
- _request_firmware_cleanup(firmware_p);
- goto exit;
- } else if (test_bit(FW_STATUS_DONE, &buf->status)) {
- fw_priv = NULL;
- fw_set_page_data(buf, firmware);
- goto exit;
+ if (ret > 0) {
+ ret = sync_cached_firmware_buf(buf);
+ if (!ret) {
+ fw_set_page_data(buf, firmware);
+ return 0; /* assigned */
+ }
}
- mutex_unlock(&fw_lock);
- wait_for_completion(&buf->completion);
- goto check_status;
-exit:
- mutex_unlock(&fw_lock);
- return fw_priv;
+ if (ret < 0)
+ return ret;
+ return 1; /* need to load */
}
-static int _request_firmware_load(struct firmware_priv *fw_priv, bool uevent,
- long timeout)
+static int assign_firmware_buf(struct firmware *fw, struct device *device)
{
- int retval = 0;
- struct device *f_dev = &fw_priv->dev;
- struct firmware_buf *buf = fw_priv->buf;
- struct firmware_cache *fwc = &fw_cache;
- int direct_load = 0;
-
- /* try direct loading from fs first */
- if (fw_get_filesystem_firmware(buf)) {
- dev_dbg(f_dev->parent, "firmware: direct-loading"
- " firmware %s\n", buf->fw_id);
-
- set_bit(FW_STATUS_DONE, &buf->status);
- complete_all(&buf->completion);
- direct_load = 1;
- goto handle_fw;
- }
+ struct firmware_buf *buf = fw->priv;
- /* fall back on userspace loading */
- buf->fmt = PAGE_BUF;
-
- dev_set_uevent_suppress(f_dev, true);
-
- /* Need to pin this module until class device is destroyed */
- __module_get(THIS_MODULE);
-
- retval = device_add(f_dev);
- if (retval) {
- dev_err(f_dev, "%s: device_register failed\n", __func__);
- goto err_put_dev;
- }
-
- retval = device_create_bin_file(f_dev, &firmware_attr_data);
- if (retval) {
- dev_err(f_dev, "%s: sysfs_create_bin_file failed\n", __func__);
- goto err_del_dev;
- }
-
- retval = device_create_file(f_dev, &dev_attr_loading);
- if (retval) {
- dev_err(f_dev, "%s: device_create_file failed\n", __func__);
- goto err_del_bin_attr;
- }
-
- if (uevent) {
- dev_set_uevent_suppress(f_dev, false);
- dev_dbg(f_dev, "firmware: requesting %s\n", buf->fw_id);
- if (timeout != MAX_SCHEDULE_TIMEOUT)
- mod_timer(&fw_priv->timeout,
- round_jiffies_up(jiffies + timeout));
-
- kobject_uevent(&fw_priv->dev.kobj, KOBJ_ADD);
- }
-
- wait_for_completion(&buf->completion);
-
- del_timer_sync(&fw_priv->timeout);
-
-handle_fw:
mutex_lock(&fw_lock);
- if (!buf->size || test_bit(FW_STATUS_ABORT, &buf->status))
- retval = -ENOENT;
+ if (!buf->size || is_fw_load_aborted(buf)) {
+ mutex_unlock(&fw_lock);
+ return -ENOENT;
+ }
/*
* add firmware name into devres list so that we can auto cache
* and uncache firmware for device.
*
- * f_dev->parent may has been deleted already, but the problem
+ * device may has been deleted already, but the problem
* should be fixed in devres or driver core.
*/
- if (!retval && f_dev->parent)
- fw_add_devm_name(f_dev->parent, buf->fw_id);
+ if (device)
+ fw_add_devm_name(device, buf->fw_id);
/*
* After caching firmware image is started, let it piggyback
* on request firmware.
*/
- if (!retval && fwc->state == FW_LOADER_START_CACHE) {
+ if (buf->fwc->state == FW_LOADER_START_CACHE) {
if (fw_cache_piggyback_on_request(buf->fw_id))
kref_get(&buf->ref);
}
/* pass the pages buffer to driver at the last minute */
- fw_set_page_data(buf, fw_priv->fw);
-
- fw_priv->buf = NULL;
+ fw_set_page_data(buf, fw);
mutex_unlock(&fw_lock);
+ return 0;
+}
- if (direct_load)
- goto err_put_dev;
+/* called from request_firmware() and request_firmware_work_func() */
+static int
+_request_firmware(const struct firmware **firmware_p, const char *name,
+ struct device *device, bool uevent, bool nowait)
+{
+ struct firmware *fw;
+ long timeout;
+ int ret;
- device_remove_file(f_dev, &dev_attr_loading);
-err_del_bin_attr:
- device_remove_bin_file(f_dev, &firmware_attr_data);
-err_del_dev:
- device_del(f_dev);
-err_put_dev:
- put_device(f_dev);
- return retval;
+ if (!firmware_p)
+ return -EINVAL;
+
+ ret = _request_firmware_prepare(&fw, name, device);
+ if (ret <= 0) /* error or already assigned */
+ goto out;
+
+ ret = 0;
+ timeout = firmware_loading_timeout();
+ if (nowait) {
+ timeout = usermodehelper_read_lock_wait(timeout);
+ if (!timeout) {
+ dev_dbg(device, "firmware: %s loading timed out\n",
+ name);
+ ret = -EBUSY;
+ goto out;
+ }
+ } else {
+ ret = usermodehelper_read_trylock();
+ if (WARN_ON(ret)) {
+ dev_err(device, "firmware: %s will not be loaded\n",
+ name);
+ goto out;
+ }
+ }
+
+ if (!fw_get_filesystem_firmware(device, fw->priv))
+ ret = fw_load_from_user_helper(fw, name, device,
+ uevent, nowait, timeout);
+ if (!ret)
+ ret = assign_firmware_buf(fw, device);
+
+ usermodehelper_read_unlock();
+
+ out:
+ if (ret < 0) {
+ release_firmware(fw);
+ fw = NULL;
+ }
+
+ *firmware_p = fw;
+ return ret;
}
/**
@@ -963,31 +1069,15 @@ err_put_dev:
* firmware image for this or any other device.
*
* Caller must hold the reference count of @device.
+ *
+ * The function can be called safely inside device's suspend and
+ * resume callback.
**/
int
request_firmware(const struct firmware **firmware_p, const char *name,
struct device *device)
{
- struct firmware_priv *fw_priv;
- int ret;
-
- fw_priv = _request_firmware_prepare(firmware_p, name, device, true,
- false);
- if (IS_ERR_OR_NULL(fw_priv))
- return PTR_RET(fw_priv);
-
- ret = usermodehelper_read_trylock();
- if (WARN_ON(ret)) {
- dev_err(device, "firmware: %s will not be loaded\n", name);
- } else {
- ret = _request_firmware_load(fw_priv, true,
- firmware_loading_timeout());
- usermodehelper_read_unlock();
- }
- if (ret)
- _request_firmware_cleanup(firmware_p);
-
- return ret;
+ return _request_firmware(firmware_p, name, device, true, false);
}
/**
@@ -1018,33 +1108,13 @@ static void request_firmware_work_func(struct work_struct *work)
{
struct firmware_work *fw_work;
const struct firmware *fw;
- struct firmware_priv *fw_priv;
- long timeout;
- int ret;
fw_work = container_of(work, struct firmware_work, work);
- fw_priv = _request_firmware_prepare(&fw, fw_work->name, fw_work->device,
- fw_work->uevent, true);
- if (IS_ERR_OR_NULL(fw_priv)) {
- ret = PTR_RET(fw_priv);
- goto out;
- }
-
- timeout = usermodehelper_read_lock_wait(firmware_loading_timeout());
- if (timeout) {
- ret = _request_firmware_load(fw_priv, fw_work->uevent, timeout);
- usermodehelper_read_unlock();
- } else {
- dev_dbg(fw_work->device, "firmware: %s loading timed out\n",
- fw_work->name);
- ret = -EAGAIN;
- }
- if (ret)
- _request_firmware_cleanup(&fw);
- out:
+ _request_firmware(&fw, fw_work->name, fw_work->device,
+ fw_work->uevent, true);
fw_work->cont(fw, fw_work->context);
- put_device(fw_work->device);
+ put_device(fw_work->device); /* taken in request_firmware_nowait() */
module_put(fw_work->module);
kfree(fw_work);
@@ -1446,7 +1516,11 @@ static void __init fw_cache_init(void)
static int __init firmware_class_init(void)
{
fw_cache_init();
+#ifdef CONFIG_FW_LOADER_USER_HELPER
return class_register(&firmware_class);
+#else
+ return 0;
+#endif
}
static void __exit firmware_class_exit(void)
@@ -1455,7 +1529,9 @@ static void __exit firmware_class_exit(void)
unregister_syscore_ops(&fw_syscore_ops);
unregister_pm_notifier(&fw_cache.pm_notify);
#endif
+#ifdef CONFIG_FW_LOADER_USER_HELPER
class_unregister(&firmware_class);
+#endif
}
fs_initcall(firmware_class_init);
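
The reworked _request_firmware() path also serves the asynchronous entry point; a sketch (struct foo_dev, foo_upload() and the /oem path are hypothetical) of request_firmware_nowait(), whose completion callback receives NULL on failure, together with the new search-path override from the diff above (firmware_class.path=/oem/firmware on the kernel command line):

    static void foo_fw_ready(const struct firmware *fw, void *context)
    {
    	struct foo_dev *foo = context;

    	if (!fw)
    		return;		/* not found in any path, or aborted */
    	foo_upload(foo, fw->data, fw->size);
    	release_firmware(fw);
    }

    /* in probe; uevent=true so udev is notified if the fallback is used */
    ret = request_firmware_nowait(THIS_MODULE, true, "foo.bin",
    			      &pdev->dev, GFP_KERNEL, foo, foo_fw_ready);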
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 86c88216a503..a51007b79032 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -70,6 +70,13 @@ void unregister_memory_isolate_notifier(struct notifier_block *nb)
}
EXPORT_SYMBOL(unregister_memory_isolate_notifier);
+static void memory_block_release(struct device *dev)
+{
+ struct memory_block *mem = container_of(dev, struct memory_block, dev);
+
+ kfree(mem);
+}
+
/*
* register_memory - Setup a sysfs device for a memory block
*/
@@ -80,6 +87,7 @@ int register_memory(struct memory_block *memory)
memory->dev.bus = &memory_subsys;
memory->dev.id = memory->start_section_nr / sections_per_block;
+ memory->dev.release = memory_block_release;
error = device_register(&memory->dev);
return error;
@@ -246,7 +254,7 @@ static bool pages_correctly_reserved(unsigned long start_pfn,
* OK to have direct references to sparsemem variables in here.
*/
static int
-memory_block_action(unsigned long phys_index, unsigned long action)
+memory_block_action(unsigned long phys_index, unsigned long action, int online_type)
{
unsigned long start_pfn;
unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
@@ -261,7 +269,7 @@ memory_block_action(unsigned long phys_index, unsigned long action)
if (!pages_correctly_reserved(start_pfn, nr_pages))
return -EBUSY;
- ret = online_pages(start_pfn, nr_pages);
+ ret = online_pages(start_pfn, nr_pages, online_type);
break;
case MEM_OFFLINE:
ret = offline_pages(start_pfn, nr_pages);
@@ -276,7 +284,8 @@ memory_block_action(unsigned long phys_index, unsigned long action)
}
static int __memory_block_change_state(struct memory_block *mem,
- unsigned long to_state, unsigned long from_state_req)
+ unsigned long to_state, unsigned long from_state_req,
+ int online_type)
{
int ret = 0;
@@ -288,7 +297,7 @@ static int __memory_block_change_state(struct memory_block *mem,
if (to_state == MEM_OFFLINE)
mem->state = MEM_GOING_OFFLINE;
- ret = memory_block_action(mem->start_section_nr, to_state);
+ ret = memory_block_action(mem->start_section_nr, to_state, online_type);
if (ret) {
mem->state = from_state_req;
@@ -311,12 +320,14 @@ out:
}
static int memory_block_change_state(struct memory_block *mem,
- unsigned long to_state, unsigned long from_state_req)
+ unsigned long to_state, unsigned long from_state_req,
+ int online_type)
{
int ret;
mutex_lock(&mem->state_mutex);
- ret = __memory_block_change_state(mem, to_state, from_state_req);
+ ret = __memory_block_change_state(mem, to_state, from_state_req,
+ online_type);
mutex_unlock(&mem->state_mutex);
return ret;
@@ -330,10 +341,18 @@ store_mem_state(struct device *dev,
mem = container_of(dev, struct memory_block, dev);
- if (!strncmp(buf, "online", min((int)count, 6)))
- ret = memory_block_change_state(mem, MEM_ONLINE, MEM_OFFLINE);
- else if(!strncmp(buf, "offline", min((int)count, 7)))
- ret = memory_block_change_state(mem, MEM_OFFLINE, MEM_ONLINE);
+ if (!strncmp(buf, "online_kernel", min_t(int, count, 13)))
+ ret = memory_block_change_state(mem, MEM_ONLINE,
+ MEM_OFFLINE, ONLINE_KERNEL);
+ else if (!strncmp(buf, "online_movable", min_t(int, count, 14)))
+ ret = memory_block_change_state(mem, MEM_ONLINE,
+ MEM_OFFLINE, ONLINE_MOVABLE);
+ else if (!strncmp(buf, "online", min_t(int, count, 6)))
+ ret = memory_block_change_state(mem, MEM_ONLINE,
+ MEM_OFFLINE, ONLINE_KEEP);
+ else if(!strncmp(buf, "offline", min_t(int, count, 7)))
+ ret = memory_block_change_state(mem, MEM_OFFLINE,
+ MEM_ONLINE, -1);
if (ret)
return ret;
@@ -475,8 +494,8 @@ store_hard_offline_page(struct device *dev,
return ret ? ret : count;
}
-static DEVICE_ATTR(soft_offline_page, 0644, NULL, store_soft_offline_page);
-static DEVICE_ATTR(hard_offline_page, 0644, NULL, store_hard_offline_page);
+static DEVICE_ATTR(soft_offline_page, S_IWUSR, NULL, store_soft_offline_page);
+static DEVICE_ATTR(hard_offline_page, S_IWUSR, NULL, store_hard_offline_page);
static __init int memory_fail_init(void)
{
@@ -635,7 +654,6 @@ int remove_memory_block(unsigned long node_id, struct mem_section *section,
mem_remove_simple_file(mem, phys_device);
mem_remove_simple_file(mem, removable);
unregister_memory(mem);
- kfree(mem);
} else
kobject_put(&mem->dev.kobj);
@@ -669,12 +687,18 @@ int offline_memory_block(struct memory_block *mem)
mutex_lock(&mem->state_mutex);
if (mem->state != MEM_OFFLINE)
- ret = __memory_block_change_state(mem, MEM_OFFLINE, MEM_ONLINE);
+ ret = __memory_block_change_state(mem, MEM_OFFLINE, MEM_ONLINE, -1);
mutex_unlock(&mem->state_mutex);
return ret;
}
+/* return true if the memory block is offlined, otherwise, return false */
+bool is_memblock_offlined(struct memory_block *mem)
+{
+ return mem->state == MEM_OFFLINE;
+}
+
/*
* Initialize the sysfs support for memory devices...
*/
diff --git a/drivers/base/node.c b/drivers/base/node.c
index af1a177216f1..fac124a7e1c5 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -227,7 +227,7 @@ static node_registration_func_t __hugetlb_unregister_node;
static inline bool hugetlb_register_node(struct node *node)
{
if (__hugetlb_register_node &&
- node_state(node->dev.id, N_HIGH_MEMORY)) {
+ node_state(node->dev.id, N_MEMORY)) {
__hugetlb_register_node(node);
return true;
}
@@ -252,6 +252,24 @@ static inline void hugetlb_register_node(struct node *node) {}
static inline void hugetlb_unregister_node(struct node *node) {}
#endif
+static void node_device_release(struct device *dev)
+{
+ struct node *node = to_node(dev);
+
+#if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_HUGETLBFS)
+ /*
+ * We schedule the work only when a memory section is
+ * onlined/offlined on this node. When we come here,
+ * all the memory on this node has been offlined,
+ * so we won't enqueue new work to this work.
+ *
+ * The work is using node->node_work, so we should
+ * flush work before freeing the memory.
+ */
+ flush_work(&node->node_work);
+#endif
+ kfree(node);
+}
/*
* register_node - Setup a sysfs device for a node.
@@ -259,12 +277,13 @@ static inline void hugetlb_unregister_node(struct node *node) {}
*
* Initialize and register the node device.
*/
-int register_node(struct node *node, int num, struct node *parent)
+static int register_node(struct node *node, int num, struct node *parent)
{
int error;
node->dev.id = num;
node->dev.bus = &node_subsys;
+ node->dev.release = node_device_release;
error = device_register(&node->dev);
if (!error){
@@ -306,7 +325,7 @@ void unregister_node(struct node *node)
device_unregister(&node->dev);
}
-struct node node_devices[MAX_NUMNODES];
+struct node *node_devices[MAX_NUMNODES];
/*
* register cpu under node
@@ -323,15 +342,15 @@ int register_cpu_under_node(unsigned int cpu, unsigned int nid)
if (!obj)
return 0;
- ret = sysfs_create_link(&node_devices[nid].dev.kobj,
+ ret = sysfs_create_link(&node_devices[nid]->dev.kobj,
&obj->kobj,
kobject_name(&obj->kobj));
if (ret)
return ret;
return sysfs_create_link(&obj->kobj,
- &node_devices[nid].dev.kobj,
- kobject_name(&node_devices[nid].dev.kobj));
+ &node_devices[nid]->dev.kobj,
+ kobject_name(&node_devices[nid]->dev.kobj));
}
int unregister_cpu_under_node(unsigned int cpu, unsigned int nid)
@@ -345,10 +364,10 @@ int unregister_cpu_under_node(unsigned int cpu, unsigned int nid)
if (!obj)
return 0;
- sysfs_remove_link(&node_devices[nid].dev.kobj,
+ sysfs_remove_link(&node_devices[nid]->dev.kobj,
kobject_name(&obj->kobj));
sysfs_remove_link(&obj->kobj,
- kobject_name(&node_devices[nid].dev.kobj));
+ kobject_name(&node_devices[nid]->dev.kobj));
return 0;
}
@@ -390,15 +409,15 @@ int register_mem_sect_under_node(struct memory_block *mem_blk, int nid)
continue;
if (page_nid != nid)
continue;
- ret = sysfs_create_link_nowarn(&node_devices[nid].dev.kobj,
+ ret = sysfs_create_link_nowarn(&node_devices[nid]->dev.kobj,
&mem_blk->dev.kobj,
kobject_name(&mem_blk->dev.kobj));
if (ret)
return ret;
return sysfs_create_link_nowarn(&mem_blk->dev.kobj,
- &node_devices[nid].dev.kobj,
- kobject_name(&node_devices[nid].dev.kobj));
+ &node_devices[nid]->dev.kobj,
+ kobject_name(&node_devices[nid]->dev.kobj));
}
/* mem section does not span the specified node */
return 0;
@@ -431,10 +450,10 @@ int unregister_mem_sect_under_nodes(struct memory_block *mem_blk,
continue;
if (node_test_and_set(nid, *unlinked_nodes))
continue;
- sysfs_remove_link(&node_devices[nid].dev.kobj,
+ sysfs_remove_link(&node_devices[nid]->dev.kobj,
kobject_name(&mem_blk->dev.kobj));
sysfs_remove_link(&mem_blk->dev.kobj,
- kobject_name(&node_devices[nid].dev.kobj));
+ kobject_name(&node_devices[nid]->dev.kobj));
}
NODEMASK_FREE(unlinked_nodes);
return 0;
@@ -500,7 +519,7 @@ static void node_hugetlb_work(struct work_struct *work)
static void init_node_hugetlb_work(int nid)
{
- INIT_WORK(&node_devices[nid].node_work, node_hugetlb_work);
+ INIT_WORK(&node_devices[nid]->node_work, node_hugetlb_work);
}
static int node_memory_callback(struct notifier_block *self,
@@ -517,7 +536,7 @@ static int node_memory_callback(struct notifier_block *self,
* when transitioning to/from memoryless state.
*/
if (nid != NUMA_NO_NODE)
- schedule_work(&node_devices[nid].node_work);
+ schedule_work(&node_devices[nid]->node_work);
break;
case MEM_GOING_ONLINE:
@@ -558,9 +577,13 @@ int register_one_node(int nid)
struct node *parent = NULL;
if (p_node != nid)
- parent = &node_devices[p_node];
+ parent = node_devices[p_node];
+
+ node_devices[nid] = kzalloc(sizeof(struct node), GFP_KERNEL);
+ if (!node_devices[nid])
+ return -ENOMEM;
- error = register_node(&node_devices[nid], nid, parent);
+ error = register_node(node_devices[nid], nid, parent);
/* link cpu under this node */
for_each_present_cpu(cpu) {
@@ -581,7 +604,8 @@ int register_one_node(int nid)
void unregister_one_node(int nid)
{
- unregister_node(&node_devices[nid]);
+ unregister_node(node_devices[nid]);
+ node_devices[nid] = NULL;
}
/*
@@ -614,23 +638,29 @@ static ssize_t show_node_state(struct device *dev,
{ __ATTR(name, 0444, show_node_state, NULL), state }
static struct node_attr node_state_attr[] = {
- _NODE_ATTR(possible, N_POSSIBLE),
- _NODE_ATTR(online, N_ONLINE),
- _NODE_ATTR(has_normal_memory, N_NORMAL_MEMORY),
- _NODE_ATTR(has_cpu, N_CPU),
+ [N_POSSIBLE] = _NODE_ATTR(possible, N_POSSIBLE),
+ [N_ONLINE] = _NODE_ATTR(online, N_ONLINE),
+ [N_NORMAL_MEMORY] = _NODE_ATTR(has_normal_memory, N_NORMAL_MEMORY),
#ifdef CONFIG_HIGHMEM
- _NODE_ATTR(has_high_memory, N_HIGH_MEMORY),
+ [N_HIGH_MEMORY] = _NODE_ATTR(has_high_memory, N_HIGH_MEMORY),
#endif
+#ifdef CONFIG_MOVABLE_NODE
+ [N_MEMORY] = _NODE_ATTR(has_memory, N_MEMORY),
+#endif
+ [N_CPU] = _NODE_ATTR(has_cpu, N_CPU),
};
static struct attribute *node_state_attrs[] = {
- &node_state_attr[0].attr.attr,
- &node_state_attr[1].attr.attr,
- &node_state_attr[2].attr.attr,
- &node_state_attr[3].attr.attr,
+ &node_state_attr[N_POSSIBLE].attr.attr,
+ &node_state_attr[N_ONLINE].attr.attr,
+ &node_state_attr[N_NORMAL_MEMORY].attr.attr,
#ifdef CONFIG_HIGHMEM
- &node_state_attr[4].attr.attr,
+ &node_state_attr[N_HIGH_MEMORY].attr.attr,
+#endif
+#ifdef CONFIG_MOVABLE_NODE
+ &node_state_attr[N_MEMORY].attr.attr,
#endif
+ &node_state_attr[N_CPU].attr.attr,
NULL
};
diff --git a/drivers/base/pinctrl.c b/drivers/base/pinctrl.c
new file mode 100644
index 000000000000..67a274e86727
--- /dev/null
+++ b/drivers/base/pinctrl.c
@@ -0,0 +1,69 @@
+/*
+ * Driver core interface to the pinctrl subsystem.
+ *
+ * Copyright (C) 2012 ST-Ericsson SA
+ * Written on behalf of Linaro for ST-Ericsson
+ * Based on bits of regulator core, gpio core and clk core
+ *
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/device.h>
+#include <linux/pinctrl/devinfo.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/slab.h>
+
+/**
+ * pinctrl_bind_pins() - called by the device core before probe
+ * @dev: the device that is just about to probe
+ */
+int pinctrl_bind_pins(struct device *dev)
+{
+ int ret;
+
+ dev->pins = devm_kzalloc(dev, sizeof(*(dev->pins)), GFP_KERNEL);
+ if (!dev->pins)
+ return -ENOMEM;
+
+ dev->pins->p = devm_pinctrl_get(dev);
+ if (IS_ERR(dev->pins->p)) {
+ dev_dbg(dev, "no pinctrl handle\n");
+ ret = PTR_ERR(dev->pins->p);
+ goto cleanup_alloc;
+ }
+
+ dev->pins->default_state = pinctrl_lookup_state(dev->pins->p,
+ PINCTRL_STATE_DEFAULT);
+ if (IS_ERR(dev->pins->default_state)) {
+ dev_dbg(dev, "no default pinctrl state\n");
+ ret = 0;
+ goto cleanup_get;
+ }
+
+ ret = pinctrl_select_state(dev->pins->p, dev->pins->default_state);
+ if (ret) {
+ dev_dbg(dev, "failed to activate default pinctrl state\n");
+ goto cleanup_get;
+ }
+
+ return 0;
+
+ /*
+ * If no pinctrl handle or default state was found for this device,
+ * let's explicitly free the pin container in the device, there is
+ * no point in keeping it around.
+ */
+cleanup_get:
+ devm_pinctrl_put(dev->pins->p);
+cleanup_alloc:
+ devm_kfree(dev, dev->pins);
+ dev->pins = NULL;
+
+ /* Only return deferrals */
+ if (ret != -EPROBE_DEFER)
+ ret = 0;
+
+ return ret;
+}
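
Since pinctrl_bind_pins() now grabs the handle and applies the "default" state automatically before probe(), drivers only need explicit pinctrl calls for additional states. A sketch, assuming the board mapping defines a "sleep" state:

    struct pinctrl *p;
    struct pinctrl_state *sleep_state;

    p = devm_pinctrl_get(dev);
    if (!IS_ERR(p)) {
    	sleep_state = pinctrl_lookup_state(p, PINCTRL_STATE_SLEEP);
    	if (!IS_ERR(sleep_state))
    		pinctrl_select_state(p, sleep_state);	/* e.g. from a suspend hook */
    }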
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 8727e9c5eea4..c0b8df38402b 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -21,6 +21,7 @@
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/idr.h>
+#include <linux/acpi.h>
#include "base.h"
#include "power/power.h"
@@ -44,7 +45,7 @@ EXPORT_SYMBOL_GPL(platform_bus);
* be setup before the platform_notifier is called. So if a user needs to
* manipulate any relevant information in the pdev_archdata they can do:
*
- * platform_devic_alloc()
+ * platform_device_alloc()
* ... manipulate ...
* platform_device_add()
*
@@ -83,9 +84,16 @@ EXPORT_SYMBOL_GPL(platform_get_resource);
*/
int platform_get_irq(struct platform_device *dev, unsigned int num)
{
+#ifdef CONFIG_SPARC
+ /* sparc does not have irqs represented as IORESOURCE_IRQ resources */
+ if (!dev || num >= dev->archdata.num_irqs)
+ return -ENXIO;
+ return dev->archdata.irqs[num];
+#else
struct resource *r = platform_get_resource(dev, IORESOURCE_IRQ, num);
return r ? r->start : -ENXIO;
+#endif
}
EXPORT_SYMBOL_GPL(platform_get_irq);
@@ -115,7 +123,7 @@ struct resource *platform_get_resource_byname(struct platform_device *dev,
EXPORT_SYMBOL_GPL(platform_get_resource_byname);
/**
- * platform_get_irq - get an IRQ for a device
+ * platform_get_irq_byname - get an IRQ for a device by name
* @dev: platform device
* @name: IRQ name
*/
@@ -429,6 +437,7 @@ struct platform_device *platform_device_register_full(
goto err_alloc;
pdev->dev.parent = pdevinfo->parent;
+ ACPI_HANDLE_SET(&pdev->dev, pdevinfo->acpi_node.handle);
if (pdevinfo->dma_mask) {
/*
@@ -459,6 +468,7 @@ struct platform_device *platform_device_register_full(
ret = platform_device_add(pdev);
if (ret) {
err:
+ ACPI_HANDLE_SET(&pdev->dev, NULL);
kfree(pdev->dev.dma_mask);
err_alloc:
@@ -474,8 +484,16 @@ static int platform_drv_probe(struct device *_dev)
{
struct platform_driver *drv = to_platform_driver(_dev->driver);
struct platform_device *dev = to_platform_device(_dev);
+ int ret;
+
+ if (ACPI_HANDLE(_dev))
+ acpi_dev_pm_attach(_dev, true);
- return drv->probe(dev);
+ ret = drv->probe(dev);
+ if (ret && ACPI_HANDLE(_dev))
+ acpi_dev_pm_detach(_dev, true);
+
+ return ret;
}
static int platform_drv_probe_fail(struct device *_dev)
@@ -487,8 +505,13 @@ static int platform_drv_remove(struct device *_dev)
{
struct platform_driver *drv = to_platform_driver(_dev->driver);
struct platform_device *dev = to_platform_device(_dev);
+ int ret;
- return drv->remove(dev);
+ ret = drv->remove(dev);
+ if (ACPI_HANDLE(_dev))
+ acpi_dev_pm_detach(_dev, true);
+
+ return ret;
}
static void platform_drv_shutdown(struct device *_dev)
@@ -497,6 +520,8 @@ static void platform_drv_shutdown(struct device *_dev)
struct platform_device *dev = to_platform_device(_dev);
drv->shutdown(dev);
+ if (ACPI_HANDLE(_dev))
+ acpi_dev_pm_detach(_dev, true);
}
/**
@@ -702,6 +727,10 @@ static int platform_match(struct device *dev, struct device_driver *drv)
if (of_driver_match_device(dev, drv))
return 1;
+ /* Then try ACPI style match */
+ if (acpi_driver_match_device(dev, drv))
+ return 1;
+
/* Then try to match against the id table */
if (pdrv->id_table)
return platform_match_id(pdrv->id_table, pdev) != NULL;
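With ACPI matching added between the OF and id-table checks, a driver can advertise both firmware tables; the compatible string and ACPI ID below are invented for illustration:

static const struct of_device_id foo_of_match[] = {
	{ .compatible = "vendor,foo" },
	{ /* sentinel */ }
};

static const struct acpi_device_id foo_acpi_match[] = {
	{ "VNDF0001", 0 },
	{ /* sentinel */ }
};

static struct platform_driver foo_driver = {
	.driver = {
		.name = "foo",
		.of_match_table = of_match_ptr(foo_of_match),
		.acpi_match_table = ACPI_PTR(foo_acpi_match),
	},
	.probe = foo_probe,
};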
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index eb78e9640c4a..9d8fde709390 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -99,7 +99,7 @@ static void __pm_clk_remove(struct pm_clock_entry *ce)
if (ce->status < PCE_STATUS_ERROR) {
if (ce->status == PCE_STATUS_ENABLED)
- clk_disable(ce->clk);
+ clk_disable_unprepare(ce->clk);
if (ce->status >= PCE_STATUS_ACQUIRED)
clk_put(ce->clk);
@@ -396,7 +396,7 @@ static void enable_clock(struct device *dev, const char *con_id)
clk = clk_get(dev, con_id);
if (!IS_ERR(clk)) {
- clk_enable(clk);
+ clk_prepare_enable(clk);
clk_put(clk);
dev_info(dev, "Runtime PM disabled, clock forced on.\n");
}
@@ -413,7 +413,7 @@ static void disable_clock(struct device *dev, const char *con_id)
clk = clk_get(dev, con_id);
if (!IS_ERR(clk)) {
- clk_disable(clk);
+ clk_disable_unprepare(clk);
clk_put(clk);
dev_info(dev, "Runtime PM disabled, clock forced off.\n");
}
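The three hunks switch the PM clock helpers to the prepare-aware APIs; for reference, a sketch of the consumer-side pairing they now assume (the clock name "bus" is an example):

static int foo_run(struct device *dev)
{
	struct clk *clk = clk_get(dev, "bus");
	int ret;

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_prepare_enable(clk);	/* clk_prepare() may sleep, then clk_enable() */
	if (!ret) {
		/* ... hardware is clocked here ... */
		clk_disable_unprepare(clk);	/* undo both steps */
	}
	clk_put(clk);
	return ret;
}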
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 96b71b6536d6..9a6b05a35603 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -433,8 +433,7 @@ static bool genpd_abort_poweroff(struct generic_pm_domain *genpd)
*/
void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
{
- if (!work_pending(&genpd->power_off_work))
- queue_work(pm_wq, &genpd->power_off_work);
+ queue_work(pm_wq, &genpd->power_off_work);
}
/**
@@ -470,10 +469,19 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
return -EBUSY;
not_suspended = 0;
- list_for_each_entry(pdd, &genpd->dev_list, list_node)
+ list_for_each_entry(pdd, &genpd->dev_list, list_node) {
+ enum pm_qos_flags_status stat;
+
+ stat = dev_pm_qos_flags(pdd->dev,
+ PM_QOS_FLAG_NO_POWER_OFF
+ | PM_QOS_FLAG_REMOTE_WAKEUP);
+ if (stat > PM_QOS_FLAGS_NONE)
+ return -EBUSY;
+
if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev)
|| pdd->dev->power.irq_safe))
not_suspended++;
+ }
if (not_suspended > genpd->in_progress)
return -EBUSY;
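This makes PM_QOS_FLAG_NO_POWER_OFF an effective veto on domain power-off; a minimal sketch of a driver asserting it, using the request type introduced later in this series (the request object placement is illustrative):

static struct dev_pm_qos_request foo_no_poweroff_req;

static int foo_forbid_poweroff(struct device *dev)
{
	/* Any device in the domain asserting this flag blocks power-off. */
	return dev_pm_qos_add_request(dev, &foo_no_poweroff_req,
				      DEV_PM_QOS_FLAGS,
				      PM_QOS_FLAG_NO_POWER_OFF);
}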
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index a3c1404c7933..15beb500a4e4 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -99,7 +99,6 @@ void device_pm_add(struct device *dev)
dev_warn(dev, "parent %s should not be sleeping\n",
dev_name(dev->parent));
list_add_tail(&dev->power.entry, &dpm_list);
- dev_pm_qos_constraints_init(dev);
mutex_unlock(&dpm_list_mtx);
}
@@ -113,7 +112,6 @@ void device_pm_remove(struct device *dev)
dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
complete_all(&dev->power.completion);
mutex_lock(&dpm_list_mtx);
- dev_pm_qos_constraints_destroy(dev);
list_del_init(&dev->power.entry);
mutex_unlock(&dpm_list_mtx);
device_wakeup_disable(dev);
@@ -513,6 +511,8 @@ static int device_resume_early(struct device *dev, pm_message_t state)
Out:
TRACE_RESUME(error);
+
+ pm_runtime_enable(dev);
return error;
}
@@ -589,8 +589,6 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
if (!dev->power.is_suspended)
goto Unlock;
- pm_runtime_enable(dev);
-
if (dev->pm_domain) {
info = "power domain ";
callback = pm_op(&dev->pm_domain->ops, state);
@@ -930,6 +928,8 @@ static int device_suspend_late(struct device *dev, pm_message_t state)
pm_callback_t callback = NULL;
char *info = NULL;
+ __pm_runtime_disable(dev, false);
+
if (dev->power.syscore)
return 0;
@@ -1133,11 +1133,8 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
Complete:
complete_all(&dev->power.completion);
-
if (error)
async_error = error;
- else if (dev->power.is_suspended)
- __pm_runtime_disable(dev, false);
return error;
}
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
index d9468642fc41..32ee0fc7ea54 100644
--- a/drivers/base/power/opp.c
+++ b/drivers/base/power/opp.c
@@ -23,6 +23,7 @@
#include <linux/rcupdate.h>
#include <linux/opp.h>
#include <linux/of.h>
+#include <linux/export.h>
/*
* Internal data structure organization with the OPP layer library is as
@@ -65,6 +66,7 @@ struct opp {
unsigned long u_volt;
struct device_opp *dev_opp;
+ struct rcu_head head;
};
/**
@@ -160,6 +162,7 @@ unsigned long opp_get_voltage(struct opp *opp)
return v;
}
+EXPORT_SYMBOL_GPL(opp_get_voltage);
/**
* opp_get_freq() - Gets the frequency corresponding to an available opp
@@ -189,6 +192,7 @@ unsigned long opp_get_freq(struct opp *opp)
return f;
}
+EXPORT_SYMBOL_GPL(opp_get_freq);
/**
* opp_get_opp_count() - Get number of opps available in the opp list
@@ -221,6 +225,7 @@ int opp_get_opp_count(struct device *dev)
return count;
}
+EXPORT_SYMBOL_GPL(opp_get_opp_count);
/**
* opp_find_freq_exact() - search for an exact frequency
@@ -230,7 +235,10 @@ int opp_get_opp_count(struct device *dev)
*
* Searches for exact match in the opp list and returns pointer to the matching
* opp if found, else returns ERR_PTR in case of error and should be handled
- * using IS_ERR.
+ * using IS_ERR. Error return values can be:
+ * EINVAL:	bad pointer parameters
+ * ERANGE:	no match found for the search
+ * ENODEV:	device not found in the list of registered devices
*
 * Note: available is a modifier for the search. If available=true, then the
* match is for exact matching frequency and is available in the stored OPP
@@ -249,7 +257,7 @@ struct opp *opp_find_freq_exact(struct device *dev, unsigned long freq,
bool available)
{
struct device_opp *dev_opp;
- struct opp *temp_opp, *opp = ERR_PTR(-ENODEV);
+ struct opp *temp_opp, *opp = ERR_PTR(-ERANGE);
dev_opp = find_device_opp(dev);
if (IS_ERR(dev_opp)) {
@@ -268,6 +276,7 @@ struct opp *opp_find_freq_exact(struct device *dev, unsigned long freq,
return opp;
}
+EXPORT_SYMBOL_GPL(opp_find_freq_exact);
/**
 * opp_find_freq_ceil() - Search for a rounded ceil freq
@@ -278,7 +287,11 @@ struct opp *opp_find_freq_exact(struct device *dev, unsigned long freq,
* for a device.
*
* Returns matching *opp and refreshes *freq accordingly, else returns
- * ERR_PTR in case of error and should be handled using IS_ERR.
+ * ERR_PTR in case of error and should be handled using IS_ERR. Error return
+ * values can be:
+ * EINVAL:	bad pointer parameters
+ * ERANGE:	no match found for the search
+ * ENODEV:	device not found in the list of registered devices
*
* Locking: This function must be called under rcu_read_lock(). opp is a rcu
* protected pointer. The reason for the same is that the opp pointer which is
@@ -289,7 +302,7 @@ struct opp *opp_find_freq_exact(struct device *dev, unsigned long freq,
struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq)
{
struct device_opp *dev_opp;
- struct opp *temp_opp, *opp = ERR_PTR(-ENODEV);
+ struct opp *temp_opp, *opp = ERR_PTR(-ERANGE);
if (!dev || !freq) {
dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
@@ -298,7 +311,7 @@ struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq)
dev_opp = find_device_opp(dev);
if (IS_ERR(dev_opp))
- return opp;
+ return ERR_CAST(dev_opp);
list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
if (temp_opp->available && temp_opp->rate >= *freq) {
@@ -310,6 +323,7 @@ struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq)
return opp;
}
+EXPORT_SYMBOL_GPL(opp_find_freq_ceil);
/**
* opp_find_freq_floor() - Search for a rounded floor freq
@@ -320,7 +334,11 @@ struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq)
* for a device.
*
* Returns matching *opp and refreshes *freq accordingly, else returns
- * ERR_PTR in case of error and should be handled using IS_ERR.
+ * ERR_PTR in case of error and should be handled using IS_ERR. Error return
+ * values can be:
+ * EINVAL:	bad pointer parameters
+ * ERANGE:	no match found for the search
+ * ENODEV:	device not found in the list of registered devices
*
* Locking: This function must be called under rcu_read_lock(). opp is a rcu
* protected pointer. The reason for the same is that the opp pointer which is
@@ -331,7 +349,7 @@ struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq)
struct opp *opp_find_freq_floor(struct device *dev, unsigned long *freq)
{
struct device_opp *dev_opp;
- struct opp *temp_opp, *opp = ERR_PTR(-ENODEV);
+ struct opp *temp_opp, *opp = ERR_PTR(-ERANGE);
if (!dev || !freq) {
dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
@@ -340,7 +358,7 @@ struct opp *opp_find_freq_floor(struct device *dev, unsigned long *freq)
dev_opp = find_device_opp(dev);
if (IS_ERR(dev_opp))
- return opp;
+ return ERR_CAST(dev_opp);
list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
if (temp_opp->available) {
@@ -356,6 +374,7 @@ struct opp *opp_find_freq_floor(struct device *dev, unsigned long *freq)
return opp;
}
+EXPORT_SYMBOL_GPL(opp_find_freq_floor);
/**
* opp_add() - Add an OPP table from a table definitions
@@ -512,7 +531,7 @@ static int opp_set_availability(struct device *dev, unsigned long freq,
list_replace_rcu(&opp->node, &new_opp->node);
mutex_unlock(&dev_opp_list_lock);
- synchronize_rcu();
+ kfree_rcu(opp, head);
/* Notify the change of the OPP availability */
if (availability_req)
@@ -522,13 +541,10 @@ static int opp_set_availability(struct device *dev, unsigned long freq,
srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_DISABLE,
new_opp);
- /* clean up old opp */
- new_opp = opp;
- goto out;
+ return 0;
unlock:
mutex_unlock(&dev_opp_list_lock);
-out:
kfree(new_opp);
return r;
}
@@ -552,6 +568,7 @@ int opp_enable(struct device *dev, unsigned long freq)
{
return opp_set_availability(dev, freq, true);
}
+EXPORT_SYMBOL_GPL(opp_enable);
/**
* opp_disable() - Disable a specific OPP
@@ -573,6 +590,7 @@ int opp_disable(struct device *dev, unsigned long freq)
{
return opp_set_availability(dev, freq, false);
}
+EXPORT_SYMBOL_GPL(opp_disable);
#ifdef CONFIG_CPU_FREQ
/**
@@ -643,6 +661,7 @@ int opp_init_cpufreq_table(struct device *dev,
return 0;
}
+EXPORT_SYMBOL_GPL(opp_init_cpufreq_table);
/**
* opp_free_cpufreq_table() - free the cpufreq table
@@ -660,6 +679,7 @@ void opp_free_cpufreq_table(struct device *dev,
kfree(*table);
*table = NULL;
}
+EXPORT_SYMBOL_GPL(opp_free_cpufreq_table);
#endif /* CONFIG_CPU_FREQ */
/**
@@ -720,4 +740,5 @@ int of_init_opp_table(struct device *dev)
return 0;
}
+EXPORT_SYMBOL_GPL(of_init_opp_table);
#endif
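With these exports, a loadable cpufreq or devfreq driver can use the OPP library directly; a sketch of the lookup pattern the kerneldoc above prescribes (names hypothetical):

static int foo_pick_rate(struct device *dev, unsigned long *freq)
{
	struct opp *opp;
	unsigned long volt;

	rcu_read_lock();
	opp = opp_find_freq_ceil(dev, freq);	/* rounds *freq up in place */
	if (IS_ERR(opp)) {
		rcu_read_unlock();
		return PTR_ERR(opp);	/* -EINVAL, -ERANGE or -ENODEV */
	}
	volt = opp_get_voltage(opp);
	rcu_read_unlock();

	/* program the regulator to volt and the clock to *freq here */
	return 0;
}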
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
index 0dbfdf4419af..cfc3226ec492 100644
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -4,7 +4,7 @@ static inline void device_pm_init_common(struct device *dev)
{
if (!dev->power.early_init) {
spin_lock_init(&dev->power.lock);
- dev->power.power_state = PMSG_INVALID;
+ dev->power.qos = NULL;
dev->power.early_init = true;
}
}
@@ -56,14 +56,10 @@ extern void device_pm_move_last(struct device *);
static inline void device_pm_sleep_init(struct device *dev) {}
-static inline void device_pm_add(struct device *dev)
-{
- dev_pm_qos_constraints_init(dev);
-}
+static inline void device_pm_add(struct device *dev) {}
static inline void device_pm_remove(struct device *dev)
{
- dev_pm_qos_constraints_destroy(dev);
pm_runtime_remove(dev);
}
@@ -93,8 +89,10 @@ extern void dpm_sysfs_remove(struct device *dev);
extern void rpm_sysfs_remove(struct device *dev);
extern int wakeup_sysfs_add(struct device *dev);
extern void wakeup_sysfs_remove(struct device *dev);
-extern int pm_qos_sysfs_add(struct device *dev);
-extern void pm_qos_sysfs_remove(struct device *dev);
+extern int pm_qos_sysfs_add_latency(struct device *dev);
+extern void pm_qos_sysfs_remove_latency(struct device *dev);
+extern int pm_qos_sysfs_add_flags(struct device *dev);
+extern void pm_qos_sysfs_remove_flags(struct device *dev);
#else /* CONFIG_PM */
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index 74a67e0019a2..5f74587ef258 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -40,6 +40,8 @@
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/export.h>
+#include <linux/pm_runtime.h>
+#include <linux/err.h>
#include "power.h"
@@ -48,6 +50,51 @@ static DEFINE_MUTEX(dev_pm_qos_mtx);
static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers);
/**
+ * __dev_pm_qos_flags - Check PM QoS flags for a given device.
+ * @dev: Device to check the PM QoS flags for.
+ * @mask: Flags to check against.
+ *
+ * This routine must be called with dev->power.lock held.
+ */
+enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask)
+{
+ struct dev_pm_qos *qos = dev->power.qos;
+ struct pm_qos_flags *pqf;
+ s32 val;
+
+ if (IS_ERR_OR_NULL(qos))
+ return PM_QOS_FLAGS_UNDEFINED;
+
+ pqf = &qos->flags;
+ if (list_empty(&pqf->list))
+ return PM_QOS_FLAGS_UNDEFINED;
+
+ val = pqf->effective_flags & mask;
+ if (val)
+ return (val == mask) ? PM_QOS_FLAGS_ALL : PM_QOS_FLAGS_SOME;
+
+ return PM_QOS_FLAGS_NONE;
+}
+
+/**
+ * dev_pm_qos_flags - Check PM QoS flags for a given device (locked).
+ * @dev: Device to check the PM QoS flags for.
+ * @mask: Flags to check against.
+ */
+enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask)
+{
+ unsigned long irqflags;
+ enum pm_qos_flags_status ret;
+
+ spin_lock_irqsave(&dev->power.lock, irqflags);
+ ret = __dev_pm_qos_flags(dev, mask);
+ spin_unlock_irqrestore(&dev->power.lock, irqflags);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_flags);
+
+/**
* __dev_pm_qos_read_value - Get PM QoS constraint for a given device.
* @dev: Device to get the PM QoS constraint value for.
*
@@ -55,9 +102,8 @@ static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers);
*/
s32 __dev_pm_qos_read_value(struct device *dev)
{
- struct pm_qos_constraints *c = dev->power.constraints;
-
- return c ? pm_qos_read_value(c) : 0;
+ return IS_ERR_OR_NULL(dev->power.qos) ?
+ 0 : pm_qos_read_value(&dev->power.qos->latency);
}
/**
@@ -76,30 +122,39 @@ s32 dev_pm_qos_read_value(struct device *dev)
return ret;
}
-/*
- * apply_constraint
- * @req: constraint request to apply
- * @action: action to perform add/update/remove, of type enum pm_qos_req_action
- * @value: defines the qos request
+/**
+ * apply_constraint - Add/modify/remove device PM QoS request.
+ * @req: Constraint request to apply
+ * @action: Action to perform (add/update/remove).
+ * @value: Value to assign to the QoS request.
*
* Internal function to update the constraints list using the PM QoS core
* code and if needed call the per-device and the global notification
* callbacks
*/
static int apply_constraint(struct dev_pm_qos_request *req,
- enum pm_qos_req_action action, int value)
+ enum pm_qos_req_action action, s32 value)
{
- int ret, curr_value;
-
- ret = pm_qos_update_target(req->dev->power.constraints,
- &req->node, action, value);
+ struct dev_pm_qos *qos = req->dev->power.qos;
+ int ret;
- if (ret) {
- /* Call the global callbacks if needed */
- curr_value = pm_qos_read_value(req->dev->power.constraints);
- blocking_notifier_call_chain(&dev_pm_notifiers,
- (unsigned long)curr_value,
- req);
+	switch (req->type) {
+ case DEV_PM_QOS_LATENCY:
+ ret = pm_qos_update_target(&qos->latency, &req->data.pnode,
+ action, value);
+ if (ret) {
+ value = pm_qos_read_value(&qos->latency);
+ blocking_notifier_call_chain(&dev_pm_notifiers,
+ (unsigned long)value,
+ req);
+ }
+ break;
+ case DEV_PM_QOS_FLAGS:
+ ret = pm_qos_update_flags(&qos->flags, &req->data.flr,
+ action, value);
+ break;
+ default:
+ ret = -EINVAL;
}
return ret;
@@ -114,47 +169,39 @@ static int apply_constraint(struct dev_pm_qos_request *req,
*/
static int dev_pm_qos_constraints_allocate(struct device *dev)
{
+ struct dev_pm_qos *qos;
struct pm_qos_constraints *c;
struct blocking_notifier_head *n;
- c = kzalloc(sizeof(*c), GFP_KERNEL);
- if (!c)
+ qos = kzalloc(sizeof(*qos), GFP_KERNEL);
+ if (!qos)
return -ENOMEM;
n = kzalloc(sizeof(*n), GFP_KERNEL);
if (!n) {
- kfree(c);
+ kfree(qos);
return -ENOMEM;
}
BLOCKING_INIT_NOTIFIER_HEAD(n);
+ c = &qos->latency;
plist_head_init(&c->list);
c->target_value = PM_QOS_DEV_LAT_DEFAULT_VALUE;
c->default_value = PM_QOS_DEV_LAT_DEFAULT_VALUE;
c->type = PM_QOS_MIN;
c->notifiers = n;
+ INIT_LIST_HEAD(&qos->flags.list);
+
spin_lock_irq(&dev->power.lock);
- dev->power.constraints = c;
+ dev->power.qos = qos;
spin_unlock_irq(&dev->power.lock);
return 0;
}
-/**
- * dev_pm_qos_constraints_init - Initalize device's PM QoS constraints pointer.
- * @dev: target device
- *
- * Called from the device PM subsystem during device insertion under
- * device_pm_lock().
- */
-void dev_pm_qos_constraints_init(struct device *dev)
-{
- mutex_lock(&dev_pm_qos_mtx);
- dev->power.constraints = NULL;
- dev->power.power_state = PMSG_ON;
- mutex_unlock(&dev_pm_qos_mtx);
-}
+static void __dev_pm_qos_hide_latency_limit(struct device *dev);
+static void __dev_pm_qos_hide_flags(struct device *dev);
/**
* dev_pm_qos_constraints_destroy
@@ -164,24 +211,27 @@ void dev_pm_qos_constraints_init(struct device *dev)
*/
void dev_pm_qos_constraints_destroy(struct device *dev)
{
+ struct dev_pm_qos *qos;
struct dev_pm_qos_request *req, *tmp;
struct pm_qos_constraints *c;
+ struct pm_qos_flags *f;
+
+ mutex_lock(&dev_pm_qos_mtx);
/*
- * If the device's PM QoS resume latency limit has been exposed to user
- * space, it has to be hidden at this point.
+ * If the device's PM QoS resume latency limit or PM QoS flags have been
+ * exposed to user space, they have to be hidden at this point.
*/
- dev_pm_qos_hide_latency_limit(dev);
-
- mutex_lock(&dev_pm_qos_mtx);
+ __dev_pm_qos_hide_latency_limit(dev);
+ __dev_pm_qos_hide_flags(dev);
- dev->power.power_state = PMSG_INVALID;
- c = dev->power.constraints;
- if (!c)
+ qos = dev->power.qos;
+ if (!qos)
goto out;
- /* Flush the constraints list for the device */
- plist_for_each_entry_safe(req, tmp, &c->list, node) {
+ /* Flush the constraints lists for the device. */
+ c = &qos->latency;
+ plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
/*
* Update constraints list and call the notification
* callbacks if needed
@@ -189,13 +239,18 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
memset(req, 0, sizeof(*req));
}
+ f = &qos->flags;
+ list_for_each_entry_safe(req, tmp, &f->list, data.flr.node) {
+ apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
+ memset(req, 0, sizeof(*req));
+ }
spin_lock_irq(&dev->power.lock);
- dev->power.constraints = NULL;
+ dev->power.qos = ERR_PTR(-ENODEV);
spin_unlock_irq(&dev->power.lock);
kfree(c->notifiers);
- kfree(c);
+ kfree(qos);
out:
mutex_unlock(&dev_pm_qos_mtx);
@@ -205,6 +260,7 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
* dev_pm_qos_add_request - inserts new qos request into the list
* @dev: target device for the constraint
* @req: pointer to a preallocated handle
+ * @type: type of the request
* @value: defines the qos request
*
* This function inserts a new entry in the device constraints list of
@@ -218,9 +274,12 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
* -EINVAL in case of wrong parameters, -ENOMEM if there's not enough memory
* to allocate for data structures, -ENODEV if the device has just been removed
* from the system.
+ *
+ * Callers should ensure that the target device is not RPM_SUSPENDED before
+ * using this function for requests of type DEV_PM_QOS_FLAGS.
*/
int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
- s32 value)
+ enum dev_pm_qos_req_type type, s32 value)
{
int ret = 0;
@@ -231,30 +290,19 @@ int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
"%s() called for already added request\n", __func__))
return -EINVAL;
- req->dev = dev;
-
mutex_lock(&dev_pm_qos_mtx);
- if (!dev->power.constraints) {
- if (dev->power.power_state.event == PM_EVENT_INVALID) {
- /* The device has been removed from the system. */
- req->dev = NULL;
- ret = -ENODEV;
- goto out;
- } else {
- /*
- * Allocate the constraints data on the first call to
- * add_request, i.e. only if the data is not already
- * allocated and if the device has not been removed.
- */
- ret = dev_pm_qos_constraints_allocate(dev);
- }
- }
+ if (IS_ERR(dev->power.qos))
+ ret = -ENODEV;
+ else if (!dev->power.qos)
+ ret = dev_pm_qos_constraints_allocate(dev);
- if (!ret)
+ if (!ret) {
+ req->dev = dev;
+ req->type = type;
ret = apply_constraint(req, PM_QOS_ADD_REQ, value);
+ }
- out:
mutex_unlock(&dev_pm_qos_mtx);
return ret;
@@ -262,6 +310,44 @@ int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
EXPORT_SYMBOL_GPL(dev_pm_qos_add_request);
/**
+ * __dev_pm_qos_update_request - Modify an existing device PM QoS request.
+ * @req : PM QoS request to modify.
+ * @new_value: New value to request.
+ */
+static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req,
+ s32 new_value)
+{
+ s32 curr_value;
+ int ret = 0;
+
+	if (!req) /* guard against callers passing in null */
+ return -EINVAL;
+
+ if (WARN(!dev_pm_qos_request_active(req),
+ "%s() called for unknown object\n", __func__))
+ return -EINVAL;
+
+ if (IS_ERR_OR_NULL(req->dev->power.qos))
+ return -ENODEV;
+
+	switch (req->type) {
+ case DEV_PM_QOS_LATENCY:
+ curr_value = req->data.pnode.prio;
+ break;
+ case DEV_PM_QOS_FLAGS:
+ curr_value = req->data.flr.flags;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (curr_value != new_value)
+ ret = apply_constraint(req, PM_QOS_UPDATE_REQ, new_value);
+
+ return ret;
+}
+
+/**
* dev_pm_qos_update_request - modifies an existing qos request
* @req : handle to list element holding a dev_pm_qos request to use
* @new_value: defines the qos request
@@ -275,11 +361,24 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_add_request);
* 0 if the aggregated constraint value has not changed,
* -EINVAL in case of wrong parameters, -ENODEV if the device has been
* removed from the system
+ *
+ * Callers should ensure that the target device is not RPM_SUSPENDED before
+ * using this function for requests of type DEV_PM_QOS_FLAGS.
*/
-int dev_pm_qos_update_request(struct dev_pm_qos_request *req,
- s32 new_value)
+int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value)
{
- int ret = 0;
+ int ret;
+
+ mutex_lock(&dev_pm_qos_mtx);
+ ret = __dev_pm_qos_update_request(req, new_value);
+ mutex_unlock(&dev_pm_qos_mtx);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);
+
+static int __dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
+{
+ int ret;
if (!req) /*guard against callers passing in null */
return -EINVAL;
@@ -288,21 +387,13 @@ int dev_pm_qos_update_request(struct dev_pm_qos_request *req,
"%s() called for unknown object\n", __func__))
return -EINVAL;
- mutex_lock(&dev_pm_qos_mtx);
-
- if (req->dev->power.constraints) {
- if (new_value != req->node.prio)
- ret = apply_constraint(req, PM_QOS_UPDATE_REQ,
- new_value);
- } else {
- /* Return if the device has been removed */
- ret = -ENODEV;
- }
+ if (IS_ERR_OR_NULL(req->dev->power.qos))
+ return -ENODEV;
- mutex_unlock(&dev_pm_qos_mtx);
+ ret = apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
+ memset(req, 0, sizeof(*req));
return ret;
}
-EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);
/**
* dev_pm_qos_remove_request - modifies an existing qos request
@@ -315,29 +406,16 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);
* 0 if the aggregated constraint value has not changed,
* -EINVAL in case of wrong parameters, -ENODEV if the device has been
* removed from the system
+ *
+ * Callers should ensure that the target device is not RPM_SUSPENDED before
+ * using this function for requests of type DEV_PM_QOS_FLAGS.
*/
int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
{
- int ret = 0;
-
- if (!req) /*guard against callers passing in null */
- return -EINVAL;
-
- if (WARN(!dev_pm_qos_request_active(req),
- "%s() called for unknown object\n", __func__))
- return -EINVAL;
+ int ret;
mutex_lock(&dev_pm_qos_mtx);
-
- if (req->dev->power.constraints) {
- ret = apply_constraint(req, PM_QOS_REMOVE_REQ,
- PM_QOS_DEFAULT_VALUE);
- memset(req, 0, sizeof(*req));
- } else {
- /* Return if the device has been removed */
- ret = -ENODEV;
- }
-
+ ret = __dev_pm_qos_remove_request(req);
mutex_unlock(&dev_pm_qos_mtx);
return ret;
}
@@ -362,13 +440,14 @@ int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier)
mutex_lock(&dev_pm_qos_mtx);
- if (!dev->power.constraints)
- ret = dev->power.power_state.event != PM_EVENT_INVALID ?
- dev_pm_qos_constraints_allocate(dev) : -ENODEV;
+ if (IS_ERR(dev->power.qos))
+ ret = -ENODEV;
+ else if (!dev->power.qos)
+ ret = dev_pm_qos_constraints_allocate(dev);
if (!ret)
ret = blocking_notifier_chain_register(
- dev->power.constraints->notifiers, notifier);
+ dev->power.qos->latency.notifiers, notifier);
mutex_unlock(&dev_pm_qos_mtx);
return ret;
@@ -393,9 +472,9 @@ int dev_pm_qos_remove_notifier(struct device *dev,
mutex_lock(&dev_pm_qos_mtx);
/* Silently return if the constraints object is not present. */
- if (dev->power.constraints)
+ if (!IS_ERR_OR_NULL(dev->power.qos))
retval = blocking_notifier_chain_unregister(
- dev->power.constraints->notifiers,
+ dev->power.qos->latency.notifiers,
notifier);
mutex_unlock(&dev_pm_qos_mtx);
@@ -443,26 +522,40 @@ int dev_pm_qos_add_ancestor_request(struct device *dev,
struct dev_pm_qos_request *req, s32 value)
{
struct device *ancestor = dev->parent;
- int error = -ENODEV;
+ int ret = -ENODEV;
while (ancestor && !ancestor->power.ignore_children)
ancestor = ancestor->parent;
if (ancestor)
- error = dev_pm_qos_add_request(ancestor, req, value);
+ ret = dev_pm_qos_add_request(ancestor, req,
+ DEV_PM_QOS_LATENCY, value);
- if (error)
+ if (ret < 0)
req->dev = NULL;
- return error;
+ return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request);
#ifdef CONFIG_PM_RUNTIME
-static void __dev_pm_qos_drop_user_request(struct device *dev)
+static void __dev_pm_qos_drop_user_request(struct device *dev,
+ enum dev_pm_qos_req_type type)
{
- dev_pm_qos_remove_request(dev->power.pq_req);
- dev->power.pq_req = NULL;
+ struct dev_pm_qos_request *req = NULL;
+
+	switch (type) {
+ case DEV_PM_QOS_LATENCY:
+ req = dev->power.qos->latency_req;
+ dev->power.qos->latency_req = NULL;
+ break;
+ case DEV_PM_QOS_FLAGS:
+ req = dev->power.qos->flags_req;
+ dev->power.qos->flags_req = NULL;
+ break;
+ }
+ __dev_pm_qos_remove_request(req);
+ kfree(req);
}
/**
@@ -478,36 +571,164 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
if (!device_is_registered(dev) || value < 0)
return -EINVAL;
- if (dev->power.pq_req)
- return -EEXIST;
-
req = kzalloc(sizeof(*req), GFP_KERNEL);
if (!req)
return -ENOMEM;
- ret = dev_pm_qos_add_request(dev, req, value);
- if (ret < 0)
+ ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY, value);
+ if (ret < 0) {
+ kfree(req);
return ret;
+ }
+
+ mutex_lock(&dev_pm_qos_mtx);
+
+ if (IS_ERR_OR_NULL(dev->power.qos))
+ ret = -ENODEV;
+ else if (dev->power.qos->latency_req)
+ ret = -EEXIST;
+
+ if (ret < 0) {
+ __dev_pm_qos_remove_request(req);
+ kfree(req);
+ goto out;
+ }
- dev->power.pq_req = req;
- ret = pm_qos_sysfs_add(dev);
+ dev->power.qos->latency_req = req;
+ ret = pm_qos_sysfs_add_latency(dev);
if (ret)
- __dev_pm_qos_drop_user_request(dev);
+ __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
+ out:
+ mutex_unlock(&dev_pm_qos_mtx);
return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit);
+static void __dev_pm_qos_hide_latency_limit(struct device *dev)
+{
+ if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->latency_req) {
+ pm_qos_sysfs_remove_latency(dev);
+ __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
+ }
+}
+
/**
* dev_pm_qos_hide_latency_limit - Hide PM QoS latency limit from user space.
* @dev: Device whose PM QoS latency limit is to be hidden from user space.
*/
void dev_pm_qos_hide_latency_limit(struct device *dev)
{
- if (dev->power.pq_req) {
- pm_qos_sysfs_remove(dev);
- __dev_pm_qos_drop_user_request(dev);
- }
+ mutex_lock(&dev_pm_qos_mtx);
+ __dev_pm_qos_hide_latency_limit(dev);
+ mutex_unlock(&dev_pm_qos_mtx);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit);
+
+/**
+ * dev_pm_qos_expose_flags - Expose PM QoS flags of a device to user space.
+ * @dev: Device whose PM QoS flags are to be exposed to user space.
+ * @val: Initial values of the flags.
+ */
+int dev_pm_qos_expose_flags(struct device *dev, s32 val)
+{
+ struct dev_pm_qos_request *req;
+ int ret;
+
+ if (!device_is_registered(dev))
+ return -EINVAL;
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_FLAGS, val);
+ if (ret < 0) {
+ kfree(req);
+ return ret;
+ }
+
+ pm_runtime_get_sync(dev);
+ mutex_lock(&dev_pm_qos_mtx);
+
+ if (IS_ERR_OR_NULL(dev->power.qos))
+ ret = -ENODEV;
+ else if (dev->power.qos->flags_req)
+ ret = -EEXIST;
+
+ if (ret < 0) {
+ __dev_pm_qos_remove_request(req);
+ kfree(req);
+ goto out;
+ }
+
+ dev->power.qos->flags_req = req;
+ ret = pm_qos_sysfs_add_flags(dev);
+ if (ret)
+ __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
+
+ out:
+ mutex_unlock(&dev_pm_qos_mtx);
+ pm_runtime_put(dev);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_expose_flags);
+
+static void __dev_pm_qos_hide_flags(struct device *dev)
+{
+ if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->flags_req) {
+ pm_qos_sysfs_remove_flags(dev);
+ __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
+ }
+}
+
+/**
+ * dev_pm_qos_hide_flags - Hide PM QoS flags of a device from user space.
+ * @dev: Device whose PM QoS flags are to be hidden from user space.
+ */
+void dev_pm_qos_hide_flags(struct device *dev)
+{
+ pm_runtime_get_sync(dev);
+ mutex_lock(&dev_pm_qos_mtx);
+ __dev_pm_qos_hide_flags(dev);
+ mutex_unlock(&dev_pm_qos_mtx);
+ pm_runtime_put(dev);
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_hide_flags);
+
+/**
+ * dev_pm_qos_update_flags - Update PM QoS flags request owned by user space.
+ * @dev: Device to update the PM QoS flags request for.
+ * @mask: Flags to set/clear.
+ * @set: Whether to set or clear the flags (true means set).
+ */
+int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set)
+{
+ s32 value;
+ int ret;
+
+ pm_runtime_get_sync(dev);
+ mutex_lock(&dev_pm_qos_mtx);
+
+ if (IS_ERR_OR_NULL(dev->power.qos) || !dev->power.qos->flags_req) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ value = dev_pm_qos_requested_flags(dev);
+ if (set)
+ value |= mask;
+ else
+ value &= ~mask;
+
+ ret = __dev_pm_qos_update_request(dev->power.qos->flags_req, value);
+
+ out:
+ mutex_unlock(&dev_pm_qos_mtx);
+ pm_runtime_put(dev);
+ return ret;
+}
+#else /* !CONFIG_PM_RUNTIME */
+static void __dev_pm_qos_hide_latency_limit(struct device *dev) {}
+static void __dev_pm_qos_hide_flags(struct device *dev) {}
#endif /* CONFIG_PM_RUNTIME */
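Putting the new user-space interface together, a subsystem would typically expose the flags when a device is registered and hide them (explicitly, or via dev_pm_qos_constraints_destroy()) at removal; a sketch with hypothetical call sites:

static void foo_register_dev(struct device *dev)
{
	/* Creates pm_qos_no_power_off / pm_qos_remote_wakeup in sysfs. */
	if (dev_pm_qos_expose_flags(dev, 0))
		dev_dbg(dev, "failed to expose PM QoS flags\n");
}

static void foo_unregister_dev(struct device *dev)
{
	dev_pm_qos_hide_flags(dev);
}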
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 3148b10dc2e5..1244930e3d7a 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -124,6 +124,76 @@ unsigned long pm_runtime_autosuspend_expiration(struct device *dev)
}
EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
+static int dev_memalloc_noio(struct device *dev, void *data)
+{
+ return dev->power.memalloc_noio;
+}
+
+/**
+ * pm_runtime_set_memalloc_noio - Set a device's memalloc_noio flag.
+ * @dev: Device to handle.
+ * @enable: True for setting the flag and false for clearing it.
+ *
+ * Set the flag for all devices in the path from the device to the
+ * root device in the device tree if @enable is true, otherwise clear
+ * the flag for devices in the path whose siblings don't set the flag.
+ *
+ * The function should only be called by block device or network
+ * device drivers to solve the deadlock problem during runtime
+ * resume/suspend:
+ *
+ * If memory allocation with GFP_KERNEL is called inside the runtime
+ * resume/suspend callback of any one of the device's ancestors (or
+ * the block device itself), a deadlock may be triggered inside the
+ * memory allocation, since it might not complete until the block
+ * device becomes active and the involved page I/O finishes. This
+ * situation was first pointed out by Alan Stern. Network devices
+ * are involved in iSCSI kinds of situations.
+ *
+ * dev_hotplug_mutex is held in the function to handle the hotplug
+ * race, because pm_runtime_set_memalloc_noio() may be called from
+ * an async probe().
+ *
+ * The function should be called between device_add() and device_del()
+ * on the affected device (block/network device).
+ */
+void pm_runtime_set_memalloc_noio(struct device *dev, bool enable)
+{
+ static DEFINE_MUTEX(dev_hotplug_mutex);
+
+ mutex_lock(&dev_hotplug_mutex);
+ for (;;) {
+ bool enabled;
+
+ /* hold power lock since bitfield is not SMP-safe. */
+ spin_lock_irq(&dev->power.lock);
+ enabled = dev->power.memalloc_noio;
+ dev->power.memalloc_noio = enable;
+ spin_unlock_irq(&dev->power.lock);
+
+ /*
+ * No need to enable ancestors any more if the device
+ * was already enabled.
+ */
+ if (enabled && enable)
+ break;
+
+ dev = dev->parent;
+
+ /*
+ * Clear the parent device's flag only if none of its
+ * children set the flag, because an ancestor's flag
+ * is set by any one of its descendants.
+ */
+ if (!dev || (!enable &&
+ device_for_each_child(dev, NULL,
+ dev_memalloc_noio)))
+ break;
+ }
+ mutex_unlock(&dev_hotplug_mutex);
+}
+EXPORT_SYMBOL_GPL(pm_runtime_set_memalloc_noio);
+
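Following the rule in the kerneldoc above, a block driver would bracket the flag between device_add() and device_del(); a sketch:

static int foo_blk_register(struct device *dev)
{
	int ret = device_add(dev);

	if (ret)
		return ret;
	pm_runtime_set_memalloc_noio(dev, true);	/* after device_add() */
	return 0;
}

static void foo_blk_unregister(struct device *dev)
{
	pm_runtime_set_memalloc_noio(dev, false);	/* before device_del() */
	device_del(dev);
}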
/**
* rpm_check_suspend_allowed - Test whether a device may be suspended.
* @dev: Device to test.
@@ -278,7 +348,24 @@ static int rpm_callback(int (*cb)(struct device *), struct device *dev)
if (!cb)
return -ENOSYS;
- retval = __rpm_callback(cb, dev);
+ if (dev->power.memalloc_noio) {
+ unsigned int noio_flag;
+
+ /*
+ * A deadlock might occur if memory allocation with
+ * GFP_KERNEL happens inside the runtime_suspend or
+ * runtime_resume callback of a block device's
+ * ancestor or of the block device itself. A network
+ * device may be thought of as part of an iSCSI block
+ * device, so network devices and their ancestors
+ * should be marked as memalloc_noio too.
+ */
+ noio_flag = memalloc_noio_save();
+ retval = __rpm_callback(cb, dev);
+ memalloc_noio_restore(noio_flag);
+ } else {
+ retval = __rpm_callback(cb, dev);
+ }
dev->power.runtime_error = retval;
return retval != -EACCES ? retval : -EIO;
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index b91dc6f1e914..a53ebd265701 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -221,7 +221,7 @@ static DEVICE_ATTR(autosuspend_delay_ms, 0644, autosuspend_delay_ms_show,
static ssize_t pm_qos_latency_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "%d\n", dev->power.pq_req->node.prio);
+ return sprintf(buf, "%d\n", dev_pm_qos_requested_latency(dev));
}
static ssize_t pm_qos_latency_store(struct device *dev,
@@ -237,12 +237,66 @@ static ssize_t pm_qos_latency_store(struct device *dev,
if (value < 0)
return -EINVAL;
- ret = dev_pm_qos_update_request(dev->power.pq_req, value);
+ ret = dev_pm_qos_update_request(dev->power.qos->latency_req, value);
return ret < 0 ? ret : n;
}
static DEVICE_ATTR(pm_qos_resume_latency_us, 0644,
pm_qos_latency_show, pm_qos_latency_store);
+
+static ssize_t pm_qos_no_power_off_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%d\n", !!(dev_pm_qos_requested_flags(dev)
+ & PM_QOS_FLAG_NO_POWER_OFF));
+}
+
+static ssize_t pm_qos_no_power_off_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t n)
+{
+ int ret;
+
+ if (kstrtoint(buf, 0, &ret))
+ return -EINVAL;
+
+ if (ret != 0 && ret != 1)
+ return -EINVAL;
+
+ ret = dev_pm_qos_update_flags(dev, PM_QOS_FLAG_NO_POWER_OFF, ret);
+ return ret < 0 ? ret : n;
+}
+
+static DEVICE_ATTR(pm_qos_no_power_off, 0644,
+ pm_qos_no_power_off_show, pm_qos_no_power_off_store);
+
+static ssize_t pm_qos_remote_wakeup_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%d\n", !!(dev_pm_qos_requested_flags(dev)
+ & PM_QOS_FLAG_REMOTE_WAKEUP));
+}
+
+static ssize_t pm_qos_remote_wakeup_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t n)
+{
+ int ret;
+
+ if (kstrtoint(buf, 0, &ret))
+ return -EINVAL;
+
+ if (ret != 0 && ret != 1)
+ return -EINVAL;
+
+ ret = dev_pm_qos_update_flags(dev, PM_QOS_FLAG_REMOTE_WAKEUP, ret);
+ return ret < 0 ? ret : n;
+}
+
+static DEVICE_ATTR(pm_qos_remote_wakeup, 0644,
+ pm_qos_remote_wakeup_show, pm_qos_remote_wakeup_store);
#endif /* CONFIG_PM_RUNTIME */
#ifdef CONFIG_PM_SLEEP
@@ -564,15 +618,27 @@ static struct attribute_group pm_runtime_attr_group = {
.attrs = runtime_attrs,
};
-static struct attribute *pm_qos_attrs[] = {
+static struct attribute *pm_qos_latency_attrs[] = {
#ifdef CONFIG_PM_RUNTIME
&dev_attr_pm_qos_resume_latency_us.attr,
#endif /* CONFIG_PM_RUNTIME */
NULL,
};
-static struct attribute_group pm_qos_attr_group = {
+static struct attribute_group pm_qos_latency_attr_group = {
.name = power_group_name,
- .attrs = pm_qos_attrs,
+ .attrs = pm_qos_latency_attrs,
+};
+
+static struct attribute *pm_qos_flags_attrs[] = {
+#ifdef CONFIG_PM_RUNTIME
+ &dev_attr_pm_qos_no_power_off.attr,
+ &dev_attr_pm_qos_remote_wakeup.attr,
+#endif /* CONFIG_PM_RUNTIME */
+ NULL,
+};
+static struct attribute_group pm_qos_flags_attr_group = {
+ .name = power_group_name,
+ .attrs = pm_qos_flags_attrs,
};
int dpm_sysfs_add(struct device *dev)
@@ -615,14 +681,24 @@ void wakeup_sysfs_remove(struct device *dev)
sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
}
-int pm_qos_sysfs_add(struct device *dev)
+int pm_qos_sysfs_add_latency(struct device *dev)
+{
+ return sysfs_merge_group(&dev->kobj, &pm_qos_latency_attr_group);
+}
+
+void pm_qos_sysfs_remove_latency(struct device *dev)
+{
+ sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_attr_group);
+}
+
+int pm_qos_sysfs_add_flags(struct device *dev)
{
- return sysfs_merge_group(&dev->kobj, &pm_qos_attr_group);
+ return sysfs_merge_group(&dev->kobj, &pm_qos_flags_attr_group);
}
-void pm_qos_sysfs_remove(struct device *dev)
+void pm_qos_sysfs_remove_flags(struct device *dev)
{
- sysfs_unmerge_group(&dev->kobj, &pm_qos_attr_group);
+ sysfs_unmerge_group(&dev->kobj, &pm_qos_flags_attr_group);
}
void rpm_sysfs_remove(struct device *dev)
@@ -632,6 +708,7 @@ void rpm_sysfs_remove(struct device *dev)
void dpm_sysfs_remove(struct device *dev)
{
+ dev_pm_qos_constraints_destroy(dev);
rpm_sysfs_remove(dev);
sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
sysfs_remove_group(&dev->kobj, &pm_attr_group);
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index e6ee5e80e546..79715e7fa43e 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -382,6 +382,12 @@ static void wakeup_source_activate(struct wakeup_source *ws)
{
unsigned int cec;
+ /*
+ * An active wakeup source should bring the system
+ * out of the PM_SUSPEND_FREEZE state.
+ */
+ freeze_wake();
+
ws->active = true;
ws->active_count++;
ws->last_time = ktime_get();
diff --git a/drivers/base/regmap/Makefile b/drivers/base/regmap/Makefile
index 5e75d1b683e2..cf129980abd0 100644
--- a/drivers/base/regmap/Makefile
+++ b/drivers/base/regmap/Makefile
@@ -1,5 +1,5 @@
obj-$(CONFIG_REGMAP) += regmap.o regcache.o
-obj-$(CONFIG_REGMAP) += regcache-rbtree.o regcache-lzo.o
+obj-$(CONFIG_REGMAP) += regcache-rbtree.o regcache-lzo.o regcache-flat.o
obj-$(CONFIG_DEBUG_FS) += regmap-debugfs.o
obj-$(CONFIG_REGMAP_I2C) += regmap-i2c.o
obj-$(CONFIG_REGMAP_SPI) += regmap-spi.o
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index 80f9ab9c3aa4..5a22bd33ce3d 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -15,10 +15,20 @@
#include <linux/regmap.h>
#include <linux/fs.h>
+#include <linux/list.h>
+#include <linux/wait.h>
struct regmap;
struct regcache_ops;
+struct regmap_debugfs_off_cache {
+ struct list_head list;
+ off_t min;
+ off_t max;
+ unsigned int base_reg;
+ unsigned int max_reg;
+};
+
struct regmap_format {
size_t buf_size;
size_t reg_bytes;
@@ -31,14 +41,19 @@ struct regmap_format {
unsigned int (*parse_val)(void *buf);
};
-typedef void (*regmap_lock)(struct regmap *map);
-typedef void (*regmap_unlock)(struct regmap *map);
+struct regmap_async {
+ struct list_head list;
+ struct work_struct cleanup;
+ struct regmap *map;
+ void *work_buf;
+};
struct regmap {
struct mutex mutex;
spinlock_t spinlock;
regmap_lock lock;
regmap_unlock unlock;
+ void *lock_arg; /* This is passed to lock/unlock functions */
struct device *dev; /* Device we do I/O on */
void *work_buf; /* Scratch buffer used to format I/O */
@@ -47,9 +62,20 @@ struct regmap {
void *bus_context;
const char *name;
+ spinlock_t async_lock;
+ wait_queue_head_t async_waitq;
+ struct list_head async_list;
+ int async_ret;
+
#ifdef CONFIG_DEBUG_FS
struct dentry *debugfs;
const char *debugfs_name;
+
+ unsigned int debugfs_reg_len;
+ unsigned int debugfs_val_len;
+ unsigned int debugfs_tot_len;
+
+ struct list_head debugfs_off_cache;
#endif
unsigned int max_register;
@@ -57,6 +83,15 @@ struct regmap {
bool (*readable_reg)(struct device *dev, unsigned int reg);
bool (*volatile_reg)(struct device *dev, unsigned int reg);
bool (*precious_reg)(struct device *dev, unsigned int reg);
+ const struct regmap_access_table *wr_table;
+ const struct regmap_access_table *rd_table;
+ const struct regmap_access_table *volatile_table;
+ const struct regmap_access_table *precious_table;
+
+ int (*reg_read)(void *context, unsigned int reg, unsigned int *val);
+ int (*reg_write)(void *context, unsigned int reg, unsigned int val);
+
+ bool defer_caching;
u8 read_flag_mask;
u8 write_flag_mask;
@@ -120,6 +155,8 @@ int _regmap_write(struct regmap *map, unsigned int reg,
struct regmap_range_node {
struct rb_node node;
+ const char *name;
+ struct regmap *map;
unsigned int range_min;
unsigned int range_max;
@@ -157,7 +194,10 @@ bool regcache_set_val(void *base, unsigned int idx,
unsigned int val, unsigned int word_size);
int regcache_lookup_reg(struct regmap *map, unsigned int reg);
+void regmap_async_complete_cb(struct regmap_async *async, int ret);
+
extern struct regcache_ops regcache_rbtree_ops;
extern struct regcache_ops regcache_lzo_ops;
+extern struct regcache_ops regcache_flat_ops;
#endif
diff --git a/drivers/base/regmap/regcache-flat.c b/drivers/base/regmap/regcache-flat.c
new file mode 100644
index 000000000000..d9762e41959b
--- /dev/null
+++ b/drivers/base/regmap/regcache-flat.c
@@ -0,0 +1,72 @@
+/*
+ * Register cache access API - flat caching support
+ *
+ * Copyright 2012 Wolfson Microelectronics plc
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/seq_file.h>
+
+#include "internal.h"
+
+static int regcache_flat_init(struct regmap *map)
+{
+ int i;
+ unsigned int *cache;
+
+ map->cache = kzalloc(sizeof(unsigned int) * (map->max_register + 1),
+ GFP_KERNEL);
+ if (!map->cache)
+ return -ENOMEM;
+
+ cache = map->cache;
+
+ for (i = 0; i < map->num_reg_defaults; i++)
+ cache[map->reg_defaults[i].reg] = map->reg_defaults[i].def;
+
+ return 0;
+}
+
+static int regcache_flat_exit(struct regmap *map)
+{
+ kfree(map->cache);
+ map->cache = NULL;
+
+ return 0;
+}
+
+static int regcache_flat_read(struct regmap *map,
+ unsigned int reg, unsigned int *value)
+{
+ unsigned int *cache = map->cache;
+
+ *value = cache[reg];
+
+ return 0;
+}
+
+static int regcache_flat_write(struct regmap *map, unsigned int reg,
+ unsigned int value)
+{
+ unsigned int *cache = map->cache;
+
+ cache[reg] = value;
+
+ return 0;
+}
+
+struct regcache_ops regcache_flat_ops = {
+ .type = REGCACHE_FLAT,
+ .name = "flat",
+ .init = regcache_flat_init,
+ .exit = regcache_flat_exit,
+ .read = regcache_flat_read,
+ .write = regcache_flat_write,
+};
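Opting in is a single field in the map configuration; a sketch, with the register layout invented:

static const struct regmap_config foo_regmap_config = {
	.reg_bits = 8,
	.val_bits = 8,
	.max_register = 0x3f,	/* flat cache allocates max_register + 1 words */
	.cache_type = REGCACHE_FLAT,
};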
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index 835883bda977..e69ff3e4742c 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -22,6 +22,7 @@
static const struct regcache_ops *cache_types[] = {
&regcache_rbtree_ops,
&regcache_lzo_ops,
+ &regcache_flat_ops,
};
static int regcache_hw_init(struct regmap *map)
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index bb1ff175b962..81d6f605c92e 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -56,17 +56,128 @@ static const struct file_operations regmap_name_fops = {
.llseek = default_llseek,
};
-static ssize_t regmap_map_read_file(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
+static void regmap_debugfs_free_dump_cache(struct regmap *map)
{
- int reg_len, val_len, tot_len;
- size_t buf_pos = 0;
+ struct regmap_debugfs_off_cache *c;
+
+ while (!list_empty(&map->debugfs_off_cache)) {
+ c = list_first_entry(&map->debugfs_off_cache,
+ struct regmap_debugfs_off_cache,
+ list);
+ list_del(&c->list);
+ kfree(c);
+ }
+}
+
+/*
+ * Work out where the start offset maps into register numbers, bearing
+ * in mind that we suppress hidden registers.
+ */
+static unsigned int regmap_debugfs_get_dump_start(struct regmap *map,
+ unsigned int base,
+ loff_t from,
+ loff_t *pos)
+{
+ struct regmap_debugfs_off_cache *c = NULL;
loff_t p = 0;
+ unsigned int i, ret;
+ unsigned int fpos_offset;
+ unsigned int reg_offset;
+
+ /*
+	 * If we don't have a cache, build one so we don't have to do a
+ * linear scan each time.
+ */
+ if (list_empty(&map->debugfs_off_cache)) {
+ for (i = base; i <= map->max_register; i += map->reg_stride) {
+ /* Skip unprinted registers, closing off cache entry */
+ if (!regmap_readable(map, i) ||
+ regmap_precious(map, i)) {
+ if (c) {
+ c->max = p - 1;
+ fpos_offset = c->max - c->min;
+ reg_offset = fpos_offset / map->debugfs_tot_len;
+ c->max_reg = c->base_reg + reg_offset;
+ list_add_tail(&c->list,
+ &map->debugfs_off_cache);
+ c = NULL;
+ }
+
+ continue;
+ }
+
+ /* No cache entry? Start a new one */
+ if (!c) {
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c) {
+ regmap_debugfs_free_dump_cache(map);
+ return base;
+ }
+ c->min = p;
+ c->base_reg = i;
+ }
+
+ p += map->debugfs_tot_len;
+ }
+ }
+
+ /* Close the last entry off if we didn't scan beyond it */
+ if (c) {
+ c->max = p - 1;
+ fpos_offset = c->max - c->min;
+ reg_offset = fpos_offset / map->debugfs_tot_len;
+ c->max_reg = c->base_reg + reg_offset;
+ list_add_tail(&c->list,
+ &map->debugfs_off_cache);
+ }
+
+ /*
+ * This should never happen; we return above if we fail to
+ * allocate and we should never be in this code if there are
+ * no registers at all.
+ */
+ WARN_ON(list_empty(&map->debugfs_off_cache));
+ ret = base;
+
+ /* Find the relevant block:offset */
+ list_for_each_entry(c, &map->debugfs_off_cache, list) {
+ if (from >= c->min && from <= c->max) {
+ fpos_offset = from - c->min;
+ reg_offset = fpos_offset / map->debugfs_tot_len;
+ *pos = c->min + (reg_offset * map->debugfs_tot_len);
+ return c->base_reg + reg_offset;
+ }
+
+ *pos = c->max;
+ ret = c->max_reg;
+ }
+
+ return ret;
+}
+
+static inline void regmap_calc_tot_len(struct regmap *map,
+ void *buf, size_t count)
+{
+ /* Calculate the length of a fixed format */
+ if (!map->debugfs_tot_len) {
+ map->debugfs_reg_len = regmap_calc_reg_len(map->max_register,
+ buf, count);
+ map->debugfs_val_len = 2 * map->format.val_bytes;
+ map->debugfs_tot_len = map->debugfs_reg_len +
+ map->debugfs_val_len + 3; /* : \n */
+ }
+}
+
+static ssize_t regmap_read_debugfs(struct regmap *map, unsigned int from,
+ unsigned int to, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ size_t buf_pos = 0;
+ loff_t p = *ppos;
ssize_t ret;
int i;
- struct regmap *map = file->private_data;
char *buf;
- unsigned int val;
+ unsigned int val, start_reg;
if (*ppos < 0 || !count)
return -EINVAL;
@@ -75,12 +186,12 @@ static ssize_t regmap_map_read_file(struct file *file, char __user *user_buf,
if (!buf)
return -ENOMEM;
- /* Calculate the length of a fixed format */
- reg_len = regmap_calc_reg_len(map->max_register, buf, count);
- val_len = 2 * map->format.val_bytes;
- tot_len = reg_len + val_len + 3; /* : \n */
+ regmap_calc_tot_len(map, buf, count);
- for (i = 0; i <= map->max_register; i += map->reg_stride) {
+ /* Work out which register we're starting at */
+ start_reg = regmap_debugfs_get_dump_start(map, from, *ppos, &p);
+
+ for (i = start_reg; i <= to; i += map->reg_stride) {
if (!regmap_readable(map, i))
continue;
@@ -90,26 +201,27 @@ static ssize_t regmap_map_read_file(struct file *file, char __user *user_buf,
/* If we're in the region the user is trying to read */
if (p >= *ppos) {
/* ...but not beyond it */
- if (buf_pos >= count - 1 - tot_len)
+ if (buf_pos + map->debugfs_tot_len > count)
break;
/* Format the register */
snprintf(buf + buf_pos, count - buf_pos, "%.*x: ",
- reg_len, i);
- buf_pos += reg_len + 2;
+ map->debugfs_reg_len, i - from);
+ buf_pos += map->debugfs_reg_len + 2;
/* Format the value, write all X if we can't read */
ret = regmap_read(map, i, &val);
if (ret == 0)
snprintf(buf + buf_pos, count - buf_pos,
- "%.*x", val_len, val);
+ "%.*x", map->debugfs_val_len, val);
else
- memset(buf + buf_pos, 'X', val_len);
+ memset(buf + buf_pos, 'X',
+ map->debugfs_val_len);
buf_pos += 2 * map->format.val_bytes;
buf[buf_pos++] = '\n';
}
- p += tot_len;
+ p += map->debugfs_tot_len;
}
ret = buf_pos;
@@ -126,6 +238,15 @@ out:
return ret;
}
+static ssize_t regmap_map_read_file(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct regmap *map = file->private_data;
+
+ return regmap_read_debugfs(map, 0, map->max_register, user_buf,
+ count, ppos);
+}
+
#undef REGMAP_ALLOW_WRITE_DEBUGFS
#ifdef REGMAP_ALLOW_WRITE_DEBUGFS
/*
@@ -158,7 +279,7 @@ static ssize_t regmap_map_write_file(struct file *file,
return -EINVAL;
/* Userspace has been fiddling around behind the kernel's back */
- add_taint(TAINT_USER);
+ add_taint(TAINT_USER, LOCKDEP_NOW_UNRELIABLE);
regmap_write(map, reg, value);
return buf_size;
@@ -174,6 +295,22 @@ static const struct file_operations regmap_map_fops = {
.llseek = default_llseek,
};
+static ssize_t regmap_range_read_file(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct regmap_range_node *range = file->private_data;
+ struct regmap *map = range->map;
+
+ return regmap_read_debugfs(map, range->range_min, range->range_max,
+ user_buf, count, ppos);
+}
+
+static const struct file_operations regmap_range_fops = {
+ .open = simple_open,
+ .read = regmap_range_read_file,
+ .llseek = default_llseek,
+};
+
static ssize_t regmap_access_read_file(struct file *file,
char __user *user_buf, size_t count,
loff_t *ppos)
@@ -244,6 +381,11 @@ static const struct file_operations regmap_access_fops = {
void regmap_debugfs_init(struct regmap *map, const char *name)
{
+ struct rb_node *next;
+ struct regmap_range_node *range_node;
+
+ INIT_LIST_HEAD(&map->debugfs_off_cache);
+
if (name) {
map->debugfs_name = kasprintf(GFP_KERNEL, "%s-%s",
dev_name(map->dev), name);
@@ -276,11 +418,24 @@ void regmap_debugfs_init(struct regmap *map, const char *name)
debugfs_create_bool("cache_bypass", 0400, map->debugfs,
&map->cache_bypass);
}
+
+ next = rb_first(&map->range_tree);
+ while (next) {
+ range_node = rb_entry(next, struct regmap_range_node, node);
+
+ if (range_node->name)
+ debugfs_create_file(range_node->name, 0400,
+ map->debugfs, range_node,
+ &regmap_range_fops);
+
+ next = rb_next(&range_node->node);
+ }
}
void regmap_debugfs_exit(struct regmap *map)
{
debugfs_remove_recursive(map->debugfs);
+ regmap_debugfs_free_dump_cache(map);
kfree(map->debugfs_name);
}
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index 5b6b1d8e6cc0..020ea2b9fd2f 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -34,6 +34,7 @@ struct regmap_irq_chip_data {
int irq;
int wake_count;
+ void *status_reg_buf;
unsigned int *status_buf;
unsigned int *mask_buf;
unsigned int *mask_buf_def;
@@ -87,6 +88,23 @@ static void regmap_irq_sync_unlock(struct irq_data *data)
if (ret != 0)
dev_err(d->map->dev, "Failed to sync masks in %x\n",
reg);
+
+ reg = d->chip->wake_base +
+ (i * map->reg_stride * d->irq_reg_stride);
+ if (d->wake_buf) {
+ if (d->chip->wake_invert)
+ ret = regmap_update_bits(d->map, reg,
+ d->mask_buf_def[i],
+ ~d->wake_buf[i]);
+ else
+ ret = regmap_update_bits(d->map, reg,
+ d->mask_buf_def[i],
+ d->wake_buf[i]);
+ if (ret != 0)
+ dev_err(d->map->dev,
+ "Failed to sync wakes in %x: %d\n",
+ reg, ret);
+ }
}
if (d->chip->runtime_pm)
@@ -129,16 +147,15 @@ static int regmap_irq_set_wake(struct irq_data *data, unsigned int on)
struct regmap *map = d->map;
const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
- if (!d->chip->wake_base)
- return -EINVAL;
-
if (on) {
- d->wake_buf[irq_data->reg_offset / map->reg_stride]
- &= ~irq_data->mask;
+ if (d->wake_buf)
+ d->wake_buf[irq_data->reg_offset / map->reg_stride]
+ &= ~irq_data->mask;
d->wake_count++;
} else {
- d->wake_buf[irq_data->reg_offset / map->reg_stride]
- |= irq_data->mask;
+ if (d->wake_buf)
+ d->wake_buf[irq_data->reg_offset / map->reg_stride]
+ |= irq_data->mask;
d->wake_count--;
}
@@ -167,30 +184,75 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
if (ret < 0) {
dev_err(map->dev, "IRQ thread failed to resume: %d\n",
ret);
+ pm_runtime_put(map->dev);
return IRQ_NONE;
}
}
/*
- * Ignore masked IRQs and ack if we need to; we ack early so
- * there is no race between handling and acknowleding the
- * interrupt. We assume that typically few of the interrupts
- * will fire simultaneously so don't worry about overhead from
- * doing a write per register.
+ * Read in the statuses, using a single bulk read if possible
+ * in order to reduce the I/O overheads.
*/
- for (i = 0; i < data->chip->num_regs; i++) {
- ret = regmap_read(map, chip->status_base + (i * map->reg_stride
- * data->irq_reg_stride),
- &data->status_buf[i]);
+ if (!map->use_single_rw && map->reg_stride == 1 &&
+ data->irq_reg_stride == 1) {
+ u8 *buf8 = data->status_reg_buf;
+ u16 *buf16 = data->status_reg_buf;
+ u32 *buf32 = data->status_reg_buf;
+ BUG_ON(!data->status_reg_buf);
+
+ ret = regmap_bulk_read(map, chip->status_base,
+ data->status_reg_buf,
+ chip->num_regs);
if (ret != 0) {
dev_err(map->dev, "Failed to read IRQ status: %d\n",
- ret);
- if (chip->runtime_pm)
- pm_runtime_put(map->dev);
+ ret);
return IRQ_NONE;
}
+ for (i = 0; i < data->chip->num_regs; i++) {
+ switch (map->format.val_bytes) {
+ case 1:
+ data->status_buf[i] = buf8[i];
+ break;
+ case 2:
+ data->status_buf[i] = buf16[i];
+ break;
+ case 4:
+ data->status_buf[i] = buf32[i];
+ break;
+ default:
+ BUG();
+ return IRQ_NONE;
+ }
+ }
+
+ } else {
+ for (i = 0; i < data->chip->num_regs; i++) {
+ ret = regmap_read(map, chip->status_base +
+ (i * map->reg_stride
+ * data->irq_reg_stride),
+ &data->status_buf[i]);
+
+ if (ret != 0) {
+ dev_err(map->dev,
+ "Failed to read IRQ status: %d\n",
+ ret);
+ if (chip->runtime_pm)
+ pm_runtime_put(map->dev);
+ return IRQ_NONE;
+ }
+ }
+ }
+
+ /*
+ * Ignore masked IRQs and ack if we need to; we ack early so
+	 * there is no race between handling and acknowledging the
+ * interrupt. We assume that typically few of the interrupts
+ * will fire simultaneously so don't worry about overhead from
+ * doing a write per register.
+ */
+ for (i = 0; i < data->chip->num_regs; i++) {
data->status_buf[i] &= ~data->mask_buf[i];
if (data->status_buf[i] && chip->ack_base) {
@@ -316,11 +378,6 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
d->irq_chip = regmap_irq_chip;
d->irq_chip.name = chip->name;
- if (!chip->wake_base) {
- d->irq_chip.irq_set_wake = NULL;
- d->irq_chip.flags |= IRQCHIP_MASK_ON_SUSPEND |
- IRQCHIP_SKIP_SET_WAKE;
- }
d->irq = irq;
d->map = map;
d->chip = chip;
@@ -331,6 +388,14 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
else
d->irq_reg_stride = 1;
+ if (!map->use_single_rw && map->reg_stride == 1 &&
+ d->irq_reg_stride == 1) {
+ d->status_reg_buf = kmalloc(map->format.val_bytes *
+ chip->num_regs, GFP_KERNEL);
+ if (!d->status_reg_buf)
+ goto err_alloc;
+ }
+
mutex_init(&d->lock);
for (i = 0; i < chip->num_irqs; i++)
@@ -361,8 +426,15 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
d->wake_buf[i] = d->mask_buf_def[i];
reg = chip->wake_base +
(i * map->reg_stride * d->irq_reg_stride);
- ret = regmap_update_bits(map, reg, d->wake_buf[i],
- d->wake_buf[i]);
+
+ if (chip->wake_invert)
+ ret = regmap_update_bits(map, reg,
+ d->mask_buf_def[i],
+ 0);
+ else
+ ret = regmap_update_bits(map, reg,
+ d->mask_buf_def[i],
+ d->wake_buf[i]);
if (ret != 0) {
dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
reg, ret);
@@ -401,6 +473,7 @@ err_alloc:
kfree(d->mask_buf_def);
kfree(d->mask_buf);
kfree(d->status_buf);
+ kfree(d->status_reg_buf);
kfree(d);
return ret;
}
@@ -422,6 +495,7 @@ void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
kfree(d->wake_buf);
kfree(d->mask_buf_def);
kfree(d->mask_buf);
+ kfree(d->status_reg_buf);
kfree(d->status_buf);
kfree(d);
}
@@ -458,3 +532,22 @@ int regmap_irq_get_virq(struct regmap_irq_chip_data *data, int irq)
return irq_create_mapping(data->domain, irq);
}
EXPORT_SYMBOL_GPL(regmap_irq_get_virq);
+
+/**
+ * regmap_irq_get_domain(): Retrieve the irq_domain for the chip
+ *
+ * Useful for drivers to request their own IRQs and for integration
+ * with subsystems. For ease of integration NULL is accepted as the
+ * controller data, in which case NULL is returned, so drivers can
+ * call this even if no IRQ controller has been allocated.
+ *
+ * @data: regmap_irq controller to operate on.
+ */
+struct irq_domain *regmap_irq_get_domain(struct regmap_irq_chip_data *data)
+{
+ if (data)
+ return data->domain;
+ else
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(regmap_irq_get_domain);
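
A rough usage sketch for the new accessor (the chip data pointer and hwirq number are hypothetical); this is essentially what regmap_irq_get_virq() does internally, but the domain is also useful for wider irqdomain integration:

#include <linux/errno.h>
#include <linux/irqdomain.h>
#include <linux/regmap.h>

static int example_get_virq(struct regmap_irq_chip_data *irq_data,
                            unsigned int hwirq)
{
        struct irq_domain *domain = regmap_irq_get_domain(irq_data);

        if (!domain)
                return -ENODEV; /* no IRQ controller was allocated */

        return irq_create_mapping(domain, hwirq);
}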
diff --git a/drivers/base/regmap/regmap-mmio.c b/drivers/base/regmap/regmap-mmio.c
index f05fc74dd84a..98745dd77e8c 100644
--- a/drivers/base/regmap/regmap-mmio.c
+++ b/drivers/base/regmap/regmap-mmio.c
@@ -16,6 +16,7 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/clk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
@@ -26,6 +27,7 @@
struct regmap_mmio_context {
void __iomem *regs;
unsigned val_bytes;
+ struct clk *clk;
};
static int regmap_mmio_gather_write(void *context,
@@ -34,9 +36,16 @@ static int regmap_mmio_gather_write(void *context,
{
struct regmap_mmio_context *ctx = context;
u32 offset;
+ int ret;
BUG_ON(reg_size != 4);
+ if (ctx->clk) {
+ ret = clk_enable(ctx->clk);
+ if (ret < 0)
+ return ret;
+ }
+
offset = *(u32 *)reg;
while (val_size) {
@@ -64,6 +73,9 @@ static int regmap_mmio_gather_write(void *context,
offset += ctx->val_bytes;
}
+ if (ctx->clk)
+ clk_disable(ctx->clk);
+
return 0;
}
@@ -80,9 +92,16 @@ static int regmap_mmio_read(void *context,
{
struct regmap_mmio_context *ctx = context;
u32 offset;
+ int ret;
BUG_ON(reg_size != 4);
+ if (ctx->clk) {
+ ret = clk_enable(ctx->clk);
+ if (ret < 0)
+ return ret;
+ }
+
offset = *(u32 *)reg;
while (val_size) {
@@ -110,11 +129,20 @@ static int regmap_mmio_read(void *context,
offset += ctx->val_bytes;
}
+ if (ctx->clk)
+ clk_disable(ctx->clk);
+
return 0;
}
static void regmap_mmio_free_context(void *context)
{
+ struct regmap_mmio_context *ctx = context;
+
+ if (ctx->clk) {
+ clk_unprepare(ctx->clk);
+ clk_put(ctx->clk);
+ }
kfree(context);
}
@@ -128,11 +156,14 @@ static struct regmap_bus regmap_mmio = {
.val_format_endian_default = REGMAP_ENDIAN_NATIVE,
};
-static struct regmap_mmio_context *regmap_mmio_gen_context(void __iomem *regs,
+static struct regmap_mmio_context *regmap_mmio_gen_context(struct device *dev,
+ const char *clk_id,
+ void __iomem *regs,
const struct regmap_config *config)
{
struct regmap_mmio_context *ctx;
int min_stride;
+ int ret;
if (config->reg_bits != 32)
return ERR_PTR(-EINVAL);
@@ -179,37 +210,59 @@ static struct regmap_mmio_context *regmap_mmio_gen_context(void __iomem *regs,
ctx->regs = regs;
ctx->val_bytes = config->val_bits / 8;
+ if (clk_id == NULL)
+ return ctx;
+
+ ctx->clk = clk_get(dev, clk_id);
+ if (IS_ERR(ctx->clk)) {
+ ret = PTR_ERR(ctx->clk);
+ goto err_free;
+ }
+
+ ret = clk_prepare(ctx->clk);
+ if (ret < 0) {
+ clk_put(ctx->clk);
+ goto err_free;
+ }
+
return ctx;
+
+err_free:
+ kfree(ctx);
+
+ return ERR_PTR(ret);
}
/**
- * regmap_init_mmio(): Initialise register map
+ * regmap_init_mmio_clk(): Initialise register map with register clock
*
* @dev: Device that will be interacted with
+ * @clk_id: register clock consumer ID
* @regs: Pointer to memory-mapped IO region
* @config: Configuration for register map
*
* The return value will be an ERR_PTR() on error or a valid pointer to
* a struct regmap.
*/
-struct regmap *regmap_init_mmio(struct device *dev,
- void __iomem *regs,
- const struct regmap_config *config)
+struct regmap *regmap_init_mmio_clk(struct device *dev, const char *clk_id,
+ void __iomem *regs,
+ const struct regmap_config *config)
{
struct regmap_mmio_context *ctx;
- ctx = regmap_mmio_gen_context(regs, config);
+ ctx = regmap_mmio_gen_context(dev, clk_id, regs, config);
if (IS_ERR(ctx))
return ERR_CAST(ctx);
return regmap_init(dev, &regmap_mmio, ctx, config);
}
-EXPORT_SYMBOL_GPL(regmap_init_mmio);
+EXPORT_SYMBOL_GPL(regmap_init_mmio_clk);
/**
- * devm_regmap_init_mmio(): Initialise managed register map
+ * devm_regmap_init_mmio_clk(): Initialise managed register map with clock
*
* @dev: Device that will be interacted with
+ * @clk_id: register clock consumer ID
* @regs: Pointer to memory-mapped IO region
* @config: Configuration for register map
*
@@ -217,18 +270,18 @@ EXPORT_SYMBOL_GPL(regmap_init_mmio);
* to a struct regmap. The regmap will be automatically freed by the
* device management code.
*/
-struct regmap *devm_regmap_init_mmio(struct device *dev,
- void __iomem *regs,
- const struct regmap_config *config)
+struct regmap *devm_regmap_init_mmio_clk(struct device *dev, const char *clk_id,
+ void __iomem *regs,
+ const struct regmap_config *config)
{
struct regmap_mmio_context *ctx;
- ctx = regmap_mmio_gen_context(regs, config);
+ ctx = regmap_mmio_gen_context(dev, clk_id, regs, config);
if (IS_ERR(ctx))
return ERR_CAST(ctx);
return devm_regmap_init(dev, &regmap_mmio, ctx, config);
}
-EXPORT_SYMBOL_GPL(devm_regmap_init_mmio);
+EXPORT_SYMBOL_GPL(devm_regmap_init_mmio_clk);
MODULE_LICENSE("GPL v2");
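
A hedged sketch of adopting the clock-aware variant from a platform driver's probe(); the "mclk" consumer ID and the config values are illustrative only:

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>

static const struct regmap_config example_mmio_config = {
        .reg_bits = 32, /* regmap-mmio requires 32-bit registers */
        .reg_stride = 4,
        .val_bits = 32,
};

static int example_probe(struct platform_device *pdev)
{
        struct resource *res;
        void __iomem *base;
        struct regmap *map;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(base))
                return PTR_ERR(base);

        /* The clock is clk_get()/clk_prepare()d once here and then
         * clk_enable()d around each register access. */
        map = devm_regmap_init_mmio_clk(&pdev->dev, "mclk", base,
                                        &example_mmio_config);
        if (IS_ERR(map))
                return PTR_ERR(map);

        return 0;
}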
diff --git a/drivers/base/regmap/regmap-spi.c b/drivers/base/regmap/regmap-spi.c
index ffa46a92ad33..4c506bd940f3 100644
--- a/drivers/base/regmap/regmap-spi.c
+++ b/drivers/base/regmap/regmap-spi.c
@@ -15,6 +15,21 @@
#include <linux/init.h>
#include <linux/module.h>
+#include "internal.h"
+
+struct regmap_async_spi {
+ struct regmap_async core;
+ struct spi_message m;
+ struct spi_transfer t[2];
+};
+
+static void regmap_spi_complete(void *data)
+{
+ struct regmap_async_spi *async = data;
+
+ regmap_async_complete_cb(&async->core, async->m.status);
+}
+
static int regmap_spi_write(void *context, const void *data, size_t count)
{
struct device *dev = context;
@@ -40,6 +55,43 @@ static int regmap_spi_gather_write(void *context,
return spi_sync(spi, &m);
}
+static int regmap_spi_async_write(void *context,
+ const void *reg, size_t reg_len,
+ const void *val, size_t val_len,
+ struct regmap_async *a)
+{
+ struct regmap_async_spi *async = container_of(a,
+ struct regmap_async_spi,
+ core);
+ struct device *dev = context;
+ struct spi_device *spi = to_spi_device(dev);
+
+ async->t[0].tx_buf = reg;
+ async->t[0].len = reg_len;
+ async->t[1].tx_buf = val;
+ async->t[1].len = val_len;
+
+ spi_message_init(&async->m);
+ spi_message_add_tail(&async->t[0], &async->m);
+ spi_message_add_tail(&async->t[1], &async->m);
+
+ async->m.complete = regmap_spi_complete;
+ async->m.context = async;
+
+ return spi_async(spi, &async->m);
+}
+
+static struct regmap_async *regmap_spi_async_alloc(void)
+{
+ struct regmap_async_spi *async_spi;
+
+ async_spi = kzalloc(sizeof(*async_spi), GFP_KERNEL);
+ if (!async_spi)
+ return NULL;
+
+ return &async_spi->core;
+}
+
static int regmap_spi_read(void *context,
const void *reg, size_t reg_size,
void *val, size_t val_size)
@@ -53,6 +105,8 @@ static int regmap_spi_read(void *context,
static struct regmap_bus regmap_spi = {
.write = regmap_spi_write,
.gather_write = regmap_spi_gather_write,
+ .async_write = regmap_spi_async_write,
+ .async_alloc = regmap_spi_async_alloc,
.read = regmap_spi_read,
.read_flag_mask = 0x80,
};
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 52069d29ff12..3d2367501fd0 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -16,6 +16,7 @@
#include <linux/mutex.h>
#include <linux/err.h>
#include <linux/rbtree.h>
+#include <linux/sched.h>
#define CREATE_TRACE_POINTS
#include <trace/events/regmap.h>
@@ -34,6 +35,52 @@ static int _regmap_update_bits(struct regmap *map, unsigned int reg,
unsigned int mask, unsigned int val,
bool *change);
+static int _regmap_bus_read(void *context, unsigned int reg,
+ unsigned int *val);
+static int _regmap_bus_formatted_write(void *context, unsigned int reg,
+ unsigned int val);
+static int _regmap_bus_raw_write(void *context, unsigned int reg,
+ unsigned int val);
+
+static void async_cleanup(struct work_struct *work)
+{
+ struct regmap_async *async = container_of(work, struct regmap_async,
+ cleanup);
+
+ kfree(async->work_buf);
+ kfree(async);
+}
+
+bool regmap_reg_in_ranges(unsigned int reg,
+ const struct regmap_range *ranges,
+ unsigned int nranges)
+{
+ const struct regmap_range *r;
+ int i;
+
+ for (i = 0, r = ranges; i < nranges; i++, r++)
+ if (regmap_reg_in_range(reg, r))
+ return true;
+ return false;
+}
+EXPORT_SYMBOL_GPL(regmap_reg_in_ranges);
+
+static bool _regmap_check_range_table(struct regmap *map,
+ unsigned int reg,
+ const struct regmap_access_table *table)
+{
+ /* Check "no ranges" first */
+ if (regmap_reg_in_ranges(reg, table->no_ranges, table->n_no_ranges))
+ return false;
+
+ /* In case zero "yes ranges" are supplied, any reg is OK */
+ if (!table->n_yes_ranges)
+ return true;
+
+ return regmap_reg_in_ranges(reg, table->yes_ranges,
+ table->n_yes_ranges);
+}
+
bool regmap_writeable(struct regmap *map, unsigned int reg)
{
if (map->max_register && reg > map->max_register)
@@ -42,6 +89,9 @@ bool regmap_writeable(struct regmap *map, unsigned int reg)
if (map->writeable_reg)
return map->writeable_reg(map->dev, reg);
+ if (map->wr_table)
+ return _regmap_check_range_table(map, reg, map->wr_table);
+
return true;
}
@@ -56,6 +106,9 @@ bool regmap_readable(struct regmap *map, unsigned int reg)
if (map->readable_reg)
return map->readable_reg(map->dev, reg);
+ if (map->rd_table)
+ return _regmap_check_range_table(map, reg, map->rd_table);
+
return true;
}
@@ -67,6 +120,9 @@ bool regmap_volatile(struct regmap *map, unsigned int reg)
if (map->volatile_reg)
return map->volatile_reg(map->dev, reg);
+ if (map->volatile_table)
+ return _regmap_check_range_table(map, reg, map->volatile_table);
+
return true;
}
@@ -78,11 +134,14 @@ bool regmap_precious(struct regmap *map, unsigned int reg)
if (map->precious_reg)
return map->precious_reg(map->dev, reg);
+ if (map->precious_table)
+ return _regmap_check_range_table(map, reg, map->precious_table);
+
return false;
}
static bool regmap_volatile_range(struct regmap *map, unsigned int reg,
- unsigned int num)
+ size_t num)
{
unsigned int i;
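
These table checks complement the writeable_reg()-style callbacks, which the code above still consults first. A hedged sketch of wiring up a write table; the register ranges are invented:

#include <linux/kernel.h>
#include <linux/regmap.h>

static const struct regmap_range example_wr_ranges[] = {
        regmap_reg_range(0x00, 0x1f),   /* hypothetical control block */
        regmap_reg_range(0x40, 0x4f),   /* hypothetical DAC block */
};

static const struct regmap_access_table example_wr_table = {
        .yes_ranges = example_wr_ranges,
        .n_yes_ranges = ARRAY_SIZE(example_wr_ranges),
};

static const struct regmap_config example_table_config = {
        .reg_bits = 8,
        .val_bits = 8,
        .max_register = 0x4f,
        .wr_table = &example_wr_table,  /* used when writeable_reg is NULL */
};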
@@ -214,23 +273,27 @@ static unsigned int regmap_parse_32_native(void *buf)
return *(u32 *)buf;
}
-static void regmap_lock_mutex(struct regmap *map)
+static void regmap_lock_mutex(void *__map)
{
+ struct regmap *map = __map;
mutex_lock(&map->mutex);
}
-static void regmap_unlock_mutex(struct regmap *map)
+static void regmap_unlock_mutex(void *__map)
{
+ struct regmap *map = __map;
mutex_unlock(&map->mutex);
}
-static void regmap_lock_spinlock(struct regmap *map)
+static void regmap_lock_spinlock(void *__map)
{
+ struct regmap *map = __map;
spin_lock(&map->spinlock);
}
-static void regmap_unlock_spinlock(struct regmap *map)
+static void regmap_unlock_spinlock(void *__map)
{
+ struct regmap *map = __map;
spin_unlock(&map->spinlock);
}
@@ -326,7 +389,7 @@ struct regmap *regmap_init(struct device *dev,
enum regmap_endian reg_endian, val_endian;
int i, j;
- if (!bus || !config)
+ if (!config)
goto err;
map = kzalloc(sizeof(*map), GFP_KERNEL);
@@ -335,14 +398,22 @@ struct regmap *regmap_init(struct device *dev,
goto err;
}
- if (bus->fast_io) {
- spin_lock_init(&map->spinlock);
- map->lock = regmap_lock_spinlock;
- map->unlock = regmap_unlock_spinlock;
+ if (config->lock && config->unlock) {
+ map->lock = config->lock;
+ map->unlock = config->unlock;
+ map->lock_arg = config->lock_arg;
} else {
- mutex_init(&map->mutex);
- map->lock = regmap_lock_mutex;
- map->unlock = regmap_unlock_mutex;
+ if ((bus && bus->fast_io) ||
+ config->fast_io) {
+ spin_lock_init(&map->spinlock);
+ map->lock = regmap_lock_spinlock;
+ map->unlock = regmap_unlock_spinlock;
+ } else {
+ mutex_init(&map->mutex);
+ map->lock = regmap_lock_mutex;
+ map->unlock = regmap_unlock_mutex;
+ }
+ map->lock_arg = map;
}
map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8);
map->format.pad_bytes = config->pad_bits / 8;
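
With lock/unlock now void-pointer callbacks paired with lock_arg, a driver can substitute its own locking, e.g. to share one mutex between regmap and non-regmap I/O. A hedged sketch (all names invented):

#include <linux/mutex.h>
#include <linux/regmap.h>

static DEFINE_MUTEX(example_io_lock);  /* shared with other register users */

static void example_regmap_lock(void *arg)
{
        mutex_lock(arg);        /* arg is config->lock_arg */
}

static void example_regmap_unlock(void *arg)
{
        mutex_unlock(arg);
}

static const struct regmap_config example_locked_config = {
        .reg_bits = 8,
        .val_bits = 8,
        .lock = example_regmap_lock,
        .unlock = example_regmap_unlock,
        .lock_arg = &example_io_lock,
};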
@@ -359,6 +430,10 @@ struct regmap *regmap_init(struct device *dev,
map->bus = bus;
map->bus_context = bus_context;
map->max_register = config->max_register;
+ map->wr_table = config->wr_table;
+ map->rd_table = config->rd_table;
+ map->volatile_table = config->volatile_table;
+ map->precious_table = config->precious_table;
map->writeable_reg = config->writeable_reg;
map->readable_reg = config->readable_reg;
map->volatile_reg = config->volatile_reg;
@@ -366,13 +441,27 @@ struct regmap *regmap_init(struct device *dev,
map->cache_type = config->cache_type;
map->name = config->name;
+ spin_lock_init(&map->async_lock);
+ INIT_LIST_HEAD(&map->async_list);
+ init_waitqueue_head(&map->async_waitq);
+
if (config->read_flag_mask || config->write_flag_mask) {
map->read_flag_mask = config->read_flag_mask;
map->write_flag_mask = config->write_flag_mask;
- } else {
+ } else if (bus) {
map->read_flag_mask = bus->read_flag_mask;
}
+ if (!bus) {
+ map->reg_read = config->reg_read;
+ map->reg_write = config->reg_write;
+
+ map->defer_caching = false;
+ goto skip_format_initialization;
+ } else {
+ map->reg_read = _regmap_bus_read;
+ }
+
reg_endian = config->reg_format_endian;
if (reg_endian == REGMAP_ENDIAN_DEFAULT)
reg_endian = bus->reg_format_endian_default;
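
Because the bus pointer is now optional, a device reachable only through bespoke accessors (e.g. firmware mailbox calls) can supply reg_read/reg_write instead. A minimal sketch, assuming a hypothetical transport:

#include <linux/regmap.h>

static int example_reg_read(void *context, unsigned int reg,
                            unsigned int *val)
{
        *val = 0;       /* a real driver would query the device here */
        return 0;
}

static int example_reg_write(void *context, unsigned int reg,
                             unsigned int val)
{
        return 0;       /* a real driver would write the device here */
}

static const struct regmap_config example_no_bus_config = {
        .reg_bits = 8,
        .val_bits = 32,
        .reg_read = example_reg_read,
        .reg_write = example_reg_write,
};

static struct regmap *example_no_bus_init(struct device *dev, void *ctx)
{
        /* A NULL bus selects the reg_read()/reg_write() path; ctx is
         * handed back to the callbacks as their context argument. */
        return regmap_init(dev, NULL, ctx, &example_no_bus_config);
}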
@@ -443,6 +532,12 @@ struct regmap *regmap_init(struct device *dev,
}
break;
+ case 24:
+ if (reg_endian != REGMAP_ENDIAN_BIG)
+ goto err_map;
+ map->format.format_reg = regmap_format_24;
+ break;
+
case 32:
switch (reg_endian) {
case REGMAP_ENDIAN_BIG:
@@ -518,21 +613,49 @@ struct regmap *regmap_init(struct device *dev,
goto err_map;
}
+ if (map->format.format_write) {
+ map->defer_caching = false;
+ map->reg_write = _regmap_bus_formatted_write;
+ } else if (map->format.format_val) {
+ map->defer_caching = true;
+ map->reg_write = _regmap_bus_raw_write;
+ }
+
+skip_format_initialization:
+
map->range_tree = RB_ROOT;
- for (i = 0; i < config->n_ranges; i++) {
+ for (i = 0; i < config->num_ranges; i++) {
const struct regmap_range_cfg *range_cfg = &config->ranges[i];
struct regmap_range_node *new;
/* Sanity check */
- if (range_cfg->range_max < range_cfg->range_min ||
- range_cfg->range_max > map->max_register ||
- range_cfg->selector_reg > map->max_register ||
- range_cfg->window_len == 0)
+ if (range_cfg->range_max < range_cfg->range_min) {
+ dev_err(map->dev, "Invalid range %d: %d < %d\n", i,
+ range_cfg->range_max, range_cfg->range_min);
+ goto err_range;
+ }
+
+ if (range_cfg->range_max > map->max_register) {
+ dev_err(map->dev, "Invalid range %d: %d > %d\n", i,
+ range_cfg->range_max, map->max_register);
+ goto err_range;
+ }
+
+ if (range_cfg->selector_reg > map->max_register) {
+ dev_err(map->dev,
+ "Invalid range %d: selector out of map\n", i);
goto err_range;
+ }
+
+ if (range_cfg->window_len == 0) {
+ dev_err(map->dev, "Invalid range %d: window_len 0\n",
+ i);
+ goto err_range;
+ }
/* Make sure, that this register range has no selector
or data window within its boundary */
- for (j = 0; j < config->n_ranges; j++) {
+ for (j = 0; j < config->num_ranges; j++) {
unsigned sel_reg = config->ranges[j].selector_reg;
unsigned win_min = config->ranges[j].window_start;
unsigned win_max = win_min +
@@ -540,11 +663,17 @@ struct regmap *regmap_init(struct device *dev,
if (range_cfg->range_min <= sel_reg &&
sel_reg <= range_cfg->range_max) {
+ dev_err(map->dev,
+ "Range %d: selector for %d in window\n",
+ i, j);
goto err_range;
}
if (!(win_max < range_cfg->range_min ||
win_min > range_cfg->range_max)) {
+ dev_err(map->dev,
+ "Range %d: window for %d in window\n",
+ i, j);
goto err_range;
}
}
@@ -555,6 +684,8 @@ struct regmap *regmap_init(struct device *dev,
goto err_range;
}
+ new->map = map;
+ new->name = range_cfg->name;
new->range_min = range_cfg->range_min;
new->range_max = range_cfg->range_max;
new->selector_reg = range_cfg->selector_reg;
@@ -564,6 +695,7 @@ struct regmap *regmap_init(struct device *dev,
new->window_len = range_cfg->window_len;
if (_regmap_range_add(map, new) == false) {
+ dev_err(map->dev, "Failed to add range %d\n", i);
kfree(new);
goto err_range;
}
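
For reference, a hedged example of the kind of indirect-access window these new diagnostics validate; the selector register, mask and addresses are all invented:

#include <linux/kernel.h>
#include <linux/regmap.h>

/* Hypothetical layout: registers 0x100-0x2ff are reached through a
 * 0x100-register window at 0x10, with the page selected by bits [1:0]
 * of register 0x0f. */
static const struct regmap_range_cfg example_ranges[] = {
        {
                .name = "pages",
                .range_min = 0x100,
                .range_max = 0x2ff,
                .selector_reg = 0x0f,
                .selector_mask = 0x03,
                .selector_shift = 0,
                .window_start = 0x10,
                .window_len = 0x100,
        },
};

static const struct regmap_config example_paged_config = {
        .reg_bits = 16,
        .val_bits = 8,
        .max_register = 0x2ff,
        .ranges = example_ranges,
        .num_ranges = ARRAY_SIZE(example_ranges),       /* renamed from n_ranges */
};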
@@ -579,7 +711,7 @@ struct regmap *regmap_init(struct device *dev,
}
ret = regcache_init(map, config);
- if (ret < 0)
+ if (ret != 0)
goto err_range;
regmap_debugfs_init(map, config->name);
@@ -692,7 +824,7 @@ void regmap_exit(struct regmap *map)
regcache_exit(map);
regmap_debugfs_exit(map);
regmap_range_exit(map);
- if (map->bus->free_context)
+ if (map->bus && map->bus->free_context)
map->bus->free_context(map->bus_context);
kfree(map->work_buf);
kfree(map);
@@ -738,65 +870,68 @@ struct regmap *dev_get_regmap(struct device *dev, const char *name)
EXPORT_SYMBOL_GPL(dev_get_regmap);
static int _regmap_select_page(struct regmap *map, unsigned int *reg,
+ struct regmap_range_node *range,
unsigned int val_num)
{
- struct regmap_range_node *range;
void *orig_work_buf;
unsigned int win_offset;
unsigned int win_page;
bool page_chg;
int ret;
- range = _regmap_range_lookup(map, *reg);
- if (range) {
- win_offset = (*reg - range->range_min) % range->window_len;
- win_page = (*reg - range->range_min) / range->window_len;
+ win_offset = (*reg - range->range_min) % range->window_len;
+ win_page = (*reg - range->range_min) / range->window_len;
- if (val_num > 1) {
- /* Bulk write shouldn't cross range boundary */
- if (*reg + val_num - 1 > range->range_max)
- return -EINVAL;
-
- /* ... or single page boundary */
- if (val_num > range->window_len - win_offset)
- return -EINVAL;
- }
+ if (val_num > 1) {
+ /* Bulk write shouldn't cross range boundary */
+ if (*reg + val_num - 1 > range->range_max)
+ return -EINVAL;
- /* It is possible to have selector register inside data window.
- In that case, selector register is located on every page and
- it needs no page switching, when accessed alone. */
- if (val_num > 1 ||
- range->window_start + win_offset != range->selector_reg) {
- /* Use separate work_buf during page switching */
- orig_work_buf = map->work_buf;
- map->work_buf = map->selector_work_buf;
+ /* ... or single page boundary */
+ if (val_num > range->window_len - win_offset)
+ return -EINVAL;
+ }
- ret = _regmap_update_bits(map, range->selector_reg,
- range->selector_mask,
- win_page << range->selector_shift,
- &page_chg);
+ /* It is possible to have selector register inside data window.
+ In that case, selector register is located on every page and
+ it needs no page switching, when accessed alone. */
+ if (val_num > 1 ||
+ range->window_start + win_offset != range->selector_reg) {
+ /* Use separate work_buf during page switching */
+ orig_work_buf = map->work_buf;
+ map->work_buf = map->selector_work_buf;
- map->work_buf = orig_work_buf;
+ ret = _regmap_update_bits(map, range->selector_reg,
+ range->selector_mask,
+ win_page << range->selector_shift,
+ &page_chg);
- if (ret < 0)
- return ret;
- }
+ map->work_buf = orig_work_buf;
- *reg = range->window_start + win_offset;
+ if (ret != 0)
+ return ret;
}
+ *reg = range->window_start + win_offset;
+
return 0;
}
static int _regmap_raw_write(struct regmap *map, unsigned int reg,
- const void *val, size_t val_len)
+ const void *val, size_t val_len, bool async)
{
+ struct regmap_range_node *range;
+ unsigned long flags;
u8 *u8 = map->work_buf;
+ void *work_val = map->work_buf + map->format.reg_bytes +
+ map->format.pad_bytes;
void *buf;
int ret = -ENOTSUPP;
size_t len;
int i;
+ BUG_ON(!map->bus);
+
/* Check for unwritable registers before we start */
if (map->writeable_reg)
for (i = 0; i < val_len / map->format.val_bytes; i++)
@@ -814,7 +949,7 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg,
ival);
if (ret) {
dev_err(map->dev,
- "Error in caching of register: %u ret: %d\n",
+ "Error in caching of register: %x ret: %d\n",
reg + i, ret);
return ret;
}
@@ -825,14 +960,84 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg,
}
}
- ret = _regmap_select_page(map, &reg, val_len / map->format.val_bytes);
- if (ret < 0)
- return ret;
+ range = _regmap_range_lookup(map, reg);
+ if (range) {
+ int val_num = val_len / map->format.val_bytes;
+ int win_offset = (reg - range->range_min) % range->window_len;
+ int win_residue = range->window_len - win_offset;
+
+ /* If the write goes beyond the end of the window split it */
+ while (val_num > win_residue) {
+ dev_dbg(map->dev, "Writing window %d/%zu\n",
+ win_residue, val_len / map->format.val_bytes);
+ ret = _regmap_raw_write(map, reg, val, win_residue *
+ map->format.val_bytes, async);
+ if (ret != 0)
+ return ret;
+
+ reg += win_residue;
+ val_num -= win_residue;
+ val += win_residue * map->format.val_bytes;
+ val_len -= win_residue * map->format.val_bytes;
+
+ win_offset = (reg - range->range_min) %
+ range->window_len;
+ win_residue = range->window_len - win_offset;
+ }
+
+ ret = _regmap_select_page(map, &reg, range, val_num);
+ if (ret != 0)
+ return ret;
+ }
map->format.format_reg(map->work_buf, reg, map->reg_shift);
u8[0] |= map->write_flag_mask;
+ if (async && map->bus->async_write) {
+ struct regmap_async *async = map->bus->async_alloc();
+ if (!async)
+ return -ENOMEM;
+
+ async->work_buf = kzalloc(map->format.buf_size,
+ GFP_KERNEL | GFP_DMA);
+ if (!async->work_buf) {
+ kfree(async);
+ return -ENOMEM;
+ }
+
+ INIT_WORK(&async->cleanup, async_cleanup);
+ async->map = map;
+
+ /* If the caller supplied the value we can use it safely. */
+ memcpy(async->work_buf, map->work_buf, map->format.pad_bytes +
+ map->format.reg_bytes + map->format.val_bytes);
+ if (val == work_val)
+ val = async->work_buf + map->format.pad_bytes +
+ map->format.reg_bytes;
+
+ spin_lock_irqsave(&map->async_lock, flags);
+ list_add_tail(&async->list, &map->async_list);
+ spin_unlock_irqrestore(&map->async_lock, flags);
+
+ ret = map->bus->async_write(map->bus_context, async->work_buf,
+ map->format.reg_bytes +
+ map->format.pad_bytes,
+ val, val_len, async);
+
+ if (ret != 0) {
+ dev_err(map->dev, "Failed to schedule write: %d\n",
+ ret);
+
+ spin_lock_irqsave(&map->async_lock, flags);
+ list_del(&async->list);
+ spin_unlock_irqrestore(&map->async_lock, flags);
+
+ kfree(async->work_buf);
+ kfree(async);
+		}
+
+		return ret;
+	}
+
trace_regmap_hw_write_start(map->dev, reg,
val_len / map->format.val_bytes);
@@ -840,8 +1045,7 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg,
* send the work_buf directly, otherwise try to do a gather
* write.
*/
- if (val == (map->work_buf + map->format.pad_bytes +
- map->format.reg_bytes))
+ if (val == work_val)
ret = map->bus->write(map->bus_context, map->work_buf,
map->format.reg_bytes +
map->format.pad_bytes +
@@ -873,13 +1077,62 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg,
return ret;
}
+static int _regmap_bus_formatted_write(void *context, unsigned int reg,
+ unsigned int val)
+{
+ int ret;
+ struct regmap_range_node *range;
+ struct regmap *map = context;
+
+ BUG_ON(!map->bus || !map->format.format_write);
+
+ range = _regmap_range_lookup(map, reg);
+ if (range) {
+ ret = _regmap_select_page(map, &reg, range, 1);
+ if (ret != 0)
+ return ret;
+ }
+
+ map->format.format_write(map, reg, val);
+
+ trace_regmap_hw_write_start(map->dev, reg, 1);
+
+ ret = map->bus->write(map->bus_context, map->work_buf,
+ map->format.buf_size);
+
+ trace_regmap_hw_write_done(map->dev, reg, 1);
+
+ return ret;
+}
+
+static int _regmap_bus_raw_write(void *context, unsigned int reg,
+ unsigned int val)
+{
+ struct regmap *map = context;
+
+ BUG_ON(!map->bus || !map->format.format_val);
+
+ map->format.format_val(map->work_buf + map->format.reg_bytes
+ + map->format.pad_bytes, val, 0);
+ return _regmap_raw_write(map, reg,
+ map->work_buf +
+ map->format.reg_bytes +
+ map->format.pad_bytes,
+ map->format.val_bytes, false);
+}
+
+static inline void *_regmap_map_get_context(struct regmap *map)
+{
+ return (map->bus) ? map : map->bus_context;
+}
+
int _regmap_write(struct regmap *map, unsigned int reg,
unsigned int val)
{
int ret;
- BUG_ON(!map->format.format_write && !map->format.format_val);
+ void *context = _regmap_map_get_context(map);
- if (!map->cache_bypass && map->format.format_write) {
+ if (!map->cache_bypass && !map->defer_caching) {
ret = regcache_write(map, reg, val);
if (ret != 0)
return ret;
@@ -896,30 +1149,7 @@ int _regmap_write(struct regmap *map, unsigned int reg,
trace_regmap_reg_write(map->dev, reg, val);
- if (map->format.format_write) {
- ret = _regmap_select_page(map, &reg, 1);
- if (ret < 0)
- return ret;
-
- map->format.format_write(map, reg, val);
-
- trace_regmap_hw_write_start(map->dev, reg, 1);
-
- ret = map->bus->write(map->bus_context, map->work_buf,
- map->format.buf_size);
-
- trace_regmap_hw_write_done(map->dev, reg, 1);
-
- return ret;
- } else {
- map->format.format_val(map->work_buf + map->format.reg_bytes
- + map->format.pad_bytes, val, 0);
- return _regmap_raw_write(map, reg,
- map->work_buf +
- map->format.reg_bytes +
- map->format.pad_bytes,
- map->format.val_bytes);
- }
+ return map->reg_write(context, reg, val);
}
/**
@@ -939,11 +1169,11 @@ int regmap_write(struct regmap *map, unsigned int reg, unsigned int val)
if (reg % map->reg_stride)
return -EINVAL;
- map->lock(map);
+ map->lock(map->lock_arg);
ret = _regmap_write(map, reg, val);
- map->unlock(map);
+ map->unlock(map->lock_arg);
return ret;
}
@@ -970,16 +1200,18 @@ int regmap_raw_write(struct regmap *map, unsigned int reg,
{
int ret;
+ if (!map->bus)
+ return -EINVAL;
if (val_len % map->format.val_bytes)
return -EINVAL;
if (reg % map->reg_stride)
return -EINVAL;
- map->lock(map);
+ map->lock(map->lock_arg);
- ret = _regmap_raw_write(map, reg, val, val_len);
+ ret = _regmap_raw_write(map, reg, val, val_len, false);
- map->unlock(map);
+ map->unlock(map->lock_arg);
return ret;
}
@@ -994,7 +1226,7 @@ EXPORT_SYMBOL_GPL(regmap_raw_write);
* @val_count: Number of registers to write
*
* This function is intended to be used for writing a large block of
- * data to be device either in single transfer or multiple transfer.
+ * data to the device either in a single transfer or multiple transfers.
*
* A value of zero will be returned on success, a negative errno will
* be returned in error cases.
@@ -1006,12 +1238,14 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
size_t val_bytes = map->format.val_bytes;
void *wval;
+ if (!map->bus)
+ return -EINVAL;
if (!map->format.parse_val)
return -EINVAL;
if (reg % map->reg_stride)
return -EINVAL;
- map->lock(map);
+ map->lock(map->lock_arg);
/* No formatting is required if val_bytes is 1 */
if (val_bytes == 1) {
@@ -1033,34 +1267,84 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
if (map->use_single_rw) {
for (i = 0; i < val_count; i++) {
ret = regmap_raw_write(map,
- reg + (i * map->reg_stride),
- val + (i * val_bytes),
- val_bytes);
+ reg + (i * map->reg_stride),
+ val + (i * val_bytes),
+ val_bytes);
if (ret != 0)
return ret;
}
} else {
- ret = _regmap_raw_write(map, reg, wval, val_bytes * val_count);
+ ret = _regmap_raw_write(map, reg, wval, val_bytes * val_count,
+ false);
}
if (val_bytes != 1)
kfree(wval);
out:
- map->unlock(map);
+ map->unlock(map->lock_arg);
return ret;
}
EXPORT_SYMBOL_GPL(regmap_bulk_write);
+/**
+ * regmap_raw_write_async(): Write raw values to one or more registers
+ * asynchronously
+ *
+ * @map: Register map to write to
+ * @reg: Initial register to write to
+ * @val: Block of data to be written, laid out for direct transmission to the
+ * device. Must be valid until regmap_async_complete() is called.
+ * @val_len: Length of data pointed to by val.
+ *
+ * This function is intended to be used for things like firmware
+ * download where a large block of data needs to be transferred to the
+ * device. No formatting will be done on the data provided.
+ *
+ * If supported by the underlying bus the write will be scheduled
+ * asynchronously, helping maximise I/O speed on higher-speed buses
+ * like SPI. regmap_async_complete() can be called to ensure that all
+ * asynchronous writes have been completed.
+ *
+ * A value of zero will be returned on success, a negative errno will
+ * be returned in error cases.
+ */
+int regmap_raw_write_async(struct regmap *map, unsigned int reg,
+ const void *val, size_t val_len)
+{
+ int ret;
+
+ if (val_len % map->format.val_bytes)
+ return -EINVAL;
+ if (reg % map->reg_stride)
+ return -EINVAL;
+
+ map->lock(map->lock_arg);
+
+ ret = _regmap_raw_write(map, reg, val, val_len, true);
+
+ map->unlock(map->lock_arg);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_raw_write_async);
+
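
A hedged consumer sketch of the async API added here, e.g. for streaming a firmware image; the download register is hypothetical, and the caller must keep the buffer valid until completion:

#include <linux/regmap.h>

static int example_download_fw(struct regmap *map, unsigned int dl_reg,
                               const void *fw, size_t len)
{
        int ret;

        /* On buses without async_write this completes synchronously;
         * otherwise the transfer is queued and we return immediately. */
        ret = regmap_raw_write_async(map, dl_reg, fw, len);
        if (ret)
                return ret;

        /* Block until everything queued on this map has finished and
         * collect the first error, if any. */
        return regmap_async_complete(map);
}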
static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
unsigned int val_len)
{
+ struct regmap_range_node *range;
u8 *u8 = map->work_buf;
int ret;
- ret = _regmap_select_page(map, &reg, val_len / map->format.val_bytes);
- if (ret < 0)
- return ret;
+ BUG_ON(!map->bus);
+
+ range = _regmap_range_lookup(map, reg);
+ if (range) {
+ ret = _regmap_select_page(map, &reg, range,
+ val_len / map->format.val_bytes);
+ if (ret != 0)
+ return ret;
+ }
map->format.format_reg(map->work_buf, reg, map->reg_shift);
@@ -1085,10 +1369,29 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
return ret;
}
+static int _regmap_bus_read(void *context, unsigned int reg,
+ unsigned int *val)
+{
+ int ret;
+ struct regmap *map = context;
+
+ if (!map->format.parse_val)
+ return -EINVAL;
+
+ ret = _regmap_raw_read(map, reg, map->work_buf, map->format.val_bytes);
+ if (ret == 0)
+ *val = map->format.parse_val(map->work_buf);
+
+ return ret;
+}
+
static int _regmap_read(struct regmap *map, unsigned int reg,
unsigned int *val)
{
int ret;
+ void *context = _regmap_map_get_context(map);
+
+ BUG_ON(!map->reg_read);
if (!map->cache_bypass) {
ret = regcache_read(map, reg, val);
@@ -1096,26 +1399,21 @@ static int _regmap_read(struct regmap *map, unsigned int reg,
return 0;
}
- if (!map->format.parse_val)
- return -EINVAL;
-
if (map->cache_only)
return -EBUSY;
- ret = _regmap_raw_read(map, reg, map->work_buf, map->format.val_bytes);
+ ret = map->reg_read(context, reg, val);
if (ret == 0) {
- *val = map->format.parse_val(map->work_buf);
-
#ifdef LOG_DEVICE
if (strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
dev_info(map->dev, "%x => %x\n", reg, *val);
#endif
trace_regmap_reg_read(map->dev, reg, *val);
- }
- if (ret == 0 && !map->cache_bypass)
- regcache_write(map, reg, *val);
+ if (!map->cache_bypass)
+ regcache_write(map, reg, *val);
+ }
return ret;
}
@@ -1137,11 +1435,11 @@ int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val)
if (reg % map->reg_stride)
return -EINVAL;
- map->lock(map);
+ map->lock(map->lock_arg);
ret = _regmap_read(map, reg, val);
- map->unlock(map);
+ map->unlock(map->lock_arg);
return ret;
}
@@ -1166,12 +1464,14 @@ int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
unsigned int v;
int ret, i;
+ if (!map->bus)
+ return -EINVAL;
if (val_len % map->format.val_bytes)
return -EINVAL;
if (reg % map->reg_stride)
return -EINVAL;
- map->lock(map);
+ map->lock(map->lock_arg);
if (regmap_volatile_range(map, reg, val_count) || map->cache_bypass ||
map->cache_type == REGCACHE_NONE) {
@@ -1193,7 +1493,7 @@ int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
}
out:
- map->unlock(map);
+ map->unlock(map->lock_arg);
return ret;
}
@@ -1217,6 +1517,8 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
size_t val_bytes = map->format.val_bytes;
bool vol = regmap_volatile_range(map, reg, val_count);
+ if (!map->bus)
+ return -EINVAL;
if (!map->format.parse_val)
return -EINVAL;
if (reg % map->reg_stride)
@@ -1300,9 +1602,9 @@ int regmap_update_bits(struct regmap *map, unsigned int reg,
bool change;
int ret;
- map->lock(map);
+ map->lock(map->lock_arg);
ret = _regmap_update_bits(map, reg, mask, val, &change);
- map->unlock(map);
+ map->unlock(map->lock_arg);
return ret;
}
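
For completeness, a hedged sketch of the read-modify-write helper whose locking these hunks rework; the register and bit names are invented:

#include <linux/bitops.h>
#include <linux/regmap.h>

#define EXAMPLE_REG_CTRL	0x02	/* hypothetical */
#define EXAMPLE_CTRL_EN		BIT(0)

static int example_enable(struct regmap *map)
{
        /* The map's lock (or the caller-supplied lock_arg) serialises
         * the read-modify-write cycle. */
        return regmap_update_bits(map, EXAMPLE_REG_CTRL,
                                  EXAMPLE_CTRL_EN, EXAMPLE_CTRL_EN);
}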
@@ -1326,13 +1628,75 @@ int regmap_update_bits_check(struct regmap *map, unsigned int reg,
{
int ret;
- map->lock(map);
+ map->lock(map->lock_arg);
ret = _regmap_update_bits(map, reg, mask, val, change);
- map->unlock(map);
+ map->unlock(map->lock_arg);
return ret;
}
EXPORT_SYMBOL_GPL(regmap_update_bits_check);
+void regmap_async_complete_cb(struct regmap_async *async, int ret)
+{
+ struct regmap *map = async->map;
+ bool wake;
+
+ spin_lock(&map->async_lock);
+
+ list_del(&async->list);
+ wake = list_empty(&map->async_list);
+
+ if (ret != 0)
+ map->async_ret = ret;
+
+ spin_unlock(&map->async_lock);
+
+ schedule_work(&async->cleanup);
+
+ if (wake)
+ wake_up(&map->async_waitq);
+}
+EXPORT_SYMBOL_GPL(regmap_async_complete_cb);
+
+static int regmap_async_is_done(struct regmap *map)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&map->async_lock, flags);
+ ret = list_empty(&map->async_list);
+ spin_unlock_irqrestore(&map->async_lock, flags);
+
+ return ret;
+}
+
+/**
+ * regmap_async_complete: Ensure all asynchronous I/O has completed.
+ *
+ * @map: Map to operate on.
+ *
+ * Blocks until any pending asynchronous I/O has completed. Returns
+ * an error code for any failed I/O operations.
+ */
+int regmap_async_complete(struct regmap *map)
+{
+ unsigned long flags;
+ int ret;
+
+ /* Nothing to do with no async support */
+ if (!map->bus->async_write)
+ return 0;
+
+ wait_event(map->async_waitq, regmap_async_is_done(map));
+
+ spin_lock_irqsave(&map->async_lock, flags);
+ ret = map->async_ret;
+ map->async_ret = 0;
+ spin_unlock_irqrestore(&map->async_lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_async_complete);
+
/**
* regmap_register_patch: Register and apply register updates to be applied
 * on device initialisation
@@ -1357,7 +1721,7 @@ int regmap_register_patch(struct regmap *map, const struct reg_default *regs,
if (map->patch)
return -EBUSY;
- map->lock(map);
+ map->lock(map->lock_arg);
bypass = map->cache_bypass;
@@ -1385,7 +1749,7 @@ int regmap_register_patch(struct regmap *map, const struct reg_default *regs,
out:
map->cache_bypass = bypass;
- map->unlock(map);
+ map->unlock(map->lock_arg);
return ret;
}
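
Finally, a hedged sketch of the patch interface these last hunks touch: a one-shot list of register writes applied at registration time and replayed on cache sync; addresses and values are invented:

#include <linux/kernel.h>
#include <linux/regmap.h>

/* Hypothetical errata workaround writes. */
static const struct reg_default example_patch[] = {
        { 0x71, 0x0004 },
        { 0x72, 0x0901 },
};

static int example_apply_patch(struct regmap *map)
{
        return regmap_register_patch(map, example_patch,
                                     ARRAY_SIZE(example_patch));
}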