Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/acpi.h              3
-rw-r--r--  include/linux/blkdev.h            3
-rw-r--r--  include/linux/configfs.h          4
-rw-r--r--  include/linux/device.h            9
-rw-r--r--  include/linux/freezer.h          10
-rw-r--r--  include/linux/fs.h                2
-rw-r--r--  include/linux/ide.h              74
-rw-r--r--  include/linux/inet.h              7
-rw-r--r--  include/linux/init_task.h         4
-rw-r--r--  include/linux/ioport.h            5
-rw-r--r--  include/linux/mmc/core.h          1
-rw-r--r--  include/linux/mmc/host.h         32
-rw-r--r--  include/linux/mmc/mmc.h           1
-rw-r--r--  include/linux/mmc/sdio_func.h    21
-rw-r--r--  include/linux/nfs_fs.h           10
-rw-r--r--  include/linux/nfs_iostat.h      119
-rw-r--r--  include/linux/nfs_page.h          9
-rw-r--r--  include/linux/nfs_xdr.h           3
-rw-r--r--  include/linux/pci.h              57
-rw-r--r--  include/linux/pci_hotplug.h      14
-rw-r--r--  include/linux/pci_ids.h           1
-rw-r--r--  include/linux/pci_regs.h          1
-rw-r--r--  include/linux/platform_device.h   1
-rw-r--r--  include/linux/pm.h              314
-rw-r--r--  include/linux/pm_wakeup.h        28
-rw-r--r--  include/linux/pnp.h             146
-rw-r--r--  include/linux/sched.h            27
-rw-r--r--  include/linux/spi/mmc_spi.h       9
-rw-r--r--  include/linux/sunrpc/clnt.h       7
-rw-r--r--  include/linux/sunrpc/sched.h      1
-rw-r--r--  include/linux/suspend.h          14
31 files changed, 730 insertions, 207 deletions
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 0601075d09a1..a17177639376 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -235,6 +235,9 @@ int acpi_check_region(resource_size_t start, resource_size_t n,
int acpi_check_mem_region(resource_size_t start, resource_size_t n,
const char *name);
+#ifdef CONFIG_PM_SLEEP
+void __init acpi_old_suspend_ordering(void);
+#endif /* CONFIG_PM_SLEEP */
#else /* CONFIG_ACPI */
static inline int early_acpi_boot_init(void)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 32a441b05fd5..88d68081a0f1 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -985,6 +985,9 @@ static inline int bdev_integrity_enabled(struct block_device *bdev, int rw)
static inline int blk_integrity_rq(struct request *rq)
{
+ if (rq->bio == NULL)
+ return 0;
+
return bio_integrity(rq->bio);
}
diff --git a/include/linux/configfs.h b/include/linux/configfs.h
index 3ae65b1bf90f..0488f937634a 100644
--- a/include/linux/configfs.h
+++ b/include/linux/configfs.h
@@ -165,8 +165,8 @@ struct configfs_item_operations {
};
struct configfs_group_operations {
- struct config_item *(*make_item)(struct config_group *group, const char *name);
- struct config_group *(*make_group)(struct config_group *group, const char *name);
+ int (*make_item)(struct config_group *group, const char *name, struct config_item **new_item);
+ int (*make_group)(struct config_group *group, const char *name, struct config_group **new_group);
int (*commit_item)(struct config_item *item);
void (*disconnect_notify)(struct config_group *group, struct config_item *item);
void (*drop_item)(struct config_group *group, struct config_item *item);
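Illustrative only (not part of this patch): under the new signature a make_item callback returns an errno and hands the new item back through an out parameter. The foo_item structure and foo_item_type below are invented names.

static int foo_make_item(struct config_group *group, const char *name,
			 struct config_item **new_item)
{
	struct foo_item *foo;

	foo = kzalloc(sizeof(*foo), GFP_KERNEL);
	if (!foo)
		return -ENOMEM;		/* failures are now reported directly */

	config_item_init_type_name(&foo->item, name, &foo_item_type);
	*new_item = &foo->item;		/* new item goes back via the out parameter */
	return 0;
}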
diff --git a/include/linux/device.h b/include/linux/device.h
index 6a2d04c011bc..f71a78d123ae 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -68,6 +68,8 @@ struct bus_type {
int (*resume_early)(struct device *dev);
int (*resume)(struct device *dev);
+ struct pm_ext_ops *pm;
+
struct bus_type_private *p;
};
@@ -131,6 +133,8 @@ struct device_driver {
int (*resume) (struct device *dev);
struct attribute_group **groups;
+ struct pm_ops *pm;
+
struct driver_private *p;
};
@@ -197,6 +201,8 @@ struct class {
int (*suspend)(struct device *dev, pm_message_t state);
int (*resume)(struct device *dev);
+
+ struct pm_ops *pm;
};
extern int __must_check class_register(struct class *class);
@@ -248,8 +254,11 @@ struct device_type {
struct attribute_group **groups;
int (*uevent)(struct device *dev, struct kobj_uevent_env *env);
void (*release)(struct device *dev);
+
int (*suspend)(struct device *dev, pm_message_t state);
int (*resume)(struct device *dev);
+
+ struct pm_ops *pm;
};
/* interface for exporting device attributes */
diff --git a/include/linux/freezer.h b/include/linux/freezer.h
index 08934995c7ab..deddeedf3257 100644
--- a/include/linux/freezer.h
+++ b/include/linux/freezer.h
@@ -128,6 +128,15 @@ static inline void set_freezable(void)
}
/*
+ * Tell the freezer that the current task should be frozen by it and that it
+ * should send a fake signal to the task to freeze it.
+ */
+static inline void set_freezable_with_signal(void)
+{
+ current->flags &= ~(PF_NOFREEZE | PF_FREEZER_NOSIG);
+}
+
+/*
* Freezer-friendly wrappers around wait_event_interruptible() and
* wait_event_interruptible_timeout(), originally defined in <linux/wait.h>
*/
@@ -174,6 +183,7 @@ static inline void freezer_do_not_count(void) {}
static inline void freezer_count(void) {}
static inline int freezer_should_skip(struct task_struct *p) { return 0; }
static inline void set_freezable(void) {}
+static inline void set_freezable_with_signal(void) {}
#define wait_event_freezable(wq, condition) \
wait_event_interruptible(wq, condition)
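A rough usage sketch, not part of this patch: a kernel thread that wants the fake-signal freeze behaviour could be structured as below; foo_thread and the loop body are hypothetical.

static int foo_thread(void *data)
{
	set_freezable_with_signal();	/* clears PF_NOFREEZE and PF_FREEZER_NOSIG */

	while (!kthread_should_stop()) {
		try_to_freeze();	/* block here whenever the freezer asks */
		/* ... interruptible work ... */
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}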
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 52e510a0aec2..c6455dadb21b 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1729,6 +1729,8 @@ static inline void invalidate_remote_inode(struct inode *inode)
extern int invalidate_inode_pages2(struct address_space *mapping);
extern int invalidate_inode_pages2_range(struct address_space *mapping,
pgoff_t start, pgoff_t end);
+extern void generic_sync_sb_inodes(struct super_block *sb,
+ struct writeback_control *wbc);
extern int write_inode_now(struct inode *, int);
extern int filemap_fdatawrite(struct address_space *);
extern int filemap_flush(struct address_space *);
diff --git a/include/linux/ide.h b/include/linux/ide.h
index ac4eeb2932ef..4726126f5a59 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -139,6 +139,12 @@ struct ide_io_ports {
#define WAIT_MIN_SLEEP (2*HZ/100) /* 20msec - minimum sleep time */
/*
+ * Op codes for special requests to be handled by ide_special_rq().
+ * Values should be in the range of 0x20 to 0x3f.
+ */
+#define REQ_DRIVE_RESET 0x20
+
+/*
* Check for an interrupt and acknowledge the interrupt status
*/
struct hwif_s;
@@ -171,7 +177,7 @@ typedef struct hw_regs_s {
int irq; /* our irq number */
ide_ack_intr_t *ack_intr; /* acknowledge interrupt */
hwif_chipset_t chipset;
- struct device *dev;
+ struct device *dev, *parent;
} hw_regs_t;
void ide_init_port_data(struct hwif_s *, unsigned int);
@@ -405,8 +411,8 @@ typedef struct ide_drive_s {
struct ide_port_info;
struct ide_port_ops {
- /* host specific initialization of devices on a port */
- void (*port_init_devs)(struct hwif_s *);
+ /* host specific initialization of a device */
+ void (*init_dev)(ide_drive_t *);
/* routine to program host for PIO mode */
void (*set_pio_mode)(ide_drive_t *, const u8);
/* routine to program host for DMA mode */
@@ -565,8 +571,6 @@ typedef struct hwgroup_s {
unsigned int sleeping : 1;
/* BOOL: polling active & poll_timeout field valid */
unsigned int polling : 1;
- /* BOOL: in a polling reset situation. Must not trigger another reset yet */
- unsigned int resetting : 1;
/* current drive */
ide_drive_t *drive;
@@ -786,7 +790,6 @@ struct ide_driver_s {
ide_startstop_t (*do_request)(ide_drive_t *, struct request *, sector_t);
int (*end_request)(ide_drive_t *, int, int);
ide_startstop_t (*error)(ide_drive_t *, struct request *rq, u8, u8);
- ide_startstop_t (*abort)(ide_drive_t *, struct request *rq);
struct device_driver gen_driver;
int (*probe)(ide_drive_t *);
void (*remove)(ide_drive_t *);
@@ -801,18 +804,6 @@ struct ide_driver_s {
int generic_ide_ioctl(ide_drive_t *, struct file *, struct block_device *, unsigned, unsigned long);
-/*
- * ide_hwifs[] is the master data structure used to keep track
- * of just about everything in ide.c. Whenever possible, routines
- * should be using pointers to a drive (ide_drive_t *) or
- * pointers to a hwif (ide_hwif_t *), rather than indexing this
- * structure directly (the allocation/layout may change!).
- *
- */
-#ifndef _IDE_C
-extern ide_hwif_t ide_hwifs[]; /* master data repository */
-#endif
-
extern int ide_vlb_clk;
extern int ide_pci_clk;
@@ -840,10 +831,6 @@ ide_startstop_t __ide_error(ide_drive_t *, struct request *, u8, u8);
ide_startstop_t ide_error (ide_drive_t *drive, const char *msg, byte stat);
-ide_startstop_t __ide_abort(ide_drive_t *, struct request *);
-
-extern ide_startstop_t ide_abort(ide_drive_t *, const char *);
-
extern void ide_fix_driveid(struct hd_driveid *);
extern void ide_fixstring(u8 *, const int, const int);
@@ -1271,16 +1258,43 @@ static inline int ide_dev_is_sata(struct hd_driveid *id)
u64 ide_get_lba_addr(struct ide_taskfile *, int);
u8 ide_dump_status(ide_drive_t *, const char *, u8);
-typedef struct ide_pio_timings_s {
- int setup_time; /* Address setup (ns) minimum */
- int active_time; /* Active pulse (ns) minimum */
- int cycle_time; /* Cycle time (ns) minimum = */
- /* active + recovery (+ setup for some chips) */
-} ide_pio_timings_t;
+struct ide_timing {
+ u8 mode;
+ u8 setup; /* t1 */
+ u16 act8b; /* t2 for 8-bit io */
+ u16 rec8b; /* t2i for 8-bit io */
+ u16 cyc8b; /* t0 for 8-bit io */
+ u16 active; /* t2 or tD */
+ u16 recover; /* t2i or tK */
+ u16 cycle; /* t0 */
+ u16 udma; /* t2CYCTYP/2 */
+};
+
+enum {
+ IDE_TIMING_SETUP = (1 << 0),
+ IDE_TIMING_ACT8B = (1 << 1),
+ IDE_TIMING_REC8B = (1 << 2),
+ IDE_TIMING_CYC8B = (1 << 3),
+ IDE_TIMING_8BIT = IDE_TIMING_ACT8B | IDE_TIMING_REC8B |
+ IDE_TIMING_CYC8B,
+ IDE_TIMING_ACTIVE = (1 << 4),
+ IDE_TIMING_RECOVER = (1 << 5),
+ IDE_TIMING_CYCLE = (1 << 6),
+ IDE_TIMING_UDMA = (1 << 7),
+ IDE_TIMING_ALL = IDE_TIMING_SETUP | IDE_TIMING_8BIT |
+ IDE_TIMING_ACTIVE | IDE_TIMING_RECOVER |
+ IDE_TIMING_CYCLE | IDE_TIMING_UDMA,
+};
+
+struct ide_timing *ide_timing_find_mode(u8);
+u16 ide_pio_cycle_time(ide_drive_t *, u8);
+void ide_timing_merge(struct ide_timing *, struct ide_timing *,
+ struct ide_timing *, unsigned int);
+int ide_timing_compute(ide_drive_t *, u8, struct ide_timing *, int, int);
+
+int ide_scan_pio_blacklist(char *);
-unsigned int ide_pio_cycle_time(ide_drive_t *, u8);
u8 ide_get_best_pio_mode(ide_drive_t *, u8, u8);
-extern const ide_pio_timings_t ide_pio_timings[6];
int ide_set_pio_mode(ide_drive_t *, u8);
int ide_set_dma_mode(ide_drive_t *, u8);
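A hedged sketch of how a host driver's set_dma_mode hook might use the new timing helpers; the 30 ns bus clock, 20 ns UDMA clock and the register programming are assumptions for illustration, not taken from this patch.

static void foo_set_dma_mode(ide_drive_t *drive, const u8 speed)
{
	struct ide_timing t;

	/* compute timings for 'speed', quantized to a 30 ns cycle (T)
	 * and a 20 ns UDMA cycle (UT); both clock values are made up */
	if (ide_timing_compute(drive, speed, &t, 30, 20) < 0)
		return;

	/* program t.active, t.recover and t.udma into the controller
	 * registers; entirely hardware specific */
}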
diff --git a/include/linux/inet.h b/include/linux/inet.h
index 1354080cf8cf..4cca05c9678e 100644
--- a/include/linux/inet.h
+++ b/include/linux/inet.h
@@ -44,6 +44,13 @@
#include <linux/types.h>
+/*
+ * These mimic similar macros defined in user-space for inet_ntop(3).
+ * See /usr/include/netinet/in.h .
+ */
+#define INET_ADDRSTRLEN (16)
+#define INET6_ADDRSTRLEN (48)
+
extern __be32 in_aton(const char *str);
extern int in4_pton(const char *src, int srclen, u8 *dst, int delim, const char **end);
extern int in6_pton(const char *src, int srclen, u8 *dst, int delim, const char **end);
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 9927a88674a3..93c45acf249a 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -140,8 +140,8 @@ extern struct group_info init_groups;
.nr_cpus_allowed = NR_CPUS, \
}, \
.tasks = LIST_HEAD_INIT(tsk.tasks), \
- .ptrace_children= LIST_HEAD_INIT(tsk.ptrace_children), \
- .ptrace_list = LIST_HEAD_INIT(tsk.ptrace_list), \
+ .ptraced = LIST_HEAD_INIT(tsk.ptraced), \
+ .ptrace_entry = LIST_HEAD_INIT(tsk.ptrace_entry), \
.real_parent = &tsk, \
.parent = &tsk, \
.children = LIST_HEAD_INIT(tsk.children), \
diff --git a/include/linux/ioport.h b/include/linux/ioport.h
index c6801bffe76d..2cd07cc29687 100644
--- a/include/linux/ioport.h
+++ b/include/linux/ioport.h
@@ -59,6 +59,7 @@ struct resource_list {
#define IORESOURCE_IRQ_HIGHLEVEL (1<<2)
#define IORESOURCE_IRQ_LOWLEVEL (1<<3)
#define IORESOURCE_IRQ_SHAREABLE (1<<4)
+#define IORESOURCE_IRQ_OPTIONAL (1<<5)
/* PnP DMA specific bits (IORESOURCE_BITS) */
#define IORESOURCE_DMA_TYPE_MASK (3<<0)
@@ -88,6 +89,10 @@ struct resource_list {
#define IORESOURCE_MEM_SHADOWABLE (1<<5) /* dup: IORESOURCE_SHADOWABLE */
#define IORESOURCE_MEM_EXPANSIONROM (1<<6)
+/* PnP I/O specific bits (IORESOURCE_BITS) */
+#define IORESOURCE_IO_16BIT_ADDR (1<<0)
+#define IORESOURCE_IO_FIXED (1<<1)
+
/* PCI ROM control bits (IORESOURCE_BITS) */
#define IORESOURCE_ROM_ENABLE (1<<0) /* ROM is enabled, same as PCI_ROM_ADDRESS_ENABLE */
#define IORESOURCE_ROM_SHADOW (1<<1) /* ROM is copy at C000:0 */
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
index d0c3abed74c2..143cebf0586f 100644
--- a/include/linux/mmc/core.h
+++ b/include/linux/mmc/core.h
@@ -135,6 +135,7 @@ extern int mmc_wait_for_app_cmd(struct mmc_host *, struct mmc_card *,
struct mmc_command *, int);
extern void mmc_set_data_timeout(struct mmc_data *, const struct mmc_card *);
+extern unsigned int mmc_align_data_size(struct mmc_card *, unsigned int);
extern int __mmc_claim_host(struct mmc_host *host, atomic_t *abort);
extern void mmc_release_host(struct mmc_host *host);
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 7ab962fa1d73..10a2080086ca 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -51,8 +51,30 @@ struct mmc_ios {
struct mmc_host_ops {
void (*request)(struct mmc_host *host, struct mmc_request *req);
+ /*
+ * Avoid calling these three functions too often or in a "fast path",
+ * since the underlying controller might implement them in an expensive
+ * and/or slow way.
+ *
+ * Also note that these functions might sleep, so don't call them
+ * in atomic context!
+ *
+ * Return values for the get_ro callback should be:
+ * 0 for a read/write card
+ * 1 for a read-only card
+ * -ENOSYS when not supported (equal to NULL callback)
+ * or a negative errno value when something bad happened
+ *
+ * Return values for the get_cd callback should be:
+ * 0 for an absent card
+ * 1 for a present card
+ * -ENOSYS when not supported (equal to NULL callback)
+ * or a negative errno value when something bad happened
+ */
void (*set_ios)(struct mmc_host *host, struct mmc_ios *ios);
int (*get_ro)(struct mmc_host *host);
+ int (*get_cd)(struct mmc_host *host);
+
void (*enable_sdio_irq)(struct mmc_host *host, int enable);
};
@@ -89,11 +111,11 @@ struct mmc_host {
unsigned long caps; /* Host capabilities */
#define MMC_CAP_4_BIT_DATA (1 << 0) /* Can the host do 4 bit transfers */
-#define MMC_CAP_MULTIWRITE (1 << 1) /* Can accurately report bytes sent to card on error */
-#define MMC_CAP_MMC_HIGHSPEED (1 << 2) /* Can do MMC high-speed timing */
-#define MMC_CAP_SD_HIGHSPEED (1 << 3) /* Can do SD high-speed timing */
-#define MMC_CAP_SDIO_IRQ (1 << 4) /* Can signal pending SDIO IRQs */
-#define MMC_CAP_SPI (1 << 5) /* Talks only SPI protocols */
+#define MMC_CAP_MMC_HIGHSPEED (1 << 1) /* Can do MMC high-speed timing */
+#define MMC_CAP_SD_HIGHSPEED (1 << 2) /* Can do SD high-speed timing */
+#define MMC_CAP_SDIO_IRQ (1 << 3) /* Can signal pending SDIO IRQs */
+#define MMC_CAP_SPI (1 << 4) /* Talks only SPI protocols */
+#define MMC_CAP_NEEDS_POLL (1 << 5) /* Needs polling for card-detection */
/* host specific block data */
unsigned int max_seg_size; /* see blk_queue_max_segment_size */
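For illustration only, a GPIO-based host could implement the two callbacks roughly as below; foo_host, its ro_gpio/cd_gpio fields and the active-high polarity are assumptions. A host with no card-detect line at all would instead set MMC_CAP_NEEDS_POLL.

static int foo_mmc_get_ro(struct mmc_host *mmc)
{
	struct foo_host *host = mmc_priv(mmc);

	if (!gpio_is_valid(host->ro_gpio))
		return -ENOSYS;				/* same as a NULL callback */

	return gpio_get_value(host->ro_gpio) ? 1 : 0;	/* 1 = read-only */
}

static int foo_mmc_get_cd(struct mmc_host *mmc)
{
	struct foo_host *host = mmc_priv(mmc);

	if (!gpio_is_valid(host->cd_gpio))
		return -ENOSYS;

	return gpio_get_value(host->cd_gpio) ? 1 : 0;	/* 1 = card present */
}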
diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h
index 4236fbf0b6fb..14b81f3e5232 100644
--- a/include/linux/mmc/mmc.h
+++ b/include/linux/mmc/mmc.h
@@ -16,7 +16,6 @@
* Based strongly on code by:
*
* Author: Yong-iL Joh <tolkien@mizi.com>
- * Date : $Date: 2002/06/18 12:37:30 $
*
* Author: Andrew Christian
* 15 May 2002
diff --git a/include/linux/mmc/sdio_func.h b/include/linux/mmc/sdio_func.h
index b050f4d7b41f..07bee4a0d457 100644
--- a/include/linux/mmc/sdio_func.h
+++ b/include/linux/mmc/sdio_func.h
@@ -1,7 +1,7 @@
/*
* include/linux/mmc/sdio_func.h
*
- * Copyright 2007 Pierre Ossman
+ * Copyright 2007-2008 Pierre Ossman
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -46,6 +46,8 @@ struct sdio_func {
unsigned max_blksize; /* maximum block size */
unsigned cur_blksize; /* current block size */
+ unsigned enable_timeout; /* max enable timeout in msec */
+
unsigned int state; /* function state */
#define SDIO_STATE_PRESENT (1<<0) /* present in sysfs */
@@ -120,23 +122,22 @@ extern int sdio_set_block_size(struct sdio_func *func, unsigned blksz);
extern int sdio_claim_irq(struct sdio_func *func, sdio_irq_handler_t *handler);
extern int sdio_release_irq(struct sdio_func *func);
-extern unsigned char sdio_readb(struct sdio_func *func,
- unsigned int addr, int *err_ret);
-extern unsigned short sdio_readw(struct sdio_func *func,
- unsigned int addr, int *err_ret);
-extern unsigned long sdio_readl(struct sdio_func *func,
- unsigned int addr, int *err_ret);
+extern unsigned int sdio_align_size(struct sdio_func *func, unsigned int sz);
+
+extern u8 sdio_readb(struct sdio_func *func, unsigned int addr, int *err_ret);
+extern u16 sdio_readw(struct sdio_func *func, unsigned int addr, int *err_ret);
+extern u32 sdio_readl(struct sdio_func *func, unsigned int addr, int *err_ret);
extern int sdio_memcpy_fromio(struct sdio_func *func, void *dst,
unsigned int addr, int count);
extern int sdio_readsb(struct sdio_func *func, void *dst,
unsigned int addr, int count);
-extern void sdio_writeb(struct sdio_func *func, unsigned char b,
+extern void sdio_writeb(struct sdio_func *func, u8 b,
unsigned int addr, int *err_ret);
-extern void sdio_writew(struct sdio_func *func, unsigned short b,
+extern void sdio_writew(struct sdio_func *func, u16 b,
unsigned int addr, int *err_ret);
-extern void sdio_writel(struct sdio_func *func, unsigned long b,
+extern void sdio_writel(struct sdio_func *func, u32 b,
unsigned int addr, int *err_ret);
extern int sdio_memcpy_toio(struct sdio_func *func, unsigned int addr,
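A usage sketch for the retyped byte accessor; foo_read_status and FOO_REG are made-up names, and the host must be claimed around the access as usual.

static int foo_read_status(struct sdio_func *func, u8 *status)
{
	int err;

	sdio_claim_host(func);
	*status = sdio_readb(func, FOO_REG, &err);	/* FOO_REG is hypothetical */
	sdio_release_host(func);

	return err;		/* *status is undefined when err is non-zero */
}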
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 27d6a8d98cef..29d261918734 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -12,9 +12,19 @@
#include <linux/magic.h>
/* Default timeout values */
+#define NFS_DEF_UDP_TIMEO (11)
+#define NFS_DEF_UDP_RETRANS (3)
+#define NFS_DEF_TCP_TIMEO (600)
+#define NFS_DEF_TCP_RETRANS (2)
+
#define NFS_MAX_UDP_TIMEOUT (60*HZ)
#define NFS_MAX_TCP_TIMEOUT (600*HZ)
+#define NFS_DEF_ACREGMIN (3)
+#define NFS_DEF_ACREGMAX (60)
+#define NFS_DEF_ACDIRMIN (30)
+#define NFS_DEF_ACDIRMAX (60)
+
/*
* When flushing a cluster of dirty pages, there can be different
* strategies:
diff --git a/include/linux/nfs_iostat.h b/include/linux/nfs_iostat.h
new file mode 100644
index 000000000000..1cb9a3fed2b3
--- /dev/null
+++ b/include/linux/nfs_iostat.h
@@ -0,0 +1,119 @@
+/*
+ * User-space visible declarations for NFS client per-mount
+ * point statistics
+ *
+ * Copyright (C) 2005, 2006 Chuck Lever <cel@netapp.com>
+ *
+ * NFS client per-mount statistics provide information about the
+ * health of the NFS client and the health of each NFS mount point.
+ * Generally these are not for detailed problem diagnosis, but
+ * simply to indicate that there is a problem.
+ *
+ * These counters are not meant to be human-readable, but are meant
+ * to be integrated into system monitoring tools such as "sar" and
+ * "iostat". As such, the counters are sampled by the tools over
+ * time, and are never zeroed after a file system is mounted.
+ * Moving averages can be computed by the tools by taking the
+ * difference between two instantaneous samples and dividing that
+ * by the time between the samples.
+ */
+
+#ifndef _LINUX_NFS_IOSTAT
+#define _LINUX_NFS_IOSTAT
+
+#define NFS_IOSTAT_VERS "1.0"
+
+/*
+ * NFS byte counters
+ *
+ * 1. SERVER - the number of payload bytes read from or written
+ * to the server by the NFS client via an NFS READ or WRITE
+ * request.
+ *
+ * 2. NORMAL - the number of bytes read or written by applications
+ * via the read(2) and write(2) system call interfaces.
+ *
+ * 3. DIRECT - the number of bytes read or written from files
+ * opened with the O_DIRECT flag.
+ *
+ * These counters give a view of the data throughput into and out
+ * of the NFS client. Comparing the number of bytes requested by
+ * an application with the number of bytes the client requests from
+ * the server can provide an indication of client efficiency
+ * (per-op, cache hits, etc).
+ *
+ * These counters can also help characterize which access methods
+ * are in use. DIRECT by itself shows whether there is any O_DIRECT
+ * traffic. NORMAL + DIRECT shows how much data is going through
+ * the system call interface. A large amount of SERVER traffic
+ * without much NORMAL or DIRECT traffic shows that applications
+ * are using mapped files.
+ *
+ * NFS page counters
+ *
+ * These count the number of pages read or written via nfs_readpage(),
+ * nfs_readpages(), or their write equivalents.
+ *
+ * NB: When adding new byte counters, please include the measured
+ * units in the name of each byte counter to help users of this
+ * interface determine what exactly is being counted.
+ */
+enum nfs_stat_bytecounters {
+ NFSIOS_NORMALREADBYTES = 0,
+ NFSIOS_NORMALWRITTENBYTES,
+ NFSIOS_DIRECTREADBYTES,
+ NFSIOS_DIRECTWRITTENBYTES,
+ NFSIOS_SERVERREADBYTES,
+ NFSIOS_SERVERWRITTENBYTES,
+ NFSIOS_READPAGES,
+ NFSIOS_WRITEPAGES,
+ __NFSIOS_BYTESMAX,
+};
+
+/*
+ * NFS event counters
+ *
+ * These counters provide a low-overhead way of monitoring client
+ * activity without enabling NFS trace debugging. The counters
+ * show the rate at which VFS requests are made, and how often the
+ * client invalidates its data and attribute caches. This allows
+ * system administrators to monitor such things as how close-to-open
+ * is working, and answer questions such as "why are there so many
+ * GETATTR requests on the wire?"
+ *
+ * They also count anomalous events such as short reads and writes,
+ * silly renames due to close-after-delete, and operations that
+ * change the size of a file (such operations can often be the
+ * source of data corruption if applications aren't using file
+ * locking properly).
+ */
+enum nfs_stat_eventcounters {
+ NFSIOS_INODEREVALIDATE = 0,
+ NFSIOS_DENTRYREVALIDATE,
+ NFSIOS_DATAINVALIDATE,
+ NFSIOS_ATTRINVALIDATE,
+ NFSIOS_VFSOPEN,
+ NFSIOS_VFSLOOKUP,
+ NFSIOS_VFSACCESS,
+ NFSIOS_VFSUPDATEPAGE,
+ NFSIOS_VFSREADPAGE,
+ NFSIOS_VFSREADPAGES,
+ NFSIOS_VFSWRITEPAGE,
+ NFSIOS_VFSWRITEPAGES,
+ NFSIOS_VFSGETDENTS,
+ NFSIOS_VFSSETATTR,
+ NFSIOS_VFSFLUSH,
+ NFSIOS_VFSFSYNC,
+ NFSIOS_VFSLOCK,
+ NFSIOS_VFSRELEASE,
+ NFSIOS_CONGESTIONWAIT,
+ NFSIOS_SETATTRTRUNC,
+ NFSIOS_EXTENDWRITE,
+ NFSIOS_SILLYRENAME,
+ NFSIOS_SHORTREAD,
+ NFSIOS_SHORTWRITE,
+ NFSIOS_DELAY,
+ __NFSIOS_COUNTSMAX,
+};
+
+#endif /* _LINUX_NFS_IOSTAT */
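To make the sampling note above concrete, a monitoring tool might derive a throughput figure as in this user-space sketch (not kernel code, invented names).

/* bytes per second for one counter, from two samples taken dt seconds apart */
static double nfsios_rate(unsigned long long earlier,
			  unsigned long long later, double dt)
{
	return dt > 0 ? (double)(later - earlier) / dt : 0.0;
}

/* e.g. nfsios_rate(prev[NFSIOS_SERVERREADBYTES],
 *		    curr[NFSIOS_SERVERREADBYTES], 10.0) */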
diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h
index a1676e19e491..3c60685d972b 100644
--- a/include/linux/nfs_page.h
+++ b/include/linux/nfs_page.h
@@ -27,9 +27,12 @@
/*
* Valid flags for a dirty buffer
*/
-#define PG_BUSY 0
-#define PG_NEED_COMMIT 1
-#define PG_NEED_RESCHED 2
+enum {
+ PG_BUSY = 0,
+ PG_CLEAN,
+ PG_NEED_COMMIT,
+ PG_NEED_RESCHED,
+};
struct nfs_inode;
struct nfs_page {
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 24263bb8e0be..8c77c11224d1 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -829,9 +829,8 @@ struct nfs_rpc_ops {
int (*write_done) (struct rpc_task *, struct nfs_write_data *);
void (*commit_setup) (struct nfs_write_data *, struct rpc_message *);
int (*commit_done) (struct rpc_task *, struct nfs_write_data *);
- int (*file_open) (struct inode *, struct file *);
- int (*file_release) (struct inode *, struct file *);
int (*lock)(struct file *, int, struct file_lock *);
+ int (*lock_check_bounds)(const struct file_lock *);
void (*clear_acl_cache)(struct inode *);
};
diff --git a/include/linux/pci.h b/include/linux/pci.h
index d18b1dd49fab..a6a088e1a804 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -17,8 +17,7 @@
#ifndef LINUX_PCI_H
#define LINUX_PCI_H
-/* Include the pci register defines */
-#include <linux/pci_regs.h>
+#include <linux/pci_regs.h> /* The pci register defines */
/*
* The PCI interface treats multi-function devices as independent
@@ -49,12 +48,22 @@
#include <linux/list.h>
#include <linux/compiler.h>
#include <linux/errno.h>
+#include <linux/kobject.h>
#include <asm/atomic.h>
#include <linux/device.h>
/* Include the ID list */
#include <linux/pci_ids.h>
+/* pci_slot represents a physical slot */
+struct pci_slot {
+ struct pci_bus *bus; /* The bus this slot is on */
+ struct list_head list; /* node in list of slots on this bus */
+ struct hotplug_slot *hotplug; /* Hotplug info (migrate over time) */
+ unsigned char number; /* PCI_SLOT(pci_dev->devfn) */
+ struct kobject kobj;
+};
+
/* File state for mmap()s on /proc/bus/pci/X/Y */
enum pci_mmap_state {
pci_mmap_io,
@@ -142,6 +151,7 @@ struct pci_dev {
void *sysdata; /* hook for sys-specific extension */
struct proc_dir_entry *procent; /* device entry in /proc/bus/pci */
+ struct pci_slot *slot; /* Physical slot this device is in */
unsigned int devfn; /* encoded device & function index */
unsigned short vendor;
@@ -167,6 +177,13 @@ struct pci_dev {
pci_power_t current_state; /* Current operating state. In ACPI-speak,
this is D0-D3, D0 being fully functional,
and D3 being off. */
+ int pm_cap; /* PM capability offset in the
+ configuration space */
+ unsigned int pme_support:5; /* Bitmask of states from which PME#
+ can be generated */
+ unsigned int d1_support:1; /* Low power state D1 is supported */
+ unsigned int d2_support:1; /* Low power state D2 is supported */
+ unsigned int no_d1d2:1; /* Only allow D0 and D3 */
#ifdef CONFIG_PCIEASPM
struct pcie_link_state *link_state; /* ASPM link state. */
@@ -191,7 +208,6 @@ struct pci_dev {
unsigned int is_added:1;
unsigned int is_busmaster:1; /* device is busmaster */
unsigned int no_msi:1; /* device may not use msi */
- unsigned int no_d1d2:1; /* only allow d0 or d3 */
unsigned int block_ucfg_access:1; /* userspace config space access is blocked */
unsigned int broken_parity_status:1; /* Device generates false positive parity */
unsigned int msi_enabled:1;
@@ -267,6 +283,7 @@ struct pci_bus {
struct list_head children; /* list of child buses */
struct list_head devices; /* list of devices on this bus */
struct pci_dev *self; /* bridge device as seen by parent */
+ struct list_head slots; /* list of slots on this bus */
struct resource *resource[PCI_BUS_NUM_RESOURCES];
/* address space routed to this bus */
@@ -328,7 +345,7 @@ struct pci_bus_region {
struct pci_dynids {
spinlock_t lock; /* protects list, index */
struct list_head list; /* for IDs added at runtime */
- unsigned int use_driver_data:1; /* pci_driver->driver_data is used */
+ unsigned int use_driver_data:1; /* pci_device_id->driver_data is used */
};
/* ---------------------------------------------------------------- */
@@ -390,7 +407,7 @@ struct pci_driver {
int (*resume_early) (struct pci_dev *dev);
int (*resume) (struct pci_dev *dev); /* Device woken up */
void (*shutdown) (struct pci_dev *dev);
-
+ struct pm_ext_ops *pm;
struct pci_error_handlers *err_handler;
struct device_driver driver;
struct pci_dynids dynids;
@@ -489,6 +506,10 @@ struct pci_bus *pci_create_bus(struct device *parent, int bus,
struct pci_ops *ops, void *sysdata);
struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev,
int busnr);
+struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr,
+ const char *name);
+void pci_destroy_slot(struct pci_slot *slot);
+void pci_update_slot_number(struct pci_slot *slot, int slot_nr);
int pci_scan_slot(struct pci_bus *bus, int devfn);
struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn);
void pci_device_add(struct pci_dev *dev, struct pci_bus *bus);
@@ -618,6 +639,8 @@ int pci_restore_state(struct pci_dev *dev);
int pci_set_power_state(struct pci_dev *dev, pci_power_t state);
pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state);
int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable);
+int pci_prepare_to_sleep(struct pci_dev *dev);
+int pci_back_from_sleep(struct pci_dev *dev);
/* Functions for PCI Hotplug drivers to use */
int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap);
@@ -839,6 +862,11 @@ static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask)
return -EIO;
}
+static inline int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
+{
+ return -EIO;
+}
+
static inline int pci_set_dma_max_seg_size(struct pci_dev *dev,
unsigned int size)
{
@@ -977,9 +1005,9 @@ static inline void pci_set_drvdata(struct pci_dev *pdev, void *data)
/* If you want to know what to call your pci_dev, ask this function.
* Again, it's a wrapper around the generic device.
*/
-static inline char *pci_name(struct pci_dev *pdev)
+static inline const char *pci_name(struct pci_dev *pdev)
{
- return pdev->dev.bus_id;
+ return dev_name(&pdev->dev);
}
@@ -1014,7 +1042,9 @@ enum pci_fixup_pass {
pci_fixup_header, /* After reading configuration header */
pci_fixup_final, /* Final phase of device fixups */
pci_fixup_enable, /* pci_enable_device() time */
- pci_fixup_resume, /* pci_enable_device() time */
+ pci_fixup_resume, /* pci_device_resume() */
+ pci_fixup_suspend, /* pci_device_suspend */
+ pci_fixup_resume_early, /* pci_device_resume_early() */
};
/* Anonymous variables would be nice... */
@@ -1036,6 +1066,12 @@ enum pci_fixup_pass {
#define DECLARE_PCI_FIXUP_RESUME(vendor, device, hook) \
DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \
resume##vendor##device##hook, vendor, device, hook)
+#define DECLARE_PCI_FIXUP_RESUME_EARLY(vendor, device, hook) \
+ DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \
+ resume_early##vendor##device##hook, vendor, device, hook)
+#define DECLARE_PCI_FIXUP_SUSPEND(vendor, device, hook) \
+ DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \
+ suspend##vendor##device##hook, vendor, device, hook)
void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev);
@@ -1060,7 +1096,10 @@ extern int pci_pci_problems;
extern unsigned long pci_cardbus_io_size;
extern unsigned long pci_cardbus_mem_size;
-extern int pcibios_add_platform_entries(struct pci_dev *dev);
+int pcibios_add_platform_entries(struct pci_dev *dev);
+void pcibios_disable_device(struct pci_dev *dev);
+int pcibios_set_pcie_reset_state(struct pci_dev *dev,
+ enum pcie_reset_state state);
#ifdef CONFIG_PCI_MMCONFIG
extern void __init pci_mmcfg_early_init(void);
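Illustration of the new suspend-time fixup hook; the device ID 0x1234, the config-space offset and the value written are all hypothetical.

static void quirk_foo_suspend(struct pci_dev *dev)
{
	/* e.g. silence a misbehaving wakeup source before the device sleeps */
	pci_write_config_byte(dev, 0x40, 0x00);		/* offset/value made up */
}
DECLARE_PCI_FIXUP_SUSPEND(PCI_VENDOR_ID_INTEL, 0x1234, quirk_foo_suspend);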
diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
index 8f67e8f2a3cc..a08cd06b541a 100644
--- a/include/linux/pci_hotplug.h
+++ b/include/linux/pci_hotplug.h
@@ -95,9 +95,6 @@ struct hotplug_slot_attribute {
* @get_adapter_status: Called to see if an adapter is present in the slot or not.
* If this field is NULL, the value passed in the struct hotplug_slot_info
* will be used when this value is requested by a user.
- * @get_address: Called to get pci address of a slot.
- * If this field is NULL, the value passed in the struct hotplug_slot_info
- * will be used when this value is requested by a user.
* @get_max_bus_speed: Called to get the max bus speed for a slot.
* If this field is NULL, the value passed in the struct hotplug_slot_info
* will be used when this value is requested by a user.
@@ -120,7 +117,6 @@ struct hotplug_slot_ops {
int (*get_attention_status) (struct hotplug_slot *slot, u8 *value);
int (*get_latch_status) (struct hotplug_slot *slot, u8 *value);
int (*get_adapter_status) (struct hotplug_slot *slot, u8 *value);
- int (*get_address) (struct hotplug_slot *slot, u32 *value);
int (*get_max_bus_speed) (struct hotplug_slot *slot, enum pci_bus_speed *value);
int (*get_cur_bus_speed) (struct hotplug_slot *slot, enum pci_bus_speed *value);
};
@@ -140,7 +136,6 @@ struct hotplug_slot_info {
u8 attention_status;
u8 latch_status;
u8 adapter_status;
- u32 address;
enum pci_bus_speed max_bus_speed;
enum pci_bus_speed cur_bus_speed;
};
@@ -166,15 +161,14 @@ struct hotplug_slot {
/* Variables below this are for use only by the hotplug pci core. */
struct list_head slot_list;
- struct kobject kobj;
+ struct pci_slot *pci_slot;
};
#define to_hotplug_slot(n) container_of(n, struct hotplug_slot, kobj)
-extern int pci_hp_register (struct hotplug_slot *slot);
-extern int pci_hp_deregister (struct hotplug_slot *slot);
+extern int pci_hp_register(struct hotplug_slot *, struct pci_bus *, int nr);
+extern int pci_hp_deregister(struct hotplug_slot *slot);
extern int __must_check pci_hp_change_slot_info (struct hotplug_slot *slot,
struct hotplug_slot_info *info);
-extern struct kset *pci_hotplug_slots_kset;
/* PCI Setting Record (Type 0) */
struct hpp_type0 {
@@ -227,9 +221,9 @@ struct hotplug_params {
#include <acpi/acpi.h>
#include <acpi/acpi_bus.h>
#include <acpi/actypes.h>
-extern acpi_status acpi_run_oshp(acpi_handle handle);
extern acpi_status acpi_get_hp_params_from_firmware(struct pci_bus *bus,
struct hotplug_params *hpp);
+int acpi_get_hp_hw_control_from_firmware(struct pci_dev *dev, u32 flags);
int acpi_root_bridge(acpi_handle handle);
#endif
#endif
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 1cf4084b51e8..6be6a7943d8b 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2190,6 +2190,7 @@
#define PCI_DEVICE_ID_JMICRON_JMB366 0x2366
#define PCI_DEVICE_ID_JMICRON_JMB368 0x2368
#define PCI_DEVICE_ID_JMICRON_JMB38X_SD 0x2381
+#define PCI_DEVICE_ID_JMICRON_JMB38X_MMC 0x2382
#define PCI_DEVICE_ID_JMICRON_JMB38X_MS 0x2383
#define PCI_VENDOR_ID_KORENIX 0x1982
diff --git a/include/linux/pci_regs.h b/include/linux/pci_regs.h
index c0c1223c9194..19958b929905 100644
--- a/include/linux/pci_regs.h
+++ b/include/linux/pci_regs.h
@@ -231,6 +231,7 @@
#define PCI_PM_CAP_PME_D2 0x2000 /* PME# from D2 */
#define PCI_PM_CAP_PME_D3 0x4000 /* PME# from D3 (hot) */
#define PCI_PM_CAP_PME_D3cold 0x8000 /* PME# from D3 (cold) */
+#define PCI_PM_CAP_PME_SHIFT 11 /* Start of the PME Mask in PMC */
#define PCI_PM_CTRL 4 /* PM control and status register */
#define PCI_PM_CTRL_STATE_MASK 0x0003 /* Current power state (D0 to D3) */
#define PCI_PM_CTRL_NO_SOFT_RESET 0x0004 /* No reset for D3hot->D0 */
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h
index 3261681c82a4..95ac21ab3a09 100644
--- a/include/linux/platform_device.h
+++ b/include/linux/platform_device.h
@@ -53,6 +53,7 @@ struct platform_driver {
int (*suspend_late)(struct platform_device *, pm_message_t state);
int (*resume_early)(struct platform_device *);
int (*resume)(struct platform_device *);
+ struct pm_ext_ops *pm;
struct device_driver driver;
};
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 39a7ee859b67..4ad9de94449a 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -112,7 +112,9 @@ typedef struct pm_message {
int event;
} pm_message_t;
-/*
+/**
+ * struct pm_ops - device PM callbacks
+ *
* Several driver power state transitions are externally visible, affecting
* the state of pending I/O queues and (for drivers that touch hardware)
* interrupts, wakeups, DMA, and other hardware state. There may also be
@@ -120,6 +122,284 @@ typedef struct pm_message {
* to the rest of the driver stack (such as a driver that's ON gating off
* clocks which are not in active use).
*
+ * The externally visible transitions are handled with the help of the following
+ * callbacks included in this structure:
+ *
+ * @prepare: Prepare the device for the upcoming transition, but do NOT change
+ * its hardware state. Prevent new children of the device from being
+ * registered after @prepare() returns (the driver's subsystem and
+ * generally the rest of the kernel is supposed to prevent new calls to the
+ * probe method from being made too once @prepare() has succeeded). If
+ * @prepare() detects a situation it cannot handle (e.g. registration of a
+ * child already in progress), it may return -EAGAIN, so that the PM core
+ * can execute it once again (e.g. after the new child has been registered)
+ * to recover from the race condition. This method is executed for all
+ * kinds of suspend transitions and is followed by one of the suspend
+ * callbacks: @suspend(), @freeze(), or @poweroff().
+ * The PM core executes @prepare() for all devices before starting to
+ * execute suspend callbacks for any of them, so drivers may assume all of
+ * the other devices to be present and functional while @prepare() is being
+ * executed. In particular, it is safe to make GFP_KERNEL memory
+ * allocations from within @prepare(). However, drivers may NOT assume
+ * anything about the availability of the user space at that time and it
+ * is not correct to request firmware from within @prepare() (it's too
+ * late to do that). [To work around this limitation, drivers may
+ * register suspend and hibernation notifiers that are executed before the
+ * freezing of tasks.]
+ *
+ * @complete: Undo the changes made by @prepare(). This method is executed for
+ * all kinds of resume transitions, following one of the resume callbacks:
+ * @resume(), @thaw(), @restore(). Also called if the state transition
+ * fails before the driver's suspend callback (@suspend(), @freeze(),
+ * @poweroff()) can be executed (e.g. if the suspend callback fails for one
+ * of the other devices that the PM core has unsuccessfully attempted to
+ * suspend earlier).
+ * The PM core executes @complete() after it has executed the appropriate
+ * resume callback for all devices.
+ *
+ * @suspend: Executed before putting the system into a sleep state in which the
+ * contents of main memory are preserved. Quiesce the device, put it into
+ * a low power state appropriate for the upcoming system state (such as
+ * PCI_D3hot), and enable wakeup events as appropriate.
+ *
+ * @resume: Executed after waking the system up from a sleep state in which the
+ * contents of main memory were preserved. Put the device into the
+ * appropriate state, according to the information saved in memory by the
+ * preceding @suspend(). The driver starts working again, responding to
+ * hardware events and software requests. The hardware may have gone
+ * through a power-off reset, or it may have maintained state from the
+ * previous suspend() which the driver may rely on while resuming. On most
+ * platforms, there are no restrictions on availability of resources like
+ * clocks during @resume().
+ *
+ * @freeze: Hibernation-specific, executed before creating a hibernation image.
+ * Quiesce operations so that a consistent image can be created, but do NOT
+ * otherwise put the device into a low power device state and do NOT emit
+ * system wakeup events. Save in main memory the device settings to be
+ * used by @restore() during the subsequent resume from hibernation or by
+ * the subsequent @thaw(), if the creation of the image or the restoration
+ * of main memory contents from it fails.
+ *
+ * @thaw: Hibernation-specific, executed after creating a hibernation image OR
+ * if the creation of the image fails. Also executed after a failing
+ * attempt to restore the contents of main memory from such an image.
+ * Undo the changes made by the preceding @freeze(), so the device can be
+ * operated in the same way as immediately before the call to @freeze().
+ *
+ * @poweroff: Hibernation-specific, executed after saving a hibernation image.
+ * Quiesce the device, put it into a low power state appropriate for the
+ * upcoming system state (such as PCI_D3hot), and enable wakeup events as
+ * appropriate.
+ *
+ * @restore: Hibernation-specific, executed after restoring the contents of main
+ * memory from a hibernation image. Driver starts working again,
+ * responding to hardware events and software requests. Drivers may NOT
+ * make ANY assumptions about the hardware state right prior to @restore().
+ * On most platforms, there are no restrictions on availability of
+ * resources like clocks during @restore().
+ *
+ * All of the above callbacks, except for @complete(), return error codes.
+ * However, the error codes returned by the resume operations, @resume(),
+ * @thaw(), and @restore(), do not cause the PM core to abort the resume
+ * transition during which they are returned. The error codes returned in
+ * those cases are only printed by the PM core to the system logs for debugging
+ * purposes. Still, it is recommended that drivers only return error codes
+ * from their resume methods in case of an unrecoverable failure (i.e. when the
+ * device being handled refuses to resume and becomes unusable) to allow us to
+ * modify the PM core in the future, so that it can avoid attempting to handle
+ * devices that failed to resume and their children.
+ *
+ * It is allowed to unregister devices while the above callbacks are being
+ * executed. However, it is not allowed to unregister a device from within any
+ * of its own callbacks.
+ */
+
+struct pm_ops {
+ int (*prepare)(struct device *dev);
+ void (*complete)(struct device *dev);
+ int (*suspend)(struct device *dev);
+ int (*resume)(struct device *dev);
+ int (*freeze)(struct device *dev);
+ int (*thaw)(struct device *dev);
+ int (*poweroff)(struct device *dev);
+ int (*restore)(struct device *dev);
+};
+
+/**
+ * struct pm_ext_ops - extended device PM callbacks
+ *
+ * Some devices require certain operations related to suspend and hibernation
+ * to be carried out with interrupts disabled. Thus, 'struct pm_ext_ops' below
+ * is defined, adding callbacks to be executed with interrupts disabled to
+ * 'struct pm_ops'.
+ *
+ * The following callbacks included in 'struct pm_ext_ops' are executed with
+ * the nonboot CPUs switched off and with interrupts disabled on the only
+ * functional CPU. They also are executed with the PM core list of devices
+ * locked, so they must NOT unregister any devices.
+ *
+ * @suspend_noirq: Complete the operations of ->suspend() by carrying out any
+ * actions required for suspending the device that need interrupts to be
+ * disabled
+ *
+ * @resume_noirq: Prepare for the execution of ->resume() by carrying out any
+ * actions required for resuming the device that need interrupts to be
+ * disabled
+ *
+ * @freeze_noirq: Complete the operations of ->freeze() by carrying out any
+ * actions required for freezing the device that need interrupts to be
+ * disabled
+ *
+ * @thaw_noirq: Prepare for the execution of ->thaw() by carrying out any
+ * actions required for thawing the device that need interrupts to be
+ * disabled
+ *
+ * @poweroff_noirq: Complete the operations of ->poweroff() by carrying out any
+ * actions required for handling the device that need interrupts to be
+ * disabled
+ *
+ * @restore_noirq: Prepare for the execution of ->restore() by carrying out any
+ * actions required for restoring the operations of the device that need
+ * interrupts to be disabled
+ *
+ * All of the above callbacks return error codes, but the error codes returned
+ * by the resume operations, @resume_noirq(), @thaw_noirq(), and
+ * @restore_noirq(), do not cause the PM core to abort the resume transition
+ * during which they are returned. The error codes returned in those cases are
+ * only printed by the PM core to the system logs for debugging purposes.
+ * Still, as stated above, it is recommended that drivers only return error
+ * codes from their resume methods if the device being handled fails to resume
+ * and is not usable any more.
+ */
+
+struct pm_ext_ops {
+ struct pm_ops base;
+ int (*suspend_noirq)(struct device *dev);
+ int (*resume_noirq)(struct device *dev);
+ int (*freeze_noirq)(struct device *dev);
+ int (*thaw_noirq)(struct device *dev);
+ int (*poweroff_noirq)(struct device *dev);
+ int (*restore_noirq)(struct device *dev);
+};
+
+/**
+ * PM_EVENT_ messages
+ *
+ * The following PM_EVENT_ messages are defined for the internal use of the PM
+ * core, in order to provide a mechanism allowing the high level suspend and
+ * hibernation code to convey the necessary information to the device PM core
+ * code:
+ *
+ * ON No transition.
+ *
+ * FREEZE System is going to hibernate, call ->prepare() and ->freeze()
+ * for all devices.
+ *
+ * SUSPEND System is going to suspend, call ->prepare() and ->suspend()
+ * for all devices.
+ *
+ * HIBERNATE Hibernation image has been saved, call ->prepare() and
+ * ->poweroff() for all devices.
+ *
+ * QUIESCE Contents of main memory are going to be restored from a (loaded)
+ * hibernation image, call ->prepare() and ->freeze() for all
+ * devices.
+ *
+ * RESUME System is resuming, call ->resume() and ->complete() for all
+ * devices.
+ *
+ * THAW Hibernation image has been created, call ->thaw() and
+ * ->complete() for all devices.
+ *
+ * RESTORE Contents of main memory have been restored from a hibernation
+ * image, call ->restore() and ->complete() for all devices.
+ *
+ * RECOVER Creation of a hibernation image or restoration of the main
+ * memory contents from a hibernation image has failed, call
+ * ->thaw() and ->complete() for all devices.
+ */
+
+#define PM_EVENT_ON 0x0000
+#define PM_EVENT_FREEZE 0x0001
+#define PM_EVENT_SUSPEND 0x0002
+#define PM_EVENT_HIBERNATE 0x0004
+#define PM_EVENT_QUIESCE 0x0008
+#define PM_EVENT_RESUME 0x0010
+#define PM_EVENT_THAW 0x0020
+#define PM_EVENT_RESTORE 0x0040
+#define PM_EVENT_RECOVER 0x0080
+
+#define PM_EVENT_SLEEP (PM_EVENT_SUSPEND | PM_EVENT_HIBERNATE)
+
+#define PMSG_FREEZE ((struct pm_message){ .event = PM_EVENT_FREEZE, })
+#define PMSG_QUIESCE ((struct pm_message){ .event = PM_EVENT_QUIESCE, })
+#define PMSG_SUSPEND ((struct pm_message){ .event = PM_EVENT_SUSPEND, })
+#define PMSG_HIBERNATE ((struct pm_message){ .event = PM_EVENT_HIBERNATE, })
+#define PMSG_RESUME ((struct pm_message){ .event = PM_EVENT_RESUME, })
+#define PMSG_THAW ((struct pm_message){ .event = PM_EVENT_THAW, })
+#define PMSG_RESTORE ((struct pm_message){ .event = PM_EVENT_RESTORE, })
+#define PMSG_RECOVER ((struct pm_message){ .event = PM_EVENT_RECOVER, })
+#define PMSG_ON ((struct pm_message){ .event = PM_EVENT_ON, })
+
+/**
+ * Device power management states
+ *
+ * These state labels are used internally by the PM core to indicate the current
+ * status of a device with respect to the PM core operations.
+ *
+ * DPM_ON Device is regarded as operational. Set this way
+ * initially and when ->complete() is about to be called.
+ * Also set when ->prepare() fails.
+ *
+ * DPM_PREPARING Device is going to be prepared for a PM transition. Set
+ * when ->prepare() is about to be called.
+ *
+ * DPM_RESUMING Device is going to be resumed. Set when ->resume(),
+ * ->thaw(), or ->restore() is about to be called.
+ *
+ * DPM_SUSPENDING Device has been prepared for a power transition. Set
+ * when ->prepare() has just succeeded.
+ *
+ * DPM_OFF Device is regarded as inactive. Set immediately after
+ * ->suspend(), ->freeze(), or ->poweroff() has succeeded.
+ * Also set when ->resume_noirq(), ->thaw_noirq(), or
+ * ->restore_noirq() is about to be called.
+ *
+ * DPM_OFF_IRQ Device is in a "deep sleep". Set immediately after
+ * ->suspend_noirq(), ->freeze_noirq(), or
+ * ->poweroff_noirq() has just succeeded.
+ */
+
+enum dpm_state {
+ DPM_INVALID,
+ DPM_ON,
+ DPM_PREPARING,
+ DPM_RESUMING,
+ DPM_SUSPENDING,
+ DPM_OFF,
+ DPM_OFF_IRQ,
+};
+
+struct dev_pm_info {
+ pm_message_t power_state;
+ unsigned can_wakeup:1;
+ unsigned should_wakeup:1;
+ enum dpm_state status; /* Owned by the PM core */
+#ifdef CONFIG_PM_SLEEP
+ struct list_head entry;
+#endif
+};
+
+/*
+ * The PM_EVENT_ messages are also used by drivers implementing the legacy
+ * suspend framework, based on the ->suspend() and ->resume() callbacks common
+ * for suspend and hibernation transitions, according to the rules below.
+ */
+
+/* Necessary, because several drivers use PM_EVENT_PRETHAW */
+#define PM_EVENT_PRETHAW PM_EVENT_QUIESCE
+
+/*
* One transition is triggered by resume(), after a suspend() call; the
* message is implicit:
*
@@ -164,35 +444,13 @@ typedef struct pm_message {
* or from system low-power states such as standby or suspend-to-RAM.
*/
-#define PM_EVENT_ON 0
-#define PM_EVENT_FREEZE 1
-#define PM_EVENT_SUSPEND 2
-#define PM_EVENT_HIBERNATE 4
-#define PM_EVENT_PRETHAW 8
-
-#define PM_EVENT_SLEEP (PM_EVENT_SUSPEND | PM_EVENT_HIBERNATE)
-
-#define PMSG_FREEZE ((struct pm_message){ .event = PM_EVENT_FREEZE, })
-#define PMSG_PRETHAW ((struct pm_message){ .event = PM_EVENT_PRETHAW, })
-#define PMSG_SUSPEND ((struct pm_message){ .event = PM_EVENT_SUSPEND, })
-#define PMSG_HIBERNATE ((struct pm_message){ .event = PM_EVENT_HIBERNATE, })
-#define PMSG_ON ((struct pm_message){ .event = PM_EVENT_ON, })
-
-struct dev_pm_info {
- pm_message_t power_state;
- unsigned can_wakeup:1;
- unsigned should_wakeup:1;
- bool sleeping:1; /* Owned by the PM core */
-#ifdef CONFIG_PM_SLEEP
- struct list_head entry;
-#endif
-};
+#ifdef CONFIG_PM_SLEEP
+extern void device_pm_lock(void);
+extern void device_power_up(pm_message_t state);
+extern void device_resume(pm_message_t state);
+extern void device_pm_unlock(void);
extern int device_power_down(pm_message_t state);
-extern void device_power_up(void);
-extern void device_resume(void);
-
-#ifdef CONFIG_PM_SLEEP
extern int device_suspend(pm_message_t state);
extern int device_prepare_suspend(pm_message_t state);
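A minimal sketch of how a driver might populate the new structure, assuming it only needs plain suspend/resume handling; the foo_* names are invented and the bodies are placeholders.

static int foo_suspend(struct device *dev)
{
	/* quiesce the device and drop it into a low-power state */
	return 0;
}

static int foo_resume(struct device *dev)
{
	/* restore power and restart I/O */
	return 0;
}

static struct pm_ops foo_pm_ops = {
	.suspend	= foo_suspend,
	.resume		= foo_resume,
	/* callbacks left NULL are simply not invoked */
};

static struct device_driver foo_driver = {
	.name	= "foo",
	.pm	= &foo_pm_ops,		/* the new per-driver PM callbacks */
};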
diff --git a/include/linux/pm_wakeup.h b/include/linux/pm_wakeup.h
index f0d0b2cb8d20..0aae7776185e 100644
--- a/include/linux/pm_wakeup.h
+++ b/include/linux/pm_wakeup.h
@@ -35,6 +35,11 @@ static inline void device_init_wakeup(struct device *dev, int val)
dev->power.can_wakeup = dev->power.should_wakeup = !!val;
}
+static inline void device_set_wakeup_capable(struct device *dev, int val)
+{
+ dev->power.can_wakeup = !!val;
+}
+
static inline int device_can_wakeup(struct device *dev)
{
return dev->power.can_wakeup;
@@ -47,21 +52,7 @@ static inline void device_set_wakeup_enable(struct device *dev, int val)
static inline int device_may_wakeup(struct device *dev)
{
- return dev->power.can_wakeup & dev->power.should_wakeup;
-}
-
-/*
- * Platform hook to activate device wakeup capability, if that's not already
- * handled by enable_irq_wake() etc.
- * Returns zero on success, else negative errno
- */
-extern int (*platform_enable_wakeup)(struct device *dev, int is_on);
-
-static inline int call_platform_enable_wakeup(struct device *dev, int is_on)
-{
- if (platform_enable_wakeup)
- return (*platform_enable_wakeup)(dev, is_on);
- return 0;
+ return dev->power.can_wakeup && dev->power.should_wakeup;
}
#else /* !CONFIG_PM */
@@ -72,6 +63,8 @@ static inline void device_init_wakeup(struct device *dev, int val)
dev->power.can_wakeup = !!val;
}
+static inline void device_set_wakeup_capable(struct device *dev, int val) { }
+
static inline int device_can_wakeup(struct device *dev)
{
return dev->power.can_wakeup;
@@ -80,11 +73,6 @@ static inline int device_can_wakeup(struct device *dev)
#define device_set_wakeup_enable(dev, val) do {} while (0)
#define device_may_wakeup(dev) 0
-static inline int call_platform_enable_wakeup(struct device *dev, int is_on)
-{
- return 0;
-}
-
#endif /* !CONFIG_PM */
#endif /* _LINUX_PM_WAKEUP_H */
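A short usage note with a hypothetical pdev: unlike device_init_wakeup(), the new helper only marks the capability and leaves the should_wakeup policy bit untouched.

	/* in probe(): the device can wake the system; whether it is
	 * allowed to remains governed by the existing setting */
	device_set_wakeup_capable(&pdev->dev, 1);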
diff --git a/include/linux/pnp.h b/include/linux/pnp.h
index 63b128d512fb..1ce54b63085d 100644
--- a/include/linux/pnp.h
+++ b/include/linux/pnp.h
@@ -1,6 +1,8 @@
/*
* Linux Plug and Play Support
* Copyright by Adam Belay <ambx1@neo.rr.com>
+ * Copyright (C) 2008 Hewlett-Packard Development Company, L.P.
+ * Bjorn Helgaas <bjorn.helgaas@hp.com>
*/
#ifndef _LINUX_PNP_H
@@ -15,7 +17,6 @@
struct pnp_protocol;
struct pnp_dev;
-struct pnp_resource_table;
/*
* Resource Management
@@ -24,7 +25,14 @@ struct resource *pnp_get_resource(struct pnp_dev *, unsigned int, unsigned int);
static inline int pnp_resource_valid(struct resource *res)
{
- if (res && !(res->flags & IORESOURCE_UNSET))
+ if (res)
+ return 1;
+ return 0;
+}
+
+static inline int pnp_resource_enabled(struct resource *res)
+{
+ if (res && !(res->flags & IORESOURCE_DISABLED))
return 1;
return 0;
}
@@ -40,19 +48,31 @@ static inline resource_size_t pnp_resource_len(struct resource *res)
static inline resource_size_t pnp_port_start(struct pnp_dev *dev,
unsigned int bar)
{
- return pnp_get_resource(dev, IORESOURCE_IO, bar)->start;
+ struct resource *res = pnp_get_resource(dev, IORESOURCE_IO, bar);
+
+ if (pnp_resource_valid(res))
+ return res->start;
+ return 0;
}
static inline resource_size_t pnp_port_end(struct pnp_dev *dev,
unsigned int bar)
{
- return pnp_get_resource(dev, IORESOURCE_IO, bar)->end;
+ struct resource *res = pnp_get_resource(dev, IORESOURCE_IO, bar);
+
+ if (pnp_resource_valid(res))
+ return res->end;
+ return 0;
}
static inline unsigned long pnp_port_flags(struct pnp_dev *dev,
unsigned int bar)
{
- return pnp_get_resource(dev, IORESOURCE_IO, bar)->flags;
+ struct resource *res = pnp_get_resource(dev, IORESOURCE_IO, bar);
+
+ if (pnp_resource_valid(res))
+ return res->flags;
+ return IORESOURCE_IO | IORESOURCE_AUTO;
}
static inline int pnp_port_valid(struct pnp_dev *dev, unsigned int bar)
@@ -63,25 +83,41 @@ static inline int pnp_port_valid(struct pnp_dev *dev, unsigned int bar)
static inline resource_size_t pnp_port_len(struct pnp_dev *dev,
unsigned int bar)
{
- return pnp_resource_len(pnp_get_resource(dev, IORESOURCE_IO, bar));
+ struct resource *res = pnp_get_resource(dev, IORESOURCE_IO, bar);
+
+ if (pnp_resource_valid(res))
+ return pnp_resource_len(res);
+ return 0;
}
static inline resource_size_t pnp_mem_start(struct pnp_dev *dev,
unsigned int bar)
{
- return pnp_get_resource(dev, IORESOURCE_MEM, bar)->start;
+ struct resource *res = pnp_get_resource(dev, IORESOURCE_MEM, bar);
+
+ if (pnp_resource_valid(res))
+ return res->start;
+ return 0;
}
static inline resource_size_t pnp_mem_end(struct pnp_dev *dev,
unsigned int bar)
{
- return pnp_get_resource(dev, IORESOURCE_MEM, bar)->end;
+ struct resource *res = pnp_get_resource(dev, IORESOURCE_MEM, bar);
+
+ if (pnp_resource_valid(res))
+ return res->end;
+ return 0;
}
static inline unsigned long pnp_mem_flags(struct pnp_dev *dev, unsigned int bar)
{
- return pnp_get_resource(dev, IORESOURCE_MEM, bar)->flags;
+ struct resource *res = pnp_get_resource(dev, IORESOURCE_MEM, bar);
+
+ if (pnp_resource_valid(res))
+ return res->flags;
+ return IORESOURCE_MEM | IORESOURCE_AUTO;
}
static inline int pnp_mem_valid(struct pnp_dev *dev, unsigned int bar)
@@ -92,18 +128,30 @@ static inline int pnp_mem_valid(struct pnp_dev *dev, unsigned int bar)
static inline resource_size_t pnp_mem_len(struct pnp_dev *dev,
unsigned int bar)
{
- return pnp_resource_len(pnp_get_resource(dev, IORESOURCE_MEM, bar));
+ struct resource *res = pnp_get_resource(dev, IORESOURCE_MEM, bar);
+
+ if (pnp_resource_valid(res))
+ return pnp_resource_len(res);
+ return 0;
}
static inline resource_size_t pnp_irq(struct pnp_dev *dev, unsigned int bar)
{
- return pnp_get_resource(dev, IORESOURCE_IRQ, bar)->start;
+ struct resource *res = pnp_get_resource(dev, IORESOURCE_IRQ, bar);
+
+ if (pnp_resource_valid(res))
+ return res->start;
+ return -1;
}
static inline unsigned long pnp_irq_flags(struct pnp_dev *dev, unsigned int bar)
{
- return pnp_get_resource(dev, IORESOURCE_IRQ, bar)->flags;
+ struct resource *res = pnp_get_resource(dev, IORESOURCE_IRQ, bar);
+
+ if (pnp_resource_valid(res))
+ return res->flags;
+ return IORESOURCE_IRQ | IORESOURCE_AUTO;
}
static inline int pnp_irq_valid(struct pnp_dev *dev, unsigned int bar)
@@ -114,12 +162,20 @@ static inline int pnp_irq_valid(struct pnp_dev *dev, unsigned int bar)
static inline resource_size_t pnp_dma(struct pnp_dev *dev, unsigned int bar)
{
- return pnp_get_resource(dev, IORESOURCE_DMA, bar)->start;
+ struct resource *res = pnp_get_resource(dev, IORESOURCE_DMA, bar);
+
+ if (pnp_resource_valid(res))
+ return res->start;
+ return -1;
}
static inline unsigned long pnp_dma_flags(struct pnp_dev *dev, unsigned int bar)
{
- return pnp_get_resource(dev, IORESOURCE_DMA, bar)->flags;
+ struct resource *res = pnp_get_resource(dev, IORESOURCE_DMA, bar);
+
+ if (pnp_resource_valid(res))
+ return res->flags;
+ return IORESOURCE_DMA | IORESOURCE_AUTO;
}
static inline int pnp_dma_valid(struct pnp_dev *dev, unsigned int bar)
@@ -128,57 +184,6 @@ static inline int pnp_dma_valid(struct pnp_dev *dev, unsigned int bar)
}
-#define PNP_PORT_FLAG_16BITADDR (1<<0)
-#define PNP_PORT_FLAG_FIXED (1<<1)
-
-struct pnp_port {
- unsigned short min; /* min base number */
- unsigned short max; /* max base number */
- unsigned char align; /* align boundary */
- unsigned char size; /* size of range */
- unsigned char flags; /* port flags */
- unsigned char pad; /* pad */
- struct pnp_port *next; /* next port */
-};
-
-#define PNP_IRQ_NR 256
-struct pnp_irq {
- DECLARE_BITMAP(map, PNP_IRQ_NR); /* bitmask for IRQ lines */
- unsigned char flags; /* IRQ flags */
- unsigned char pad; /* pad */
- struct pnp_irq *next; /* next IRQ */
-};
-
-struct pnp_dma {
- unsigned char map; /* bitmask for DMA channels */
- unsigned char flags; /* DMA flags */
- struct pnp_dma *next; /* next port */
-};
-
-struct pnp_mem {
- unsigned int min; /* min base number */
- unsigned int max; /* max base number */
- unsigned int align; /* align boundary */
- unsigned int size; /* size of range */
- unsigned char flags; /* memory flags */
- unsigned char pad; /* pad */
- struct pnp_mem *next; /* next memory resource */
-};
-
-#define PNP_RES_PRIORITY_PREFERRED 0
-#define PNP_RES_PRIORITY_ACCEPTABLE 1
-#define PNP_RES_PRIORITY_FUNCTIONAL 2
-#define PNP_RES_PRIORITY_INVALID 65535
-
-struct pnp_option {
- unsigned short priority; /* priority */
- struct pnp_port *port; /* first port */
- struct pnp_irq *irq; /* first IRQ */
- struct pnp_dma *dma; /* first DMA */
- struct pnp_mem *mem; /* first memory resource */
- struct pnp_option *next; /* used to chain dependent resources */
-};
-
/*
* Device Management
*/
@@ -246,9 +251,9 @@ struct pnp_dev {
int active;
int capabilities;
- struct pnp_option *independent;
- struct pnp_option *dependent;
- struct pnp_resource_table *res;
+ unsigned int num_dependent_sets;
+ struct list_head resources;
+ struct list_head options;
char name[PNP_NAME_LEN]; /* contains a human-readable name */
int flags; /* used by protocols */
@@ -425,6 +430,8 @@ void pnp_unregister_card_driver(struct pnp_card_driver *drv);
extern struct list_head pnp_cards;
/* resource management */
+int pnp_possible_config(struct pnp_dev *dev, int type, resource_size_t base,
+ resource_size_t size);
int pnp_auto_config_dev(struct pnp_dev *dev);
int pnp_start_dev(struct pnp_dev *dev);
int pnp_stop_dev(struct pnp_dev *dev);
@@ -452,6 +459,9 @@ static inline int pnp_register_card_driver(struct pnp_card_driver *drv) { return
static inline void pnp_unregister_card_driver(struct pnp_card_driver *drv) { }
/* resource management */
+static inline int pnp_possible_config(struct pnp_dev *dev, int type,
+ resource_size_t base,
+ resource_size_t size) { return 0; }
static inline int pnp_auto_config_dev(struct pnp_dev *dev) { return -ENODEV; }
static inline int pnp_start_dev(struct pnp_dev *dev) { return -ENODEV; }
static inline int pnp_stop_dev(struct pnp_dev *dev) { return -ENODEV; }
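The accessors above now go through pnp_get_resource() and return safe defaults (0, -1, or auto flags) when a resource slot is missing, and pnp_possible_config() lets a driver ask whether a region could ever be assigned to the device. A minimal, purely illustrative probe sketch follows; the function name, the 0x300/8 window, and the error handling are invented for the example and are not part of this patch.

/*
 * Illustrative sketch (not from this patch): reading decoded resources
 * through the NULL-safe accessors and probing a legacy I/O window with
 * pnp_possible_config().
 */
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/pnp.h>

static int example_pnp_probe(struct pnp_dev *dev,
			     const struct pnp_device_id *dev_id)
{
	resource_size_t io_base;
	int irq;

	if (!pnp_port_valid(dev, 0) || !pnp_irq_valid(dev, 0))
		return -ENODEV;

	io_base = pnp_port_start(dev, 0);	/* would be 0 if the slot were invalid */
	irq = pnp_irq(dev, 0);			/* would be -1 if the slot were invalid */

	/* Could the device ever be configured at the legacy 0x300 window? */
	if (!pnp_possible_config(dev, IORESOURCE_IO, 0x300, 8))
		return -EBUSY;

	dev_info(&dev->dev, "io 0x%llx irq %d\n",
		 (unsigned long long)io_base, irq);
	return 0;
}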
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 21349173d148..1941d8b5cf11 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1062,12 +1062,6 @@ struct task_struct {
#endif
struct list_head tasks;
- /*
- * ptrace_list/ptrace_children forms the list of my children
- * that were stolen by a ptracer.
- */
- struct list_head ptrace_children;
- struct list_head ptrace_list;
struct mm_struct *mm, *active_mm;
@@ -1089,18 +1083,25 @@ struct task_struct {
/*
* pointers to (original) parent process, youngest child, younger sibling,
* older sibling, respectively. (p->father can be replaced with
- * p->parent->pid)
+ * p->real_parent->pid)
*/
- struct task_struct *real_parent; /* real parent process (when being debugged) */
- struct task_struct *parent; /* parent process */
+ struct task_struct *real_parent; /* real parent process */
+ struct task_struct *parent; /* recipient of SIGCHLD, wait4() reports */
/*
- * children/sibling forms the list of my children plus the
- * tasks I'm ptracing.
+ * children/sibling forms the list of my natural children
*/
struct list_head children; /* list of my children */
struct list_head sibling; /* linkage in my parent's children list */
struct task_struct *group_leader; /* threadgroup leader */
+ /*
+ * ptraced is the list of tasks this task is using ptrace on.
+ * This includes both natural children and PTRACE_ATTACH targets.
+ * p->ptrace_entry is p's link on the p->parent->ptraced list.
+ */
+ struct list_head ptraced;
+ struct list_head ptrace_entry;
+
/* PID/PID hash table linkage. */
struct pid_link pids[PIDTYPE_MAX];
struct list_head thread_group;
@@ -1494,6 +1495,7 @@ static inline void put_task_struct(struct task_struct *t)
#define PF_MEMPOLICY 0x10000000 /* Non-default NUMA mempolicy */
#define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */
#define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezeable */
+#define PF_FREEZER_NOSIG 0x80000000 /* Freezer won't send signals to it */
/*
* Only the _current_ task can read/write to tsk->flags, but other
@@ -1875,9 +1877,6 @@ extern void wait_task_inactive(struct task_struct * p);
#define wait_task_inactive(p) do { } while (0)
#endif
-#define remove_parent(p) list_del_init(&(p)->sibling)
-#define add_parent(p) list_add_tail(&(p)->sibling,&(p)->parent->children)
-
#define next_task(p) list_entry(rcu_dereference((p)->tasks.next), struct task_struct, tasks)
#define for_each_process(p) \
diff --git a/include/linux/spi/mmc_spi.h b/include/linux/spi/mmc_spi.h
index d5ca78b93a3b..a3626aedaec9 100644
--- a/include/linux/spi/mmc_spi.h
+++ b/include/linux/spi/mmc_spi.h
@@ -23,6 +23,15 @@ struct mmc_spi_platform_data {
/* sense switch on sd cards */
int (*get_ro)(struct device *);
+ /*
+ * If the board does not use CD interrupts, the driver can optimize
+ * polling using this function.
+ */
+ int (*get_cd)(struct device *);
+
+ /* Capabilities to pass into mmc core (e.g. MMC_CAP_NEEDS_POLL). */
+ unsigned long caps;
+
/* how long to debounce card detect, in msecs */
u16 detect_delay;
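The new get_cd() hook and caps field let a board with no card-detect interrupt tell the MMC core to poll instead. A hypothetical board-file fragment is sketched below; the GPIO number, helper name, and voltage mask are made up for illustration (MMC_CAP_NEEDS_POLL and MMC_VDD_* come from <linux/mmc/host.h>).

/*
 * Hypothetical board-file sketch: card detect read from an active-low
 * GPIO, with the core asked to poll (no CD interrupt wired up).
 */
#include <linux/gpio.h>
#include <linux/mmc/host.h>
#include <linux/spi/mmc_spi.h>

static int example_mmc_get_cd(struct device *dev)
{
	return !gpio_get_value(42);		/* GPIO 42 is an example only */
}

static struct mmc_spi_platform_data example_mmc_pdata = {
	.ocr_mask	= MMC_VDD_32_33 | MMC_VDD_33_34,
	.get_cd		= example_mmc_get_cd,
	.caps		= MMC_CAP_NEEDS_POLL,
	.detect_delay	= 100,			/* debounce, in msecs */
};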
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index 6fff7f82ef12..e5bfe01ee305 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -42,7 +42,8 @@ struct rpc_clnt {
unsigned int cl_softrtry : 1,/* soft timeouts */
cl_discrtry : 1,/* disconnect before retry */
- cl_autobind : 1;/* use getport() */
+ cl_autobind : 1,/* use getport() */
+ cl_chatty : 1;/* be verbose */
struct rpc_rtt * cl_rtt; /* RTO estimator data */
const struct rpc_timeout *cl_timeout; /* Timeout strategy */
@@ -114,6 +115,7 @@ struct rpc_create_args {
#define RPC_CLNT_CREATE_NONPRIVPORT (1UL << 3)
#define RPC_CLNT_CREATE_NOPING (1UL << 4)
#define RPC_CLNT_CREATE_DISCRTRY (1UL << 5)
+#define RPC_CLNT_CREATE_QUIET (1UL << 6)
struct rpc_clnt *rpc_create(struct rpc_create_args *args);
struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *,
@@ -123,6 +125,9 @@ void rpc_shutdown_client(struct rpc_clnt *);
void rpc_release_client(struct rpc_clnt *);
int rpcb_register(u32, u32, int, unsigned short, int *);
+int rpcb_v4_register(const u32 program, const u32 version,
+ const struct sockaddr *address,
+ const char *netid, int *result);
int rpcb_getport_sync(struct sockaddr_in *, u32, u32, int);
void rpcb_getport_async(struct rpc_task *);
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index d1a5c8c1a0f1..64981a2f1cae 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -135,7 +135,6 @@ struct rpc_task_setup {
#define RPC_IS_SWAPPER(t) ((t)->tk_flags & RPC_TASK_SWAPPER)
#define RPC_DO_ROOTOVERRIDE(t) ((t)->tk_flags & RPC_TASK_ROOTCREDS)
#define RPC_ASSASSINATED(t) ((t)->tk_flags & RPC_TASK_KILLED)
-#define RPC_DO_CALLBACK(t) ((t)->tk_callback != NULL)
#define RPC_IS_SOFT(t) ((t)->tk_flags & RPC_TASK_SOFT)
#define RPC_TASK_RUNNING 0
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index a6977423baf7..e8e69159af71 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -86,6 +86,11 @@ typedef int __bitwise suspend_state_t;
* that implement @begin(), but platforms implementing @begin() should
* also provide a @end() which cleans up transitions aborted before
* @enter().
+ *
+ * @recover: Recover the platform from a suspend failure.
+ * Called by the PM core if the suspending of devices fails.
+ * This callback is optional and should only be implemented by platforms
+ * which require special recovery actions in that situation.
*/
struct platform_suspend_ops {
int (*valid)(suspend_state_t state);
@@ -94,6 +99,7 @@ struct platform_suspend_ops {
int (*enter)(suspend_state_t state);
void (*finish)(void);
void (*end)(void);
+ void (*recover)(void);
};
#ifdef CONFIG_SUSPEND
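The optional @recover callback documented above gives the platform a hook for undoing its own preparation when device suspend fails. A sketch of how a platform might wire it up is below; every example_* symbol is invented, and a real implementation would register the structure with suspend_set_ops().

/*
 * Illustrative sketch: a platform providing the optional .recover hook.
 * All example_* names are invented for this example.
 */
#include <linux/suspend.h>

static int example_suspend_valid(suspend_state_t state)
{
	return state == PM_SUSPEND_MEM;
}

static int example_suspend_enter(suspend_state_t state)
{
	/* Enter the SoC low-power state here. */
	return 0;
}

static void example_suspend_recover(void)
{
	/* Device suspend failed: undo any board-specific preparation. */
}

static struct platform_suspend_ops example_suspend_ops = {
	.valid	 = example_suspend_valid,
	.enter	 = example_suspend_enter,
	.recover = example_suspend_recover,
};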
@@ -149,7 +155,7 @@ extern void mark_free_pages(struct zone *zone);
* The methods in this structure allow a platform to carry out special
* operations required by it during a hibernation transition.
*
- * All the methods below must be implemented.
+ * All the methods below, except for @recover(), must be implemented.
*
* @begin: Tell the platform driver that we're starting hibernation.
* Called right after shrinking memory and before freezing devices.
@@ -189,6 +195,11 @@ extern void mark_free_pages(struct zone *zone);
* @restore_cleanup: Clean up after a failing image restoration.
* Called right after the nonboot CPUs have been enabled and before
* thawing devices (runs with IRQs on).
+ *
+ * @recover: Recover the platform from a failure to suspend devices.
+ * Called by the PM core if the suspending of devices during hibernation
+ * fails. This callback is optional and should only be implemented by
+ * platforms which require special recovery actions in that situation.
*/
struct platform_hibernation_ops {
int (*begin)(void);
@@ -200,6 +211,7 @@ struct platform_hibernation_ops {
void (*leave)(void);
int (*pre_restore)(void);
void (*restore_cleanup)(void);
+ void (*recover)(void);
};
#ifdef CONFIG_HIBERNATION
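Because @recover is optional in both ops structures, the core is expected to test the pointer before calling it on the failure path. The fragment below is a simplified illustration of that calling convention, not the actual PM core code; the variable and function names are invented.

/* Simplified illustration of invoking an optional callback. */
#include <linux/suspend.h>

static struct platform_hibernation_ops *example_hib_ops;

static void example_recover_platform(void)
{
	if (example_hib_ops && example_hib_ops->recover)
		example_hib_ops->recover();
}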