author     Olof Johansson <olof@lixom.net>  2011-12-15 22:02:34 -0800
committer  Olof Johansson <olof@lixom.net>  2011-12-15 22:02:34 -0800
commit     02735a29d8ce882ec698803f064e17888874780c (patch)
tree       6a4afa3bc8b6d4334df24910a56f77adf126b0c7 /include
parent     8d685b7f4d9c9882442bf1b492558d5f17b694fa (diff)
parent     3d911ad22e8405c1a333a6812e405cb1a5ae9829 (diff)
download   linux-stable-02735a29d8ce882ec698803f064e17888874780c.tar.gz
           linux-stable-02735a29d8ce882ec698803f064e17888874780c.tar.bz2
           linux-stable-02735a29d8ce882ec698803f064e17888874780c.zip
Merge branch 'at91/defconfig' into next/cleanup
Diffstat (limited to 'include')
-rw-r--r--  include/asm-generic/unistd.h | 8
-rw-r--r--  include/drm/drm_pciids.h | 8
-rw-r--r--  include/drm/exynos_drm.h | 9
-rw-r--r--  include/linux/clocksource.h | 3
-rw-r--r--  include/linux/compat.h | 9
-rw-r--r--  include/linux/dcache.h | 3
-rw-r--r--  include/linux/fs.h | 3
-rw-r--r--  include/linux/ftrace_event.h | 2
-rw-r--r--  include/linux/init_task.h | 4
-rw-r--r--  include/linux/mm.h | 1
-rw-r--r--  include/linux/netdevice.h | 2
-rw-r--r--  include/linux/pci_ids.h | 4
-rw-r--r--  include/linux/perf_event.h | 1
-rw-r--r--  include/linux/pkt_sched.h | 6
-rw-r--r--  include/linux/pm.h | 229
-rw-r--r--  include/linux/pstore.h | 4
-rw-r--r--  include/linux/shrinker.h | 2
-rw-r--r--  include/linux/sigma.h | 13
-rw-r--r--  include/net/dst.h | 7
-rw-r--r--  include/net/dst_ops.h | 2
-rw-r--r--  include/net/inet_sock.h | 2
-rw-r--r--  include/net/inetpeer.h | 1
-rw-r--r--  include/net/netfilter/nf_conntrack_ecache.h | 19
-rw-r--r--  include/net/netns/conntrack.h | 2
-rw-r--r--  include/net/red.h | 15
-rw-r--r--  include/net/route.h | 4
-rw-r--r--  include/target/target_core_base.h | 46
-rw-r--r--  include/target/target_core_transport.h | 24
-rw-r--r--  include/video/omapdss.h | 7
29 files changed, 223 insertions, 217 deletions
diff --git a/include/asm-generic/unistd.h b/include/asm-generic/unistd.h
index f4c38d8c6674..2292d1af9d70 100644
--- a/include/asm-generic/unistd.h
+++ b/include/asm-generic/unistd.h
@@ -685,9 +685,15 @@ __SYSCALL(__NR_syncfs, sys_syncfs)
__SYSCALL(__NR_setns, sys_setns)
#define __NR_sendmmsg 269
__SC_COMP(__NR_sendmmsg, sys_sendmmsg, compat_sys_sendmmsg)
+#define __NR_process_vm_readv 270
+__SC_COMP(__NR_process_vm_readv, sys_process_vm_readv, \
+ compat_sys_process_vm_readv)
+#define __NR_process_vm_writev 271
+__SC_COMP(__NR_process_vm_writev, sys_process_vm_writev, \
+ compat_sys_process_vm_writev)
#undef __NR_syscalls
-#define __NR_syscalls 270
+#define __NR_syscalls 272
/*
* All syscalls below here should go away really,
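For reference, a minimal userspace sketch (not part of this merge) of how the newly wired process_vm_readv syscall can be exercised. It reads from the caller's own address space so it stays self-contained, and it uses raw syscall(2) because a libc wrapper may not yet be available; __NR_process_vm_readv is only defined by kernel headers of 3.2 or later.

#include <stdio.h>
#include <sys/syscall.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
	char src[] = "data living in the 'remote' address space";
	char dst[sizeof(src)];
	struct iovec local  = { .iov_base = dst, .iov_len = sizeof(dst) };
	struct iovec remote = { .iov_base = src, .iov_len = sizeof(src) };

	/* Reading from our own pid keeps the example runnable without a helper process. */
	ssize_t n = syscall(__NR_process_vm_readv, getpid(),
			    &local, 1UL, &remote, 1UL, 0UL);
	if (n < 0) {
		perror("process_vm_readv");
		return 1;
	}
	printf("copied %zd bytes: %s\n", n, dst);
	return 0;
}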
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index f81676f1b310..4e4fbb820e20 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -197,6 +197,14 @@
{0x1002, 0x6770, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6778, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6779, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6840, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6841, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6842, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6843, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6849, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6850, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6858, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6859, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6880, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6888, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6889, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
diff --git a/include/drm/exynos_drm.h b/include/drm/exynos_drm.h
index 1d161cb3aca5..12050434d57a 100644
--- a/include/drm/exynos_drm.h
+++ b/include/drm/exynos_drm.h
@@ -32,17 +32,16 @@
/**
* User-desired buffer creation information structure.
*
- * @size: requested size for the object.
+ * @size: user-desired memory allocation size.
* - this size value would be page-aligned internally.
* @flags: user request for setting memory type or cache attributes.
- * @handle: returned handle for the object.
- * @pad: just padding to be 64-bit aligned.
+ * @handle: returned a handle to created gem object.
+ * - this handle will be set by gem module of kernel side.
*/
struct drm_exynos_gem_create {
- unsigned int size;
+ uint64_t size;
unsigned int flags;
unsigned int handle;
- unsigned int pad;
};
/**
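A hedged userspace sketch of how this creation structure is meant to be used (illustrative only; the DRM_IOCTL_EXYNOS_GEM_CREATE macro is assumed from elsewhere in this header, and the include path may differ depending on how the headers are installed):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/exynos_drm.h>

/* Returns the GEM handle filled in by the kernel, or 0 on failure. */
static unsigned int exynos_gem_create(int drm_fd, uint64_t bytes)
{
	struct drm_exynos_gem_create req;

	memset(&req, 0, sizeof(req));
	req.size  = bytes;	/* page-aligned internally by the kernel */
	req.flags = 0;		/* default memory type / cache attributes */

	if (ioctl(drm_fd, DRM_IOCTL_EXYNOS_GEM_CREATE, &req) < 0)
		return 0;

	return req.handle;
}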
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index 139c4db55f17..c86c940d1de3 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -156,6 +156,7 @@ extern u64 timecounter_cyc2time(struct timecounter *tc,
* @mult: cycle to nanosecond multiplier
* @shift: cycle to nanosecond divisor (power of two)
* @max_idle_ns: max idle time permitted by the clocksource (nsecs)
+ * @maxadj maximum adjustment value to mult (~11%)
* @flags: flags describing special properties
* @archdata: arch-specific data
* @suspend: suspend function for the clocksource, if necessary
@@ -172,7 +173,7 @@ struct clocksource {
u32 mult;
u32 shift;
u64 max_idle_ns;
-
+ u32 maxadj;
#ifdef CONFIG_ARCH_CLOCKSOURCE_DATA
struct arch_clocksource_data archdata;
#endif
diff --git a/include/linux/compat.h b/include/linux/compat.h
index 154bf5683015..66ed067fb729 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -552,5 +552,14 @@ extern ssize_t compat_rw_copy_check_uvector(int type,
extern void __user *compat_alloc_user_space(unsigned long len);
+asmlinkage ssize_t compat_sys_process_vm_readv(compat_pid_t pid,
+ const struct compat_iovec __user *lvec,
+ unsigned long liovcnt, const struct compat_iovec __user *rvec,
+ unsigned long riovcnt, unsigned long flags);
+asmlinkage ssize_t compat_sys_process_vm_writev(compat_pid_t pid,
+ const struct compat_iovec __user *lvec,
+ unsigned long liovcnt, const struct compat_iovec __user *rvec,
+ unsigned long riovcnt, unsigned long flags);
+
#endif /* CONFIG_COMPAT */
#endif /* _LINUX_COMPAT_H */
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 4df926199369..ed9f74f6c519 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -339,7 +339,8 @@ extern int d_validate(struct dentry *, struct dentry *);
*/
extern char *dynamic_dname(struct dentry *, char *, int, const char *, ...);
-extern char *__d_path(const struct path *path, struct path *root, char *, int);
+extern char *__d_path(const struct path *, const struct path *, char *, int);
+extern char *d_absolute_path(const struct path *, char *, int);
extern char *d_path(const struct path *, char *, int);
extern char *d_path_with_unreachable(const struct path *, char *, int);
extern char *dentry_path_raw(struct dentry *, char *, int);
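As a rough illustration of how the d_path() family declared here is used (not taken from this merge): the helpers build the name at the tail of the caller's buffer and return a pointer into it, or an ERR_PTR-encoded error.

#include <linux/dcache.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/printk.h>

/* Hypothetical helper, for illustration only. */
static void report_open_file(struct file *file)
{
	char buf[256];
	char *name = d_path(&file->f_path, buf, sizeof(buf));

	if (IS_ERR(name))
		pr_warn("path unavailable or too long\n");
	else
		pr_info("opened %s\n", name);
}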
diff --git a/include/linux/fs.h b/include/linux/fs.h
index e3130220ce3e..e0bc4ffb8e7f 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -393,8 +393,8 @@ struct inodes_stat_t {
#include <linux/semaphore.h>
#include <linux/fiemap.h>
#include <linux/rculist_bl.h>
-#include <linux/shrinker.h>
#include <linux/atomic.h>
+#include <linux/shrinker.h>
#include <asm/byteorder.h>
@@ -1942,6 +1942,7 @@ extern int fd_statfs(int, struct kstatfs *);
extern int statfs_by_dentry(struct dentry *, struct kstatfs *);
extern int freeze_super(struct super_block *super);
extern int thaw_super(struct super_block *super);
+extern bool our_mnt(struct vfsmount *mnt);
extern int current_umask(void);
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 96efa6794ea5..c3da42dd22ba 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -172,6 +172,7 @@ enum {
TRACE_EVENT_FL_FILTERED_BIT,
TRACE_EVENT_FL_RECORDED_CMD_BIT,
TRACE_EVENT_FL_CAP_ANY_BIT,
+ TRACE_EVENT_FL_NO_SET_FILTER_BIT,
};
enum {
@@ -179,6 +180,7 @@ enum {
TRACE_EVENT_FL_FILTERED = (1 << TRACE_EVENT_FL_FILTERED_BIT),
TRACE_EVENT_FL_RECORDED_CMD = (1 << TRACE_EVENT_FL_RECORDED_CMD_BIT),
TRACE_EVENT_FL_CAP_ANY = (1 << TRACE_EVENT_FL_CAP_ANY_BIT),
+ TRACE_EVENT_FL_NO_SET_FILTER = (1 << TRACE_EVENT_FL_NO_SET_FILTER_BIT),
};
struct ftrace_event_call {
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 94b1e356c02a..32574eef9394 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -126,6 +126,8 @@ extern struct cred init_cred;
# define INIT_PERF_EVENTS(tsk)
#endif
+#define INIT_TASK_COMM "swapper"
+
/*
* INIT_TASK is used to set up the first task table, touch at
* your own risk!. Base=0, limit=0x1fffff (=2MB)
@@ -162,7 +164,7 @@ extern struct cred init_cred;
.group_leader = &tsk, \
RCU_INIT_POINTER(.real_cred, &init_cred), \
RCU_INIT_POINTER(.cred, &init_cred), \
- .comm = "swapper", \
+ .comm = INIT_TASK_COMM, \
.thread = INIT_THREAD, \
.fs = &init_fs, \
.files = &init_files, \
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 3dc3a8c2c485..4baadd18f4ad 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -10,6 +10,7 @@
#include <linux/mmzone.h>
#include <linux/rbtree.h>
#include <linux/prio_tree.h>
+#include <linux/atomic.h>
#include <linux/debug_locks.h>
#include <linux/mm_types.h>
#include <linux/range.h>
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index cbeb5867cff7..a82ad4dd306a 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2536,6 +2536,8 @@ extern void net_disable_timestamp(void);
extern void *dev_seq_start(struct seq_file *seq, loff_t *pos);
extern void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos);
extern void dev_seq_stop(struct seq_file *seq, void *v);
+extern int dev_seq_open_ops(struct inode *inode, struct file *file,
+ const struct seq_operations *ops);
#endif
extern int netdev_class_create_file(struct class_attribute *class_attr);
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 172ba70306d1..2aaee0ca9da8 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -517,8 +517,12 @@
#define PCI_DEVICE_ID_AMD_11H_NB_DRAM 0x1302
#define PCI_DEVICE_ID_AMD_11H_NB_MISC 0x1303
#define PCI_DEVICE_ID_AMD_11H_NB_LINK 0x1304
+#define PCI_DEVICE_ID_AMD_15H_NB_F0 0x1600
+#define PCI_DEVICE_ID_AMD_15H_NB_F1 0x1601
+#define PCI_DEVICE_ID_AMD_15H_NB_F2 0x1602
#define PCI_DEVICE_ID_AMD_15H_NB_F3 0x1603
#define PCI_DEVICE_ID_AMD_15H_NB_F4 0x1604
+#define PCI_DEVICE_ID_AMD_15H_NB_F5 0x1605
#define PCI_DEVICE_ID_AMD_CNB17H_F3 0x1703
#define PCI_DEVICE_ID_AMD_LANCE 0x2000
#define PCI_DEVICE_ID_AMD_LANCE_HOME 0x2001
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 1e9ebe5e0091..b1f89122bf6a 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -822,6 +822,7 @@ struct perf_event {
int mmap_locked;
struct user_struct *mmap_user;
struct ring_buffer *rb;
+ struct list_head rb_entry;
/* poll related */
wait_queue_head_t waitq;
diff --git a/include/linux/pkt_sched.h b/include/linux/pkt_sched.h
index c5336705921f..7281d5acf2f9 100644
--- a/include/linux/pkt_sched.h
+++ b/include/linux/pkt_sched.h
@@ -30,7 +30,7 @@
*/
struct tc_stats {
- __u64 bytes; /* NUmber of enqueues bytes */
+ __u64 bytes; /* Number of enqueued bytes */
__u32 packets; /* Number of enqueued packets */
__u32 drops; /* Packets dropped because of lack of resources */
__u32 overlimits; /* Number of throttle events when this
@@ -297,7 +297,7 @@ struct tc_htb_glob {
__u32 debug; /* debug flags */
/* stats */
- __u32 direct_pkts; /* count of non shapped packets */
+ __u32 direct_pkts; /* count of non shaped packets */
};
enum {
TCA_HTB_UNSPEC,
@@ -503,7 +503,7 @@ enum {
};
#define NETEM_LOSS_MAX (__NETEM_LOSS_MAX - 1)
-/* State transition probablities for 4 state model */
+/* State transition probabilities for 4 state model */
struct tc_netem_gimodel {
__u32 p13;
__u32 p31;
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 5c4c8b18c8b7..3f3ed83a9aa5 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -54,118 +54,145 @@ typedef struct pm_message {
/**
* struct dev_pm_ops - device PM callbacks
*
- * Several driver power state transitions are externally visible, affecting
+ * Several device power state transitions are externally visible, affecting
* the state of pending I/O queues and (for drivers that touch hardware)
* interrupts, wakeups, DMA, and other hardware state. There may also be
- * internal transitions to various low power modes, which are transparent
+ * internal transitions to various low-power modes which are transparent
* to the rest of the driver stack (such as a driver that's ON gating off
* clocks which are not in active use).
*
- * The externally visible transitions are handled with the help of the following
- * callbacks included in this structure:
- *
- * @prepare: Prepare the device for the upcoming transition, but do NOT change
- * its hardware state. Prevent new children of the device from being
- * registered after @prepare() returns (the driver's subsystem and
- * generally the rest of the kernel is supposed to prevent new calls to the
- * probe method from being made too once @prepare() has succeeded). If
- * @prepare() detects a situation it cannot handle (e.g. registration of a
- * child already in progress), it may return -EAGAIN, so that the PM core
- * can execute it once again (e.g. after the new child has been registered)
- * to recover from the race condition. This method is executed for all
- * kinds of suspend transitions and is followed by one of the suspend
- * callbacks: @suspend(), @freeze(), or @poweroff().
- * The PM core executes @prepare() for all devices before starting to
- * execute suspend callbacks for any of them, so drivers may assume all of
- * the other devices to be present and functional while @prepare() is being
- * executed. In particular, it is safe to make GFP_KERNEL memory
- * allocations from within @prepare(). However, drivers may NOT assume
- * anything about the availability of the user space at that time and it
- * is not correct to request firmware from within @prepare() (it's too
- * late to do that). [To work around this limitation, drivers may
- * register suspend and hibernation notifiers that are executed before the
- * freezing of tasks.]
+ * The externally visible transitions are handled with the help of callbacks
+ * included in this structure in such a way that two levels of callbacks are
+ * involved. First, the PM core executes callbacks provided by PM domains,
+ * device types, classes and bus types. They are the subsystem-level callbacks
+ * supposed to execute callbacks provided by device drivers, although they may
+ * choose not to do that. If the driver callbacks are executed, they have to
+ * collaborate with the subsystem-level callbacks to achieve the goals
+ * appropriate for the given system transition, given transition phase and the
+ * subsystem the device belongs to.
+ *
+ * @prepare: The principal role of this callback is to prevent new children of
+ * the device from being registered after it has returned (the driver's
+ * subsystem and generally the rest of the kernel is supposed to prevent
+ * new calls to the probe method from being made too once @prepare() has
+ * succeeded). If @prepare() detects a situation it cannot handle (e.g.
+ * registration of a child already in progress), it may return -EAGAIN, so
+ * that the PM core can execute it once again (e.g. after a new child has
+ * been registered) to recover from the race condition.
+ * This method is executed for all kinds of suspend transitions and is
+ * followed by one of the suspend callbacks: @suspend(), @freeze(), or
+ * @poweroff(). The PM core executes subsystem-level @prepare() for all
+ * devices before starting to invoke suspend callbacks for any of them, so
+ * generally devices may be assumed to be functional or to respond to
+ * runtime resume requests while @prepare() is being executed. However,
+ * device drivers may NOT assume anything about the availability of user
+ * space at that time and it is NOT valid to request firmware from within
+ * @prepare() (it's too late to do that). It also is NOT valid to allocate
+ * substantial amounts of memory from @prepare() in the GFP_KERNEL mode.
+ * [To work around these limitations, drivers may register suspend and
+ * hibernation notifiers to be executed before the freezing of tasks.]
*
* @complete: Undo the changes made by @prepare(). This method is executed for
* all kinds of resume transitions, following one of the resume callbacks:
* @resume(), @thaw(), @restore(). Also called if the state transition
- * fails before the driver's suspend callback (@suspend(), @freeze(),
- * @poweroff()) can be executed (e.g. if the suspend callback fails for one
+ * fails before the driver's suspend callback: @suspend(), @freeze() or
+ * @poweroff(), can be executed (e.g. if the suspend callback fails for one
* of the other devices that the PM core has unsuccessfully attempted to
* suspend earlier).
- * The PM core executes @complete() after it has executed the appropriate
- * resume callback for all devices.
+ * The PM core executes subsystem-level @complete() after it has executed
+ * the appropriate resume callbacks for all devices.
*
* @suspend: Executed before putting the system into a sleep state in which the
- * contents of main memory are preserved. Quiesce the device, put it into
- * a low power state appropriate for the upcoming system state (such as
- * PCI_D3hot), and enable wakeup events as appropriate.
+ * contents of main memory are preserved. The exact action to perform
+ * depends on the device's subsystem (PM domain, device type, class or bus
+ * type), but generally the device must be quiescent after subsystem-level
+ * @suspend() has returned, so that it doesn't do any I/O or DMA.
+ * Subsystem-level @suspend() is executed for all devices after invoking
+ * subsystem-level @prepare() for all of them.
*
* @resume: Executed after waking the system up from a sleep state in which the
- * contents of main memory were preserved. Put the device into the
- * appropriate state, according to the information saved in memory by the
- * preceding @suspend(). The driver starts working again, responding to
- * hardware events and software requests. The hardware may have gone
- * through a power-off reset, or it may have maintained state from the
- * previous suspend() which the driver may rely on while resuming. On most
- * platforms, there are no restrictions on availability of resources like
- * clocks during @resume().
+ * contents of main memory were preserved. The exact action to perform
+ * depends on the device's subsystem, but generally the driver is expected
+ * to start working again, responding to hardware events and software
+ * requests (the device itself may be left in a low-power state, waiting
+ * for a runtime resume to occur). The state of the device at the time its
+ * driver's @resume() callback is run depends on the platform and subsystem
+ * the device belongs to. On most platforms, there are no restrictions on
+ * availability of resources like clocks during @resume().
+ * Subsystem-level @resume() is executed for all devices after invoking
+ * subsystem-level @resume_noirq() for all of them.
*
* @freeze: Hibernation-specific, executed before creating a hibernation image.
- * Quiesce operations so that a consistent image can be created, but do NOT
- * otherwise put the device into a low power device state and do NOT emit
- * system wakeup events. Save in main memory the device settings to be
- * used by @restore() during the subsequent resume from hibernation or by
- * the subsequent @thaw(), if the creation of the image or the restoration
- * of main memory contents from it fails.
+ * Analogous to @suspend(), but it should not enable the device to signal
+ * wakeup events or change its power state. The majority of subsystems
+ * (with the notable exception of the PCI bus type) expect the driver-level
+ * @freeze() to save the device settings in memory to be used by @restore()
+ * during the subsequent resume from hibernation.
+ * Subsystem-level @freeze() is executed for all devices after invoking
+ * subsystem-level @prepare() for all of them.
*
* @thaw: Hibernation-specific, executed after creating a hibernation image OR
- * if the creation of the image fails. Also executed after a failing
+ * if the creation of an image has failed. Also executed after a failing
* attempt to restore the contents of main memory from such an image.
* Undo the changes made by the preceding @freeze(), so the device can be
* operated in the same way as immediately before the call to @freeze().
+ * Subsystem-level @thaw() is executed for all devices after invoking
+ * subsystem-level @thaw_noirq() for all of them. It also may be executed
+ * directly after @freeze() in case of a transition error.
*
* @poweroff: Hibernation-specific, executed after saving a hibernation image.
- * Quiesce the device, put it into a low power state appropriate for the
- * upcoming system state (such as PCI_D3hot), and enable wakeup events as
- * appropriate.
+ * Analogous to @suspend(), but it need not save the device's settings in
+ * memory.
+ * Subsystem-level @poweroff() is executed for all devices after invoking
+ * subsystem-level @prepare() for all of them.
*
* @restore: Hibernation-specific, executed after restoring the contents of main
- * memory from a hibernation image. Driver starts working again,
- * responding to hardware events and software requests. Drivers may NOT
- * make ANY assumptions about the hardware state right prior to @restore().
- * On most platforms, there are no restrictions on availability of
- * resources like clocks during @restore().
- *
- * @suspend_noirq: Complete the operations of ->suspend() by carrying out any
- * actions required for suspending the device that need interrupts to be
- * disabled
- *
- * @resume_noirq: Prepare for the execution of ->resume() by carrying out any
- * actions required for resuming the device that need interrupts to be
- * disabled
- *
- * @freeze_noirq: Complete the operations of ->freeze() by carrying out any
- * actions required for freezing the device that need interrupts to be
- * disabled
- *
- * @thaw_noirq: Prepare for the execution of ->thaw() by carrying out any
- * actions required for thawing the device that need interrupts to be
- * disabled
- *
- * @poweroff_noirq: Complete the operations of ->poweroff() by carrying out any
- * actions required for handling the device that need interrupts to be
- * disabled
- *
- * @restore_noirq: Prepare for the execution of ->restore() by carrying out any
- * actions required for restoring the operations of the device that need
- * interrupts to be disabled
+ * memory from a hibernation image, analogous to @resume().
+ *
+ * @suspend_noirq: Complete the actions started by @suspend(). Carry out any
+ * additional operations required for suspending the device that might be
+ * racing with its driver's interrupt handler, which is guaranteed not to
+ * run while @suspend_noirq() is being executed.
+ * It generally is expected that the device will be in a low-power state
+ * (appropriate for the target system sleep state) after subsystem-level
+ * @suspend_noirq() has returned successfully. If the device can generate
+ * system wakeup signals and is enabled to wake up the system, it should be
+ * configured to do so at that time. However, depending on the platform
+ * and device's subsystem, @suspend() may be allowed to put the device into
+ * the low-power state and configure it to generate wakeup signals, in
+ * which case it generally is not necessary to define @suspend_noirq().
+ *
+ * @resume_noirq: Prepare for the execution of @resume() by carrying out any
+ * operations required for resuming the device that might be racing with
+ * its driver's interrupt handler, which is guaranteed not to run while
+ * @resume_noirq() is being executed.
+ *
+ * @freeze_noirq: Complete the actions started by @freeze(). Carry out any
+ * additional operations required for freezing the device that might be
+ * racing with its driver's interrupt handler, which is guaranteed not to
+ * run while @freeze_noirq() is being executed.
+ * The power state of the device should not be changed by either @freeze()
+ * or @freeze_noirq() and it should not be configured to signal system
+ * wakeup by any of these callbacks.
+ *
+ * @thaw_noirq: Prepare for the execution of @thaw() by carrying out any
+ * operations required for thawing the device that might be racing with its
+ * driver's interrupt handler, which is guaranteed not to run while
+ * @thaw_noirq() is being executed.
+ *
+ * @poweroff_noirq: Complete the actions started by @poweroff(). Analogous to
+ * @suspend_noirq(), but it need not save the device's settings in memory.
+ *
+ * @restore_noirq: Prepare for the execution of @restore() by carrying out any
+ * operations required for thawing the device that might be racing with its
+ * driver's interrupt handler, which is guaranteed not to run while
+ * @restore_noirq() is being executed. Analogous to @resume_noirq().
*
* All of the above callbacks, except for @complete(), return error codes.
* However, the error codes returned by the resume operations, @resume(),
- * @thaw(), @restore(), @resume_noirq(), @thaw_noirq(), and @restore_noirq() do
+ * @thaw(), @restore(), @resume_noirq(), @thaw_noirq(), and @restore_noirq(), do
* not cause the PM core to abort the resume transition during which they are
- * returned. The error codes returned in that cases are only printed by the PM
+ * returned. The error codes returned in those cases are only printed by the PM
* core to the system logs for debugging purposes. Still, it is recommended
* that drivers only return error codes from their resume methods in case of an
* unrecoverable failure (i.e. when the device being handled refuses to resume
@@ -174,31 +201,43 @@ typedef struct pm_message {
* their children.
*
* It is allowed to unregister devices while the above callbacks are being
- * executed. However, it is not allowed to unregister a device from within any
- * of its own callbacks.
+ * executed. However, a callback routine must NOT try to unregister the device
+ * it was called for, although it may unregister children of that device (for
+ * example, if it detects that a child was unplugged while the system was
+ * asleep).
+ *
+ * Refer to Documentation/power/devices.txt for more information about the role
+ * of the above callbacks in the system suspend process.
*
- * There also are the following callbacks related to run-time power management
- * of devices:
+ * There also are callbacks related to runtime power management of devices.
+ * Again, these callbacks are executed by the PM core only for subsystems
+ * (PM domains, device types, classes and bus types) and the subsystem-level
+ * callbacks are supposed to invoke the driver callbacks. Moreover, the exact
+ * actions to be performed by a device driver's callbacks generally depend on
+ * the platform and subsystem the device belongs to.
*
* @runtime_suspend: Prepare the device for a condition in which it won't be
* able to communicate with the CPU(s) and RAM due to power management.
- * This need not mean that the device should be put into a low power state.
+ * This need not mean that the device should be put into a low-power state.
* For example, if the device is behind a link which is about to be turned
* off, the device may remain at full power. If the device does go to low
- * power and is capable of generating run-time wake-up events, remote
- * wake-up (i.e., a hardware mechanism allowing the device to request a
- * change of its power state via a wake-up event, such as PCI PME) should
- * be enabled for it.
+ * power and is capable of generating runtime wakeup events, remote wakeup
+ * (i.e., a hardware mechanism allowing the device to request a change of
+ * its power state via an interrupt) should be enabled for it.
*
* @runtime_resume: Put the device into the fully active state in response to a
- * wake-up event generated by hardware or at the request of software. If
- * necessary, put the device into the full power state and restore its
+ * wakeup event generated by hardware or at the request of software. If
+ * necessary, put the device into the full-power state and restore its
* registers, so that it is fully operational.
*
- * @runtime_idle: Device appears to be inactive and it might be put into a low
- * power state if all of the necessary conditions are satisfied. Check
+ * @runtime_idle: Device appears to be inactive and it might be put into a
+ * low-power state if all of the necessary conditions are satisfied. Check
* these conditions and handle the device as appropriate, possibly queueing
* a suspend request for it. The return value is ignored by the PM core.
+ *
+ * Refer to Documentation/power/runtime_pm.txt for more information about the
+ * role of the above callbacks in device runtime power management.
+ *
*/
struct dev_pm_ops {
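To make the callback roles described above concrete, a hedged driver-side sketch (the foo_* names are hypothetical, not from this patch): a driver exposes its callbacks through a dev_pm_ops instance referenced from its driver structure, and the subsystem-level code described in the comments decides when to invoke them.

#include <linux/device.h>
#include <linux/pm.h>

static int foo_suspend(struct device *dev)
{
	/* Quiesce the device: no I/O or DMA once this returns. */
	return 0;
}

static int foo_resume(struct device *dev)
{
	/* Reprogram registers and start servicing requests again. */
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	.suspend = foo_suspend,
	.resume  = foo_resume,
	/* Drivers that can reuse the same pair for hibernation often use
	 * SIMPLE_DEV_PM_OPS(), which also fills .freeze/.thaw/.poweroff/.restore. */
};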
diff --git a/include/linux/pstore.h b/include/linux/pstore.h
index ea567321ae3c..2ca8cde5459d 100644
--- a/include/linux/pstore.h
+++ b/include/linux/pstore.h
@@ -35,10 +35,12 @@ struct pstore_info {
spinlock_t buf_lock; /* serialize access to 'buf' */
char *buf;
size_t bufsize;
+ struct mutex read_mutex; /* serialize open/read/close */
int (*open)(struct pstore_info *psi);
int (*close)(struct pstore_info *psi);
ssize_t (*read)(u64 *id, enum pstore_type_id *type,
- struct timespec *time, struct pstore_info *psi);
+ struct timespec *time, char **buf,
+ struct pstore_info *psi);
int (*write)(enum pstore_type_id type, u64 *id,
unsigned int part, size_t size, struct pstore_info *psi);
int (*erase)(enum pstore_type_id type, u64 id,
diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h
index a83833a1f7a2..07ceb97d53fa 100644
--- a/include/linux/shrinker.h
+++ b/include/linux/shrinker.h
@@ -35,7 +35,7 @@ struct shrinker {
/* These are for internal use */
struct list_head list;
- long nr; /* objs pending delete */
+ atomic_long_t nr_in_batch; /* objs pending delete */
};
#define DEFAULT_SEEKS 2 /* A good number if you don't know better. */
extern void register_shrinker(struct shrinker *);
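A hedged sketch of how the shrinker interface declared here was consumed in this era; the my_cache_* hooks are hypothetical stand-ins for a subsystem's own cache.

#include <linux/shrinker.h>

/* Hypothetical cache hooks, assumed to exist elsewhere. */
extern int my_cache_count(void);
extern void my_cache_evict(unsigned long nr);

static int my_cache_shrink(struct shrinker *shrink, struct shrink_control *sc)
{
	if (!sc->nr_to_scan)
		return my_cache_count();	/* query pass: report freeable objects */

	my_cache_evict(sc->nr_to_scan);		/* free up to nr_to_scan objects */
	return my_cache_count();		/* report what remains */
}

static struct shrinker my_shrinker = {
	.shrink	= my_cache_shrink,
	.seeks	= DEFAULT_SEEKS,
};

/* register_shrinker(&my_shrinker) at init, unregister_shrinker(&my_shrinker) on exit. */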
diff --git a/include/linux/sigma.h b/include/linux/sigma.h
index e2accb3164d8..d0de882c0d96 100644
--- a/include/linux/sigma.h
+++ b/include/linux/sigma.h
@@ -24,7 +24,7 @@ struct sigma_firmware {
struct sigma_firmware_header {
unsigned char magic[7];
u8 version;
- u32 crc;
+ __le32 crc;
};
enum {
@@ -40,19 +40,14 @@ enum {
struct sigma_action {
u8 instr;
u8 len_hi;
- u16 len;
- u16 addr;
+ __le16 len;
+ __be16 addr;
unsigned char payload[];
};
static inline u32 sigma_action_len(struct sigma_action *sa)
{
- return (sa->len_hi << 16) | sa->len;
-}
-
-static inline size_t sigma_action_size(struct sigma_action *sa, u32 payload_len)
-{
- return sizeof(*sa) + payload_len + (payload_len % 2);
+ return (sa->len_hi << 16) | le16_to_cpu(sa->len);
}
extern int process_sigma_firmware(struct i2c_client *client, const char *name);
diff --git a/include/net/dst.h b/include/net/dst.h
index 4fb6c4381791..6faec1a60216 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -205,12 +205,7 @@ dst_feature(const struct dst_entry *dst, u32 feature)
static inline u32 dst_mtu(const struct dst_entry *dst)
{
- u32 mtu = dst_metric_raw(dst, RTAX_MTU);
-
- if (!mtu)
- mtu = dst->ops->default_mtu(dst);
-
- return mtu;
+ return dst->ops->mtu(dst);
}
/* RTT metrics are stored in milliseconds for user ABI, but used as jiffies */
diff --git a/include/net/dst_ops.h b/include/net/dst_ops.h
index 9adb99845a56..e1c2ee0eef47 100644
--- a/include/net/dst_ops.h
+++ b/include/net/dst_ops.h
@@ -17,7 +17,7 @@ struct dst_ops {
int (*gc)(struct dst_ops *ops);
struct dst_entry * (*check)(struct dst_entry *, __u32 cookie);
unsigned int (*default_advmss)(const struct dst_entry *);
- unsigned int (*default_mtu)(const struct dst_entry *);
+ unsigned int (*mtu)(const struct dst_entry *);
u32 * (*cow_metrics)(struct dst_entry *, unsigned long);
void (*destroy)(struct dst_entry *);
void (*ifdown)(struct dst_entry *,
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index b897d6e6d0a5..f941964a9931 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -31,6 +31,7 @@
/** struct ip_options - IP Options
*
* @faddr - Saved first hop address
+ * @nexthop - Saved nexthop address in LSRR and SSRR
* @is_data - Options in __data, rather than skb
* @is_strictroute - Strict source route
* @srr_is_hit - Packet destination addr was our one
@@ -41,6 +42,7 @@
*/
struct ip_options {
__be32 faddr;
+ __be32 nexthop;
unsigned char optlen;
unsigned char srr;
unsigned char rr;
diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
index 78c83e62218f..e9ff3fc5e688 100644
--- a/include/net/inetpeer.h
+++ b/include/net/inetpeer.h
@@ -35,6 +35,7 @@ struct inet_peer {
u32 metrics[RTAX_MAX];
u32 rate_tokens; /* rate limiting for ICMP */
+ int redirect_genid;
unsigned long rate_last;
unsigned long pmtu_expires;
u32 pmtu_orig;
diff --git a/include/net/netfilter/nf_conntrack_ecache.h b/include/net/netfilter/nf_conntrack_ecache.h
index 4283508b3e18..a88fb6939387 100644
--- a/include/net/netfilter/nf_conntrack_ecache.h
+++ b/include/net/netfilter/nf_conntrack_ecache.h
@@ -67,18 +67,18 @@ struct nf_ct_event_notifier {
int (*fcn)(unsigned int events, struct nf_ct_event *item);
};
-extern struct nf_ct_event_notifier __rcu *nf_conntrack_event_cb;
-extern int nf_conntrack_register_notifier(struct nf_ct_event_notifier *nb);
-extern void nf_conntrack_unregister_notifier(struct nf_ct_event_notifier *nb);
+extern int nf_conntrack_register_notifier(struct net *net, struct nf_ct_event_notifier *nb);
+extern void nf_conntrack_unregister_notifier(struct net *net, struct nf_ct_event_notifier *nb);
extern void nf_ct_deliver_cached_events(struct nf_conn *ct);
static inline void
nf_conntrack_event_cache(enum ip_conntrack_events event, struct nf_conn *ct)
{
+ struct net *net = nf_ct_net(ct);
struct nf_conntrack_ecache *e;
- if (nf_conntrack_event_cb == NULL)
+ if (net->ct.nf_conntrack_event_cb == NULL)
return;
e = nf_ct_ecache_find(ct);
@@ -95,11 +95,12 @@ nf_conntrack_eventmask_report(unsigned int eventmask,
int report)
{
int ret = 0;
+ struct net *net = nf_ct_net(ct);
struct nf_ct_event_notifier *notify;
struct nf_conntrack_ecache *e;
rcu_read_lock();
- notify = rcu_dereference(nf_conntrack_event_cb);
+ notify = rcu_dereference(net->ct.nf_conntrack_event_cb);
if (notify == NULL)
goto out_unlock;
@@ -164,9 +165,8 @@ struct nf_exp_event_notifier {
int (*fcn)(unsigned int events, struct nf_exp_event *item);
};
-extern struct nf_exp_event_notifier __rcu *nf_expect_event_cb;
-extern int nf_ct_expect_register_notifier(struct nf_exp_event_notifier *nb);
-extern void nf_ct_expect_unregister_notifier(struct nf_exp_event_notifier *nb);
+extern int nf_ct_expect_register_notifier(struct net *net, struct nf_exp_event_notifier *nb);
+extern void nf_ct_expect_unregister_notifier(struct net *net, struct nf_exp_event_notifier *nb);
static inline void
nf_ct_expect_event_report(enum ip_conntrack_expect_events event,
@@ -174,11 +174,12 @@ nf_ct_expect_event_report(enum ip_conntrack_expect_events event,
u32 pid,
int report)
{
+ struct net *net = nf_ct_exp_net(exp);
struct nf_exp_event_notifier *notify;
struct nf_conntrack_ecache *e;
rcu_read_lock();
- notify = rcu_dereference(nf_expect_event_cb);
+ notify = rcu_dereference(net->ct.nf_expect_event_cb);
if (notify == NULL)
goto out_unlock;
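A rough sketch of what registration looks like with the new per-netns prototypes (hypothetical subscriber; earlier kernels registered one global notifier, while these variants take the struct net of interest):

#include <net/net_namespace.h>
#include <net/netfilter/nf_conntrack_ecache.h>

static int my_ct_event(unsigned int events, struct nf_ct_event *item)
{
	/* React to the IPCT_* bits in 'events' for item->ct. */
	return 0;
}

static struct nf_ct_event_notifier my_ct_notifier = {
	.fcn	= my_ct_event,
};

static int __net_init my_pernet_init(struct net *net)
{
	return nf_conntrack_register_notifier(net, &my_ct_notifier);
}

static void __net_exit my_pernet_exit(struct net *net)
{
	nf_conntrack_unregister_notifier(net, &my_ct_notifier);
}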
diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
index 0249399e51a7..7a911eca0f18 100644
--- a/include/net/netns/conntrack.h
+++ b/include/net/netns/conntrack.h
@@ -18,6 +18,8 @@ struct netns_ct {
struct hlist_nulls_head unconfirmed;
struct hlist_nulls_head dying;
struct ip_conntrack_stat __percpu *stat;
+ struct nf_ct_event_notifier __rcu *nf_conntrack_event_cb;
+ struct nf_exp_event_notifier __rcu *nf_expect_event_cb;
int sysctl_events;
unsigned int sysctl_events_retry_timeout;
int sysctl_acct;
diff --git a/include/net/red.h b/include/net/red.h
index 3319f16b3beb..b72a3b833936 100644
--- a/include/net/red.h
+++ b/include/net/red.h
@@ -116,7 +116,7 @@ struct red_parms {
u32 qR; /* Cached random number */
unsigned long qavg; /* Average queue length: A scaled */
- psched_time_t qidlestart; /* Start of current idle period */
+ ktime_t qidlestart; /* Start of current idle period */
};
static inline u32 red_rmask(u8 Plog)
@@ -148,17 +148,17 @@ static inline void red_set_parms(struct red_parms *p,
static inline int red_is_idling(struct red_parms *p)
{
- return p->qidlestart != PSCHED_PASTPERFECT;
+ return p->qidlestart.tv64 != 0;
}
static inline void red_start_of_idle_period(struct red_parms *p)
{
- p->qidlestart = psched_get_time();
+ p->qidlestart = ktime_get();
}
static inline void red_end_of_idle_period(struct red_parms *p)
{
- p->qidlestart = PSCHED_PASTPERFECT;
+ p->qidlestart.tv64 = 0;
}
static inline void red_restart(struct red_parms *p)
@@ -170,13 +170,10 @@ static inline void red_restart(struct red_parms *p)
static inline unsigned long red_calc_qavg_from_idle_time(struct red_parms *p)
{
- psched_time_t now;
- long us_idle;
+ s64 delta = ktime_us_delta(ktime_get(), p->qidlestart);
+ long us_idle = min_t(s64, delta, p->Scell_max);
int shift;
- now = psched_get_time();
- us_idle = psched_tdiff_bounded(now, p->qidlestart, p->Scell_max);
-
/*
* The problem: ideally, average length queue recalcultion should
* be done over constant clock intervals. This is too expensive, so
diff --git a/include/net/route.h b/include/net/route.h
index db7b3432f07c..91855d185b53 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -71,12 +71,12 @@ struct rtable {
struct fib_info *fi; /* for client ref to shared metrics */
};
-static inline bool rt_is_input_route(struct rtable *rt)
+static inline bool rt_is_input_route(const struct rtable *rt)
{
return rt->rt_route_iif != 0;
}
-static inline bool rt_is_output_route(struct rtable *rt)
+static inline bool rt_is_output_route(const struct rtable *rt)
{
return rt->rt_route_iif == 0;
}
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 7f5fed3c89e1..6873c7dd9145 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -103,9 +103,10 @@ enum se_cmd_flags_table {
SCF_SCSI_NON_DATA_CDB = 0x00000040,
SCF_SCSI_CDB_EXCEPTION = 0x00000080,
SCF_SCSI_RESERVATION_CONFLICT = 0x00000100,
- SCF_SE_CMD_FAILED = 0x00000400,
+ SCF_FUA = 0x00000200,
SCF_SE_LUN_CMD = 0x00000800,
SCF_SE_ALLOW_EOO = 0x00001000,
+ SCF_BIDI = 0x00002000,
SCF_SENT_CHECK_CONDITION = 0x00004000,
SCF_OVERFLOW_BIT = 0x00008000,
SCF_UNDERFLOW_BIT = 0x00010000,
@@ -154,6 +155,7 @@ enum tcm_sense_reason_table {
TCM_CHECK_CONDITION_ABORT_CMD = 0x0d,
TCM_CHECK_CONDITION_UNIT_ATTENTION = 0x0e,
TCM_CHECK_CONDITION_NOT_READY = 0x0f,
+ TCM_RESERVATION_CONFLICT = 0x10,
};
struct se_obj {
@@ -211,7 +213,6 @@ struct t10_alua_lu_gp {
u16 lu_gp_id;
int lu_gp_valid_id;
u32 lu_gp_members;
- atomic_t lu_gp_shutdown;
atomic_t lu_gp_ref_cnt;
spinlock_t lu_gp_lock;
struct config_group lu_gp_group;
@@ -422,11 +423,9 @@ struct se_cmd {
int sam_task_attr;
/* Transport protocol dependent state, see transport_state_table */
enum transport_state_table t_state;
- /* Transport specific error status */
- int transport_error_status;
/* Used to signal cmd->se_tfo->check_release_cmd() usage per cmd */
- int check_release:1;
- int cmd_wait_set:1;
+ unsigned check_release:1;
+ unsigned cmd_wait_set:1;
/* See se_cmd_flags_table */
u32 se_cmd_flags;
u32 se_ordered_id;
@@ -441,13 +440,10 @@ struct se_cmd {
/* Used for sense data */
void *sense_buffer;
struct list_head se_delayed_node;
- struct list_head se_ordered_node;
struct list_head se_lun_node;
struct list_head se_qf_node;
struct se_device *se_dev;
struct se_dev_entry *se_deve;
- struct se_device *se_obj_ptr;
- struct se_device *se_orig_obj_ptr;
struct se_lun *se_lun;
/* Only used for internal passthrough and legacy TCM fabric modules */
struct se_session *se_sess;
@@ -463,8 +459,6 @@ struct se_cmd {
unsigned char __t_task_cdb[TCM_MAX_COMMAND_SIZE];
unsigned long long t_task_lba;
int t_tasks_failed;
- int t_tasks_fua;
- bool t_tasks_bidi;
u32 t_tasks_sg_chained_no;
atomic_t t_fe_count;
atomic_t t_se_count;
@@ -489,14 +483,6 @@ struct se_cmd {
struct work_struct work;
- /*
- * Used for pre-registered fabric SGL passthrough WRITE and READ
- * with the special SCF_PASSTHROUGH_CONTIG_TO_SG case for TCM_Loop
- * and other HW target mode fabric modules.
- */
- struct scatterlist *t_task_pt_sgl;
- u32 t_task_pt_sgl_num;
-
struct scatterlist *t_data_sg;
unsigned int t_data_nents;
struct scatterlist *t_bidi_data_sg;
@@ -562,7 +548,7 @@ struct se_node_acl {
} ____cacheline_aligned;
struct se_session {
- int sess_tearing_down:1;
+ unsigned sess_tearing_down:1;
u64 sess_bin_isid;
struct se_node_acl *se_node_acl;
struct se_portal_group *se_tpg;
@@ -683,7 +669,6 @@ struct se_subsystem_dev {
struct t10_reservation t10_pr;
spinlock_t se_dev_lock;
void *se_dev_su_ptr;
- struct list_head se_dev_node;
struct config_group se_dev_group;
/* For T10 Reservations */
struct config_group se_dev_pr_group;
@@ -692,9 +677,6 @@ struct se_subsystem_dev {
} ____cacheline_aligned;
struct se_device {
- /* Set to 1 if thread is NOT sleeping on thread_sem */
- u8 thread_active;
- u8 dev_status_timer_flags;
/* RELATIVE TARGET PORT IDENTIFER Counter */
u16 dev_rpti_counter;
/* Used for SAM Task Attribute ordering */
@@ -719,14 +701,10 @@ struct se_device {
u64 write_bytes;
spinlock_t stats_lock;
/* Active commands on this virtual SE device */
- atomic_t active_cmds;
atomic_t simple_cmds;
atomic_t depth_left;
atomic_t dev_ordered_id;
- atomic_t dev_tur_active;
atomic_t execute_tasks;
- atomic_t dev_status_thr_count;
- atomic_t dev_hoq_count;
atomic_t dev_ordered_sync;
atomic_t dev_qf_count;
struct se_obj dev_obj;
@@ -734,14 +712,9 @@ struct se_device {
struct se_obj dev_export_obj;
struct se_queue_obj dev_queue_obj;
spinlock_t delayed_cmd_lock;
- spinlock_t ordered_cmd_lock;
spinlock_t execute_task_lock;
- spinlock_t state_task_lock;
- spinlock_t dev_alua_lock;
spinlock_t dev_reservation_lock;
- spinlock_t dev_state_lock;
spinlock_t dev_status_lock;
- spinlock_t dev_status_thr_lock;
spinlock_t se_port_lock;
spinlock_t se_tmr_lock;
spinlock_t qf_cmd_lock;
@@ -753,14 +726,10 @@ struct se_device {
struct t10_pr_registration *dev_pr_res_holder;
struct list_head dev_sep_list;
struct list_head dev_tmr_list;
- struct timer_list dev_status_timer;
/* Pointer to descriptor for processing thread */
struct task_struct *process_thread;
- pid_t process_thread_pid;
- struct task_struct *dev_mgmt_thread;
struct work_struct qf_work_queue;
struct list_head delayed_cmd_list;
- struct list_head ordered_cmd_list;
struct list_head execute_task_list;
struct list_head state_task_list;
struct list_head qf_cmd_list;
@@ -771,8 +740,6 @@ struct se_device {
struct se_subsystem_api *transport;
/* Linked list for struct se_hba struct se_device list */
struct list_head dev_list;
- /* Linked list for struct se_global->g_se_dev_list */
- struct list_head g_se_dev_list;
} ____cacheline_aligned;
struct se_hba {
@@ -834,7 +801,6 @@ struct se_port {
u32 sep_index;
struct scsi_port_stats sep_stats;
/* Used for ALUA Target Port Groups membership */
- atomic_t sep_tg_pt_gp_active;
atomic_t sep_tg_pt_secondary_offline;
/* Used for PR ALL_TG_PT=1 */
atomic_t sep_tg_pt_ref_cnt;
diff --git a/include/target/target_core_transport.h b/include/target/target_core_transport.h
index c16e9431dd01..dac4f2d859fd 100644
--- a/include/target/target_core_transport.h
+++ b/include/target/target_core_transport.h
@@ -10,29 +10,6 @@
#define PYX_TRANSPORT_STATUS_INTERVAL 5 /* In seconds */
-#define PYX_TRANSPORT_SENT_TO_TRANSPORT 0
-#define PYX_TRANSPORT_WRITE_PENDING 1
-
-#define PYX_TRANSPORT_UNKNOWN_SAM_OPCODE -1
-#define PYX_TRANSPORT_HBA_QUEUE_FULL -2
-#define PYX_TRANSPORT_REQ_TOO_MANY_SECTORS -3
-#define PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES -4
-#define PYX_TRANSPORT_INVALID_CDB_FIELD -5
-#define PYX_TRANSPORT_INVALID_PARAMETER_LIST -6
-#define PYX_TRANSPORT_LU_COMM_FAILURE -7
-#define PYX_TRANSPORT_UNKNOWN_MODE_PAGE -8
-#define PYX_TRANSPORT_WRITE_PROTECTED -9
-#define PYX_TRANSPORT_RESERVATION_CONFLICT -10
-#define PYX_TRANSPORT_ILLEGAL_REQUEST -11
-#define PYX_TRANSPORT_USE_SENSE_REASON -12
-
-#ifndef SAM_STAT_RESERVATION_CONFLICT
-#define SAM_STAT_RESERVATION_CONFLICT 0x18
-#endif
-
-#define TRANSPORT_PLUGIN_FREE 0
-#define TRANSPORT_PLUGIN_REGISTERED 1
-
#define TRANSPORT_PLUGIN_PHBA_PDEV 1
#define TRANSPORT_PLUGIN_VHBA_PDEV 2
#define TRANSPORT_PLUGIN_VHBA_VDEV 3
@@ -158,7 +135,6 @@ extern int transport_generic_allocate_tasks(struct se_cmd *, unsigned char *);
extern int transport_handle_cdb_direct(struct se_cmd *);
extern int transport_generic_handle_cdb_map(struct se_cmd *);
extern int transport_generic_handle_data(struct se_cmd *);
-extern void transport_new_cmd_failure(struct se_cmd *);
extern int transport_generic_handle_tmr(struct se_cmd *);
extern bool target_stop_task(struct se_task *task, unsigned long *flags);
extern int transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *, u32,
diff --git a/include/video/omapdss.h b/include/video/omapdss.h
index b66ebb2032c6..378c7ed6760b 100644
--- a/include/video/omapdss.h
+++ b/include/video/omapdss.h
@@ -307,15 +307,8 @@ struct omap_dss_board_info {
void (*dsi_disable_pads)(int dsi_id, unsigned lane_mask);
};
-#if defined(CONFIG_OMAP2_DSS_MODULE) || defined(CONFIG_OMAP2_DSS)
/* Init with the board info */
extern int omap_display_init(struct omap_dss_board_info *board_data);
-#else
-static inline int omap_display_init(struct omap_dss_board_info *board_data)
-{
- return 0;
-}
-#endif
struct omap_display_platform_data {
struct omap_dss_board_info *board_data;