Diffstat (limited to 'include')
-rw-r--r--  include/drm/drm_edid.h       |   2
-rw-r--r--  include/linux/ata.h          |   2
-rw-r--r--  include/linux/bootmem.h      |  36
-rw-r--r--  include/linux/cpufreq.h      |   1
-rw-r--r--  include/linux/dmaengine.h    |   7
-rw-r--r--  include/linux/hdreg.h        |   1
-rw-r--r--  include/linux/ide.h          |   1
-rw-r--r--  include/linux/libata.h       |   6
-rw-r--r--  include/linux/mm.h           |   3
-rw-r--r--  include/linux/mm_types.h     |   3
-rw-r--r--  include/linux/netdevice.h    |   1
-rw-r--r--  include/linux/percpu.h       | 112
-rw-r--r--  include/linux/rcuclassic.h   |   6
-rw-r--r--  include/linux/rcupdate.h     |   4
-rw-r--r--  include/linux/rcupreempt.h   |  15
-rw-r--r--  include/linux/rcutree.h      |   6
-rw-r--r--  include/linux/sched.h        |   7
-rw-r--r--  include/linux/serio.h        |   2
-rw-r--r--  include/linux/vmalloc.h      |   4
-rw-r--r--  include/net/net_namespace.h  |  27
20 files changed, 172 insertions(+), 74 deletions(-)
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index ff8d27af4786..a11cc9d32591 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -69,8 +69,8 @@ struct detailed_pixel_timing {
u8 hborder;
u8 vborder;
u8 unknown0:1;
- u8 vsync_positive:1;
u8 hsync_positive:1;
+ u8 vsync_positive:1;
u8 separate_sync:2;
u8 stereo:1;
u8 unknown6:1;
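[editor's note] The reordering above matters because GCC allocates little-endian bitfields starting at bit 0, so declaration order must track the EDID flag byte exactly; hsync polarity sits in a lower bit than vsync polarity. A minimal sketch of decoding that byte by hand, assuming the EDID 1.3 byte-17 layout (bit 1 = hsync polarity, bit 2 = vsync polarity); decode_dtd_flags() is a hypothetical helper, not kernel code:

static void decode_dtd_flags(unsigned char flags, int *hsync_pos, int *vsync_pos)
{
        *hsync_pos = (flags >> 1) & 1;  /* bit 1: hsync polarity */
        *vsync_pos = (flags >> 2) & 1;  /* bit 2: vsync polarity */
}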
diff --git a/include/linux/ata.h b/include/linux/ata.h
index 08a86d5cdf1b..9a061accd8b8 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -89,6 +89,8 @@ enum {
ATA_ID_DLF = 128,
ATA_ID_CSFO = 129,
ATA_ID_CFA_POWER = 160,
+ ATA_ID_CFA_KEY_MGMT = 162,
+ ATA_ID_CFA_MODES = 163,
ATA_ID_ROT_SPEED = 217,
ATA_ID_PIO4 = (1 << 1),
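[editor's note] The two new constants index the same 256-word IDENTIFY DEVICE buffer the other ATA_ID_* values address. A hedged sketch of using them, assuming id[] has already been byte-swapped to CPU endianness as libata's ata_id_* helpers expect; cfa_has_key_mgmt() is hypothetical and the bit semantics belong to the CF specification:

#include <linux/ata.h>

static inline int cfa_has_key_mgmt(const u16 *id)
{
        return id[ATA_ID_CFA_KEY_MGMT] != 0;    /* word 162 */
}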
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index 95837bfb5256..455d83219fae 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -65,23 +65,20 @@ extern void free_bootmem(unsigned long addr, unsigned long size);
#define BOOTMEM_DEFAULT 0
#define BOOTMEM_EXCLUSIVE (1<<0)
+extern int reserve_bootmem(unsigned long addr,
+ unsigned long size,
+ int flags);
extern int reserve_bootmem_node(pg_data_t *pgdat,
- unsigned long physaddr,
- unsigned long size,
- int flags);
-#ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
-extern int reserve_bootmem(unsigned long addr, unsigned long size, int flags);
-#endif
+ unsigned long physaddr,
+ unsigned long size,
+ int flags);
-extern void *__alloc_bootmem_nopanic(unsigned long size,
+extern void *__alloc_bootmem(unsigned long size,
unsigned long align,
unsigned long goal);
-extern void *__alloc_bootmem(unsigned long size,
+extern void *__alloc_bootmem_nopanic(unsigned long size,
unsigned long align,
unsigned long goal);
-extern void *__alloc_bootmem_low(unsigned long size,
- unsigned long align,
- unsigned long goal);
extern void *__alloc_bootmem_node(pg_data_t *pgdat,
unsigned long size,
unsigned long align,
@@ -90,30 +87,35 @@ extern void *__alloc_bootmem_node_nopanic(pg_data_t *pgdat,
unsigned long size,
unsigned long align,
unsigned long goal);
+extern void *__alloc_bootmem_low(unsigned long size,
+ unsigned long align,
+ unsigned long goal);
extern void *__alloc_bootmem_low_node(pg_data_t *pgdat,
unsigned long size,
unsigned long align,
unsigned long goal);
-#ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
+
#define alloc_bootmem(x) \
__alloc_bootmem(x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
#define alloc_bootmem_nopanic(x) \
__alloc_bootmem_nopanic(x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
-#define alloc_bootmem_low(x) \
- __alloc_bootmem_low(x, SMP_CACHE_BYTES, 0)
#define alloc_bootmem_pages(x) \
__alloc_bootmem(x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
#define alloc_bootmem_pages_nopanic(x) \
__alloc_bootmem_nopanic(x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
-#define alloc_bootmem_low_pages(x) \
- __alloc_bootmem_low(x, PAGE_SIZE, 0)
#define alloc_bootmem_node(pgdat, x) \
__alloc_bootmem_node(pgdat, x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
#define alloc_bootmem_pages_node(pgdat, x) \
__alloc_bootmem_node(pgdat, x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
+#define alloc_bootmem_pages_node_nopanic(pgdat, x) \
+ __alloc_bootmem_node_nopanic(pgdat, x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
+
+#define alloc_bootmem_low(x) \
+ __alloc_bootmem_low(x, SMP_CACHE_BYTES, 0)
+#define alloc_bootmem_low_pages(x) \
+ __alloc_bootmem_low(x, PAGE_SIZE, 0)
#define alloc_bootmem_low_pages_node(pgdat, x) \
__alloc_bootmem_low_node(pgdat, x, PAGE_SIZE, 0)
-#endif /* !CONFIG_HAVE_ARCH_BOOTMEM_NODE */
extern int reserve_bootmem_generic(unsigned long addr, unsigned long size,
int flags);
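[editor's note] With the CONFIG_HAVE_ARCH_BOOTMEM_NODE guards gone, the wrappers above are available unconditionally. A sketch of typical early-boot usage, assuming it runs from arch setup code before the page allocator is up; both helpers are hypothetical:

#include <linux/kernel.h>
#include <linux/bootmem.h>

static void __init reserve_my_region(unsigned long phys, unsigned long size)
{
        /* BOOTMEM_EXCLUSIVE makes an overlapping reservation fail. */
        if (reserve_bootmem(phys, size, BOOTMEM_EXCLUSIVE))
                panic("cannot reserve %#lx-%#lx\n", phys, phys + size);
}

static void * __init alloc_my_table(void)
{
        /* alloc_bootmem() panics on failure and returns zeroed memory;
         * the _nopanic variant returns NULL instead. */
        return alloc_bootmem(PAGE_SIZE);
}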
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 384b38d3e8e2..161042746afc 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -234,7 +234,6 @@ struct cpufreq_driver {
int (*suspend) (struct cpufreq_policy *policy, pm_message_t pmsg);
int (*resume) (struct cpufreq_policy *policy);
struct freq_attr **attr;
- bool hide_interface;
};
/* flags */
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index f0413845f20e..1956c8d46d32 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -97,7 +97,6 @@ typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t;
/**
* struct dma_chan_percpu - the per-CPU part of struct dma_chan
- * @refcount: local_t used for open-coded "bigref" counting
* @memcpy_count: transaction counter
* @bytes_transferred: byte counter
*/
@@ -114,9 +113,6 @@ struct dma_chan_percpu {
* @cookie: last cookie value returned to client
* @chan_id: channel ID for sysfs
* @dev: class device for sysfs
- * @refcount: kref, used in "bigref" slow-mode
- * @slow_ref: indicates that the DMA channel is free
- * @rcu: the DMA channel's RCU head
* @device_node: used to add this to the device chan list
* @local: per-cpu pointer to a struct dma_chan_percpu
* @client-count: how many clients are using this channel
@@ -213,8 +209,6 @@ struct dma_async_tx_descriptor {
* @global_node: list_head for global dma_device_list
* @cap_mask: one or more dma_capability flags
* @max_xor: maximum number of xor sources, 0 if no capability
- * @refcount: reference count
- * @done: IO completion struct
* @dev_id: unique device ID
* @dev: struct device reference for dma mapping api
* @device_alloc_chan_resources: allocate resources and return the
@@ -227,6 +221,7 @@ struct dma_async_tx_descriptor {
* @device_prep_dma_interrupt: prepares an end of chain interrupt operation
* @device_prep_slave_sg: prepares a slave dma operation
* @device_terminate_all: terminate all pending operations
+ * @device_is_tx_complete: poll for transaction completion
* @device_issue_pending: push pending transactions to hardware
*/
struct dma_device {
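[editor's note] The kernel-doc additions above document the completion-polling hook; clients reach it through the dma_async_is_tx_complete() wrapper rather than calling the method directly. A hedged sketch; wait_for_copy() is hypothetical and busy-waits purely for illustration:

#include <linux/errno.h>
#include <linux/dmaengine.h>

static int wait_for_copy(struct dma_chan *chan, dma_cookie_t cookie)
{
        dma_cookie_t last, used;
        enum dma_status status;

        do {
                status = dma_async_is_tx_complete(chan, cookie, &last, &used);
        } while (status == DMA_IN_PROGRESS);

        return status == DMA_SUCCESS ? 0 : -EIO;
}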
diff --git a/include/linux/hdreg.h b/include/linux/hdreg.h
index c37e9241fae7..ed21bd3dbd25 100644
--- a/include/linux/hdreg.h
+++ b/include/linux/hdreg.h
@@ -511,7 +511,6 @@ struct hd_driveid {
unsigned short words69_70[2]; /* reserved words 69-70
* future command overlap and queuing
*/
- /* HDIO_GET_IDENTITY currently returns only words 0 through 70 */
unsigned short words71_74[4]; /* reserved words 71-74
* for IDENTIFY PACKET DEVICE command
*/
diff --git a/include/linux/ide.h b/include/linux/ide.h
index fe235b65207e..e0cedfe9fad4 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -866,6 +866,7 @@ struct ide_host {
unsigned int n_ports;
struct device *dev[2];
unsigned int (*init_chipset)(struct pci_dev *);
+ irq_handler_t irq_handler;
unsigned long host_flags;
void *host_priv;
ide_hwif_t *cur_port; /* for hosts requiring serialization */
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 5d87bc09a1f5..dc18b87ed722 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -275,7 +275,7 @@ enum {
* advised to wait only for the following duration before
* doing SRST.
*/
- ATA_TMOUT_PMP_SRST_WAIT = 1000,
+ ATA_TMOUT_PMP_SRST_WAIT = 5000,
/* ATA bus states */
BUS_UNKNOWN = 0,
@@ -530,6 +530,7 @@ struct ata_queued_cmd {
unsigned long flags; /* ATA_QCFLAG_xxx */
unsigned int tag;
unsigned int n_elem;
+ unsigned int orig_n_elem;
int dma_dir;
@@ -750,7 +751,8 @@ struct ata_port {
acpi_handle acpi_handle;
struct ata_acpi_gtm __acpi_init_gtm; /* use ata_acpi_init_gtm() */
#endif
- u8 sector_buf[ATA_SECT_SIZE]; /* owned by EH */
+ /* owned by EH */
+ u8 sector_buf[ATA_SECT_SIZE] ____cacheline_aligned;
};
/* The following initializer overrides a method to NULL whether one of
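[editor's note] The ____cacheline_aligned annotation gives the EH-owned buffer its own cacheline: it is used as a DMA target, and cache maintenance over it must not clobber neighbouring fields on non-coherent platforms. A generic sketch of the same pattern, not libata code; struct my_port is hypothetical:

#include <linux/types.h>
#include <linux/cache.h>
#include <linux/spinlock.h>

struct my_port {
        spinlock_t lock;
        unsigned int flags;
        u8 bounce[512] ____cacheline_aligned;   /* isolated DMA target */
};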
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 065cdf8c09fb..b1ea37fc7a24 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -104,6 +104,7 @@ extern unsigned int kobjsize(const void *objp);
#define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
#define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
#define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
+#define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
#ifndef VM_STACK_DEFAULT_FLAGS /* arch can override this */
#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
@@ -145,7 +146,7 @@ extern pgprot_t protection_map[16];
*/
static inline int is_linear_pfn_mapping(struct vm_area_struct *vma)
{
- return ((vma->vm_flags & VM_PFNMAP) && vma->vm_pgoff);
+ return (vma->vm_flags & VM_PFN_AT_MMAP);
}
static inline int is_pfn_mapping(struct vm_area_struct *vma)
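[editor's note] is_linear_pfn_mapping() previously inferred "fully remapped at mmap time" from a non-zero vm_pgoff, which misclassified mappings that legitimately start at offset zero; the explicit VM_PFN_AT_MMAP flag removes the guesswork. A sketch of the path that sets it, assuming (per the corresponding mm change) remap_pfn_range() marks a vma it covers completely; mydrv_mmap() and the pfn value are hypothetical:

#include <linux/fs.h>
#include <linux/mm.h>

static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
{
        unsigned long pfn = 0x10000;    /* hypothetical device frame */

        /* Covers the whole vma, so the vma gets VM_PFN_AT_MMAP. */
        return remap_pfn_range(vma, vma->vm_start, pfn,
                               vma->vm_end - vma->vm_start,
                               vma->vm_page_prot);
}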
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 92915e81443f..d84feb7bdbf0 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -276,4 +276,7 @@ struct mm_struct {
#endif
};
+/* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
+#define mm_cpumask(mm) (&(mm)->cpu_vm_mask)
+
#endif /* _LINUX_MM_TYPES_H */
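[editor's note] A sketch of the new accessor in use; going through mm_cpumask() instead of &mm->cpu_vm_mask keeps callers working if the mask's storage later changes. note_mm_on_cpu() is hypothetical:

#include <linux/mm_types.h>
#include <linux/cpumask.h>

static void note_mm_on_cpu(struct mm_struct *mm, int cpu)
{
        cpumask_set_cpu(cpu, mm_cpumask(mm));
}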
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index ec54785d34f9..659366734f3f 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1079,6 +1079,7 @@ extern void synchronize_net(void);
extern int register_netdevice_notifier(struct notifier_block *nb);
extern int unregister_netdevice_notifier(struct notifier_block *nb);
extern int init_dummy_netdev(struct net_device *dev);
+extern void netdev_resync_ops(struct net_device *dev);
extern int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
extern struct net_device *dev_get_by_index(struct net *net, int ifindex);
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 3577ffd90d45..ee5615d65211 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -5,6 +5,7 @@
#include <linux/slab.h> /* For kmalloc() */
#include <linux/smp.h>
#include <linux/cpumask.h>
+#include <linux/pfn.h>
#include <asm/percpu.h>
@@ -52,17 +53,18 @@
#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
-/* Enough to cover all DEFINE_PER_CPUs in kernel, including modules. */
-#ifndef PERCPU_ENOUGH_ROOM
+/* enough to cover all DEFINE_PER_CPUs in modules */
#ifdef CONFIG_MODULES
-#define PERCPU_MODULE_RESERVE 8192
+#define PERCPU_MODULE_RESERVE (8 << 10)
#else
-#define PERCPU_MODULE_RESERVE 0
+#define PERCPU_MODULE_RESERVE 0
#endif
+#ifndef PERCPU_ENOUGH_ROOM
#define PERCPU_ENOUGH_ROOM \
- (__per_cpu_end - __per_cpu_start + PERCPU_MODULE_RESERVE)
-#endif /* PERCPU_ENOUGH_ROOM */
+ (ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES) + \
+ PERCPU_MODULE_RESERVE)
+#endif
/*
* Must be an lvalue. Since @var must be a simple identifier,
@@ -76,52 +78,94 @@
#ifdef CONFIG_SMP
+#ifdef CONFIG_HAVE_DYNAMIC_PER_CPU_AREA
+
+/* minimum unit size, also is the maximum supported allocation size */
+#define PCPU_MIN_UNIT_SIZE PFN_ALIGN(64 << 10)
+
+/*
+ * PERCPU_DYNAMIC_RESERVE indicates the amount of free area to piggy
+ * back on the first chunk for dynamic percpu allocation if arch is
+ * manually allocating and mapping it for faster access (as a part of
+ * large page mapping for example).
+ *
+ * The following values give between one and two pages of free space
+ * after typical minimal boot (2-way SMP, single disk and NIC) with
+ * both defconfig and a distro config on x86_64 and 32. More
+ * intelligent way to determine this would be nice.
+ */
+#if BITS_PER_LONG > 32
+#define PERCPU_DYNAMIC_RESERVE (20 << 10)
+#else
+#define PERCPU_DYNAMIC_RESERVE (12 << 10)
+#endif
+
+extern void *pcpu_base_addr;
+
+typedef struct page * (*pcpu_get_page_fn_t)(unsigned int cpu, int pageno);
+typedef void (*pcpu_populate_pte_fn_t)(unsigned long addr);
+
+extern size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
+ size_t static_size, size_t reserved_size,
+ ssize_t dyn_size, ssize_t unit_size,
+ void *base_addr,
+ pcpu_populate_pte_fn_t populate_pte_fn);
+
+extern ssize_t __init pcpu_embed_first_chunk(
+ size_t static_size, size_t reserved_size,
+ ssize_t dyn_size, ssize_t unit_size);
+
+/*
+ * Use this to get to a cpu's version of the per-cpu object
+ * dynamically allocated. Non-atomic access to the current CPU's
+ * version should probably be combined with get_cpu()/put_cpu().
+ */
+#define per_cpu_ptr(ptr, cpu) SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)))
+
+extern void *__alloc_reserved_percpu(size_t size, size_t align);
+
+#else /* CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */
+
struct percpu_data {
void *ptrs[1];
};
#define __percpu_disguise(pdata) (struct percpu_data *)~(unsigned long)(pdata)
-/*
- * Use this to get to a cpu's version of the per-cpu object dynamically
- * allocated. Non-atomic access to the current CPU's version should
- * probably be combined with get_cpu()/put_cpu().
- */
-#define percpu_ptr(ptr, cpu) \
-({ \
- struct percpu_data *__p = __percpu_disguise(ptr); \
- (__typeof__(ptr))__p->ptrs[(cpu)]; \
+
+#define per_cpu_ptr(ptr, cpu) \
+({ \
+ struct percpu_data *__p = __percpu_disguise(ptr); \
+ (__typeof__(ptr))__p->ptrs[(cpu)]; \
})
-extern void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask);
-extern void percpu_free(void *__pdata);
+#endif /* CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */
+
+extern void *__alloc_percpu(size_t size, size_t align);
+extern void free_percpu(void *__pdata);
#else /* CONFIG_SMP */
-#define percpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })
+#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })
-static __always_inline void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask)
+static inline void *__alloc_percpu(size_t size, size_t align)
{
- return kzalloc(size, gfp);
+ /*
+ * Can't easily make larger alignment work with kmalloc. WARN
+ * on it. Larger alignment should only be used for module
+ * percpu sections on SMP for which this path isn't used.
+ */
+ WARN_ON_ONCE(align > SMP_CACHE_BYTES);
+ return kzalloc(size, GFP_KERNEL);
}
-static inline void percpu_free(void *__pdata)
+static inline void free_percpu(void *p)
{
- kfree(__pdata);
+ kfree(p);
}
#endif /* CONFIG_SMP */
-#define percpu_alloc_mask(size, gfp, mask) \
- __percpu_alloc_mask((size), (gfp), &(mask))
-
-#define percpu_alloc(size, gfp) percpu_alloc_mask((size), (gfp), cpu_online_map)
-
-/* (legacy) interface for use without CPU hotplug handling */
-
-#define __alloc_percpu(size) percpu_alloc_mask((size), GFP_KERNEL, \
- cpu_possible_map)
-#define alloc_percpu(type) (type *)__alloc_percpu(sizeof(type))
-#define free_percpu(ptr) percpu_free((ptr))
-#define per_cpu_ptr(ptr, cpu) percpu_ptr((ptr), (cpu))
+#define alloc_percpu(type) (type *)__alloc_percpu(sizeof(type), \
+ __alignof__(type))
#endif /* __LINUX_PERCPU_H */
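[editor's note] The legacy percpu_alloc_mask()/percpu_free() interface is gone; alloc_percpu() now forwards the type's natural alignment to __alloc_percpu(), and per_cpu_ptr() covers dynamically allocated objects on both SMP and UP builds. A sketch of the reworked API; struct hitcount and both functions are hypothetical:

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/smp.h>

struct hitcount {
        unsigned long hits;
        unsigned long misses;
};

static struct hitcount *counters;

static int __init stats_init(void)
{
        counters = alloc_percpu(struct hitcount);
        return counters ? 0 : -ENOMEM;
}

static void bump_hit(void)
{
        int cpu = get_cpu();            /* pin to this CPU */

        per_cpu_ptr(counters, cpu)->hits++;
        put_cpu();
}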
diff --git a/include/linux/rcuclassic.h b/include/linux/rcuclassic.h
index f3f697df1d71..80044a4f3ab9 100644
--- a/include/linux/rcuclassic.h
+++ b/include/linux/rcuclassic.h
@@ -181,4 +181,10 @@ extern long rcu_batches_completed_bh(void);
#define rcu_enter_nohz() do { } while (0)
#define rcu_exit_nohz() do { } while (0)
+/* A context switch is a grace period for rcuclassic. */
+static inline int rcu_blocking_is_gp(void)
+{
+ return num_online_cpus() == 1;
+}
+
#endif /* __LINUX_RCUCLASSIC_H */
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 921340a7b71c..528343e6da51 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -52,6 +52,9 @@ struct rcu_head {
void (*func)(struct rcu_head *head);
};
+/* Internal to kernel, but needed by rcupreempt.h. */
+extern int rcu_scheduler_active;
+
#if defined(CONFIG_CLASSIC_RCU)
#include <linux/rcuclassic.h>
#elif defined(CONFIG_TREE_RCU)
@@ -265,6 +268,7 @@ extern void rcu_barrier_sched(void);
/* Internal to kernel */
extern void rcu_init(void);
+extern void rcu_scheduler_starting(void);
extern int rcu_needs_cpu(int cpu);
#endif /* __LINUX_RCUPDATE_H */
diff --git a/include/linux/rcupreempt.h b/include/linux/rcupreempt.h
index 3e05c09b54a2..74304b4538d8 100644
--- a/include/linux/rcupreempt.h
+++ b/include/linux/rcupreempt.h
@@ -142,4 +142,19 @@ static inline void rcu_exit_nohz(void)
#define rcu_exit_nohz() do { } while (0)
#endif /* CONFIG_NO_HZ */
+/*
+ * A context switch is a grace period for rcupreempt synchronize_rcu()
+ * only during early boot, before the scheduler has been initialized.
+ * So, how the heck do we get a context switch? Well, if the caller
+ * invokes synchronize_rcu(), they are willing to accept a context
+ * switch, so we simply pretend that one happened.
+ *
+ * After boot, there might be a blocked or preempted task in an RCU
+ * read-side critical section, so we cannot then take the fastpath.
+ */
+static inline int rcu_blocking_is_gp(void)
+{
+ return num_online_cpus() == 1 && !rcu_scheduler_active;
+}
+
#endif /* __LINUX_RCUPREEMPT_H */
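[editor's note] A sketch of the fastpath these rcu_blocking_is_gp() helpers enable, paraphrasing the synchronize_rcu() implementation of this era; the struct and wakeup callback are redefined locally here for illustration:

#include <linux/kernel.h>
#include <linux/completion.h>
#include <linux/rcupdate.h>

struct rcu_sync_sketch {
        struct rcu_head head;
        struct completion completion;
};

static void wakeme_sketch(struct rcu_head *head)
{
        struct rcu_sync_sketch *rcu =
                container_of(head, struct rcu_sync_sketch, head);

        complete(&rcu->completion);
}

static void synchronize_rcu_sketch(void)
{
        struct rcu_sync_sketch rcu;

        if (rcu_blocking_is_gp())
                return; /* early boot / UP: no readers can be in flight */

        init_completion(&rcu.completion);
        call_rcu(&rcu.head, wakeme_sketch); /* wake us after a grace period */
        wait_for_completion(&rcu.completion);
}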
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index d4368b7975c3..a722fb67bb2d 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -326,4 +326,10 @@ static inline void rcu_exit_nohz(void)
}
#endif /* CONFIG_NO_HZ */
+/* A context switch is a grace period for rcutree. */
+static inline int rcu_blocking_is_gp(void)
+{
+ return num_online_cpus() == 1;
+}
+
#endif /* __LINUX_RCUTREE_H */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index f0a50b20e8a0..46d680643f89 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1418,6 +1418,9 @@ struct task_struct {
#endif
};
+/* Future-safe accessor for struct task_struct's cpus_allowed. */
+#define tsk_cpumask(tsk) (&(tsk)->cpus_allowed)
+
/*
* Priority of a process goes from 0..MAX_PRIO-1, valid RT
* priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH
@@ -2303,9 +2306,13 @@ extern long sched_group_rt_runtime(struct task_group *tg);
extern int sched_group_set_rt_period(struct task_group *tg,
long rt_period_us);
extern long sched_group_rt_period(struct task_group *tg);
+extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);
#endif
#endif
+extern int task_can_switch_user(struct user_struct *up,
+ struct task_struct *tsk);
+
#ifdef CONFIG_TASK_XACCT
static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
{
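[editor's note] A sketch of the new tsk_cpumask() accessor: checking a task's affinity without touching cpus_allowed directly. task_allows_cpu() is hypothetical:

#include <linux/sched.h>
#include <linux/cpumask.h>

static int task_allows_cpu(struct task_struct *tsk, int cpu)
{
        return cpumask_test_cpu(cpu, tsk_cpumask(tsk));
}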
diff --git a/include/linux/serio.h b/include/linux/serio.h
index 1bcb357a01a1..e0417e4d3f15 100644
--- a/include/linux/serio.h
+++ b/include/linux/serio.h
@@ -212,7 +212,7 @@ static inline void serio_unpin_driver(struct serio *serio)
#define SERIO_FUJITSU 0x35
#define SERIO_ZHENHUA 0x36
#define SERIO_INEXIO 0x37
-#define SERIO_TOUCHIT213 0x37
+#define SERIO_TOUCHIT213 0x38
#define SERIO_W8001 0x39
#endif
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 9c0890c7a06a..a43ebec3a7b9 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -95,6 +95,9 @@ extern struct vm_struct *remove_vm_area(const void *addr);
extern int map_vm_area(struct vm_struct *area, pgprot_t prot,
struct page ***pages);
+extern int map_kernel_range_noflush(unsigned long start, unsigned long size,
+ pgprot_t prot, struct page **pages);
+extern void unmap_kernel_range_noflush(unsigned long addr, unsigned long size);
extern void unmap_kernel_range(unsigned long addr, unsigned long size);
/* Allocate/destroy a 'vmalloc' VM area. */
@@ -110,5 +113,6 @@ extern long vwrite(char *buf, char *addr, unsigned long count);
*/
extern rwlock_t vmlist_lock;
extern struct vm_struct *vmlist;
+extern __init void vm_area_register_early(struct vm_struct *vm, size_t align);
#endif /* _LINUX_VMALLOC_H */
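[editor's note] A sketch of arch-early use, modeled on what the percpu first-chunk setup does: the caller fills in size and flags, and vm_area_register_early() assigns the address and links the area into vmlist before vmalloc_init() runs. claim_early_area() is hypothetical:

#include <linux/init.h>
#include <linux/vmalloc.h>

static struct vm_struct early_vm;

static void __init claim_early_area(size_t size)
{
        early_vm.flags = VM_ALLOC;
        early_vm.size = size;
        vm_area_register_early(&early_vm, PAGE_SIZE);
        /* early_vm.addr now holds the reserved kernel VM range */
}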
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index 6fc13d905c5f..ded434b032a4 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -109,11 +109,6 @@ extern struct list_head net_namespace_list;
#ifdef CONFIG_NET_NS
extern void __put_net(struct net *net);
-static inline int net_alive(struct net *net)
-{
- return net && atomic_read(&net->count);
-}
-
static inline struct net *get_net(struct net *net)
{
atomic_inc(&net->count);
@@ -145,11 +140,6 @@ int net_eq(const struct net *net1, const struct net *net2)
}
#else
-static inline int net_alive(struct net *net)
-{
- return 1;
-}
-
static inline struct net *get_net(struct net *net)
{
return net;
@@ -234,6 +224,23 @@ struct pernet_operations {
void (*exit)(struct net *net);
};
+/*
+ * Use these carefully. If you implement a network device and it
+ * needs per network namespace operations use device pernet operations,
+ * otherwise use pernet subsys operations.
+ *
+ * This is critically important. Most of the network code cleanup
+ * runs with the assumption that dev_remove_pack has been called so no
+ * new packets will arrive during and after the cleanup functions have
+ * been called. dev_remove_pack is not per namespace so instead the
+ * guarantee of no more packets arriving in a network namespace is
+ * provided by ensuring that all network devices and all sockets have
+ * left the network namespace before the cleanup methods are called.
+ *
+ * For the longest time the ipv4 icmp code was registered as a pernet
+ * device which caused kernel oops, and panics during network
+ * namespace cleanup. So please don't get this wrong.
+ */
extern int register_pernet_subsys(struct pernet_operations *);
extern void unregister_pernet_subsys(struct pernet_operations *);
extern int register_pernet_gen_subsys(int *id, struct pernet_operations *);
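[editor's note] A sketch following the guidance in the comment above: per-namespace state for anything that is not a network device registers as a subsys, not a pernet device. All myproto_* names are hypothetical:

#include <linux/init.h>
#include <net/net_namespace.h>

static int __net_init myproto_net_init(struct net *net)
{
        return 0;       /* allocate per-namespace state here */
}

static void __net_exit myproto_net_exit(struct net *net)
{
        /* free per-namespace state here */
}

static struct pernet_operations myproto_net_ops = {
        .init = myproto_net_init,
        .exit = myproto_net_exit,
};

static int __init myproto_init(void)
{
        return register_pernet_subsys(&myproto_net_ops);
}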