author     Jeff Garzik <jeff@garzik.org>  2006-03-23 17:13:43 -0500
committer  Jeff Garzik <jeff@garzik.org>  2006-03-23 17:13:43 -0500
commit     88e3c1da8b3258a81c5c81d4e7e22557b7d71ba7 (patch)
tree       ab518773c0ff4606f1a57d00b5931332a7e1d96e /include
parent     fa4fa40a990f8f4eff65476bef32007c154bbac0 (diff)
parent     b0e6e962992b76580f4900b166a337bad7c1e81b (diff)
Merge branch 'master'
Diffstat (limited to 'include')
-rw-r--r--  include/asm-alpha/mmu_context.h | 5
-rw-r--r--  include/asm-alpha/topology.h | 4
-rw-r--r--  include/asm-generic/bug.h | 4
-rw-r--r--  include/asm-generic/percpu.h | 7
-rw-r--r--  include/asm-i386/alternative.h | 129
-rw-r--r--  include/asm-i386/arch_hooks.h | 3
-rw-r--r--  include/asm-i386/atomic.h | 36
-rw-r--r--  include/asm-i386/bitops.h | 7
-rw-r--r--  include/asm-i386/cache.h | 2
-rw-r--r--  include/asm-i386/cpufeature.h | 1
-rw-r--r--  include/asm-i386/mach-default/do_timer.h | 2
-rw-r--r--  include/asm-i386/mach-es7000/mach_mpparse.h | 10
-rw-r--r--  include/asm-i386/mach-visws/do_timer.h | 2
-rw-r--r--  include/asm-i386/mach-voyager/do_timer.h | 2
-rw-r--r--  include/asm-i386/mpspec.h | 1
-rw-r--r--  include/asm-i386/mtrr.h | 1
-rw-r--r--  include/asm-i386/mutex.h | 6
-rw-r--r--  include/asm-i386/pgtable-2level.h | 2
-rw-r--r--  include/asm-i386/pgtable-3level.h | 2
-rw-r--r--  include/asm-i386/rwlock.h | 56
-rw-r--r--  include/asm-i386/semaphore.h | 8
-rw-r--r--  include/asm-i386/spinlock.h | 34
-rw-r--r--  include/asm-i386/system.h | 62
-rw-r--r--  include/asm-i386/uaccess.h | 12
-rw-r--r--  include/asm-i386/unistd.h | 36
-rw-r--r--  include/asm-ia64/atomic.h | 8
-rw-r--r--  include/asm-ia64/cache.h | 2
-rw-r--r--  include/asm-m68k/atomic.h | 8
-rw-r--r--  include/asm-parisc/cache.h | 2
-rw-r--r--  include/asm-powerpc/atomic.h | 38
-rw-r--r--  include/asm-powerpc/cputable.h | 38
-rw-r--r--  include/asm-powerpc/cputime.h | 202
-rw-r--r--  include/asm-powerpc/firmware.h | 16
-rw-r--r--  include/asm-powerpc/irq.h | 6
-rw-r--r--  include/asm-powerpc/iseries/mf.h | 7
-rw-r--r--  include/asm-powerpc/lmb.h | 19
-rw-r--r--  include/asm-powerpc/mmu.h | 1
-rw-r--r--  include/asm-powerpc/paca.h | 7
-rw-r--r--  include/asm-powerpc/percpu.h | 7
-rw-r--r--  include/asm-powerpc/pgtable-4k.h | 11
-rw-r--r--  include/asm-powerpc/pgtable.h | 9
-rw-r--r--  include/asm-powerpc/ppc_asm.h | 42
-rw-r--r--  include/asm-powerpc/processor.h | 1
-rw-r--r--  include/asm-powerpc/prom.h | 6
-rw-r--r--  include/asm-powerpc/rwsem.h | 2
-rw-r--r--  include/asm-powerpc/synch.h | 2
-rw-r--r--  include/asm-powerpc/system.h | 6
-rw-r--r--  include/asm-powerpc/time.h | 15
-rw-r--r--  include/asm-ppc/harrier.h | 2
-rw-r--r--  include/asm-ppc/ibm44x.h | 2
-rw-r--r--  include/asm-ppc/ibm4xx.h | 4
-rw-r--r--  include/asm-ppc/io.h | 7
-rw-r--r--  include/asm-ppc/mpc10x.h | 3
-rw-r--r--  include/asm-ppc/mpc52xx.h | 1
-rw-r--r--  include/asm-ppc/mpc8260.h | 1
-rw-r--r--  include/asm-ppc/mpc83xx.h | 1
-rw-r--r--  include/asm-ppc/mpc85xx.h | 1
-rw-r--r--  include/asm-ppc/mpc8xx.h | 3
-rw-r--r--  include/asm-ppc/pgtable.h | 6
-rw-r--r--  include/asm-ppc/ppc_sys.h | 34
-rw-r--r--  include/asm-ppc/time.h | 5
-rw-r--r--  include/asm-ppc/todc.h | 2
-rw-r--r--  include/asm-ppc/xparameters.h | 18
-rw-r--r--  include/asm-s390/atomic.h | 18
-rw-r--r--  include/asm-s390/percpu.h | 7
-rw-r--r--  include/asm-sparc64/atomic.h | 10
-rw-r--r--  include/asm-sparc64/cache.h | 2
-rw-r--r--  include/asm-sparc64/percpu.h | 7
-rw-r--r--  include/asm-um/alternative.h | 6
-rw-r--r--  include/asm-x86_64/atomic.h | 8
-rw-r--r--  include/asm-x86_64/cache.h | 2
-rw-r--r--  include/asm-x86_64/percpu.h | 7
-rw-r--r--  include/linux/cache.h | 4
-rw-r--r--  include/linux/cdrom.h | 5
-rw-r--r--  include/linux/eventpoll.h | 8
-rw-r--r--  include/linux/ext3_fs.h | 9
-rw-r--r--  include/linux/ext3_fs_i.h | 7
-rw-r--r--  include/linux/file.h | 28
-rw-r--r--  include/linux/fs.h | 22
-rw-r--r--  include/linux/generic_serial.h | 4
-rw-r--r--  include/linux/genhd.h | 14
-rw-r--r--  include/linux/init_task.h | 10
-rw-r--r--  include/linux/jbd.h | 7
-rw-r--r--  include/linux/kernel.h | 3
-rw-r--r--  include/linux/kprobes.h | 3
-rw-r--r--  include/linux/loop.h | 3
-rw-r--r--  include/linux/msdos_fs.h | 3
-rw-r--r--  include/linux/nbd.h | 3
-rw-r--r--  include/linux/ncp_fs_i.h | 2
-rw-r--r--  include/linux/ncp_fs_sb.h | 5
-rw-r--r--  include/linux/pm.h | 3
-rw-r--r--  include/linux/profile.h | 2
-rw-r--r--  include/linux/quota.h | 7
-rw-r--r--  include/linux/raid/raid1.h | 2
-rw-r--r--  include/linux/rcupdate.h | 2
-rw-r--r--  include/linux/seq_file.h | 4
-rw-r--r--  include/linux/swap.h | 5
-rw-r--r--  include/linux/tty.h | 8
-rw-r--r--  include/linux/tty_flip.h | 12
-rw-r--r--  include/linux/udf_fs_sb.h | 4
-rw-r--r--  include/linux/vt_kern.h | 5
101 files changed, 871 insertions, 369 deletions
diff --git a/include/asm-alpha/mmu_context.h b/include/asm-alpha/mmu_context.h
index 6f92482cc96c..0c017fc181c1 100644
--- a/include/asm-alpha/mmu_context.h
+++ b/include/asm-alpha/mmu_context.h
@@ -231,9 +231,8 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
int i;
- for (i = 0; i < NR_CPUS; i++)
- if (cpu_online(i))
- mm->context[i] = 0;
+ for_each_online_cpu(i)
+ mm->context[i] = 0;
if (tsk != current)
task_thread_info(tsk)->pcb.ptbr
= ((unsigned long)mm->pgd - IDENT_ADDR) >> PAGE_SHIFT;
diff --git a/include/asm-alpha/topology.h b/include/asm-alpha/topology.h
index eb740e280d9c..420ccde6b916 100644
--- a/include/asm-alpha/topology.h
+++ b/include/asm-alpha/topology.h
@@ -27,8 +27,8 @@ static inline cpumask_t node_to_cpumask(int node)
cpumask_t node_cpu_mask = CPU_MASK_NONE;
int cpu;
- for(cpu = 0; cpu < NR_CPUS; cpu++) {
- if (cpu_online(cpu) && (cpu_to_node(cpu) == node))
+ for_each_online_cpu(cpu) {
+ if (cpu_to_node(cpu) == node)
cpu_set(cpu, node_cpu_mask);
}
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index 400c2b41896e..1a565a9d2fa7 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -7,7 +7,7 @@
#ifdef CONFIG_BUG
#ifndef HAVE_ARCH_BUG
#define BUG() do { \
- printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \
+ printk("BUG: failure at %s:%d/%s()!\n", __FILE__, __LINE__, __FUNCTION__); \
panic("BUG!"); \
} while (0)
#endif
@@ -19,7 +19,7 @@
#ifndef HAVE_ARCH_WARN_ON
#define WARN_ON(condition) do { \
if (unlikely((condition)!=0)) { \
- printk("Badness in %s at %s:%d\n", __FUNCTION__, __FILE__, __LINE__); \
+ printk("BUG: warning at %s:%d/%s()\n", __FILE__, __LINE__, __FUNCTION__); \
dump_stack(); \
} \
} while (0)
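
As a usage sketch (the helper below is purely illustrative, not part of this patch): with the new message format, WARN_ON() logs "BUG: warning at file:line/function()" plus a stack dump and lets execution continue, whereas BUG() logs "BUG: failure at ..." and panics. This assumes <asm/bug.h> pulls in the generic definitions, as it does on most architectures.

#include <linux/kernel.h>
#include <asm/bug.h>

static int example_check_count(int nr_items)
{
	/* Non-fatal sanity check: prints the warning and a stack trace,
	 * then keeps running -- unlike BUG(), which would panic here. */
	WARN_ON(nr_items < 0);

	return nr_items < 0 ? 0 : nr_items;
}
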
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index 9044aeb37828..78cf45547e31 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -19,10 +19,9 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
#define percpu_modcopy(pcpudst, src, size) \
do { \
unsigned int __i; \
- for (__i = 0; __i < NR_CPUS; __i++) \
- if (cpu_possible(__i)) \
- memcpy((pcpudst)+__per_cpu_offset[__i], \
- (src), (size)); \
+ for_each_cpu(__i) \
+ memcpy((pcpudst)+__per_cpu_offset[__i], \
+ (src), (size)); \
} while (0)
#else /* ! SMP */
diff --git a/include/asm-i386/alternative.h b/include/asm-i386/alternative.h
new file mode 100644
index 000000000000..e201decea0c9
--- /dev/null
+++ b/include/asm-i386/alternative.h
@@ -0,0 +1,129 @@
+#ifndef _I386_ALTERNATIVE_H
+#define _I386_ALTERNATIVE_H
+
+#ifdef __KERNEL__
+
+struct alt_instr {
+ u8 *instr; /* original instruction */
+ u8 *replacement;
+ u8 cpuid; /* cpuid bit set for replacement */
+ u8 instrlen; /* length of original instruction */
+ u8 replacementlen; /* length of new instruction, <= instrlen */
+ u8 pad;
+};
+
+extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
+
+struct module;
+extern void alternatives_smp_module_add(struct module *mod, char *name,
+ void *locks, void *locks_end,
+ void *text, void *text_end);
+extern void alternatives_smp_module_del(struct module *mod);
+extern void alternatives_smp_switch(int smp);
+
+#endif
+
+/*
+ * Alternative instructions for different CPU types or capabilities.
+ *
+ * This allows the use of optimized instructions even on generic binary
+ * kernels.
+ *
+ * The length of oldinstr must be greater than or equal to the length
+ * of newinstr; it can be padded with nops as needed.
+ *
+ * For non barrier like inlines please define new variants
+ * without volatile and memory clobber.
+ */
+#define alternative(oldinstr, newinstr, feature) \
+ asm volatile ("661:\n\t" oldinstr "\n662:\n" \
+ ".section .altinstructions,\"a\"\n" \
+ " .align 4\n" \
+ " .long 661b\n" /* label */ \
+ " .long 663f\n" /* new instruction */ \
+ " .byte %c0\n" /* feature bit */ \
+ " .byte 662b-661b\n" /* sourcelen */ \
+ " .byte 664f-663f\n" /* replacementlen */ \
+ ".previous\n" \
+ ".section .altinstr_replacement,\"ax\"\n" \
+ "663:\n\t" newinstr "\n664:\n" /* replacement */\
+ ".previous" :: "i" (feature) : "memory")
+
+/*
+ * Alternative inline assembly with input.
+ *
+ * Peculiarities:
+ * No memory clobber here.
+ * Argument numbers start with 1.
+ * Best is to use constraints that are fixed size (like (%1) ... "r")
+ * If you use variable sized constraints like "m" or "g" in the
+ * replacement make sure to pad to the worst case length.
+ */
+#define alternative_input(oldinstr, newinstr, feature, input...) \
+ asm volatile ("661:\n\t" oldinstr "\n662:\n" \
+ ".section .altinstructions,\"a\"\n" \
+ " .align 4\n" \
+ " .long 661b\n" /* label */ \
+ " .long 663f\n" /* new instruction */ \
+ " .byte %c0\n" /* feature bit */ \
+ " .byte 662b-661b\n" /* sourcelen */ \
+ " .byte 664f-663f\n" /* replacementlen */ \
+ ".previous\n" \
+ ".section .altinstr_replacement,\"ax\"\n" \
+ "663:\n\t" newinstr "\n664:\n" /* replacement */\
+ ".previous" :: "i" (feature), ##input)
+
+/*
+ * Alternative inline assembly for SMP.
+ *
+ * alternative_smp() takes two versions (SMP first, UP second) and is
+ * for more complex stuff such as spinlocks.
+ *
+ * The LOCK_PREFIX macro defined here replaces the LOCK and
+ * LOCK_PREFIX macros used everywhere in the source tree.
+ *
+ * SMP alternatives use the same data structures as the other
+ * alternatives and the X86_FEATURE_UP flag to indicate the case of a
+ * UP system running a SMP kernel. The existing apply_alternatives()
+ * works fine for patching a SMP kernel for UP.
+ *
+ * The SMP alternative tables can be kept after boot and contain both
+ * UP and SMP versions of the instructions to allow switching back to
+ * SMP at runtime, when hotplugging in a new CPU, which is especially
+ * useful in virtualized environments.
+ *
+ * The very common lock prefix is handled as special case in a
+ * separate table which is a pure address list without replacement ptr
+ * and size information. That keeps the table sizes small.
+ */
+
+#ifdef CONFIG_SMP
+#define alternative_smp(smpinstr, upinstr, args...) \
+ asm volatile ("661:\n\t" smpinstr "\n662:\n" \
+ ".section .smp_altinstructions,\"a\"\n" \
+ " .align 4\n" \
+ " .long 661b\n" /* label */ \
+ " .long 663f\n" /* new instruction */ \
+ " .byte 0x68\n" /* X86_FEATURE_UP */ \
+ " .byte 662b-661b\n" /* sourcelen */ \
+ " .byte 664f-663f\n" /* replacementlen */ \
+ ".previous\n" \
+ ".section .smp_altinstr_replacement,\"awx\"\n" \
+ "663:\n\t" upinstr "\n" /* replacement */ \
+ "664:\n\t.fill 662b-661b,1,0x42\n" /* space for original */ \
+ ".previous" : args)
+
+#define LOCK_PREFIX \
+ ".section .smp_locks,\"a\"\n" \
+ " .align 4\n" \
+ " .long 661f\n" /* address */ \
+ ".previous\n" \
+ "661:\n\tlock; "
+
+#else /* ! CONFIG_SMP */
+#define alternative_smp(smpinstr, upinstr, args...) \
+ asm volatile (upinstr : args)
+#define LOCK_PREFIX ""
+#endif
+
+#endif /* _I386_ALTERNATIVE_H */
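
As a usage sketch (not taken from this patch, and the wrapper name is made up): a typical consumer of alternative() is barrier-style code that wants one instruction sequence on older CPUs and a patched-in replacement where a feature bit is set.

#include <asm/alternative.h>
#include <asm/cpufeature.h>

static inline void example_mb(void)
{
	/* Older CPUs execute the locked add; at boot, apply_alternatives()
	 * patches in "mfence" on processors with X86_FEATURE_XMM2 (SSE2).
	 * The replacement may be shorter than the original, never longer. */
	alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2);
}
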
diff --git a/include/asm-i386/arch_hooks.h b/include/asm-i386/arch_hooks.h
index 28b96a6fb9fa..238cf4275b96 100644
--- a/include/asm-i386/arch_hooks.h
+++ b/include/asm-i386/arch_hooks.h
@@ -24,4 +24,7 @@ extern void trap_init_hook(void);
extern void time_init_hook(void);
extern void mca_nmi_hook(void);
+extern int setup_early_printk(char *);
+extern void early_printk(const char *fmt, ...) __attribute__((format(printf,1,2)));
+
#endif
diff --git a/include/asm-i386/atomic.h b/include/asm-i386/atomic.h
index de649d3aa2d4..22d80ece95cb 100644
--- a/include/asm-i386/atomic.h
+++ b/include/asm-i386/atomic.h
@@ -10,12 +10,6 @@
* resource counting etc..
*/
-#ifdef CONFIG_SMP
-#define LOCK "lock ; "
-#else
-#define LOCK ""
-#endif
-
/*
* Make sure gcc doesn't try to be clever and move things around
* on us. We need to use _exactly_ the address the user gave us,
@@ -52,7 +46,7 @@ typedef struct { volatile int counter; } atomic_t;
static __inline__ void atomic_add(int i, atomic_t *v)
{
__asm__ __volatile__(
- LOCK "addl %1,%0"
+ LOCK_PREFIX "addl %1,%0"
:"=m" (v->counter)
:"ir" (i), "m" (v->counter));
}
@@ -67,7 +61,7 @@ static __inline__ void atomic_add(int i, atomic_t *v)
static __inline__ void atomic_sub(int i, atomic_t *v)
{
__asm__ __volatile__(
- LOCK "subl %1,%0"
+ LOCK_PREFIX "subl %1,%0"
:"=m" (v->counter)
:"ir" (i), "m" (v->counter));
}
@@ -86,7 +80,7 @@ static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
unsigned char c;
__asm__ __volatile__(
- LOCK "subl %2,%0; sete %1"
+ LOCK_PREFIX "subl %2,%0; sete %1"
:"=m" (v->counter), "=qm" (c)
:"ir" (i), "m" (v->counter) : "memory");
return c;
@@ -101,7 +95,7 @@ static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
static __inline__ void atomic_inc(atomic_t *v)
{
__asm__ __volatile__(
- LOCK "incl %0"
+ LOCK_PREFIX "incl %0"
:"=m" (v->counter)
:"m" (v->counter));
}
@@ -115,7 +109,7 @@ static __inline__ void atomic_inc(atomic_t *v)
static __inline__ void atomic_dec(atomic_t *v)
{
__asm__ __volatile__(
- LOCK "decl %0"
+ LOCK_PREFIX "decl %0"
:"=m" (v->counter)
:"m" (v->counter));
}
@@ -133,7 +127,7 @@ static __inline__ int atomic_dec_and_test(atomic_t *v)
unsigned char c;
__asm__ __volatile__(
- LOCK "decl %0; sete %1"
+ LOCK_PREFIX "decl %0; sete %1"
:"=m" (v->counter), "=qm" (c)
:"m" (v->counter) : "memory");
return c != 0;
@@ -152,7 +146,7 @@ static __inline__ int atomic_inc_and_test(atomic_t *v)
unsigned char c;
__asm__ __volatile__(
- LOCK "incl %0; sete %1"
+ LOCK_PREFIX "incl %0; sete %1"
:"=m" (v->counter), "=qm" (c)
:"m" (v->counter) : "memory");
return c != 0;
@@ -172,7 +166,7 @@ static __inline__ int atomic_add_negative(int i, atomic_t *v)
unsigned char c;
__asm__ __volatile__(
- LOCK "addl %2,%0; sets %1"
+ LOCK_PREFIX "addl %2,%0; sets %1"
:"=m" (v->counter), "=qm" (c)
:"ir" (i), "m" (v->counter) : "memory");
return c;
@@ -195,7 +189,7 @@ static __inline__ int atomic_add_return(int i, atomic_t *v)
/* Modern 486+ processor */
__i = i;
__asm__ __volatile__(
- LOCK "xaddl %0, %1;"
+ LOCK_PREFIX "xaddl %0, %1;"
:"=r"(i)
:"m"(v->counter), "0"(i));
return i + __i;
@@ -231,8 +225,14 @@ static __inline__ int atomic_sub_return(int i, atomic_t *v)
({ \
int c, old; \
c = atomic_read(v); \
- while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
+ for (;;) { \
+ if (unlikely(c == (u))) \
+ break; \
+ old = atomic_cmpxchg((v), c, c + (a)); \
+ if (likely(old == c)) \
+ break; \
c = old; \
+ } \
c != (u); \
})
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
@@ -242,11 +242,11 @@ static __inline__ int atomic_sub_return(int i, atomic_t *v)
/* These are x86-specific, used by some header files */
#define atomic_clear_mask(mask, addr) \
-__asm__ __volatile__(LOCK "andl %0,%1" \
+__asm__ __volatile__(LOCK_PREFIX "andl %0,%1" \
: : "r" (~(mask)),"m" (*addr) : "memory")
#define atomic_set_mask(mask, addr) \
-__asm__ __volatile__(LOCK "orl %0,%1" \
+__asm__ __volatile__(LOCK_PREFIX "orl %0,%1" \
: : "r" (mask),"m" (*(addr)) : "memory")
/* Atomic operations are already serializing on x86 */
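
The reworked atomic_add_unless() loop above reads more easily as a plain function; the sketch below is only a restatement of what the macro expands to (the function name is made up).

#include <linux/compiler.h>
#include <asm/atomic.h>

/* Add 'a' to *v unless *v equals 'u'; returns non-zero iff the add
 * actually happened. */
static inline int example_add_unless(atomic_t *v, int a, int u)
{
	int c = atomic_read(v);
	int old;

	for (;;) {
		if (unlikely(c == u))
			break;				/* excluded value reached */
		old = atomic_cmpxchg(v, c, c + a);	/* try the update */
		if (likely(old == c))
			break;				/* no one raced us */
		c = old;				/* raced: retry with the fresh value */
	}
	return c != u;
}
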
diff --git a/include/asm-i386/bitops.h b/include/asm-i386/bitops.h
index 88e6ca248cd7..7d20b95edb3b 100644
--- a/include/asm-i386/bitops.h
+++ b/include/asm-i386/bitops.h
@@ -7,6 +7,7 @@
#include <linux/config.h>
#include <linux/compiler.h>
+#include <asm/alternative.h>
/*
* These have to be done with inline assembly: that way the bit-setting
@@ -16,12 +17,6 @@
* bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
*/
-#ifdef CONFIG_SMP
-#define LOCK_PREFIX "lock ; "
-#else
-#define LOCK_PREFIX ""
-#endif
-
#define ADDR (*(volatile long *) addr)
/**
diff --git a/include/asm-i386/cache.h b/include/asm-i386/cache.h
index 615911e5bd24..ca15c9c665cf 100644
--- a/include/asm-i386/cache.h
+++ b/include/asm-i386/cache.h
@@ -10,4 +10,6 @@
#define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+#define __read_mostly __attribute__((__section__(".data.read_mostly")))
+
#endif
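
The new __read_mostly annotation is intended for variables that are written rarely but read on hot paths; placing them in .data.read_mostly keeps them off cache lines that see frequent writes. A minimal sketch (the variable is hypothetical):

#include <asm/cache.h>

/* Set once during boot, read constantly afterwards. */
static int example_hash_shift __read_mostly = 10;
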
diff --git a/include/asm-i386/cpufeature.h b/include/asm-i386/cpufeature.h
index c4ec2a4d8fdf..5c0b5876b931 100644
--- a/include/asm-i386/cpufeature.h
+++ b/include/asm-i386/cpufeature.h
@@ -70,6 +70,7 @@
#define X86_FEATURE_P3 (3*32+ 6) /* P3 */
#define X86_FEATURE_P4 (3*32+ 7) /* P4 */
#define X86_FEATURE_CONSTANT_TSC (3*32+ 8) /* TSC ticks at a constant rate */
+#define X86_FEATURE_UP (3*32+ 9) /* smp kernel running on up */
/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
#define X86_FEATURE_XMM3 (4*32+ 0) /* Streaming SIMD Extensions-3 */
diff --git a/include/asm-i386/mach-default/do_timer.h b/include/asm-i386/mach-default/do_timer.h
index 56211414fc95..6312c3e79814 100644
--- a/include/asm-i386/mach-default/do_timer.h
+++ b/include/asm-i386/mach-default/do_timer.h
@@ -18,7 +18,7 @@ static inline void do_timer_interrupt_hook(struct pt_regs *regs)
{
do_timer(regs);
#ifndef CONFIG_SMP
- update_process_times(user_mode(regs));
+ update_process_times(user_mode_vm(regs));
#endif
/*
* In the SMP case we use the local APIC timer interrupt to do the
diff --git a/include/asm-i386/mach-es7000/mach_mpparse.h b/include/asm-i386/mach-es7000/mach_mpparse.h
index 4a0637a3e208..99f66be240be 100644
--- a/include/asm-i386/mach-es7000/mach_mpparse.h
+++ b/include/asm-i386/mach-es7000/mach_mpparse.h
@@ -30,7 +30,8 @@ static inline int mps_oem_check(struct mp_config_table *mpc, char *oem,
return 0;
}
-static inline int es7000_check_dsdt()
+#ifdef CONFIG_ACPI
+static inline int es7000_check_dsdt(void)
{
struct acpi_table_header *header = NULL;
if(!acpi_get_table_header_early(ACPI_DSDT, &header))
@@ -54,6 +55,11 @@ static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id)
}
return 0;
}
-
+#else
+static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id)
+{
+ return 0;
+}
+#endif
#endif /* __ASM_MACH_MPPARSE_H */
diff --git a/include/asm-i386/mach-visws/do_timer.h b/include/asm-i386/mach-visws/do_timer.h
index 92d638fc8b11..95568e6ca91c 100644
--- a/include/asm-i386/mach-visws/do_timer.h
+++ b/include/asm-i386/mach-visws/do_timer.h
@@ -11,7 +11,7 @@ static inline void do_timer_interrupt_hook(struct pt_regs *regs)
do_timer(regs);
#ifndef CONFIG_SMP
- update_process_times(user_mode(regs));
+ update_process_times(user_mode_vm(regs));
#endif
/*
* In the SMP case we use the local APIC timer interrupt to do the
diff --git a/include/asm-i386/mach-voyager/do_timer.h b/include/asm-i386/mach-voyager/do_timer.h
index ae510e5d0d78..eaf518098981 100644
--- a/include/asm-i386/mach-voyager/do_timer.h
+++ b/include/asm-i386/mach-voyager/do_timer.h
@@ -5,7 +5,7 @@ static inline void do_timer_interrupt_hook(struct pt_regs *regs)
{
do_timer(regs);
#ifndef CONFIG_SMP
- update_process_times(user_mode(regs));
+ update_process_times(user_mode_vm(regs));
#endif
voyager_timer_interrupt(regs);
diff --git a/include/asm-i386/mpspec.h b/include/asm-i386/mpspec.h
index 64a0b8e6afeb..62113d3bfdc2 100644
--- a/include/asm-i386/mpspec.h
+++ b/include/asm-i386/mpspec.h
@@ -22,7 +22,6 @@ extern int mp_bus_id_to_type [MAX_MP_BUSSES];
extern int mp_irq_entries;
extern struct mpc_config_intsrc mp_irqs [MAX_IRQ_SOURCES];
extern int mpc_default_type;
-extern int mp_bus_id_to_pci_bus [MAX_MP_BUSSES];
extern unsigned long mp_lapic_addr;
extern int pic_mode;
extern int using_apic_timer;
diff --git a/include/asm-i386/mtrr.h b/include/asm-i386/mtrr.h
index 5b6ceda68c5f..64cf937c7e33 100644
--- a/include/asm-i386/mtrr.h
+++ b/include/asm-i386/mtrr.h
@@ -25,6 +25,7 @@
#include <linux/config.h>
#include <linux/ioctl.h>
+#include <linux/errno.h>
#define MTRR_IOCTL_BASE 'M'
diff --git a/include/asm-i386/mutex.h b/include/asm-i386/mutex.h
index 9b2199e829f3..05a538531229 100644
--- a/include/asm-i386/mutex.h
+++ b/include/asm-i386/mutex.h
@@ -9,6 +9,8 @@
#ifndef _ASM_MUTEX_H
#define _ASM_MUTEX_H
+#include "asm/alternative.h"
+
/**
* __mutex_fastpath_lock - try to take the lock by moving the count
* from 1 to a 0 value
@@ -27,7 +29,7 @@ do { \
typecheck_fn(fastcall void (*)(atomic_t *), fail_fn); \
\
__asm__ __volatile__( \
- LOCK " decl (%%eax) \n" \
+ LOCK_PREFIX " decl (%%eax) \n" \
" js 2f \n" \
"1: \n" \
\
@@ -83,7 +85,7 @@ do { \
typecheck_fn(fastcall void (*)(atomic_t *), fail_fn); \
\
__asm__ __volatile__( \
- LOCK " incl (%%eax) \n" \
+ LOCK_PREFIX " incl (%%eax) \n" \
" jle 2f \n" \
"1: \n" \
\
diff --git a/include/asm-i386/pgtable-2level.h b/include/asm-i386/pgtable-2level.h
index 74ef721b534d..27bde973abc7 100644
--- a/include/asm-i386/pgtable-2level.h
+++ b/include/asm-i386/pgtable-2level.h
@@ -61,4 +61,6 @@ static inline int pte_exec_kernel(pte_t pte)
#define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_low })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
+void vmalloc_sync_all(void);
+
#endif /* _I386_PGTABLE_2LEVEL_H */
diff --git a/include/asm-i386/pgtable-3level.h b/include/asm-i386/pgtable-3level.h
index f1a8b454920a..36a5aa63cbbf 100644
--- a/include/asm-i386/pgtable-3level.h
+++ b/include/asm-i386/pgtable-3level.h
@@ -152,4 +152,6 @@ static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
#define __pmd_free_tlb(tlb, x) do { } while (0)
+#define vmalloc_sync_all() ((void)0)
+
#endif /* _I386_PGTABLE_3LEVEL_H */
diff --git a/include/asm-i386/rwlock.h b/include/asm-i386/rwlock.h
index b57cc7afdf7e..94f00195d543 100644
--- a/include/asm-i386/rwlock.h
+++ b/include/asm-i386/rwlock.h
@@ -21,21 +21,23 @@
#define RW_LOCK_BIAS_STR "0x01000000"
#define __build_read_lock_ptr(rw, helper) \
- asm volatile(LOCK "subl $1,(%0)\n\t" \
- "jns 1f\n" \
- "call " helper "\n\t" \
- "1:\n" \
- ::"a" (rw) : "memory")
+ alternative_smp("lock; subl $1,(%0)\n\t" \
+ "jns 1f\n" \
+ "call " helper "\n\t" \
+ "1:\n", \
+ "subl $1,(%0)\n\t", \
+ :"a" (rw) : "memory")
#define __build_read_lock_const(rw, helper) \
- asm volatile(LOCK "subl $1,%0\n\t" \
- "jns 1f\n" \
- "pushl %%eax\n\t" \
- "leal %0,%%eax\n\t" \
- "call " helper "\n\t" \
- "popl %%eax\n\t" \
- "1:\n" \
- :"=m" (*(volatile int *)rw) : : "memory")
+ alternative_smp("lock; subl $1,%0\n\t" \
+ "jns 1f\n" \
+ "pushl %%eax\n\t" \
+ "leal %0,%%eax\n\t" \
+ "call " helper "\n\t" \
+ "popl %%eax\n\t" \
+ "1:\n", \
+ "subl $1,%0\n\t", \
+ "=m" (*(volatile int *)rw) : : "memory")
#define __build_read_lock(rw, helper) do { \
if (__builtin_constant_p(rw)) \
@@ -45,21 +47,23 @@
} while (0)
#define __build_write_lock_ptr(rw, helper) \
- asm volatile(LOCK "subl $" RW_LOCK_BIAS_STR ",(%0)\n\t" \
- "jz 1f\n" \
- "call " helper "\n\t" \
- "1:\n" \
- ::"a" (rw) : "memory")
+ alternative_smp("lock; subl $" RW_LOCK_BIAS_STR ",(%0)\n\t" \
+ "jz 1f\n" \
+ "call " helper "\n\t" \
+ "1:\n", \
+ "subl $" RW_LOCK_BIAS_STR ",(%0)\n\t", \
+ :"a" (rw) : "memory")
#define __build_write_lock_const(rw, helper) \
- asm volatile(LOCK "subl $" RW_LOCK_BIAS_STR ",%0\n\t" \
- "jz 1f\n" \
- "pushl %%eax\n\t" \
- "leal %0,%%eax\n\t" \
- "call " helper "\n\t" \
- "popl %%eax\n\t" \
- "1:\n" \
- :"=m" (*(volatile int *)rw) : : "memory")
+ alternative_smp("lock; subl $" RW_LOCK_BIAS_STR ",%0\n\t" \
+ "jz 1f\n" \
+ "pushl %%eax\n\t" \
+ "leal %0,%%eax\n\t" \
+ "call " helper "\n\t" \
+ "popl %%eax\n\t" \
+ "1:\n", \
+ "subl $" RW_LOCK_BIAS_STR ",%0\n\t", \
+ "=m" (*(volatile int *)rw) : : "memory")
#define __build_write_lock(rw, helper) do { \
if (__builtin_constant_p(rw)) \
diff --git a/include/asm-i386/semaphore.h b/include/asm-i386/semaphore.h
index 6a42b2142fd6..f7a0f310c524 100644
--- a/include/asm-i386/semaphore.h
+++ b/include/asm-i386/semaphore.h
@@ -99,7 +99,7 @@ static inline void down(struct semaphore * sem)
might_sleep();
__asm__ __volatile__(
"# atomic down operation\n\t"
- LOCK "decl %0\n\t" /* --sem->count */
+ LOCK_PREFIX "decl %0\n\t" /* --sem->count */
"js 2f\n"
"1:\n"
LOCK_SECTION_START("")
@@ -123,7 +123,7 @@ static inline int down_interruptible(struct semaphore * sem)
might_sleep();
__asm__ __volatile__(
"# atomic interruptible down operation\n\t"
- LOCK "decl %1\n\t" /* --sem->count */
+ LOCK_PREFIX "decl %1\n\t" /* --sem->count */
"js 2f\n\t"
"xorl %0,%0\n"
"1:\n"
@@ -148,7 +148,7 @@ static inline int down_trylock(struct semaphore * sem)
__asm__ __volatile__(
"# atomic interruptible down operation\n\t"
- LOCK "decl %1\n\t" /* --sem->count */
+ LOCK_PREFIX "decl %1\n\t" /* --sem->count */
"js 2f\n\t"
"xorl %0,%0\n"
"1:\n"
@@ -173,7 +173,7 @@ static inline void up(struct semaphore * sem)
{
__asm__ __volatile__(
"# atomic up operation\n\t"
- LOCK "incl %0\n\t" /* ++sem->count */
+ LOCK_PREFIX "incl %0\n\t" /* ++sem->count */
"jle 2f\n"
"1:\n"
LOCK_SECTION_START("")
diff --git a/include/asm-i386/spinlock.h b/include/asm-i386/spinlock.h
index 23604350cdf4..d76b7693cf1d 100644
--- a/include/asm-i386/spinlock.h
+++ b/include/asm-i386/spinlock.h
@@ -35,31 +35,41 @@
#define __raw_spin_lock_string_flags \
"\n1:\t" \
"lock ; decb %0\n\t" \
- "jns 4f\n\t" \
+ "jns 5f\n" \
"2:\t" \
"testl $0x200, %1\n\t" \
- "jz 3f\n\t" \
- "sti\n\t" \
+ "jz 4f\n\t" \
+ "sti\n" \
"3:\t" \
"rep;nop\n\t" \
"cmpb $0, %0\n\t" \
"jle 3b\n\t" \
"cli\n\t" \
"jmp 1b\n" \
- "4:\n\t"
+ "4:\t" \
+ "rep;nop\n\t" \
+ "cmpb $0, %0\n\t" \
+ "jg 1b\n\t" \
+ "jmp 4b\n" \
+ "5:\n\t"
+
+#define __raw_spin_lock_string_up \
+ "\n\tdecb %0"
static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
- __asm__ __volatile__(
- __raw_spin_lock_string
- :"=m" (lock->slock) : : "memory");
+ alternative_smp(
+ __raw_spin_lock_string,
+ __raw_spin_lock_string_up,
+ "=m" (lock->slock) : : "memory");
}
static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
{
- __asm__ __volatile__(
- __raw_spin_lock_string_flags
- :"=m" (lock->slock) : "r" (flags) : "memory");
+ alternative_smp(
+ __raw_spin_lock_string_flags,
+ __raw_spin_lock_string_up,
+ "=m" (lock->slock) : "r" (flags) : "memory");
}
static inline int __raw_spin_trylock(raw_spinlock_t *lock)
@@ -178,12 +188,12 @@ static inline int __raw_write_trylock(raw_rwlock_t *lock)
static inline void __raw_read_unlock(raw_rwlock_t *rw)
{
- asm volatile("lock ; incl %0" :"=m" (rw->lock) : : "memory");
+ asm volatile(LOCK_PREFIX "incl %0" :"=m" (rw->lock) : : "memory");
}
static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
- asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ", %0"
+ asm volatile(LOCK_PREFIX "addl $" RW_LOCK_BIAS_STR ", %0"
: "=m" (rw->lock) : : "memory");
}
diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h
index 399145a247f2..d0d8d7448d88 100644
--- a/include/asm-i386/system.h
+++ b/include/asm-i386/system.h
@@ -352,67 +352,6 @@ static inline unsigned long long __cmpxchg64(volatile void *ptr, unsigned long l
#endif
-#ifdef __KERNEL__
-struct alt_instr {
- __u8 *instr; /* original instruction */
- __u8 *replacement;
- __u8 cpuid; /* cpuid bit set for replacement */
- __u8 instrlen; /* length of original instruction */
- __u8 replacementlen; /* length of new instruction, <= instrlen */
- __u8 pad;
-};
-#endif
-
-/*
- * Alternative instructions for different CPU types or capabilities.
- *
- * This allows to use optimized instructions even on generic binary
- * kernels.
- *
- * length of oldinstr must be longer or equal the length of newinstr
- * It can be padded with nops as needed.
- *
- * For non barrier like inlines please define new variants
- * without volatile and memory clobber.
- */
-#define alternative(oldinstr, newinstr, feature) \
- asm volatile ("661:\n\t" oldinstr "\n662:\n" \
- ".section .altinstructions,\"a\"\n" \
- " .align 4\n" \
- " .long 661b\n" /* label */ \
- " .long 663f\n" /* new instruction */ \
- " .byte %c0\n" /* feature bit */ \
- " .byte 662b-661b\n" /* sourcelen */ \
- " .byte 664f-663f\n" /* replacementlen */ \
- ".previous\n" \
- ".section .altinstr_replacement,\"ax\"\n" \
- "663:\n\t" newinstr "\n664:\n" /* replacement */ \
- ".previous" :: "i" (feature) : "memory")
-
-/*
- * Alternative inline assembly with input.
- *
- * Pecularities:
- * No memory clobber here.
- * Argument numbers start with 1.
- * Best is to use constraints that are fixed size (like (%1) ... "r")
- * If you use variable sized constraints like "m" or "g" in the
- * replacement maake sure to pad to the worst case length.
- */
-#define alternative_input(oldinstr, newinstr, feature, input...) \
- asm volatile ("661:\n\t" oldinstr "\n662:\n" \
- ".section .altinstructions,\"a\"\n" \
- " .align 4\n" \
- " .long 661b\n" /* label */ \
- " .long 663f\n" /* new instruction */ \
- " .byte %c0\n" /* feature bit */ \
- " .byte 662b-661b\n" /* sourcelen */ \
- " .byte 664f-663f\n" /* replacementlen */ \
- ".previous\n" \
- ".section .altinstr_replacement,\"ax\"\n" \
- "663:\n\t" newinstr "\n664:\n" /* replacement */ \
- ".previous" :: "i" (feature), ##input)
-
/*
* Force strict CPU ordering.
* And yes, this is required on UP too when we're talking
@@ -558,5 +497,6 @@ static inline void sched_cacheflush(void)
}
extern unsigned long arch_align_stack(unsigned long sp);
+extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
#endif
diff --git a/include/asm-i386/uaccess.h b/include/asm-i386/uaccess.h
index 3f1337c34208..371457b1ceb6 100644
--- a/include/asm-i386/uaccess.h
+++ b/include/asm-i386/uaccess.h
@@ -197,13 +197,15 @@ extern void __put_user_8(void);
#define put_user(x,ptr) \
({ int __ret_pu; \
+ __typeof__(*(ptr)) __pu_val; \
__chk_user_ptr(ptr); \
+ __pu_val = x; \
switch(sizeof(*(ptr))) { \
- case 1: __put_user_1(x, ptr); break; \
- case 2: __put_user_2(x, ptr); break; \
- case 4: __put_user_4(x, ptr); break; \
- case 8: __put_user_8(x, ptr); break; \
- default:__put_user_X(x, ptr); break; \
+ case 1: __put_user_1(__pu_val, ptr); break; \
+ case 2: __put_user_2(__pu_val, ptr); break; \
+ case 4: __put_user_4(__pu_val, ptr); break; \
+ case 8: __put_user_8(__pu_val, ptr); break; \
+ default:__put_user_X(__pu_val, ptr); break; \
} \
__ret_pu; \
})
diff --git a/include/asm-i386/unistd.h b/include/asm-i386/unistd.h
index dc81a55dd94d..d8afd0e3b81a 100644
--- a/include/asm-i386/unistd.h
+++ b/include/asm-i386/unistd.h
@@ -347,9 +347,9 @@ __syscall_return(type,__res); \
type name(type1 arg1) \
{ \
long __res; \
-__asm__ volatile ("int $0x80" \
+__asm__ volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx" \
: "=a" (__res) \
- : "0" (__NR_##name),"b" ((long)(arg1)) : "memory"); \
+ : "0" (__NR_##name),"ri" ((long)(arg1)) : "memory"); \
__syscall_return(type,__res); \
}
@@ -357,9 +357,10 @@ __syscall_return(type,__res); \
type name(type1 arg1,type2 arg2) \
{ \
long __res; \
-__asm__ volatile ("int $0x80" \
+__asm__ volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx" \
: "=a" (__res) \
- : "0" (__NR_##name),"b" ((long)(arg1)),"c" ((long)(arg2)) : "memory"); \
+ : "0" (__NR_##name),"ri" ((long)(arg1)),"c" ((long)(arg2)) \
+ : "memory"); \
__syscall_return(type,__res); \
}
@@ -367,9 +368,9 @@ __syscall_return(type,__res); \
type name(type1 arg1,type2 arg2,type3 arg3) \
{ \
long __res; \
-__asm__ volatile ("int $0x80" \
+__asm__ volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx" \
: "=a" (__res) \
- : "0" (__NR_##name),"b" ((long)(arg1)),"c" ((long)(arg2)), \
+ : "0" (__NR_##name),"ri" ((long)(arg1)),"c" ((long)(arg2)), \
"d" ((long)(arg3)) : "memory"); \
__syscall_return(type,__res); \
}
@@ -378,9 +379,9 @@ __syscall_return(type,__res); \
type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
{ \
long __res; \
-__asm__ volatile ("int $0x80" \
+__asm__ volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx" \
: "=a" (__res) \
- : "0" (__NR_##name),"b" ((long)(arg1)),"c" ((long)(arg2)), \
+ : "0" (__NR_##name),"ri" ((long)(arg1)),"c" ((long)(arg2)), \
"d" ((long)(arg3)),"S" ((long)(arg4)) : "memory"); \
__syscall_return(type,__res); \
}
@@ -390,10 +391,12 @@ __syscall_return(type,__res); \
type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
{ \
long __res; \
-__asm__ volatile ("int $0x80" \
+__asm__ volatile ("push %%ebx ; movl %2,%%ebx ; movl %1,%%eax ; " \
+ "int $0x80 ; pop %%ebx" \
: "=a" (__res) \
- : "0" (__NR_##name),"b" ((long)(arg1)),"c" ((long)(arg2)), \
- "d" ((long)(arg3)),"S" ((long)(arg4)),"D" ((long)(arg5)) : "memory"); \
+ : "i" (__NR_##name),"ri" ((long)(arg1)),"c" ((long)(arg2)), \
+ "d" ((long)(arg3)),"S" ((long)(arg4)),"D" ((long)(arg5)) \
+ : "memory"); \
__syscall_return(type,__res); \
}
@@ -402,11 +405,14 @@ __syscall_return(type,__res); \
type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,type6 arg6) \
{ \
long __res; \
-__asm__ volatile ("push %%ebp ; movl %%eax,%%ebp ; movl %1,%%eax ; int $0x80 ; pop %%ebp" \
+ struct { long __a1; long __a6; } __s = { (long)arg1, (long)arg6 }; \
+__asm__ volatile ("push %%ebp ; push %%ebx ; movl 4(%2),%%ebp ; " \
+ "movl 0(%2),%%ebx ; movl %1,%%eax ; int $0x80 ; " \
+ "pop %%ebx ; pop %%ebp" \
: "=a" (__res) \
- : "i" (__NR_##name),"b" ((long)(arg1)),"c" ((long)(arg2)), \
- "d" ((long)(arg3)),"S" ((long)(arg4)),"D" ((long)(arg5)), \
- "0" ((long)(arg6)) : "memory"); \
+ : "i" (__NR_##name),"0" ((long)(&__s)),"c" ((long)(arg2)), \
+ "d" ((long)(arg3)),"S" ((long)(arg4)),"D" ((long)(arg5)) \
+ : "memory"); \
__syscall_return(type,__res); \
}
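
For reference, a sketch of how the patched _syscallN macros are consumed (illustrative only; the expansion requires a real __NR_<name> constant, here __NR_close). Loading the first argument via the push/mov/pop sequence rather than a "b" register constraint keeps the stubs usable where the compiler reserves %ebx, e.g. position-independent code.

#include <errno.h>		/* __syscall_return() sets errno on failure */
#include <linux/unistd.h>

/* Expands to: int close(int fd) { ... "int $0x80" ... } with fd moved
 * into %ebx inside the stub itself. */
_syscall1(int, close, int, fd)
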
diff --git a/include/asm-ia64/atomic.h b/include/asm-ia64/atomic.h
index d3e0dfa99e1f..569ec7574baf 100644
--- a/include/asm-ia64/atomic.h
+++ b/include/asm-ia64/atomic.h
@@ -95,8 +95,14 @@ ia64_atomic64_sub (__s64 i, atomic64_t *v)
({ \
int c, old; \
c = atomic_read(v); \
- while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
+ for (;;) { \
+ if (unlikely(c == (u))) \
+ break; \
+ old = atomic_cmpxchg((v), c, c + (a)); \
+ if (likely(old == c)) \
+ break; \
c = old; \
+ } \
c != (u); \
})
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
diff --git a/include/asm-ia64/cache.h b/include/asm-ia64/cache.h
index 40dd25195d65..f0a104db8f20 100644
--- a/include/asm-ia64/cache.h
+++ b/include/asm-ia64/cache.h
@@ -25,4 +25,6 @@
# define SMP_CACHE_BYTES (1 << 3)
#endif
+#define __read_mostly __attribute__((__section__(".data.read_mostly")))
+
#endif /* _ASM_IA64_CACHE_H */
diff --git a/include/asm-m68k/atomic.h b/include/asm-m68k/atomic.h
index 862e497c2645..732d696d31a6 100644
--- a/include/asm-m68k/atomic.h
+++ b/include/asm-m68k/atomic.h
@@ -175,8 +175,14 @@ static inline void atomic_set_mask(unsigned long mask, unsigned long *v)
({ \
int c, old; \
c = atomic_read(v); \
- while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
+ for (;;) { \
+ if (unlikely(c == (u))) \
+ break; \
+ old = atomic_cmpxchg((v), c, c + (a)); \
+ if (likely(old == c)) \
+ break; \
c = old; \
+ } \
c != (u); \
})
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
diff --git a/include/asm-parisc/cache.h b/include/asm-parisc/cache.h
index 93f179f13ce8..ae50f8e12eed 100644
--- a/include/asm-parisc/cache.h
+++ b/include/asm-parisc/cache.h
@@ -29,6 +29,8 @@
#define SMP_CACHE_BYTES L1_CACHE_BYTES
+#define __read_mostly __attribute__((__section__(".data.read_mostly")))
+
extern void flush_data_cache_local(void *); /* flushes local data-cache only */
extern void flush_instruction_cache_local(void *); /* flushes local code-cache only */
#ifdef CONFIG_SMP
diff --git a/include/asm-powerpc/atomic.h b/include/asm-powerpc/atomic.h
index 147a38dcc766..bb3c0ab7e667 100644
--- a/include/asm-powerpc/atomic.h
+++ b/include/asm-powerpc/atomic.h
@@ -8,6 +8,7 @@
typedef struct { volatile int counter; } atomic_t;
#ifdef __KERNEL__
+#include <linux/compiler.h>
#include <asm/synch.h>
#include <asm/asm-compat.h>
@@ -176,20 +177,29 @@ static __inline__ int atomic_dec_return(atomic_t *v)
* Atomically adds @a to @v, so long as it was not @u.
* Returns non-zero if @v was not @u, and zero otherwise.
*/
-#define atomic_add_unless(v, a, u) \
-({ \
- int c, old; \
- c = atomic_read(v); \
- for (;;) { \
- if (unlikely(c == (u))) \
- break; \
- old = atomic_cmpxchg((v), c, c + (a)); \
- if (likely(old == c)) \
- break; \
- c = old; \
- } \
- c != (u); \
-})
+static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+{
+ int t;
+
+ __asm__ __volatile__ (
+ LWSYNC_ON_SMP
+"1: lwarx %0,0,%1 # atomic_add_unless\n\
+ cmpw 0,%0,%3 \n\
+ beq- 2f \n\
+ add %0,%2,%0 \n"
+ PPC405_ERR77(0,%2)
+" stwcx. %0,0,%1 \n\
+ bne- 1b \n"
+ ISYNC_ON_SMP
+" subf %0,%2,%0 \n\
+2:"
+ : "=&r" (t)
+ : "r" (&v->counter), "r" (a), "r" (u)
+ : "cc", "memory");
+
+ return t != u;
+}
+
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
#define atomic_sub_and_test(a, v) (atomic_sub_return((a), (v)) == 0)
diff --git a/include/asm-powerpc/cputable.h b/include/asm-powerpc/cputable.h
index 5638518968c3..fe45f6f3a4be 100644
--- a/include/asm-powerpc/cputable.h
+++ b/include/asm-powerpc/cputable.h
@@ -102,38 +102,40 @@ extern void do_cpu_ftr_fixups(unsigned long offset);
#define CPU_FTR_NEED_COHERENT ASM_CONST(0x0000000000020000)
#define CPU_FTR_NO_BTIC ASM_CONST(0x0000000000040000)
#define CPU_FTR_BIG_PHYS ASM_CONST(0x0000000000080000)
-#define CPU_FTR_NODSISRALIGN ASM_CONST(0x0000000000100000)
+#define CPU_FTR_NODSISRALIGN ASM_CONST(0x0000000000100000)
#ifdef __powerpc64__
/* Add the 64b processor unique features in the top half of the word */
-#define CPU_FTR_SLB ASM_CONST(0x0000000100000000)
-#define CPU_FTR_16M_PAGE ASM_CONST(0x0000000200000000)
-#define CPU_FTR_TLBIEL ASM_CONST(0x0000000400000000)
-#define CPU_FTR_NOEXECUTE ASM_CONST(0x0000000800000000)
-#define CPU_FTR_IABR ASM_CONST(0x0000002000000000)
-#define CPU_FTR_MMCRA ASM_CONST(0x0000004000000000)
+#define CPU_FTR_SLB ASM_CONST(0x0000000100000000)
+#define CPU_FTR_16M_PAGE ASM_CONST(0x0000000200000000)
+#define CPU_FTR_TLBIEL ASM_CONST(0x0000000400000000)
+#define CPU_FTR_NOEXECUTE ASM_CONST(0x0000000800000000)
+#define CPU_FTR_IABR ASM_CONST(0x0000002000000000)
+#define CPU_FTR_MMCRA ASM_CONST(0x0000004000000000)
#define CPU_FTR_CTRL ASM_CONST(0x0000008000000000)
-#define CPU_FTR_SMT ASM_CONST(0x0000010000000000)
-#define CPU_FTR_COHERENT_ICACHE ASM_CONST(0x0000020000000000)
+#define CPU_FTR_SMT ASM_CONST(0x0000010000000000)
+#define CPU_FTR_COHERENT_ICACHE ASM_CONST(0x0000020000000000)
#define CPU_FTR_LOCKLESS_TLBIE ASM_CONST(0x0000040000000000)
#define CPU_FTR_MMCRA_SIHV ASM_CONST(0x0000080000000000)
#define CPU_FTR_CI_LARGE_PAGE ASM_CONST(0x0000100000000000)
#define CPU_FTR_PAUSE_ZERO ASM_CONST(0x0000200000000000)
+#define CPU_FTR_PURR ASM_CONST(0x0000400000000000)
#else
/* ensure on 32b processors the flags are available for compiling but
* don't do anything */
-#define CPU_FTR_SLB ASM_CONST(0x0)
-#define CPU_FTR_16M_PAGE ASM_CONST(0x0)
-#define CPU_FTR_TLBIEL ASM_CONST(0x0)
-#define CPU_FTR_NOEXECUTE ASM_CONST(0x0)
-#define CPU_FTR_IABR ASM_CONST(0x0)
-#define CPU_FTR_MMCRA ASM_CONST(0x0)
+#define CPU_FTR_SLB ASM_CONST(0x0)
+#define CPU_FTR_16M_PAGE ASM_CONST(0x0)
+#define CPU_FTR_TLBIEL ASM_CONST(0x0)
+#define CPU_FTR_NOEXECUTE ASM_CONST(0x0)
+#define CPU_FTR_IABR ASM_CONST(0x0)
+#define CPU_FTR_MMCRA ASM_CONST(0x0)
#define CPU_FTR_CTRL ASM_CONST(0x0)
-#define CPU_FTR_SMT ASM_CONST(0x0)
-#define CPU_FTR_COHERENT_ICACHE ASM_CONST(0x0)
+#define CPU_FTR_SMT ASM_CONST(0x0)
+#define CPU_FTR_COHERENT_ICACHE ASM_CONST(0x0)
#define CPU_FTR_LOCKLESS_TLBIE ASM_CONST(0x0)
#define CPU_FTR_MMCRA_SIHV ASM_CONST(0x0)
#define CPU_FTR_CI_LARGE_PAGE ASM_CONST(0x0)
+#define CPU_FTR_PURR ASM_CONST(0x0)
#endif
#ifndef __ASSEMBLY__
@@ -318,7 +320,7 @@ enum {
CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 |
CPU_FTR_MMCRA | CPU_FTR_SMT |
CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE |
- CPU_FTR_MMCRA_SIHV,
+ CPU_FTR_MMCRA_SIHV | CPU_FTR_PURR,
CPU_FTRS_CELL = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 |
CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT |
diff --git a/include/asm-powerpc/cputime.h b/include/asm-powerpc/cputime.h
index 6d68ad7e0ea3..a21185d47883 100644
--- a/include/asm-powerpc/cputime.h
+++ b/include/asm-powerpc/cputime.h
@@ -1 +1,203 @@
+/*
+ * Definitions for measuring cputime on powerpc machines.
+ *
+ * Copyright (C) 2006 Paul Mackerras, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * If we have CONFIG_VIRT_CPU_ACCOUNTING, we measure cpu time in
+ * the same units as the timebase. Otherwise we measure cpu time
+ * in jiffies using the generic definitions.
+ */
+
+#ifndef __POWERPC_CPUTIME_H
+#define __POWERPC_CPUTIME_H
+
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING
#include <asm-generic/cputime.h>
+#else
+
+#include <linux/types.h>
+#include <linux/time.h>
+#include <asm/div64.h>
+#include <asm/time.h>
+#include <asm/param.h>
+
+typedef u64 cputime_t;
+typedef u64 cputime64_t;
+
+#define cputime_zero ((cputime_t)0)
+#define cputime_max ((~((cputime_t)0) >> 1) - 1)
+#define cputime_add(__a, __b) ((__a) + (__b))
+#define cputime_sub(__a, __b) ((__a) - (__b))
+#define cputime_div(__a, __n) ((__a) / (__n))
+#define cputime_halve(__a) ((__a) >> 1)
+#define cputime_eq(__a, __b) ((__a) == (__b))
+#define cputime_gt(__a, __b) ((__a) > (__b))
+#define cputime_ge(__a, __b) ((__a) >= (__b))
+#define cputime_lt(__a, __b) ((__a) < (__b))
+#define cputime_le(__a, __b) ((__a) <= (__b))
+
+#define cputime64_zero ((cputime64_t)0)
+#define cputime64_add(__a, __b) ((__a) + (__b))
+#define cputime_to_cputime64(__ct) (__ct)
+
+#ifdef __KERNEL__
+
+/*
+ * Convert cputime <-> jiffies
+ */
+extern u64 __cputime_jiffies_factor;
+
+static inline unsigned long cputime_to_jiffies(const cputime_t ct)
+{
+ return mulhdu(ct, __cputime_jiffies_factor);
+}
+
+static inline cputime_t jiffies_to_cputime(const unsigned long jif)
+{
+ cputime_t ct;
+ unsigned long sec;
+
+ /* have to be a little careful about overflow */
+ ct = jif % HZ;
+ sec = jif / HZ;
+ if (ct) {
+ ct *= tb_ticks_per_sec;
+ do_div(ct, HZ);
+ }
+ if (sec)
+ ct += (cputime_t) sec * tb_ticks_per_sec;
+ return ct;
+}
+
+static inline u64 cputime64_to_jiffies64(const cputime_t ct)
+{
+ return mulhdu(ct, __cputime_jiffies_factor);
+}
+
+/*
+ * Convert cputime <-> milliseconds
+ */
+extern u64 __cputime_msec_factor;
+
+static inline unsigned long cputime_to_msecs(const cputime_t ct)
+{
+ return mulhdu(ct, __cputime_msec_factor);
+}
+
+static inline cputime_t msecs_to_cputime(const unsigned long ms)
+{
+ cputime_t ct;
+ unsigned long sec;
+
+ /* have to be a little careful about overflow */
+ ct = ms % 1000;
+ sec = ms / 1000;
+ if (ct) {
+ ct *= tb_ticks_per_sec;
+ do_div(ct, 1000);
+ }
+ if (sec)
+ ct += (cputime_t) sec * tb_ticks_per_sec;
+ return ct;
+}
+
+/*
+ * Convert cputime <-> seconds
+ */
+extern u64 __cputime_sec_factor;
+
+static inline unsigned long cputime_to_secs(const cputime_t ct)
+{
+ return mulhdu(ct, __cputime_sec_factor);
+}
+
+static inline cputime_t secs_to_cputime(const unsigned long sec)
+{
+ return (cputime_t) sec * tb_ticks_per_sec;
+}
+
+/*
+ * Convert cputime <-> timespec
+ */
+static inline void cputime_to_timespec(const cputime_t ct, struct timespec *p)
+{
+ u64 x = ct;
+ unsigned int frac;
+
+ frac = do_div(x, tb_ticks_per_sec);
+ p->tv_sec = x;
+ x = (u64) frac * 1000000000;
+ do_div(x, tb_ticks_per_sec);
+ p->tv_nsec = x;
+}
+
+static inline cputime_t timespec_to_cputime(const struct timespec *p)
+{
+ cputime_t ct;
+
+ ct = (u64) p->tv_nsec * tb_ticks_per_sec;
+ do_div(ct, 1000000000);
+ return ct + (u64) p->tv_sec * tb_ticks_per_sec;
+}
+
+/*
+ * Convert cputime <-> timeval
+ */
+static inline void cputime_to_timeval(const cputime_t ct, struct timeval *p)
+{
+ u64 x = ct;
+ unsigned int frac;
+
+ frac = do_div(x, tb_ticks_per_sec);
+ p->tv_sec = x;
+ x = (u64) frac * 1000000;
+ do_div(x, tb_ticks_per_sec);
+ p->tv_usec = x;
+}
+
+static inline cputime_t timeval_to_cputime(const struct timeval *p)
+{
+ cputime_t ct;
+
+ ct = (u64) p->tv_usec * tb_ticks_per_sec;
+ do_div(ct, 1000000);
+ return ct + (u64) p->tv_sec * tb_ticks_per_sec;
+}
+
+/*
+ * Convert cputime <-> clock_t (units of 1/USER_HZ seconds)
+ */
+extern u64 __cputime_clockt_factor;
+
+static inline unsigned long cputime_to_clock_t(const cputime_t ct)
+{
+ return mulhdu(ct, __cputime_clockt_factor);
+}
+
+static inline cputime_t clock_t_to_cputime(const unsigned long clk)
+{
+ cputime_t ct;
+ unsigned long sec;
+
+ /* have to be a little careful about overflow */
+ ct = clk % USER_HZ;
+ sec = clk / USER_HZ;
+ if (ct) {
+ ct *= tb_ticks_per_sec;
+ do_div(ct, USER_HZ);
+ }
+ if (sec)
+ ct += (cputime_t) sec * tb_ticks_per_sec;
+ return ct;
+}
+
+#define cputime64_to_clock_t(ct) cputime_to_clock_t((cputime_t)(ct))
+
+#endif /* __KERNEL__ */
+#endif /* CONFIG_VIRT_CPU_ACCOUNTING */
+#endif /* __POWERPC_CPUTIME_H */
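
The jiffies, millisecond and clock_t conversions above all use the same overflow-careful pattern: split the value into whole seconds plus a sub-second remainder, scale each by tb_ticks_per_sec separately, then recombine. Restated as a stand-alone helper (illustrative only):

#include <linux/types.h>
#include <asm/div64.h>
#include <asm/param.h>

/* Same arithmetic as jiffies_to_cputime() above; the remainder term is
 * below HZ, so multiplying it by the timebase rate cannot overflow 64
 * bits for any realistic timebase frequency. */
static inline u64 example_jiffies_to_tb(unsigned long jif, u64 tb_per_sec)
{
	u64 ct = jif % HZ;		/* sub-second remainder */
	unsigned long sec = jif / HZ;	/* whole seconds */

	if (ct) {
		ct *= tb_per_sec;
		do_div(ct, HZ);
	}
	if (sec)
		ct += (u64)sec * tb_per_sec;
	return ct;
}
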
diff --git a/include/asm-powerpc/firmware.h b/include/asm-powerpc/firmware.h
index f804b34cf06a..ce3788224ed0 100644
--- a/include/asm-powerpc/firmware.h
+++ b/include/asm-powerpc/firmware.h
@@ -41,6 +41,7 @@
#define FW_FEATURE_MULTITCE (1UL<<19)
#define FW_FEATURE_SPLPAR (1UL<<20)
#define FW_FEATURE_ISERIES (1UL<<21)
+#define FW_FEATURE_LPAR (1UL<<22)
enum {
#ifdef CONFIG_PPC64
@@ -51,10 +52,10 @@ enum {
FW_FEATURE_MIGRATE | FW_FEATURE_PERFMON | FW_FEATURE_CRQ |
FW_FEATURE_VIO | FW_FEATURE_RDMA | FW_FEATURE_LLAN |
FW_FEATURE_BULK | FW_FEATURE_XDABR | FW_FEATURE_MULTITCE |
- FW_FEATURE_SPLPAR,
+ FW_FEATURE_SPLPAR | FW_FEATURE_LPAR,
FW_FEATURE_PSERIES_ALWAYS = 0,
- FW_FEATURE_ISERIES_POSSIBLE = FW_FEATURE_ISERIES,
- FW_FEATURE_ISERIES_ALWAYS = FW_FEATURE_ISERIES,
+ FW_FEATURE_ISERIES_POSSIBLE = FW_FEATURE_ISERIES | FW_FEATURE_LPAR,
+ FW_FEATURE_ISERIES_ALWAYS = FW_FEATURE_ISERIES | FW_FEATURE_LPAR,
FW_FEATURE_POSSIBLE =
#ifdef CONFIG_PPC_PSERIES
FW_FEATURE_PSERIES_POSSIBLE |
@@ -89,15 +90,6 @@ static inline unsigned long firmware_has_feature(unsigned long feature)
(FW_FEATURE_POSSIBLE & ppc64_firmware_features & feature);
}
-#ifdef CONFIG_PPC_PSERIES
-typedef struct {
- unsigned long val;
- char * name;
-} firmware_feature_t;
-
-extern firmware_feature_t firmware_features_table[];
-#endif
-
extern void system_reset_fwnmi(void);
extern void machine_check_fwnmi(void);
diff --git a/include/asm-powerpc/irq.h b/include/asm-powerpc/irq.h
index 8eb7e857ec4c..51f87d9993b6 100644
--- a/include/asm-powerpc/irq.h
+++ b/include/asm-powerpc/irq.h
@@ -479,6 +479,10 @@ extern int distribute_irqs;
struct irqaction;
struct pt_regs;
+#define __ARCH_HAS_DO_SOFTIRQ
+
+extern void __do_softirq(void);
+
#ifdef CONFIG_IRQSTACKS
/*
* Per-cpu stacks for handling hard and soft interrupts.
@@ -491,8 +495,6 @@ extern void call_do_softirq(struct thread_info *tp);
extern int call___do_IRQ(int irq, struct pt_regs *regs,
struct thread_info *tp);
-#define __ARCH_HAS_DO_SOFTIRQ
-
#else
#define irq_ctx_init()
diff --git a/include/asm-powerpc/iseries/mf.h b/include/asm-powerpc/iseries/mf.h
index 857e5202fc78..eb851a9c9e5c 100644
--- a/include/asm-powerpc/iseries/mf.h
+++ b/include/asm-powerpc/iseries/mf.h
@@ -41,16 +41,11 @@ extern void mf_deallocate_lp_events(HvLpIndex targetLp, HvLpEvent_Type type,
unsigned count, MFCompleteHandler hdlr, void *userToken);
extern void mf_power_off(void);
-extern void mf_reboot(void);
+extern void mf_reboot(char *cmd);
extern void mf_display_src(u32 word);
extern void mf_display_progress(u16 value);
-extern void mf_clear_src(void);
extern void mf_init(void);
-extern int mf_get_rtc(struct rtc_time *tm);
-extern int mf_get_boot_rtc(struct rtc_time *tm);
-extern int mf_set_rtc(struct rtc_time *tm);
-
#endif /* _ASM_POWERPC_ISERIES_MF_H */
diff --git a/include/asm-powerpc/lmb.h b/include/asm-powerpc/lmb.h
index d3546c4c9f46..0c5880f70225 100644
--- a/include/asm-powerpc/lmb.h
+++ b/include/asm-powerpc/lmb.h
@@ -19,8 +19,6 @@
#define MAX_LMB_REGIONS 128
-#define LMB_ALLOC_ANYWHERE 0
-
struct lmb_property {
unsigned long base;
unsigned long size;
@@ -43,20 +41,19 @@ extern struct lmb lmb;
extern void __init lmb_init(void);
extern void __init lmb_analyze(void);
-extern long __init lmb_add(unsigned long, unsigned long);
-extern long __init lmb_reserve(unsigned long, unsigned long);
-extern unsigned long __init lmb_alloc(unsigned long, unsigned long);
-extern unsigned long __init lmb_alloc_base(unsigned long, unsigned long,
- unsigned long);
+extern long __init lmb_add(unsigned long base, unsigned long size);
+extern long __init lmb_reserve(unsigned long base, unsigned long size);
+extern unsigned long __init lmb_alloc(unsigned long size, unsigned long align);
+extern unsigned long __init lmb_alloc_base(unsigned long size,
+ unsigned long align, unsigned long max_addr);
+extern unsigned long __init __lmb_alloc_base(unsigned long size,
+ unsigned long align, unsigned long max_addr);
extern unsigned long __init lmb_phys_mem_size(void);
extern unsigned long __init lmb_end_of_DRAM(void);
-extern unsigned long __init lmb_abs_to_phys(unsigned long);
-extern void __init lmb_enforce_memory_limit(unsigned long);
+extern void __init lmb_enforce_memory_limit(unsigned long memory_limit);
extern void lmb_dump_all(void);
-extern unsigned long io_hole_start;
-
static inline unsigned long
lmb_size_bytes(struct lmb_region *type, unsigned long region_nr)
{
diff --git a/include/asm-powerpc/mmu.h b/include/asm-powerpc/mmu.h
index b0b9a3f8cdc2..31f721994bd8 100644
--- a/include/asm-powerpc/mmu.h
+++ b/include/asm-powerpc/mmu.h
@@ -236,7 +236,6 @@ extern void htab_initialize_secondary(void);
extern void hpte_init_native(void);
extern void hpte_init_lpar(void);
extern void hpte_init_iSeries(void);
-extern void mm_init_ppc64(void);
extern long pSeries_lpar_hpte_insert(unsigned long hpte_group,
unsigned long va, unsigned long prpn,
diff --git a/include/asm-powerpc/paca.h b/include/asm-powerpc/paca.h
index c9add8f1ad94..4465b95ebef0 100644
--- a/include/asm-powerpc/paca.h
+++ b/include/asm-powerpc/paca.h
@@ -54,7 +54,7 @@ struct paca_struct {
#endif /* CONFIG_PPC_ISERIES */
/*
- * MAGIC: the spinlock functions in arch/ppc64/lib/locks.c
+ * MAGIC: the spinlock functions in arch/powerpc/lib/locks.c
* load lock_token and paca_index with a single lwz
* instruction. They must travel together and be properly
* aligned.
@@ -96,6 +96,11 @@ struct paca_struct {
u64 saved_r1; /* r1 save for RTAS calls */
u64 saved_msr; /* MSR saved here by enter_rtas */
u8 proc_enabled; /* irq soft-enable flag */
+
+ /* Stuff for accurate time accounting */
+ u64 user_time; /* accumulated usermode TB ticks */
+ u64 system_time; /* accumulated system TB ticks */
+ u64 startpurr; /* PURR/TB value snapshot */
};
extern struct paca_struct paca[];
diff --git a/include/asm-powerpc/percpu.h b/include/asm-powerpc/percpu.h
index e31922c50e53..464301cd0d03 100644
--- a/include/asm-powerpc/percpu.h
+++ b/include/asm-powerpc/percpu.h
@@ -27,10 +27,9 @@
#define percpu_modcopy(pcpudst, src, size) \
do { \
unsigned int __i; \
- for (__i = 0; __i < NR_CPUS; __i++) \
- if (cpu_possible(__i)) \
- memcpy((pcpudst)+__per_cpu_offset(__i), \
- (src), (size)); \
+ for_each_cpu(__i) \
+ memcpy((pcpudst)+__per_cpu_offset(__i), \
+ (src), (size)); \
} while (0)
extern void setup_per_cpu_areas(void);
diff --git a/include/asm-powerpc/pgtable-4k.h b/include/asm-powerpc/pgtable-4k.h
index 80a7832d2721..b2e18629932a 100644
--- a/include/asm-powerpc/pgtable-4k.h
+++ b/include/asm-powerpc/pgtable-4k.h
@@ -62,9 +62,14 @@
/* shift to put page number into pte */
#define PTE_RPN_SHIFT (17)
-#define __real_pte(e,p) ((real_pte_t)(e))
-#define __rpte_to_pte(r) (r)
-#define __rpte_to_hidx(r,index) (pte_val((r)) >> 12)
+#ifdef STRICT_MM_TYPECHECKS
+#define __real_pte(e,p) ((real_pte_t){(e)})
+#define __rpte_to_pte(r) ((r).pte)
+#else
+#define __real_pte(e,p) (e)
+#define __rpte_to_pte(r) (__pte(r))
+#endif
+#define __rpte_to_hidx(r,index) (pte_val(__rpte_to_pte(r)) >> 12)
#define pte_iterate_hashed_subpages(rpte, psize, va, index, shift) \
do { \
diff --git a/include/asm-powerpc/pgtable.h b/include/asm-powerpc/pgtable.h
index 185ee15963a1..e9f1f4627e6b 100644
--- a/include/asm-powerpc/pgtable.h
+++ b/include/asm-powerpc/pgtable.h
@@ -188,9 +188,13 @@ static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
#define pte_pfn(x) ((unsigned long)((pte_val(x)>>PTE_RPN_SHIFT)))
#define pte_page(x) pfn_to_page(pte_pfn(x))
+#define PMD_BAD_BITS (PTE_TABLE_SIZE-1)
+#define PUD_BAD_BITS (PMD_TABLE_SIZE-1)
+
#define pmd_set(pmdp, pmdval) (pmd_val(*(pmdp)) = (pmdval))
#define pmd_none(pmd) (!pmd_val(pmd))
-#define pmd_bad(pmd) (pmd_val(pmd) == 0)
+#define pmd_bad(pmd) (!is_kernel_addr(pmd_val(pmd)) \
+ || (pmd_val(pmd) & PMD_BAD_BITS))
#define pmd_present(pmd) (pmd_val(pmd) != 0)
#define pmd_clear(pmdp) (pmd_val(*(pmdp)) = 0)
#define pmd_page_kernel(pmd) (pmd_val(pmd) & ~PMD_MASKED_BITS)
@@ -198,7 +202,8 @@ static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
#define pud_set(pudp, pudval) (pud_val(*(pudp)) = (pudval))
#define pud_none(pud) (!pud_val(pud))
-#define pud_bad(pud) ((pud_val(pud)) == 0)
+#define pud_bad(pud) (!is_kernel_addr(pud_val(pud)) \
+ || (pud_val(pud) & PUD_BAD_BITS))
#define pud_present(pud) (pud_val(pud) != 0)
#define pud_clear(pudp) (pud_val(*(pudp)) = 0)
#define pud_page(pud) (pud_val(pud) & ~PUD_MASKED_BITS)
diff --git a/include/asm-powerpc/ppc_asm.h b/include/asm-powerpc/ppc_asm.h
index ab8688d39024..dd1c0a913d5f 100644
--- a/include/asm-powerpc/ppc_asm.h
+++ b/include/asm-powerpc/ppc_asm.h
@@ -15,6 +15,48 @@
#define SZL (BITS_PER_LONG/8)
/*
+ * Stuff for accurate CPU time accounting.
+ * These macros handle transitions between user and system state
+ * in exception entry and exit and accumulate time to the
+ * user_time and system_time fields in the paca.
+ */
+
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING
+#define ACCOUNT_CPU_USER_ENTRY(ra, rb)
+#define ACCOUNT_CPU_USER_EXIT(ra, rb)
+#else
+#define ACCOUNT_CPU_USER_ENTRY(ra, rb) \
+ beq 2f; /* if from kernel mode */ \
+BEGIN_FTR_SECTION; \
+ mfspr ra,SPRN_PURR; /* get processor util. reg */ \
+END_FTR_SECTION_IFSET(CPU_FTR_PURR); \
+BEGIN_FTR_SECTION; \
+ mftb ra; /* or get TB if no PURR */ \
+END_FTR_SECTION_IFCLR(CPU_FTR_PURR); \
+ ld rb,PACA_STARTPURR(r13); \
+ std ra,PACA_STARTPURR(r13); \
+ subf rb,rb,ra; /* subtract start value */ \
+ ld ra,PACA_USER_TIME(r13); \
+ add ra,ra,rb; /* add on to user time */ \
+ std ra,PACA_USER_TIME(r13); \
+2:
+
+#define ACCOUNT_CPU_USER_EXIT(ra, rb) \
+BEGIN_FTR_SECTION; \
+ mfspr ra,SPRN_PURR; /* get processor util. reg */ \
+END_FTR_SECTION_IFSET(CPU_FTR_PURR); \
+BEGIN_FTR_SECTION; \
+ mftb ra; /* or get TB if no PURR */ \
+END_FTR_SECTION_IFCLR(CPU_FTR_PURR); \
+ ld rb,PACA_STARTPURR(r13); \
+ std ra,PACA_STARTPURR(r13); \
+ subf rb,rb,ra; /* subtract start value */ \
+ ld ra,PACA_SYSTEM_TIME(r13); \
+ add ra,ra,rb; /* add on to system time */ \
+ std ra,PACA_SYSTEM_TIME(r13);
+#endif
+
+/*
* Macros for storing registers into and loading registers from
* exception frames.
*/
diff --git a/include/asm-powerpc/processor.h b/include/asm-powerpc/processor.h
index 415fa393b00c..1c64a211cf19 100644
--- a/include/asm-powerpc/processor.h
+++ b/include/asm-powerpc/processor.h
@@ -52,7 +52,6 @@
#ifdef __KERNEL__
#define platform_is_pseries() (_machine == PLATFORM_PSERIES || \
_machine == PLATFORM_PSERIES_LPAR)
-#define platform_is_lpar() (!!(_machine & PLATFORM_LPAR))
#if defined(CONFIG_PPC_MULTIPLATFORM)
extern int _machine;
diff --git a/include/asm-powerpc/prom.h b/include/asm-powerpc/prom.h
index cbd297f44cce..782e13a070a1 100644
--- a/include/asm-powerpc/prom.h
+++ b/include/asm-powerpc/prom.h
@@ -126,8 +126,14 @@ extern struct device_node *find_all_nodes(void);
/* New style node lookup */
extern struct device_node *of_find_node_by_name(struct device_node *from,
const char *name);
+#define for_each_node_by_name(dn, name) \
+ for (dn = of_find_node_by_name(NULL, name); dn; \
+ dn = of_find_node_by_name(dn, name))
extern struct device_node *of_find_node_by_type(struct device_node *from,
const char *type);
+#define for_each_node_by_type(dn, type) \
+ for (dn = of_find_node_by_type(NULL, type); dn; \
+ dn = of_find_node_by_type(dn, type))
extern struct device_node *of_find_compatible_node(struct device_node *from,
const char *type, const char *compat);
extern struct device_node *of_find_node_by_path(const char *path);
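
A short sketch of the new iterator macros in use (the function below is illustrative only):

#include <asm/prom.h>

static int example_count_cpu_nodes(void)
{
	struct device_node *dn;
	int n = 0;

	/* Visits every device-tree node whose device_type is "cpu". */
	for_each_node_by_type(dn, "cpu")
		n++;

	return n;
}
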
diff --git a/include/asm-powerpc/rwsem.h b/include/asm-powerpc/rwsem.h
index 79bae4933b73..2c2fe9647595 100644
--- a/include/asm-powerpc/rwsem.h
+++ b/include/asm-powerpc/rwsem.h
@@ -4,7 +4,7 @@
#ifdef __KERNEL__
/*
- * include/asm-ppc64/rwsem.h: R/W semaphores for PPC using the stuff
+ * include/asm-powerpc/rwsem.h: R/W semaphores for PPC using the stuff
* in lib/rwsem.c. Adapted largely from include/asm-i386/rwsem.h
* by Paul Mackerras <paulus@samba.org>.
*/
diff --git a/include/asm-powerpc/synch.h b/include/asm-powerpc/synch.h
index c90d9d9aae72..2cda3c38a9fa 100644
--- a/include/asm-powerpc/synch.h
+++ b/include/asm-powerpc/synch.h
@@ -15,7 +15,7 @@
#endif
#ifdef CONFIG_SMP
-#define ISYNC_ON_SMP "\n\tisync"
+#define ISYNC_ON_SMP "\n\tisync\n"
#define LWSYNC_ON_SMP __stringify(LWSYNC) "\n"
#else
#define ISYNC_ON_SMP
diff --git a/include/asm-powerpc/system.h b/include/asm-powerpc/system.h
index d9bf53653b10..65f5a7b2646b 100644
--- a/include/asm-powerpc/system.h
+++ b/include/asm-powerpc/system.h
@@ -171,6 +171,8 @@ extern u32 booke_wdt_period;
/* EBCDIC -> ASCII conversion for [0-9A-Z] on iSeries */
extern unsigned char e2a(unsigned char);
+extern unsigned char* strne2a(unsigned char *dest,
+ const unsigned char *src, size_t n);
struct device_node;
extern void note_scsi_host(struct device_node *, void *);
@@ -424,5 +426,9 @@ static inline void create_function_call(unsigned long addr, void * func)
create_branch(addr, func_addr, BRANCH_SET_LINK);
}
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+extern void account_system_vtime(struct task_struct *);
+#endif
+
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_SYSTEM_H */
diff --git a/include/asm-powerpc/time.h b/include/asm-powerpc/time.h
index baddc9ab57ad..912118db13ae 100644
--- a/include/asm-powerpc/time.h
+++ b/include/asm-powerpc/time.h
@@ -41,6 +41,7 @@ extern time_t last_rtc_update;
extern void generic_calibrate_decr(void);
extern void wakeup_decrementer(void);
+extern void snapshot_timebase(void);
/* Some sane defaults: 125 MHz timebase, 1GHz processor */
extern unsigned long ppc_proc_freq;
@@ -221,5 +222,19 @@ struct cpu_usage {
DECLARE_PER_CPU(struct cpu_usage, cpu_usage_array);
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+extern void account_process_vtime(struct task_struct *tsk);
+#else
+#define account_process_vtime(tsk) do { } while (0)
+#endif
+
+#if defined(CONFIG_VIRT_CPU_ACCOUNTING) && defined(CONFIG_PPC_SPLPAR)
+extern void calculate_steal_time(void);
+extern void snapshot_timebases(void);
+#else
+#define calculate_steal_time() do { } while (0)
+#define snapshot_timebases() do { } while (0)
+#endif
+
#endif /* __KERNEL__ */
#endif /* __PPC64_TIME_H */
diff --git a/include/asm-ppc/harrier.h b/include/asm-ppc/harrier.h
index 36c73ab7e43e..7acd7fc126ec 100644
--- a/include/asm-ppc/harrier.h
+++ b/include/asm-ppc/harrier.h
@@ -1,6 +1,4 @@
/*
- * arch/ppc/kernel/harrier.h
- *
* Definitions for Motorola MCG Harrier North Bridge & Memory controller
*
* Author: Dale Farnsworth
diff --git a/include/asm-ppc/ibm44x.h b/include/asm-ppc/ibm44x.h
index f835066fb3ca..3acc382cc83f 100644
--- a/include/asm-ppc/ibm44x.h
+++ b/include/asm-ppc/ibm44x.h
@@ -29,7 +29,7 @@
/* TLB entry offset/size used for pinning kernel lowmem */
#define PPC44x_PIN_SHIFT 28
-#define PPC44x_PIN_SIZE (1 << PPC44x_PIN_SHIFT)
+#define PPC_PIN_SIZE (1 << PPC44x_PIN_SHIFT)
/* Lowest TLB slot consumed by the default pinned TLBs */
#define PPC44x_LOW_SLOT 63
diff --git a/include/asm-ppc/ibm4xx.h b/include/asm-ppc/ibm4xx.h
index 6c28ae7807f4..38f99710752b 100644
--- a/include/asm-ppc/ibm4xx.h
+++ b/include/asm-ppc/ibm4xx.h
@@ -51,6 +51,10 @@
#include <platforms/4xx/xilinx_ml300.h>
#endif
+#if defined(CONFIG_XILINX_ML403)
+#include <platforms/4xx/xilinx_ml403.h>
+#endif
+
#ifndef __ASSEMBLY__
#ifdef CONFIG_40x
diff --git a/include/asm-ppc/io.h b/include/asm-ppc/io.h
index df9cf6ed189d..b919d8fb7d98 100644
--- a/include/asm-ppc/io.h
+++ b/include/asm-ppc/io.h
@@ -575,4 +575,11 @@ extern void pci_iounmap(struct pci_dev *dev, void __iomem *);
*/
#define xlate_dev_kmem_ptr(p) p
+/* access ports */
+#define setbits32(_addr, _v) out_be32((_addr), in_be32(_addr) | (_v))
+#define clrbits32(_addr, _v) out_be32((_addr), in_be32(_addr) & ~(_v))
+
+#define setbits16(_addr, _v) out_be16((_addr), in_be16(_addr) | (_v))
+#define clrbits16(_addr, _v) out_be16((_addr), in_be16(_addr) & ~(_v))
+
#endif /* __KERNEL__ */
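The new setbits/clrbits accessors are read-modify-write wrappers around the existing in_be16/32 and out_be16/32 helpers. A hedged usage sketch; regs->cr and CR_ENABLE are hypothetical names, not taken from any driver:

/* Illustrative only. */
#define CR_ENABLE 0x00000001

setbits32(&regs->cr, CR_ENABLE);	/* cr |= CR_ENABLE  */
clrbits32(&regs->cr, CR_ENABLE);	/* cr &= ~CR_ENABLE */

Note that the macros expand to a plain load/modify/store, so they are not atomic with respect to other writers; callers that need atomicity must provide their own locking.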
diff --git a/include/asm-ppc/mpc10x.h b/include/asm-ppc/mpc10x.h
index 77b1e092c206..b30a6a3b5bd2 100644
--- a/include/asm-ppc/mpc10x.h
+++ b/include/asm-ppc/mpc10x.h
@@ -1,6 +1,4 @@
/*
- * arch/ppc/kernel/mpc10x.h
- *
* Common routines for the Motorola SPS MPC106/8240/107 Host bridge/Mem
* ctlr/EPIC/etc.
*
@@ -165,6 +163,7 @@ enum ppc_sys_devices {
MPC10X_DMA1,
MPC10X_UART0,
MPC10X_UART1,
+ NUM_PPC_SYS_DEVS,
};
int mpc10x_bridge_init(struct pci_controller *hose,
diff --git a/include/asm-ppc/mpc52xx.h b/include/asm-ppc/mpc52xx.h
index a055e0756b9d..6167f74635f7 100644
--- a/include/asm-ppc/mpc52xx.h
+++ b/include/asm-ppc/mpc52xx.h
@@ -60,6 +60,7 @@ enum ppc_sys_devices {
MPC52xx_ATA,
MPC52xx_I2C1,
MPC52xx_I2C2,
+ NUM_PPC_SYS_DEVS,
};
diff --git a/include/asm-ppc/mpc8260.h b/include/asm-ppc/mpc8260.h
index 321452695039..6ba69a86b9dd 100644
--- a/include/asm-ppc/mpc8260.h
+++ b/include/asm-ppc/mpc8260.h
@@ -83,6 +83,7 @@ enum ppc_sys_devices {
MPC82xx_CPM_SMC2,
MPC82xx_CPM_USB,
MPC82xx_SEC1,
+ NUM_PPC_SYS_DEVS,
};
#ifndef __ASSEMBLY__
diff --git a/include/asm-ppc/mpc83xx.h b/include/asm-ppc/mpc83xx.h
index 7cdf60fa69b6..3c23fc43bfbc 100644
--- a/include/asm-ppc/mpc83xx.h
+++ b/include/asm-ppc/mpc83xx.h
@@ -108,6 +108,7 @@ enum ppc_sys_devices {
MPC83xx_USB2_DR,
MPC83xx_USB2_MPH,
MPC83xx_MDIO,
+ NUM_PPC_SYS_DEVS,
};
#endif /* CONFIG_83xx */
diff --git a/include/asm-ppc/mpc85xx.h b/include/asm-ppc/mpc85xx.h
index c8a96aa44fb7..f47002a60edf 100644
--- a/include/asm-ppc/mpc85xx.h
+++ b/include/asm-ppc/mpc85xx.h
@@ -139,6 +139,7 @@ enum ppc_sys_devices {
MPC85xx_eTSEC4,
MPC85xx_IIC2,
MPC85xx_MDIO,
+ NUM_PPC_SYS_DEVS,
};
/* Internal interrupts are all Level Sensitive, and Positive Polarity */
diff --git a/include/asm-ppc/mpc8xx.h b/include/asm-ppc/mpc8xx.h
index 46f159cf589e..3515a7fa6c89 100644
--- a/include/asm-ppc/mpc8xx.h
+++ b/include/asm-ppc/mpc8xx.h
@@ -111,8 +111,11 @@ enum ppc_sys_devices {
MPC8xx_CPM_SMC1,
MPC8xx_CPM_SMC2,
MPC8xx_CPM_USB,
+ NUM_PPC_SYS_DEVS,
};
+#define PPC_PIN_SIZE (24 * 1024 * 1024) /* 24Mbytes of data pinned */
+
#ifndef BOARD_CHIP_NAME
#define BOARD_CHIP_NAME ""
#endif
diff --git a/include/asm-ppc/pgtable.h b/include/asm-ppc/pgtable.h
index 6d1c39e8a6af..e1c62da12e74 100644
--- a/include/asm-ppc/pgtable.h
+++ b/include/asm-ppc/pgtable.h
@@ -12,6 +12,7 @@
#include <asm/processor.h> /* For TASK_SIZE */
#include <asm/mmu.h>
#include <asm/page.h>
+#include <asm/io.h> /* For sub-arch specific PPC_PIN_SIZE */
struct mm_struct;
extern unsigned long va_to_phys(unsigned long address);
@@ -127,9 +128,8 @@ extern unsigned long ioremap_bot, ioremap_base;
* of RAM. -- Cort
*/
#define VMALLOC_OFFSET (0x1000000) /* 16M */
-#ifdef CONFIG_44x
-#include <asm/ibm44x.h>
-#define VMALLOC_START (((_ALIGN((long)high_memory, PPC44x_PIN_SIZE) + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
+#ifdef PPC_PIN_SIZE
+#define VMALLOC_START (((_ALIGN((long)high_memory, PPC_PIN_SIZE) + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#else
#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#endif
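With this change any sub-arch header that defines PPC_PIN_SIZE (ibm44x.h above, mpc8xx.h below) gets a VMALLOC_START placed past its pinned lowmem region, instead of the previous 44x-only special case. As a worked example with illustrative numbers only: for PPC_PIN_SIZE = 24MB and high_memory = 0xc8000000, _ALIGN() rounds up to 0xc9000000; adding VMALLOC_OFFSET (16MB) and masking with ~(VMALLOC_OFFSET-1) gives VMALLOC_START = 0xca000000.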
diff --git a/include/asm-ppc/ppc_sys.h b/include/asm-ppc/ppc_sys.h
index 83d8c77c124d..4b94f7059ebe 100644
--- a/include/asm-ppc/ppc_sys.h
+++ b/include/asm-ppc/ppc_sys.h
@@ -33,6 +33,8 @@
#include <asm/mpc52xx.h>
#elif defined(CONFIG_MPC10X_BRIDGE)
#include <asm/mpc10x.h>
+#elif defined(CONFIG_XILINX_VIRTEX)
+#include <platforms/4xx/virtex.h>
#else
#error "need definition of ppc_sys_devices"
#endif
@@ -44,9 +46,26 @@ struct ppc_sys_spec {
u32 value;
u32 num_devices;
char *ppc_sys_name;
+ u8 config[NUM_PPC_SYS_DEVS];
enum ppc_sys_devices *device_list;
};
+struct platform_notify_dev_map {
+ const char *bus_id;
+ void (*rtn)(struct platform_device * pdev, int idx);
+};
+
+enum platform_device_func {
+ PPC_SYS_FUNC_DUMMY = 0,
+ PPC_SYS_FUNC_ETH = 1,
+ PPC_SYS_FUNC_UART = 2,
+ PPC_SYS_FUNC_HLDC = 3,
+ PPC_SYS_FUNC_USB = 4,
+ PPC_SYS_FUNC_IRDA = 5,
+};
+
+#define PPC_SYS_CONFIG_DISABLED 1
+
/* describes all specific chips and which devices they have on them */
extern struct ppc_sys_spec ppc_sys_specs[];
extern struct ppc_sys_spec *cur_ppc_sys_spec;
@@ -72,5 +91,20 @@ extern void *ppc_sys_get_pdata(enum ppc_sys_devices dev) __init;
/* remove a device from the system */
extern void ppc_sys_device_remove(enum ppc_sys_devices dev);
+/* Function assignment stuff */
+void ppc_sys_device_initfunc(void);
+void ppc_sys_device_setfunc(enum ppc_sys_devices dev,
+ enum platform_device_func func);
+void ppc_sys_device_set_func_all(enum platform_device_func func);
+
+void platform_notify_map(const struct platform_notify_dev_map *map,
+ struct device *dev);
+
+/* Enable / disable stuff */
+void ppc_sys_device_disable(enum ppc_sys_devices dev);
+void ppc_sys_device_enable(enum ppc_sys_devices dev);
+void ppc_sys_device_enable_all(void);
+void ppc_sys_device_disable_all(void);
+
#endif /* __ASM_PPC_SYS_H */
#endif /* __KERNEL__ */
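The ppc_sys.h additions let board code tag SoC devices with a function and enable or disable them before the platform devices are registered, driven by the new per-device config[NUM_PPC_SYS_DEVS] array. A hedged sketch of board-setup usage; the device and function choices are examples only (MPC8xx_CPM_SCC1/SCC2 are assumed members of the 8xx enum, MPC8xx_CPM_USB appears in the mpc8xx.h hunk above):

/* Illustrative board-setup fragment, kernel context assumed. */
ppc_sys_device_initfunc();
ppc_sys_device_setfunc(MPC8xx_CPM_SCC1, PPC_SYS_FUNC_ETH);	/* SCC1 wired as Ethernet */
ppc_sys_device_setfunc(MPC8xx_CPM_SCC2, PPC_SYS_FUNC_UART);	/* SCC2 wired as a UART */
ppc_sys_device_disable(MPC8xx_CPM_USB);				/* USB not pinned out on this board */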
diff --git a/include/asm-ppc/time.h b/include/asm-ppc/time.h
index 321fb75b5f22..c86112323c9f 100644
--- a/include/asm-ppc/time.h
+++ b/include/asm-ppc/time.h
@@ -153,5 +153,10 @@ extern __inline__ unsigned binary_tbl(void) {
({unsigned z; asm ("mulhwu %0,%1,%2" : "=r" (z) : "r" (x), "r" (y)); z;})
unsigned mulhwu_scale_factor(unsigned, unsigned);
+
+#define account_process_vtime(tsk) do { } while (0)
+#define calculate_steal_time() do { } while (0)
+#define snapshot_timebases() do { } while (0)
+
#endif /* __ASM_TIME_H__ */
#endif /* __KERNEL__ */
diff --git a/include/asm-ppc/todc.h b/include/asm-ppc/todc.h
index 84bae7d76814..937c7dbe6e5c 100644
--- a/include/asm-ppc/todc.h
+++ b/include/asm-ppc/todc.h
@@ -1,6 +1,4 @@
/*
- * include/asm-ppc/todc.h
- *
* Definitions for the M48Txx and mc146818 series of Time of day/Real Time
* Clock chips.
*
diff --git a/include/asm-ppc/xparameters.h b/include/asm-ppc/xparameters.h
deleted file mode 100644
index fe4eac629139..000000000000
--- a/include/asm-ppc/xparameters.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
- * include/asm-ppc/xparameters.h
- *
- * This file includes the correct xparameters.h for the CONFIG'ed board
- *
- * Author: MontaVista Software, Inc.
- * source@mvista.com
- *
- * 2004 (c) MontaVista Software, Inc. This file is licensed under the terms
- * of the GNU General Public License version 2. This program is licensed
- * "as is" without any warranty of any kind, whether express or implied.
- */
-
-#include <linux/config.h>
-
-#if defined(CONFIG_XILINX_ML300)
-#include <platforms/4xx/xparameters/xparameters_ml300.h>
-#endif
diff --git a/include/asm-s390/atomic.h b/include/asm-s390/atomic.h
index be6fefe223d6..de1d9926aa60 100644
--- a/include/asm-s390/atomic.h
+++ b/include/asm-s390/atomic.h
@@ -89,10 +89,15 @@ static __inline__ int atomic_cmpxchg(atomic_t *v, int old, int new)
static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
{
int c, old;
-
c = atomic_read(v);
- while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c)
+ for (;;) {
+ if (unlikely(c == u))
+ break;
+ old = atomic_cmpxchg(v, c, c + a);
+ if (likely(old == c))
+ break;
c = old;
+ }
return c != u;
}
@@ -167,10 +172,15 @@ static __inline__ int atomic64_add_unless(atomic64_t *v,
long long a, long long u)
{
long long c, old;
-
c = atomic64_read(v);
- while (c != u && (old = atomic64_cmpxchg(v, c, c + a)) != c)
+ for (;;) {
+ if (unlikely(c == u))
+ break;
+ old = atomic64_cmpxchg(v, c, c + a);
+ if (likely(old == c))
+ break;
c = old;
+ }
return c != u;
}
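The rewritten atomic_add_unless() keeps the same semantics as before; the compound while-condition is just unrolled into an explicit cmpxchg retry loop so the common paths can carry likely()/unlikely() annotations. The same algorithm in portable user-space C11, purely as an illustration (not kernel code):

#include <stdatomic.h>
#include <stdbool.h>

/* Add 'a' to *v unless *v == u; return true if the add happened. */
static bool add_unless(atomic_int *v, int a, int u)
{
	int c = atomic_load(v);

	for (;;) {
		if (c == u)
			break;		/* hit the forbidden value: give up */
		/* Try to swing c -> c + a; on failure c is refreshed for us. */
		if (atomic_compare_exchange_weak(v, &c, c + a))
			break;		/* swap succeeded */
	}
	return c != u;
}

atomic_inc_not_zero(v), used further down in these headers, is then just add_unless(v, 1, 0).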
diff --git a/include/asm-s390/percpu.h b/include/asm-s390/percpu.h
index 123fcaca295e..e10ed87094f0 100644
--- a/include/asm-s390/percpu.h
+++ b/include/asm-s390/percpu.h
@@ -46,10 +46,9 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
#define percpu_modcopy(pcpudst, src, size) \
do { \
unsigned int __i; \
- for (__i = 0; __i < NR_CPUS; __i++) \
- if (cpu_possible(__i)) \
- memcpy((pcpudst)+__per_cpu_offset[__i], \
- (src), (size)); \
+ for_each_cpu(__i) \
+ memcpy((pcpudst)+__per_cpu_offset[__i], \
+ (src), (size)); \
} while (0)
#else /* ! SMP */
diff --git a/include/asm-sparc64/atomic.h b/include/asm-sparc64/atomic.h
index 25256bdc8aae..468eb48d8142 100644
--- a/include/asm-sparc64/atomic.h
+++ b/include/asm-sparc64/atomic.h
@@ -78,9 +78,15 @@ extern int atomic64_sub_ret(int, atomic64_t *);
({ \
int c, old; \
c = atomic_read(v); \
- while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
+ for (;;) { \
+ if (unlikely(c == (u))) \
+ break; \
+ old = atomic_cmpxchg((v), c, c + (a)); \
+ if (likely(old == c)) \
+ break; \
c = old; \
- c != (u); \
+ } \
+ likely(c != (u)); \
})
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
diff --git a/include/asm-sparc64/cache.h b/include/asm-sparc64/cache.h
index f7d35a2ae9b8..e9df17acedde 100644
--- a/include/asm-sparc64/cache.h
+++ b/include/asm-sparc64/cache.h
@@ -13,4 +13,6 @@
#define SMP_CACHE_BYTES_SHIFT 6
#define SMP_CACHE_BYTES (1 << SMP_CACHE_BYTES_SHIFT) /* L2 cache line size. */
+#define __read_mostly __attribute__((__section__(".data.read_mostly")))
+
#endif
diff --git a/include/asm-sparc64/percpu.h b/include/asm-sparc64/percpu.h
index aea4e51e7cd1..82032e159a76 100644
--- a/include/asm-sparc64/percpu.h
+++ b/include/asm-sparc64/percpu.h
@@ -26,10 +26,9 @@ register unsigned long __local_per_cpu_offset asm("g5");
#define percpu_modcopy(pcpudst, src, size) \
do { \
unsigned int __i; \
- for (__i = 0; __i < NR_CPUS; __i++) \
- if (cpu_possible(__i)) \
- memcpy((pcpudst)+__per_cpu_offset(__i), \
- (src), (size)); \
+ for_each_cpu(__i) \
+ memcpy((pcpudst)+__per_cpu_offset(__i), \
+ (src), (size)); \
} while (0)
#else /* ! SMP */
diff --git a/include/asm-um/alternative.h b/include/asm-um/alternative.h
new file mode 100644
index 000000000000..b6434396bd42
--- /dev/null
+++ b/include/asm-um/alternative.h
@@ -0,0 +1,6 @@
+#ifndef __UM_ALTERNATIVE_H
+#define __UM_ALTERNATIVE_H
+
+#include "asm/arch/alternative.h"
+
+#endif
diff --git a/include/asm-x86_64/atomic.h b/include/asm-x86_64/atomic.h
index 4b5cd553e772..cecbf7baa6aa 100644
--- a/include/asm-x86_64/atomic.h
+++ b/include/asm-x86_64/atomic.h
@@ -405,8 +405,14 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t *v)
({ \
int c, old; \
c = atomic_read(v); \
- while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
+ for (;;) { \
+ if (unlikely(c == (u))) \
+ break; \
+ old = atomic_cmpxchg((v), c, c + (a)); \
+ if (likely(old == c)) \
+ break; \
c = old; \
+ } \
c != (u); \
})
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
diff --git a/include/asm-x86_64/cache.h b/include/asm-x86_64/cache.h
index 263f0a211ed7..c8043a16152e 100644
--- a/include/asm-x86_64/cache.h
+++ b/include/asm-x86_64/cache.h
@@ -20,6 +20,8 @@
__attribute__((__section__(".data.page_aligned")))
#endif
+#define __read_mostly __attribute__((__section__(".data.read_mostly")))
+
#endif
#endif
diff --git a/include/asm-x86_64/percpu.h b/include/asm-x86_64/percpu.h
index 29a6b0408f75..4405b4adeaba 100644
--- a/include/asm-x86_64/percpu.h
+++ b/include/asm-x86_64/percpu.h
@@ -26,10 +26,9 @@
#define percpu_modcopy(pcpudst, src, size) \
do { \
unsigned int __i; \
- for (__i = 0; __i < NR_CPUS; __i++) \
- if (cpu_possible(__i)) \
- memcpy((pcpudst)+__per_cpu_offset(__i), \
- (src), (size)); \
+ for_each_cpu(__i) \
+ memcpy((pcpudst)+__per_cpu_offset(__i), \
+ (src), (size)); \
} while (0)
extern void setup_per_cpu_areas(void);
diff --git a/include/linux/cache.h b/include/linux/cache.h
index d22e632f41fb..cc4b3aafad9a 100644
--- a/include/linux/cache.h
+++ b/include/linux/cache.h
@@ -13,9 +13,7 @@
#define SMP_CACHE_BYTES L1_CACHE_BYTES
#endif
-#if defined(CONFIG_X86) || defined(CONFIG_SPARC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
-#define __read_mostly __attribute__((__section__(".data.read_mostly")))
-#else
+#ifndef __read_mostly
#define __read_mostly
#endif
diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h
index b68fdf1f3156..3c9b0bc05123 100644
--- a/include/linux/cdrom.h
+++ b/include/linux/cdrom.h
@@ -378,7 +378,6 @@ struct cdrom_generic_command
#define CDC_MEDIA_CHANGED 0x80 /* media changed */
#define CDC_PLAY_AUDIO 0x100 /* audio functions */
#define CDC_RESET 0x200 /* hard reset device */
-#define CDC_IOCTLS 0x400 /* driver has non-standard ioctls */
#define CDC_DRIVE_STATUS 0x800 /* driver implements drive status */
#define CDC_GENERIC_PACKET 0x1000 /* driver implements generic packets */
#define CDC_CD_R 0x2000 /* drive is a CD-R */
@@ -974,9 +973,7 @@ struct cdrom_device_ops {
int (*reset) (struct cdrom_device_info *);
/* play stuff */
int (*audio_ioctl) (struct cdrom_device_info *,unsigned int, void *);
- /* dev-specific */
- int (*dev_ioctl) (struct cdrom_device_info *,
- unsigned int, unsigned long);
+
/* driver specifications */
const int capability; /* capability flags */
int n_minors; /* number of active minor devices */
diff --git a/include/linux/eventpoll.h b/include/linux/eventpoll.h
index 1289f0ec4c00..1e4bdfcf83a2 100644
--- a/include/linux/eventpoll.h
+++ b/include/linux/eventpoll.h
@@ -52,7 +52,12 @@ struct file;
#ifdef CONFIG_EPOLL
/* Used to initialize the epoll bits inside the "struct file" */
-void eventpoll_init_file(struct file *file);
+static inline void eventpoll_init_file(struct file *file)
+{
+ INIT_LIST_HEAD(&file->f_ep_links);
+ spin_lock_init(&file->f_ep_lock);
+}
+
/* Used to release the epoll bits inside the "struct file" */
void eventpoll_release_file(struct file *file);
@@ -85,7 +90,6 @@ static inline void eventpoll_release(struct file *file)
eventpoll_release_file(file);
}
-
#else
static inline void eventpoll_init_file(struct file *file) {}
diff --git a/include/linux/ext3_fs.h b/include/linux/ext3_fs.h
index c0272d73ab20..e7239f2f97a1 100644
--- a/include/linux/ext3_fs.h
+++ b/include/linux/ext3_fs.h
@@ -772,9 +772,12 @@ extern unsigned long ext3_count_free (struct buffer_head *, unsigned);
/* inode.c */
-extern int ext3_forget(handle_t *, int, struct inode *, struct buffer_head *, int);
-extern struct buffer_head * ext3_getblk (handle_t *, struct inode *, long, int, int *);
-extern struct buffer_head * ext3_bread (handle_t *, struct inode *, int, int, int *);
+int ext3_forget(handle_t *, int, struct inode *, struct buffer_head *, int);
+struct buffer_head * ext3_getblk (handle_t *, struct inode *, long, int, int *);
+struct buffer_head * ext3_bread (handle_t *, struct inode *, int, int, int *);
+int ext3_get_block_handle(handle_t *handle, struct inode *inode,
+ sector_t iblock, struct buffer_head *bh_result, int create,
+ int extend_disksize);
extern void ext3_read_inode (struct inode *);
extern int ext3_write_inode (struct inode *, int);
diff --git a/include/linux/ext3_fs_i.h b/include/linux/ext3_fs_i.h
index e71dd98dbcae..7abf90147180 100644
--- a/include/linux/ext3_fs_i.h
+++ b/include/linux/ext3_fs_i.h
@@ -19,6 +19,7 @@
#include <linux/rwsem.h>
#include <linux/rbtree.h>
#include <linux/seqlock.h>
+#include <linux/mutex.h>
struct ext3_reserve_window {
__u32 _rsv_start; /* First byte reserved */
@@ -122,16 +123,16 @@ struct ext3_inode_info {
__u16 i_extra_isize;
/*
- * truncate_sem is for serialising ext3_truncate() against
+ * truncate_mutex is for serialising ext3_truncate() against
* ext3_getblock(). In the 2.4 ext2 design, great chunks of inode's
* data tree are chopped off during truncate. We can't do that in
* ext3 because whenever we perform intermediate commits during
* truncate, the inode and all the metadata blocks *must* be in a
* consistent state which allows truncation of the orphans to restart
* during recovery. Hence we must fix the get_block-vs-truncate race
- * by other means, so we have truncate_sem.
+ * by other means, so we have truncate_mutex.
*/
- struct semaphore truncate_sem;
+ struct mutex truncate_mutex;
struct inode vfs_inode;
};
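The truncate_sem change is one instance of the tree-wide conversion in this merge from semaphores used as plain mutexes to the new struct mutex; the jbd, fat, loop, nbd, ncpfs, quota, seq_file, tty and udf hunks below follow the same recipe. A hedged sketch of the pattern, with a hypothetical surrounding function; only the mutex calls mirror the real change:

/* At inode initialisation time: was init_MUTEX(&ei->truncate_sem). */
mutex_init(&ei->truncate_mutex);

/* Around the truncate vs. get_block critical section: */
mutex_lock(&ei->truncate_mutex);	/* was: down(&ei->truncate_sem) */
/* ... modify the inode's block tree ... */
mutex_unlock(&ei->truncate_mutex);	/* was: up(&ei->truncate_sem)   */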
diff --git a/include/linux/file.h b/include/linux/file.h
index 9901b850f2e4..9f7c2513866f 100644
--- a/include/linux/file.h
+++ b/include/linux/file.h
@@ -10,6 +10,7 @@
#include <linux/compiler.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
+#include <linux/types.h>
/*
* The default fd array needs to be at least BITS_PER_LONG,
@@ -17,10 +18,22 @@
*/
#define NR_OPEN_DEFAULT BITS_PER_LONG
+/*
+ * The embedded_fd_set is a small fd_set,
+ * suitable for most tasks (which open <= BITS_PER_LONG files)
+ */
+struct embedded_fd_set {
+ unsigned long fds_bits[1];
+};
+
+/*
+ * More than this number of fds: we use a separately allocated fd_set
+ */
+#define EMBEDDED_FD_SET_SIZE (BITS_PER_BYTE * sizeof(struct embedded_fd_set))
+
struct fdtable {
unsigned int max_fds;
int max_fdset;
- int next_fd;
struct file ** fd; /* current fd array */
fd_set *close_on_exec;
fd_set *open_fds;
@@ -33,13 +46,20 @@ struct fdtable {
* Open file table structure
*/
struct files_struct {
+ /*
+ * read mostly part
+ */
atomic_t count;
struct fdtable *fdt;
struct fdtable fdtab;
- fd_set close_on_exec_init;
- fd_set open_fds_init;
+ /*
+ * written part on a separate cache line in SMP
+ */
+ spinlock_t file_lock ____cacheline_aligned_in_smp;
+ int next_fd;
+ struct embedded_fd_set close_on_exec_init;
+ struct embedded_fd_set open_fds_init;
struct file * fd_array[NR_OPEN_DEFAULT];
- spinlock_t file_lock; /* Protects concurrent writers. Nests inside tsk->alloc_lock */
};
#define files_fdtable(files) (rcu_dereference((files)->fdt))
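The embedded_fd_set change shrinks the default close_on_exec/open_fds bitmaps inside files_struct from a full fd_set (__FD_SETSIZE bits, typically 1024) down to one word, which is all NR_OPEN_DEFAULT needs, and moves the written fields (file_lock, next_fd) onto their own cache line. EMBEDDED_FD_SET_SIZE works out to BITS_PER_LONG; a throwaway user-space check of that arithmetic, mirroring the definitions above rather than reusing kernel headers:

#include <stdio.h>

struct embedded_fd_set { unsigned long fds_bits[1]; };
#define BITS_PER_BYTE 8
#define EMBEDDED_FD_SET_SIZE (BITS_PER_BYTE * sizeof(struct embedded_fd_set))

int main(void)
{
	/* Prints 64 on LP64 targets and 32 on 32-bit ones. */
	printf("%zu\n", EMBEDDED_FD_SET_SIZE);
	return 0;
}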
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 128d0082522c..f9c9dea636d0 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -397,8 +397,8 @@ struct block_device {
dev_t bd_dev; /* not a kdev_t - it's a search key */
struct inode * bd_inode; /* will die */
int bd_openers;
- struct semaphore bd_sem; /* open/close mutex */
- struct semaphore bd_mount_sem; /* mount mutex */
+ struct mutex bd_mutex; /* open/close mutex */
+ struct mutex bd_mount_mutex; /* mount mutex */
struct list_head bd_inodes;
void * bd_holder;
int bd_holders;
@@ -509,7 +509,7 @@ struct inode {
#ifdef CONFIG_INOTIFY
struct list_head inotify_watches; /* watches on this inode */
- struct semaphore inotify_sem; /* protects the watches list */
+ struct mutex inotify_mutex; /* protects the watches list */
#endif
unsigned long i_state;
@@ -847,7 +847,7 @@ struct super_block {
* The next field is for VFS *only*. No filesystems have any business
* even looking at it. You had been warned.
*/
- struct semaphore s_vfs_rename_sem; /* Kludge */
+ struct mutex s_vfs_rename_mutex; /* Kludge */
/* Granularity of c/m/atime in ns.
Cannot be worse than a second */
@@ -1115,6 +1115,18 @@ static inline void mark_inode_dirty_sync(struct inode *inode)
__mark_inode_dirty(inode, I_DIRTY_SYNC);
}
+static inline void inode_inc_link_count(struct inode *inode)
+{
+ inode->i_nlink++;
+ mark_inode_dirty(inode);
+}
+
+static inline void inode_dec_link_count(struct inode *inode)
+{
+ inode->i_nlink--;
+ mark_inode_dirty(inode);
+}
+
extern void touch_atime(struct vfsmount *mnt, struct dentry *dentry);
static inline void file_accessed(struct file *file)
{
@@ -1534,7 +1546,7 @@ extern void destroy_inode(struct inode *);
extern struct inode *new_inode(struct super_block *);
extern int remove_suid(struct dentry *);
extern void remove_dquot_ref(struct super_block *, int, struct list_head *);
-extern struct semaphore iprune_sem;
+extern struct mutex iprune_mutex;
extern void __insert_inode_hash(struct inode *, unsigned long hashval);
extern void remove_inode_hash(struct inode *);
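inode_inc_link_count() and inode_dec_link_count() bundle the i_nlink update with the mark_inode_dirty() call that filesystems had been open-coding in pairs. A hedged two-line usage sketch; the surrounding link/unlink paths are hypothetical:

inode_inc_link_count(inode);	/* in a link() method: i_nlink++, then mark dirty */
/* ... */
inode_dec_link_count(inode);	/* in unlink():        i_nlink--, then mark dirty */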
diff --git a/include/linux/generic_serial.h b/include/linux/generic_serial.h
index 0abe9d9a0069..652611a4bdcd 100644
--- a/include/linux/generic_serial.h
+++ b/include/linux/generic_serial.h
@@ -12,6 +12,8 @@
#ifndef GENERIC_SERIAL_H
#define GENERIC_SERIAL_H
+#include <linux/mutex.h>
+
struct real_driver {
void (*disable_tx_interrupts) (void *);
void (*enable_tx_interrupts) (void *);
@@ -34,7 +36,7 @@ struct gs_port {
int xmit_head;
int xmit_tail;
int xmit_cnt;
- struct semaphore port_write_sem;
+ struct mutex port_write_mutex;
int flags;
wait_queue_head_t open_wait;
wait_queue_head_t close_wait;
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index eef5ccdcd731..fd647fde5ec1 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -149,22 +149,16 @@ struct disk_attribute {
({ \
typeof(gendiskp->dkstats->field) res = 0; \
int i; \
- for (i=0; i < NR_CPUS; i++) { \
- if (!cpu_possible(i)) \
- continue; \
+ for_each_cpu(i) \
res += per_cpu_ptr(gendiskp->dkstats, i)->field; \
- } \
res; \
})
static inline void disk_stat_set_all(struct gendisk *gendiskp, int value) {
int i;
- for (i=0; i < NR_CPUS; i++) {
- if (cpu_possible(i)) {
- memset(per_cpu_ptr(gendiskp->dkstats, i), value,
- sizeof (struct disk_stats));
- }
- }
+ for_each_cpu(i)
+ memset(per_cpu_ptr(gendiskp->dkstats, i), value,
+ sizeof (struct disk_stats));
}
#else
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index dcfd2ecccb5d..92146f3b7423 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -7,11 +7,10 @@
#define INIT_FDTABLE \
{ \
.max_fds = NR_OPEN_DEFAULT, \
- .max_fdset = __FD_SETSIZE, \
- .next_fd = 0, \
+ .max_fdset = EMBEDDED_FD_SET_SIZE, \
.fd = &init_files.fd_array[0], \
- .close_on_exec = &init_files.close_on_exec_init, \
- .open_fds = &init_files.open_fds_init, \
+ .close_on_exec = (fd_set *)&init_files.close_on_exec_init, \
+ .open_fds = (fd_set *)&init_files.open_fds_init, \
.rcu = RCU_HEAD_INIT, \
.free_files = NULL, \
.next = NULL, \
@@ -20,9 +19,10 @@
#define INIT_FILES \
{ \
.count = ATOMIC_INIT(1), \
- .file_lock = SPIN_LOCK_UNLOCKED, \
.fdt = &init_files.fdtab, \
.fdtab = INIT_FDTABLE, \
+ .file_lock = SPIN_LOCK_UNLOCKED, \
+ .next_fd = 0, \
.close_on_exec_init = { { 0, } }, \
.open_fds_init = { { 0, } }, \
.fd_array = { NULL, } \
diff --git a/include/linux/jbd.h b/include/linux/jbd.h
index 41ee79962bb2..2ccbfb6340ba 100644
--- a/include/linux/jbd.h
+++ b/include/linux/jbd.h
@@ -28,6 +28,7 @@
#include <linux/journal-head.h>
#include <linux/stddef.h>
#include <linux/bit_spinlock.h>
+#include <linux/mutex.h>
#include <asm/semaphore.h>
#endif
@@ -575,7 +576,7 @@ struct transaction_s
* @j_wait_checkpoint: Wait queue to trigger checkpointing
* @j_wait_commit: Wait queue to trigger commit
* @j_wait_updates: Wait queue to wait for updates to complete
- * @j_checkpoint_sem: Semaphore for locking against concurrent checkpoints
+ * @j_checkpoint_mutex: Mutex for locking against concurrent checkpoints
* @j_head: Journal head - identifies the first unused block in the journal
* @j_tail: Journal tail - identifies the oldest still-used block in the
* journal.
@@ -645,7 +646,7 @@ struct journal_s
int j_barrier_count;
/* The barrier lock itself */
- struct semaphore j_barrier;
+ struct mutex j_barrier;
/*
* Transactions: The current running transaction...
@@ -687,7 +688,7 @@ struct journal_s
wait_queue_head_t j_wait_updates;
/* Semaphore for locking against concurrent checkpoints */
- struct semaphore j_checkpoint_sem;
+ struct mutex j_checkpoint_mutex;
/*
* Journal head: identifies the first unused block in the journal.
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 3b507bf05d09..bb6e7ddee2fd 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -91,6 +91,9 @@ extern struct notifier_block *panic_notifier_list;
extern long (*panic_blink)(long time);
NORET_TYPE void panic(const char * fmt, ...)
__attribute__ ((NORET_AND format (printf, 1, 2)));
+extern void oops_enter(void);
+extern void oops_exit(void);
+extern int oops_may_print(void);
fastcall NORET_TYPE void do_exit(long error_code)
ATTRIB_NORET;
NORET_TYPE void complete_and_exit(struct completion *, long)
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 669756bc20a2..778adc0fa640 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -36,6 +36,7 @@
#include <linux/percpu.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
+#include <linux/mutex.h>
#ifdef CONFIG_KPROBES
#include <asm/kprobes.h>
@@ -152,7 +153,7 @@ struct kretprobe_instance {
};
extern spinlock_t kretprobe_lock;
-extern struct semaphore kprobe_mutex;
+extern struct mutex kprobe_mutex;
extern int arch_prepare_kprobe(struct kprobe *p);
extern void arch_arm_kprobe(struct kprobe *p);
extern void arch_disarm_kprobe(struct kprobe *p);
diff --git a/include/linux/loop.h b/include/linux/loop.h
index f96506782ebe..e76c7611d6cc 100644
--- a/include/linux/loop.h
+++ b/include/linux/loop.h
@@ -17,6 +17,7 @@
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/spinlock.h>
+#include <linux/mutex.h>
/* Possible states of device */
enum {
@@ -60,7 +61,7 @@ struct loop_device {
int lo_state;
struct completion lo_done;
struct completion lo_bh_done;
- struct semaphore lo_ctl_mutex;
+ struct mutex lo_ctl_mutex;
int lo_pending;
request_queue_t *lo_queue;
diff --git a/include/linux/msdos_fs.h b/include/linux/msdos_fs.h
index 8bcd9450d926..779e6a5744c7 100644
--- a/include/linux/msdos_fs.h
+++ b/include/linux/msdos_fs.h
@@ -184,6 +184,7 @@ struct fat_slot_info {
#include <linux/string.h>
#include <linux/nls.h>
#include <linux/fs.h>
+#include <linux/mutex.h>
struct fat_mount_options {
uid_t fs_uid;
@@ -226,7 +227,7 @@ struct msdos_sb_info {
unsigned long max_cluster; /* maximum cluster number */
unsigned long root_cluster; /* first cluster of the root directory */
unsigned long fsinfo_sector; /* sector number of FAT32 fsinfo */
- struct semaphore fat_lock;
+ struct mutex fat_lock;
unsigned int prev_free; /* previously allocated cluster number */
unsigned int free_clusters; /* -1 if undefined */
struct fat_mount_options options;
diff --git a/include/linux/nbd.h b/include/linux/nbd.h
index f95d51fae733..a6ce409ec6fc 100644
--- a/include/linux/nbd.h
+++ b/include/linux/nbd.h
@@ -38,6 +38,7 @@ enum {
#ifdef __KERNEL__
#include <linux/wait.h>
+#include <linux/mutex.h>
/* values for flags field */
#define NBD_READ_ONLY 0x0001
@@ -57,7 +58,7 @@ struct nbd_device {
struct request *active_req;
wait_queue_head_t active_wq;
- struct semaphore tx_lock;
+ struct mutex tx_lock;
struct gendisk *disk;
int blksize;
u64 bytesize;
diff --git a/include/linux/ncp_fs_i.h b/include/linux/ncp_fs_i.h
index 415be1ec6f98..bdb4c8ae6924 100644
--- a/include/linux/ncp_fs_i.h
+++ b/include/linux/ncp_fs_i.h
@@ -19,7 +19,7 @@ struct ncp_inode_info {
__le32 DosDirNum;
__u8 volNumber;
__le32 nwattr;
- struct semaphore open_sem;
+ struct mutex open_mutex;
atomic_t opened;
int access;
int flags;
diff --git a/include/linux/ncp_fs_sb.h b/include/linux/ncp_fs_sb.h
index cf858eb80f0b..b089d9506283 100644
--- a/include/linux/ncp_fs_sb.h
+++ b/include/linux/ncp_fs_sb.h
@@ -11,6 +11,7 @@
#include <linux/types.h>
#include <linux/ncp_mount.h>
#include <linux/net.h>
+#include <linux/mutex.h>
#ifdef __KERNEL__
@@ -51,7 +52,7 @@ struct ncp_server {
receive replies */
int lock; /* To prevent mismatch in protocols. */
- struct semaphore sem;
+ struct mutex mutex;
int current_size; /* for packet preparation */
int has_subfunction;
@@ -96,7 +97,7 @@ struct ncp_server {
struct {
struct work_struct tq; /* STREAM/DGRAM: data/error ready */
struct ncp_request_reply* creq; /* STREAM/DGRAM: awaiting reply from this request */
- struct semaphore creq_sem; /* DGRAM only: lock accesses to rcv.creq */
+ struct mutex creq_mutex; /* DGRAM only: lock accesses to rcv.creq */
unsigned int state; /* STREAM only: receiver state */
struct {
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 5be87ba3b7ac..6df2585c0169 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -188,6 +188,8 @@ extern void device_power_up(void);
extern void device_resume(void);
#ifdef CONFIG_PM
+extern suspend_disk_method_t pm_disk_mode;
+
extern int device_suspend(pm_message_t state);
#define device_set_wakeup_enable(dev,val) \
@@ -215,7 +217,6 @@ static inline int dpm_runtime_suspend(struct device * dev, pm_message_t state)
static inline void dpm_runtime_resume(struct device * dev)
{
-
}
#endif
diff --git a/include/linux/profile.h b/include/linux/profile.h
index 026969a5595c..1f2fea6640a4 100644
--- a/include/linux/profile.h
+++ b/include/linux/profile.h
@@ -14,6 +14,7 @@
struct proc_dir_entry;
struct pt_regs;
+struct notifier_block;
/* init basic kernel profiler */
void __init profile_init(void);
@@ -32,7 +33,6 @@ enum profile_type {
#ifdef CONFIG_PROFILING
-struct notifier_block;
struct task_struct;
struct mm_struct;
diff --git a/include/linux/quota.h b/include/linux/quota.h
index f33aeb22c26a..8dc2d04a103f 100644
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -38,6 +38,7 @@
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/spinlock.h>
+#include <linux/mutex.h>
#define __DQUOT_VERSION__ "dquot_6.5.1"
#define __DQUOT_NUM_VERSION__ 6*10000+5*100+1
@@ -215,7 +216,7 @@ struct dquot {
struct list_head dq_inuse; /* List of all quotas */
struct list_head dq_free; /* Free list element */
struct list_head dq_dirty; /* List of dirty dquots */
- struct semaphore dq_lock; /* dquot IO lock */
+ struct mutex dq_lock; /* dquot IO lock */
atomic_t dq_count; /* Use count */
wait_queue_head_t dq_wait_unused; /* Wait queue for dquot to become unused */
struct super_block *dq_sb; /* superblock this applies to */
@@ -285,8 +286,8 @@ struct quota_format_type {
struct quota_info {
unsigned int flags; /* Flags for diskquotas on this device */
- struct semaphore dqio_sem; /* lock device while I/O in progress */
- struct semaphore dqonoff_sem; /* Serialize quotaon & quotaoff */
+ struct mutex dqio_mutex; /* lock device while I/O in progress */
+ struct mutex dqonoff_mutex; /* Serialize quotaon & quotaoff */
struct rw_semaphore dqptr_sem; /* serialize ops using quota_info struct, pointers from inode to dquots */
struct inode *files[MAXQUOTAS]; /* inodes of quotafiles */
struct mem_dqinfo info[MAXQUOTAS]; /* Information for each quota type */
diff --git a/include/linux/raid/raid1.h b/include/linux/raid/raid1.h
index 9d5494aaac0f..3009c813d83d 100644
--- a/include/linux/raid/raid1.h
+++ b/include/linux/raid/raid1.h
@@ -130,6 +130,6 @@ struct r1bio_s {
* with failure when last write completes (and all failed).
* Record that bi_end_io was called with this flag...
*/
-#define R1BIO_Returned 4
+#define R1BIO_Returned 6
#endif
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index c2ec6c77874e..5673008b61e1 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -113,8 +113,6 @@ struct rcu_data {
DECLARE_PER_CPU(struct rcu_data, rcu_data);
DECLARE_PER_CPU(struct rcu_data, rcu_bh_data);
-extern struct rcu_ctrlblk rcu_ctrlblk;
-extern struct rcu_ctrlblk rcu_bh_ctrlblk;
/*
* Increment the quiescent state counter.
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
index 850a974ee505..b95f6eb7254c 100644
--- a/include/linux/seq_file.h
+++ b/include/linux/seq_file.h
@@ -4,7 +4,7 @@
#include <linux/types.h>
#include <linux/string.h>
-#include <asm/semaphore.h>
+#include <linux/mutex.h>
struct seq_operations;
struct file;
@@ -19,7 +19,7 @@ struct seq_file {
size_t count;
loff_t index;
loff_t version;
- struct semaphore sem;
+ struct mutex lock;
struct seq_operations *op;
void *private;
};
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 12415dd94451..54eac8a39a4c 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -234,14 +234,15 @@ extern struct page * read_swap_cache_async(swp_entry_t, struct vm_area_struct *v
/* linux/mm/swapfile.c */
extern long total_swap_pages;
extern unsigned int nr_swapfiles;
-extern struct swap_info_struct swap_info[];
extern void si_swapinfo(struct sysinfo *);
extern swp_entry_t get_swap_page(void);
-extern swp_entry_t get_swap_page_of_type(int type);
+extern swp_entry_t get_swap_page_of_type(int);
extern int swap_duplicate(swp_entry_t);
extern int valid_swaphandles(swp_entry_t, unsigned long *);
extern void swap_free(swp_entry_t);
extern void free_swap_and_cache(swp_entry_t);
+extern int swap_type_of(dev_t);
+extern unsigned int count_swap_pages(int, int);
extern sector_t map_swap_page(struct swap_info_struct *, pgoff_t);
extern struct swap_info_struct *get_swap_info_struct(unsigned);
extern int can_share_swap_page(struct page *);
diff --git a/include/linux/tty.h b/include/linux/tty.h
index f45cd74e6f24..f13f49afe198 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -24,6 +24,7 @@
#include <linux/tty_driver.h>
#include <linux/tty_ldisc.h>
#include <linux/screen_info.h>
+#include <linux/mutex.h>
#include <asm/system.h>
@@ -231,8 +232,8 @@ struct tty_struct {
int canon_data;
unsigned long canon_head;
unsigned int canon_column;
- struct semaphore atomic_read;
- struct semaphore atomic_write;
+ struct mutex atomic_read_lock;
+ struct mutex atomic_write_lock;
unsigned char *write_buf;
int write_cnt;
spinlock_t read_lock;
@@ -319,8 +320,7 @@ extern void tty_ldisc_put(int);
extern void tty_wakeup(struct tty_struct *tty);
extern void tty_ldisc_flush(struct tty_struct *tty);
-struct semaphore;
-extern struct semaphore tty_sem;
+extern struct mutex tty_mutex;
/* n_tty.c */
extern struct tty_ldisc tty_ldisc_N_TTY;
diff --git a/include/linux/tty_flip.h b/include/linux/tty_flip.h
index 222faf97d5f9..0c6169fff366 100644
--- a/include/linux/tty_flip.h
+++ b/include/linux/tty_flip.h
@@ -7,14 +7,8 @@ extern int tty_insert_flip_string_flags(struct tty_struct *tty, unsigned char *c
extern int tty_prepare_flip_string(struct tty_struct *tty, unsigned char **chars, size_t size);
extern int tty_prepare_flip_string_flags(struct tty_struct *tty, unsigned char **chars, char **flags, size_t size);
-#ifdef INCLUDE_INLINE_FUNCS
-#define _INLINE_ extern
-#else
-#define _INLINE_ static __inline__
-#endif
-
-_INLINE_ int tty_insert_flip_char(struct tty_struct *tty,
- unsigned char ch, char flag)
+static inline int tty_insert_flip_char(struct tty_struct *tty,
+ unsigned char ch, char flag)
{
struct tty_buffer *tb = tty->buf.tail;
if (tb && tb->active && tb->used < tb->size) {
@@ -25,7 +19,7 @@ _INLINE_ int tty_insert_flip_char(struct tty_struct *tty,
return tty_insert_flip_string_flags(tty, &ch, &flag, 1);
}
-_INLINE_ void tty_schedule_flip(struct tty_struct *tty)
+static inline void tty_schedule_flip(struct tty_struct *tty)
{
unsigned long flags;
spin_lock_irqsave(&tty->buf.lock, flags);
diff --git a/include/linux/udf_fs_sb.h b/include/linux/udf_fs_sb.h
index b15ff2e99c91..80ae9ef940dc 100644
--- a/include/linux/udf_fs_sb.h
+++ b/include/linux/udf_fs_sb.h
@@ -13,7 +13,7 @@
#ifndef _UDF_FS_SB_H
#define _UDF_FS_SB_H 1
-#include <asm/semaphore.h>
+#include <linux/mutex.h>
#pragma pack(1)
@@ -111,7 +111,7 @@ struct udf_sb_info
/* VAT inode */
struct inode *s_vat;
- struct semaphore s_alloc_sem;
+ struct mutex s_alloc_mutex;
};
#endif /* _UDF_FS_SB_H */
diff --git a/include/linux/vt_kern.h b/include/linux/vt_kern.h
index fab5aed8ca31..530ae3f4248c 100644
--- a/include/linux/vt_kern.h
+++ b/include/linux/vt_kern.h
@@ -73,6 +73,11 @@ int con_copy_unimap(struct vc_data *dst_vc, struct vc_data *src_vc);
int vt_waitactive(int vt);
void change_console(struct vc_data *new_vc);
void reset_vc(struct vc_data *vc);
+#ifdef CONFIG_VT
+int is_console_suspend_safe(void);
+#else
+static inline int is_console_suspend_safe(void) { return 1; }
+#endif
/*
* vc_screen.c shares this temporary buffer with the console write code so that