author		Linus Torvalds <torvalds@linux-foundation.org>	2014-10-13 15:48:00 +0200
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-10-13 15:48:00 +0200
commit		dbb885fecc1b1b35e93416bedd24d21bd20f60ed (patch)
tree		9aa92bcc4e3d3594eba0ba85d72b878d85f35a59 /arch/alpha
parent		d6dd50e07c5bec00db2005969b1a01f8ca3d25ef (diff)
parent		2291059c852706c6f5ffb400366042b7625066cd (diff)
Merge branch 'locking-arch-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull arch atomic cleanups from Ingo Molnar:
 "This is a series kept separate from the main locking tree, which
  cleans up and improves various details in the atomics type handling:

   - Remove the unused atomic_or_long() method

   - Consolidate and compress atomic ops implementations between
     architectures, to reduce linecount and to make it easier to add
     new ops.

   - Rewrite generic atomic support to only require cmpxchg() from an
     architecture - generate all other methods from that"

* 'locking-arch-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (23 commits)
  locking,arch: Use ACCESS_ONCE() instead of cast to volatile in atomic_read()
  locking, mips: Fix atomics
  locking, sparc64: Fix atomics
  locking,arch: Rewrite generic atomic support
  locking,arch,xtensa: Fold atomic_ops
  locking,arch,sparc: Fold atomic_ops
  locking,arch,sh: Fold atomic_ops
  locking,arch,powerpc: Fold atomic_ops
  locking,arch,parisc: Fold atomic_ops
  locking,arch,mn10300: Fold atomic_ops
  locking,arch,mips: Fold atomic_ops
  locking,arch,metag: Fold atomic_ops
  locking,arch,m68k: Fold atomic_ops
  locking,arch,m32r: Fold atomic_ops
  locking,arch,ia64: Fold atomic_ops
  locking,arch,hexagon: Fold atomic_ops
  locking,arch,cris: Fold atomic_ops
  locking,arch,avr32: Fold atomic_ops
  locking,arch,arm64: Fold atomic_ops
  locking,arch,arm: Fold atomic_ops
  ...
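The "only require cmpxchg()" point is the key design change: an architecture that supplies one compare-and-swap primitive gets every other atomic method generated for it. Below is a minimal userspace sketch of that shape, using C11 atomics as a stand-in for the kernel's cmpxchg(); the my_* names are illustrative, not kernel API:

	#include <stdatomic.h>
	#include <stdio.h>

	typedef struct { _Atomic int counter; } my_atomic_t;	/* stand-in for atomic_t */

	/* Build add-and-return from a bare CAS loop: retry until the
	 * counter is unchanged between the load and the exchange. */
	static int my_atomic_add_return(int i, my_atomic_t *v)
	{
		int old = atomic_load(&v->counter);

		while (!atomic_compare_exchange_weak(&v->counter, &old, old + i))
			;	/* a failed exchange refreshes 'old' with the current value */

		return old + i;
	}

	int main(void)
	{
		my_atomic_t v = { 0 };

		printf("%d\n", my_atomic_add_return(5, &v));	/* prints 5 */
		return 0;
	}

The same loop, parameterized over the operator, is the shape the rewritten generic code uses to derive add, sub, and the other ops from the one primitive.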
Diffstat (limited to 'arch/alpha')
-rw-r--r--	arch/alpha/include/asm/atomic.h	217
1 file changed, 82 insertions(+), 135 deletions(-)
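The first hunk below swaps the open-coded volatile casts in atomic_read()/atomic64_read() for ACCESS_ONCE(). This is a consolidation rather than a behavioral change: at the time, linux/compiler.h defined the macro as the same volatile cast, kept in one place:

	/* From linux/compiler.h of that era: force exactly one access
	 * by reading through a volatile-qualified lvalue. */
	#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))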
diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
index ed60a1ee1ed3..8f8eafbedd7c 100644
--- a/arch/alpha/include/asm/atomic.h
+++ b/arch/alpha/include/asm/atomic.h
@@ -17,8 +17,8 @@
#define ATOMIC_INIT(i) { (i) }
#define ATOMIC64_INIT(i) { (i) }
-#define atomic_read(v) (*(volatile int *)&(v)->counter)
-#define atomic64_read(v) (*(volatile long *)&(v)->counter)
+#define atomic_read(v) ACCESS_ONCE((v)->counter)
+#define atomic64_read(v) ACCESS_ONCE((v)->counter)
#define atomic_set(v,i) ((v)->counter = (i))
#define atomic64_set(v,i) ((v)->counter = (i))
@@ -29,145 +29,92 @@
* branch back to restart the operation.
*/
-static __inline__ void atomic_add(int i, atomic_t * v)
-{
- unsigned long temp;
- __asm__ __volatile__(
- "1: ldl_l %0,%1\n"
- " addl %0,%2,%0\n"
- " stl_c %0,%1\n"
- " beq %0,2f\n"
- ".subsection 2\n"
- "2: br 1b\n"
- ".previous"
- :"=&r" (temp), "=m" (v->counter)
- :"Ir" (i), "m" (v->counter));
-}
-
-static __inline__ void atomic64_add(long i, atomic64_t * v)
-{
- unsigned long temp;
- __asm__ __volatile__(
- "1: ldq_l %0,%1\n"
- " addq %0,%2,%0\n"
- " stq_c %0,%1\n"
- " beq %0,2f\n"
- ".subsection 2\n"
- "2: br 1b\n"
- ".previous"
- :"=&r" (temp), "=m" (v->counter)
- :"Ir" (i), "m" (v->counter));
-}
-
-static __inline__ void atomic_sub(int i, atomic_t * v)
-{
- unsigned long temp;
- __asm__ __volatile__(
- "1: ldl_l %0,%1\n"
- " subl %0,%2,%0\n"
- " stl_c %0,%1\n"
- " beq %0,2f\n"
- ".subsection 2\n"
- "2: br 1b\n"
- ".previous"
- :"=&r" (temp), "=m" (v->counter)
- :"Ir" (i), "m" (v->counter));
+#define ATOMIC_OP(op) \
+static __inline__ void atomic_##op(int i, atomic_t * v) \
+{ \
+ unsigned long temp; \
+ __asm__ __volatile__( \
+ "1: ldl_l %0,%1\n" \
+ " " #op "l %0,%2,%0\n" \
+ " stl_c %0,%1\n" \
+ " beq %0,2f\n" \
+ ".subsection 2\n" \
+ "2: br 1b\n" \
+ ".previous" \
+ :"=&r" (temp), "=m" (v->counter) \
+ :"Ir" (i), "m" (v->counter)); \
+} \
+
+#define ATOMIC_OP_RETURN(op) \
+static inline int atomic_##op##_return(int i, atomic_t *v) \
+{ \
+ long temp, result; \
+ smp_mb(); \
+ __asm__ __volatile__( \
+ "1: ldl_l %0,%1\n" \
+ " " #op "l %0,%3,%2\n" \
+ " " #op "l %0,%3,%0\n" \
+ " stl_c %0,%1\n" \
+ " beq %0,2f\n" \
+ ".subsection 2\n" \
+ "2: br 1b\n" \
+ ".previous" \
+ :"=&r" (temp), "=m" (v->counter), "=&r" (result) \
+ :"Ir" (i), "m" (v->counter) : "memory"); \
+ smp_mb(); \
+ return result; \
}
-static __inline__ void atomic64_sub(long i, atomic64_t * v)
-{
- unsigned long temp;
- __asm__ __volatile__(
- "1: ldq_l %0,%1\n"
- " subq %0,%2,%0\n"
- " stq_c %0,%1\n"
- " beq %0,2f\n"
- ".subsection 2\n"
- "2: br 1b\n"
- ".previous"
- :"=&r" (temp), "=m" (v->counter)
- :"Ir" (i), "m" (v->counter));
-}
-
-
-/*
- * Same as above, but return the result value
- */
-static inline int atomic_add_return(int i, atomic_t *v)
-{
- long temp, result;
- smp_mb();
- __asm__ __volatile__(
- "1: ldl_l %0,%1\n"
- " addl %0,%3,%2\n"
- " addl %0,%3,%0\n"
- " stl_c %0,%1\n"
- " beq %0,2f\n"
- ".subsection 2\n"
- "2: br 1b\n"
- ".previous"
- :"=&r" (temp), "=m" (v->counter), "=&r" (result)
- :"Ir" (i), "m" (v->counter) : "memory");
- smp_mb();
- return result;
+#define ATOMIC64_OP(op) \
+static __inline__ void atomic64_##op(long i, atomic64_t * v) \
+{ \
+ unsigned long temp; \
+ __asm__ __volatile__( \
+ "1: ldq_l %0,%1\n" \
+ " " #op "q %0,%2,%0\n" \
+ " stq_c %0,%1\n" \
+ " beq %0,2f\n" \
+ ".subsection 2\n" \
+ "2: br 1b\n" \
+ ".previous" \
+ :"=&r" (temp), "=m" (v->counter) \
+ :"Ir" (i), "m" (v->counter)); \
+} \
+
+#define ATOMIC64_OP_RETURN(op) \
+static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
+{ \
+ long temp, result; \
+ smp_mb(); \
+ __asm__ __volatile__( \
+ "1: ldq_l %0,%1\n" \
+ " " #op "q %0,%3,%2\n" \
+ " " #op "q %0,%3,%0\n" \
+ " stq_c %0,%1\n" \
+ " beq %0,2f\n" \
+ ".subsection 2\n" \
+ "2: br 1b\n" \
+ ".previous" \
+ :"=&r" (temp), "=m" (v->counter), "=&r" (result) \
+ :"Ir" (i), "m" (v->counter) : "memory"); \
+ smp_mb(); \
+ return result; \
}
-static __inline__ long atomic64_add_return(long i, atomic64_t * v)
-{
- long temp, result;
- smp_mb();
- __asm__ __volatile__(
- "1: ldq_l %0,%1\n"
- " addq %0,%3,%2\n"
- " addq %0,%3,%0\n"
- " stq_c %0,%1\n"
- " beq %0,2f\n"
- ".subsection 2\n"
- "2: br 1b\n"
- ".previous"
- :"=&r" (temp), "=m" (v->counter), "=&r" (result)
- :"Ir" (i), "m" (v->counter) : "memory");
- smp_mb();
- return result;
-}
+#define ATOMIC_OPS(opg) \
+ ATOMIC_OP(opg) \
+ ATOMIC_OP_RETURN(opg) \
+ ATOMIC64_OP(opg) \
+ ATOMIC64_OP_RETURN(opg)
-static __inline__ long atomic_sub_return(int i, atomic_t * v)
-{
- long temp, result;
- smp_mb();
- __asm__ __volatile__(
- "1: ldl_l %0,%1\n"
- " subl %0,%3,%2\n"
- " subl %0,%3,%0\n"
- " stl_c %0,%1\n"
- " beq %0,2f\n"
- ".subsection 2\n"
- "2: br 1b\n"
- ".previous"
- :"=&r" (temp), "=m" (v->counter), "=&r" (result)
- :"Ir" (i), "m" (v->counter) : "memory");
- smp_mb();
- return result;
-}
+ATOMIC_OPS(add)
+ATOMIC_OPS(sub)
-static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
-{
- long temp, result;
- smp_mb();
- __asm__ __volatile__(
- "1: ldq_l %0,%1\n"
- " subq %0,%3,%2\n"
- " subq %0,%3,%0\n"
- " stq_c %0,%1\n"
- " beq %0,2f\n"
- ".subsection 2\n"
- "2: br 1b\n"
- ".previous"
- :"=&r" (temp), "=m" (v->counter), "=&r" (result)
- :"Ir" (i), "m" (v->counter) : "memory");
- smp_mb();
- return result;
-}
+#undef ATOMIC_OPS
+#undef ATOMIC64_OP_RETURN
+#undef ATOMIC64_OP
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
#define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
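For reference, expanding ATOMIC_OP(add) from the hunk above by hand shows what the stringizing buys: " " #op "l" pastes the operation name into the mnemonic, producing "addl" (and "subl"/"subq" for the sub variants), so the macro emits exactly the atomic_add() the patch removes:

	static __inline__ void atomic_add(int i, atomic_t * v)
	{
		unsigned long temp;
		__asm__ __volatile__(
		"1:	ldl_l %0,%1\n"		/* load-locked the counter */
		"	addl %0,%2,%0\n"	/* #op pasted in: "addl" */
		"	stl_c %0,%1\n"		/* store-conditional back */
		"	beq %0,2f\n"		/* lost the reservation? retry */
		".subsection 2\n"
		"2:	br 1b\n"
		".previous"
		:"=&r" (temp), "=m" (v->counter)
		:"Ir" (i), "m" (v->counter));
	}

One ATOMIC_OPS(add) invocation therefore stamps out atomic_add(), atomic_add_return(), atomic64_add(), and atomic64_add_return() in one shot, which is where the diffstat's 135 deletions for 82 insertions comes from.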