author | Peter Zijlstra <peterz@infradead.org> | 2014-03-26 18:11:31 +0100
committer | Ingo Molnar <mingo@kernel.org> | 2014-08-14 12:48:11 +0200
commit | af095dd60bdc52b11c186c3151e8e38d6faa094c (patch)
tree | 011abfbf513be8217c8559c53becab9f7181914b /arch/powerpc/include/asm/atomic.h
parent | 15e3f6d782fc6ff7e004b40642ad895b91ae78bf (diff)
download | linux-af095dd60bdc52b11c186c3151e8e38d6faa094c.tar.gz linux-af095dd60bdc52b11c186c3151e8e38d6faa094c.tar.bz2 linux-af095dd60bdc52b11c186c3151e8e38d6faa094c.zip
locking,arch,powerpc: Fold atomic_ops
Many of the atomic op implementations are the same except for one
instruction; fold the lot into a few CPP macros and reduce LoC.
Requires asm_op because PPC asm is weird :-)
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
Cc: Paul Gortmaker <paul.gortmaker@windriver.com>
Cc: linuxppc-dev@lists.ozlabs.org
Link: http://lkml.kernel.org/r/20140508135852.713980957@infradead.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
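
As context for the fold the commit message describes, here is a minimal user-space sketch of the pattern: one CPP macro stamps out both the void op and the value-returning variant, parameterized on the operation. The `demo_*` names and the plain C body are invented for illustration; the kernel version emits lwarx/stwcx. inline asm instead, and needs the whole `asm_op` mnemonic (not a C operator) because PPC's `subf` has "subtract from" operand order.

```c
#include <stdio.h>

typedef struct { int counter; } demo_atomic_t;	/* stand-in for atomic_t */

#define DEMO_OP(op, c_op)						\
static inline void demo_##op(int a, demo_atomic_t *v)			\
{									\
	v->counter = v->counter c_op a;	/* stands in for the asm */	\
}

#define DEMO_OP_RETURN(op, c_op)					\
static inline int demo_##op##_return(int a, demo_atomic_t *v)		\
{									\
	return v->counter = v->counter c_op a;				\
}

/* One OPS-style wrapper instantiates both variants, as in the diff. */
#define DEMO_OPS(op, c_op) DEMO_OP(op, c_op) DEMO_OP_RETURN(op, c_op)

DEMO_OPS(add, +)
DEMO_OPS(sub, -)

#undef DEMO_OPS
#undef DEMO_OP_RETURN
#undef DEMO_OP

int main(void)
{
	demo_atomic_t v = { .counter = 5 };

	demo_add(3, &v);			/* 5 + 3 = 8 */
	printf("%d\n", demo_sub_return(2, &v));	/* prints 6 */
	return 0;
}
```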
Diffstat (limited to 'arch/powerpc/include/asm/atomic.h')
-rw-r--r-- | arch/powerpc/include/asm/atomic.h | 198
1 file changed, 77 insertions, 121 deletions
```diff
diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
index 28992d012926..512d2782b043 100644
--- a/arch/powerpc/include/asm/atomic.h
+++ b/arch/powerpc/include/asm/atomic.h
@@ -26,76 +26,53 @@ static __inline__ void atomic_set(atomic_t *v, int i)
 	__asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
 }
 
-static __inline__ void atomic_add(int a, atomic_t *v)
-{
-	int t;
-
-	__asm__ __volatile__(
-"1:	lwarx	%0,0,%3		# atomic_add\n\
-	add	%0,%2,%0\n"
-	PPC405_ERR77(0,%3)
-"	stwcx.	%0,0,%3 \n\
-	bne-	1b"
-	: "=&r" (t), "+m" (v->counter)
-	: "r" (a), "r" (&v->counter)
-	: "cc");
+#define ATOMIC_OP(op, asm_op)						\
+static __inline__ void atomic_##op(int a, atomic_t *v)			\
+{									\
+	int t;								\
+									\
+	__asm__ __volatile__(						\
+"1:	lwarx	%0,0,%3		# atomic_" #op "\n"			\
+	#asm_op " %0,%2,%0\n"						\
+	PPC405_ERR77(0,%3)						\
+"	stwcx.	%0,0,%3 \n"						\
+"	bne-	1b\n"							\
+	: "=&r" (t), "+m" (v->counter)					\
+	: "r" (a), "r" (&v->counter)					\
+	: "cc");							\
+}									\
+
+#define ATOMIC_OP_RETURN(op, asm_op)					\
+static __inline__ int atomic_##op##_return(int a, atomic_t *v)		\
+{									\
+	int t;								\
+									\
+	__asm__ __volatile__(						\
+	PPC_ATOMIC_ENTRY_BARRIER					\
+"1:	lwarx	%0,0,%2		# atomic_" #op "_return\n"		\
+	#asm_op " %0,%1,%0\n"						\
+	PPC405_ERR77(0,%2)						\
+"	stwcx.	%0,0,%2 \n"						\
+"	bne-	1b\n"							\
+	PPC_ATOMIC_EXIT_BARRIER						\
+	: "=&r" (t)							\
+	: "r" (a), "r" (&v->counter)					\
+	: "cc", "memory");						\
+									\
+	return t;							\
 }
 
-static __inline__ int atomic_add_return(int a, atomic_t *v)
-{
-	int t;
+#define ATOMIC_OPS(op, asm_op) ATOMIC_OP(op, asm_op) ATOMIC_OP_RETURN(op, asm_op)
 
-	__asm__ __volatile__(
-	PPC_ATOMIC_ENTRY_BARRIER
-"1:	lwarx	%0,0,%2		# atomic_add_return\n\
-	add	%0,%1,%0\n"
-	PPC405_ERR77(0,%2)
-"	stwcx.	%0,0,%2 \n\
-	bne-	1b"
-	PPC_ATOMIC_EXIT_BARRIER
-	: "=&r" (t)
-	: "r" (a), "r" (&v->counter)
-	: "cc", "memory");
+ATOMIC_OPS(add, add)
+ATOMIC_OPS(sub, subf)
 
-	return t;
-}
+#undef ATOMIC_OPS
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
 
 #define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)
 
-static __inline__ void atomic_sub(int a, atomic_t *v)
-{
-	int t;
-
-	__asm__ __volatile__(
-"1:	lwarx	%0,0,%3		# atomic_sub\n\
-	subf	%0,%2,%0\n"
-	PPC405_ERR77(0,%3)
-"	stwcx.	%0,0,%3 \n\
-	bne-	1b"
-	: "=&r" (t), "+m" (v->counter)
-	: "r" (a), "r" (&v->counter)
-	: "cc");
-}
-
-static __inline__ int atomic_sub_return(int a, atomic_t *v)
-{
-	int t;
-
-	__asm__ __volatile__(
-	PPC_ATOMIC_ENTRY_BARRIER
-"1:	lwarx	%0,0,%2		# atomic_sub_return\n\
-	subf	%0,%1,%0\n"
-	PPC405_ERR77(0,%2)
-"	stwcx.	%0,0,%2 \n\
-	bne-	1b"
-	PPC_ATOMIC_EXIT_BARRIER
-	: "=&r" (t)
-	: "r" (a), "r" (&v->counter)
-	: "cc", "memory");
-
-	return t;
-}
-
 static __inline__ void atomic_inc(atomic_t *v)
 {
 	int t;
@@ -289,71 +266,50 @@ static __inline__ void atomic64_set(atomic64_t *v, long i)
 	__asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
 }
 
-static __inline__ void atomic64_add(long a, atomic64_t *v)
-{
-	long t;
-
-	__asm__ __volatile__(
-"1:	ldarx	%0,0,%3		# atomic64_add\n\
-	add	%0,%2,%0\n\
-	stdcx.	%0,0,%3 \n\
-	bne-	1b"
-	: "=&r" (t), "+m" (v->counter)
-	: "r" (a), "r" (&v->counter)
-	: "cc");
+#define ATOMIC64_OP(op, asm_op)						\
+static __inline__ void atomic64_##op(long a, atomic64_t *v)		\
+{									\
+	long t;								\
+									\
+	__asm__ __volatile__(						\
+"1:	ldarx	%0,0,%3		# atomic64_" #op "\n"			\
+	#asm_op " %0,%2,%0\n"						\
+"	stdcx.	%0,0,%3 \n"						\
+"	bne-	1b\n"							\
+	: "=&r" (t), "+m" (v->counter)					\
+	: "r" (a), "r" (&v->counter)					\
+	: "cc");							\
 }
 
-static __inline__ long atomic64_add_return(long a, atomic64_t *v)
-{
-	long t;
-
-	__asm__ __volatile__(
-	PPC_ATOMIC_ENTRY_BARRIER
-"1:	ldarx	%0,0,%2		# atomic64_add_return\n\
-	add	%0,%1,%0\n\
-	stdcx.	%0,0,%2 \n\
-	bne-	1b"
-	PPC_ATOMIC_EXIT_BARRIER
-	: "=&r" (t)
-	: "r" (a), "r" (&v->counter)
-	: "cc", "memory");
-
-	return t;
+#define ATOMIC64_OP_RETURN(op, asm_op)					\
+static __inline__ long atomic64_##op##_return(long a, atomic64_t *v)	\
+{									\
+	long t;								\
+									\
+	__asm__ __volatile__(						\
+	PPC_ATOMIC_ENTRY_BARRIER					\
+"1:	ldarx	%0,0,%2		# atomic64_" #op "_return\n"		\
+	#asm_op " %0,%1,%0\n"						\
+"	stdcx.	%0,0,%2 \n"						\
+"	bne-	1b\n"							\
+	PPC_ATOMIC_EXIT_BARRIER					\
+	: "=&r" (t)							\
+	: "r" (a), "r" (&v->counter)					\
+	: "cc", "memory");						\
+									\
+	return t;							\
 }
 
-#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
-
-static __inline__ void atomic64_sub(long a, atomic64_t *v)
-{
-	long t;
-
-	__asm__ __volatile__(
-"1:	ldarx	%0,0,%3		# atomic64_sub\n\
-	subf	%0,%2,%0\n\
-	stdcx.	%0,0,%3 \n\
-	bne-	1b"
-	: "=&r" (t), "+m" (v->counter)
-	: "r" (a), "r" (&v->counter)
-	: "cc");
-}
+#define ATOMIC64_OPS(op, asm_op) ATOMIC64_OP(op, asm_op) ATOMIC64_OP_RETURN(op, asm_op)
 
-static __inline__ long atomic64_sub_return(long a, atomic64_t *v)
-{
-	long t;
+ATOMIC64_OPS(add, add)
+ATOMIC64_OPS(sub, subf)
 
-	__asm__ __volatile__(
-	PPC_ATOMIC_ENTRY_BARRIER
-"1:	ldarx	%0,0,%2		# atomic64_sub_return\n\
-	subf	%0,%1,%0\n\
-	stdcx.	%0,0,%2 \n\
-	bne-	1b"
-	PPC_ATOMIC_EXIT_BARRIER
-	: "=&r" (t)
-	: "r" (a), "r" (&v->counter)
-	: "cc", "memory");
+#undef ATOMIC64_OPS
+#undef ATOMIC64_OP_RETURN
+#undef ATOMIC64_OP
 
-	return t;
-}
+#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
 
 static __inline__ void atomic64_inc(atomic64_t *v)
 {
```
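
For readers unfamiliar with the `#asm_op " %0,%2,%0\n"` lines in the macros above, the sketch below shows the two preprocessor mechanics the diff relies on: `##` pastes tokens to build the function name, and `#` stringizes `asm_op` so that adjacent string literals merge into a single asm-style template. This is a hedged, user-space illustration with invented names, not kernel API; it just prints the template string the real macro would hand to `__asm__`.

```c
#include <stdio.h>

#define SHOW_TEMPLATE(op, asm_op)					\
static void show_##op##_template(void)					\
{									\
	/* #op and #asm_op become string literals at expansion time; */	\
	/* adjacent literals merge into one template, exactly like   */	\
	/* "# atomic_" #op "\n" in the kernel macro.                 */	\
	const char *tmpl =						\
	"1:	lwarx	%0,0,%3		# atomic_" #op "\n"		\
	#asm_op "	%0,%2,%0\n"					\
	"	stwcx.	%0,0,%3\n"					\
	"	bne-	1b\n";						\
	fputs(tmpl, stdout);						\
}

SHOW_TEMPLATE(add, add)
SHOW_TEMPLATE(sub, subf)	/* subf %0,%2,%0 computes t = t - a */

int main(void)
{
	show_add_template();
	show_sub_template();
	return 0;
}
```

The `subf` line is the reason the macros take a whole mnemonic rather than an operator: PPC's "subtract from" reverses the operand roles, so only full stringization lets `add` and `subf` share one template.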