author	Ivan Kokshaysky <ink@jurassic.park.msu.ru>	2005-10-21 22:06:15 +0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2005-10-22 19:38:33 -0700
commit	d475f3f47a0427dfee483cecf9a7e9109e991423
tree	bbaa882b849acf13e624e3e1d8f4d31280a4b74b
parent	4595f251058609d97a5d792de08c34a7956af816
[PATCH] alpha: additional smp barriers
As stated in Documentation/atomic_ops.txt, atomic functions that return
values must have memory barriers both before and after the operation.
Thanks to DaveM for pointing that out.

Signed-off-by: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
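For context, the pattern this patch enforces can be sketched in portable
C11 (a minimal userspace analogue, not the kernel implementation;
atomic_add_return_sketch is a hypothetical name, and atomic_thread_fence
stands in for the kernel's smp_mb()):

	#include <stdatomic.h>

	/* A value-returning atomic op must be fully ordered against
	 * surrounding memory accesses: one full barrier before the
	 * operation and one after, which is what the patch below adds
	 * around the Alpha load-locked/store-conditional loops. */
	static long atomic_add_return_sketch(long i, _Atomic long *v)
	{
		atomic_thread_fence(memory_order_seq_cst);	/* barrier before */
		/* fetch-add returns the old value, so add i for the new one */
		long result = atomic_fetch_add_explicit(v, i,
					memory_order_relaxed) + i;
		atomic_thread_fence(memory_order_seq_cst);	/* barrier after */
		return result;
	}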
Diffstat (limited to 'include/asm-alpha')
 include/asm-alpha/atomic.h | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)
diff --git a/include/asm-alpha/atomic.h b/include/asm-alpha/atomic.h
index 1b383e3cb68c..0b40bad00289 100644
--- a/include/asm-alpha/atomic.h
+++ b/include/asm-alpha/atomic.h
@@ -100,18 +100,19 @@ static __inline__ void atomic64_sub(long i, atomic64_t * v)
 static __inline__ long atomic_add_return(int i, atomic_t * v)
 {
 	long temp, result;
+	smp_mb();
 	__asm__ __volatile__(
 	"1:	ldl_l %0,%1\n"
 	"	addl %0,%3,%2\n"
 	"	addl %0,%3,%0\n"
 	"	stl_c %0,%1\n"
 	"	beq %0,2f\n"
-	"	mb\n"
 	".subsection 2\n"
 	"2:	br 1b\n"
 	".previous"
 	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
 	:"Ir" (i), "m" (v->counter) : "memory");
+	smp_mb();
 	return result;
 }
 
@@ -120,54 +121,57 @@ static __inline__ long atomic_add_return(int i, atomic_t * v)
 static __inline__ long atomic64_add_return(long i, atomic64_t * v)
 {
 	long temp, result;
+	smp_mb();
 	__asm__ __volatile__(
 	"1:	ldq_l %0,%1\n"
 	"	addq %0,%3,%2\n"
 	"	addq %0,%3,%0\n"
 	"	stq_c %0,%1\n"
 	"	beq %0,2f\n"
-	"	mb\n"
 	".subsection 2\n"
 	"2:	br 1b\n"
 	".previous"
 	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
 	:"Ir" (i), "m" (v->counter) : "memory");
+	smp_mb();
 	return result;
 }
 
 static __inline__ long atomic_sub_return(int i, atomic_t * v)
 {
 	long temp, result;
+	smp_mb();
 	__asm__ __volatile__(
 	"1:	ldl_l %0,%1\n"
 	"	subl %0,%3,%2\n"
 	"	subl %0,%3,%0\n"
 	"	stl_c %0,%1\n"
 	"	beq %0,2f\n"
-	"	mb\n"
 	".subsection 2\n"
 	"2:	br 1b\n"
 	".previous"
 	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
 	:"Ir" (i), "m" (v->counter) : "memory");
+	smp_mb();
 	return result;
 }
 
 static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
 {
 	long temp, result;
+	smp_mb();
 	__asm__ __volatile__(
 	"1:	ldq_l %0,%1\n"
 	"	subq %0,%3,%2\n"
 	"	subq %0,%3,%0\n"
 	"	stq_c %0,%1\n"
 	"	beq %0,2f\n"
-	"	mb\n"
 	".subsection 2\n"
 	"2:	br 1b\n"
 	".previous"
 	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
 	:"Ir" (i), "m" (v->counter) : "memory");
+	smp_mb();
 	return result;
 }
 