author	Mikulas Patocka <mpatocka@redhat.com>	2014-05-08 15:51:37 -0400
committer	James Hogan <james.hogan@imgtec.com>	2014-05-15 00:00:34 +0100
commit	2425ce84026c385b73ae72039f90d042d49e0394 (patch)
tree	c7f5c430603383d0875633f983d0c21030b8d715 /arch/metag
parent	d6d211db37e75de2ddc3a4f979038c40df7cc79c (diff)
metag: fix memory barriers
Volatile access doesn't really imply the compiler barrier. Volatile access is only ordered with respect to other volatile accesses, it isn't ordered with respect to general memory accesses. Gcc may reorder memory accesses around volatile access, as we can see in this simple example (if we compile it with optimization, both increments of *b will be collapsed to just one):

void fn(volatile int *a, long *b)
{
	(*b)++;
	*a = 10;
	(*b)++;
}

Consequently, we need the compiler barrier after a write to the volatile variable, to make sure that the compiler doesn't reorder the volatile write with something else.

Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Cc: stable@vger.kernel.org
Acked-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: James Hogan <james.hogan@imgtec.com>
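To make the failure mode described above concrete, here is a small standalone sketch (not part of the patch): barrier() below mirrors the kernel's compiler barrier, an empty asm statement with a "memory" clobber, and the function names fn_reordered/fn_fixed are purely illustrative.

/* Compiler barrier: an empty asm with a "memory" clobber. */
#define barrier() __asm__ __volatile__("" : : : "memory")

/*
 * Without a barrier, an optimizing compiler may merge both (*b)++
 * increments across the volatile store, because volatile only orders
 * volatile accesses against each other.
 */
void fn_reordered(volatile int *a, long *b)
{
	(*b)++;
	*a = 10;
	(*b)++;
}

/*
 * With a compiler barrier after the volatile write, the second increment
 * cannot be merged or moved across the store -- the same pattern the
 * patch below applies in wr_fence() and fence().
 */
void fn_fixed(volatile int *a, long *b)
{
	(*b)++;
	*a = 10;
	barrier();
	(*b)++;
}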
Diffstat (limited to 'arch/metag')
-rw-r--r--	arch/metag/include/asm/barrier.h	3
1 file changed, 3 insertions(+), 0 deletions(-)
diff --git a/arch/metag/include/asm/barrier.h b/arch/metag/include/asm/barrier.h
index 5d6b4b407dda..2d6f0de77325 100644
--- a/arch/metag/include/asm/barrier.h
+++ b/arch/metag/include/asm/barrier.h
@@ -15,6 +15,7 @@ static inline void wr_fence(void)
 	volatile int *flushptr = (volatile int *) LINSYSEVENT_WR_FENCE;
 	barrier();
 	*flushptr = 0;
+	barrier();
 }
 
 #else /* CONFIG_METAG_META21 */
@@ -35,6 +36,7 @@ static inline void wr_fence(void)
 	*flushptr = 0;
 	*flushptr = 0;
 	*flushptr = 0;
+	barrier();
 }
 
 #endif /* !CONFIG_METAG_META21 */
@@ -68,6 +70,7 @@ static inline void fence(void)
 	volatile int *flushptr = (volatile int *) LINSYSEVENT_WR_ATOMIC_UNLOCK;
 	barrier();
 	*flushptr = 0;
+	barrier();
 }
 #define smp_mb()	fence()
 #define smp_rmb()	fence()