author     Christian Borntraeger <borntraeger@de.ibm.com>  2014-11-24 10:53:11 +0100
committer  Christian Borntraeger <borntraeger@de.ibm.com>  2014-12-18 09:54:40 +0100
commit     af2e7aaed1ccf30e61af3e096ac2c7df2f2d6c2a (patch)
tree       6e9e15b79431ca3f4be281e98491b86205f1fa7f /arch
parent     4218091cb45f601b889cd032e39fe6878a426e70 (diff)
arm64/spinlock: Replace ACCESS_ONCE with READ_ONCE
ACCESS_ONCE does not work reliably on non-scalar types. For example, gcc 4.6 and 4.7 might remove the volatile tag for such accesses during the SRA (scalar replacement of aggregates) step (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145).

Change the spinlock code to replace ACCESS_ONCE with READ_ONCE.

Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
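For illustration, a minimal user-space sketch of why the aggregate access matters. The *_SKETCH names and the two-field struct are hypothetical stand-ins, not the kernel's definitions: ACCESS_ONCE casts the lvalue to volatile, which gcc's SRA pass may discard for an aggregate, whereas READ_ONCE copies the object out through a size-aware helper; the sketch below models that helper with a plain memcpy.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* hypothetical stand-in for a two-field lock such as arch_spinlock_t */
typedef struct {
	uint16_t owner;
	uint16_t next;
} lock_sketch_t;

/* old style: volatile-qualify the whole aggregate access */
#define ACCESS_ONCE_SKETCH(x) (*(volatile __typeof__(x) *)&(x))

/* new style: read the object through a helper, as READ_ONCE conceptually does */
static inline void read_once_size_sketch(const volatile void *p, void *res, size_t size)
{
	memcpy(res, (const void *)p, size);
}

#define READ_ONCE_SKETCH(x)					\
	({ __typeof__(x) __val;					\
	   read_once_size_sketch(&(x), &__val, sizeof(__val));	\
	   __val; })

static lock_sketch_t lock;

int main(void)
{
	lock_sketch_t a = ACCESS_ONCE_SKETCH(lock);	/* volatile may be lost under SRA */
	lock_sketch_t b = READ_ONCE_SKETCH(lock);	/* copied as data, survives SRA */

	printf("a=%u/%u b=%u/%u\n", a.owner, a.next, b.owner, b.next);
	return 0;
}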
Diffstat (limited to 'arch')
-rw-r--r--  arch/arm64/include/asm/spinlock.h  4
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h
index c45b7b1b7197..cee128732435 100644
--- a/arch/arm64/include/asm/spinlock.h
+++ b/arch/arm64/include/asm/spinlock.h
@@ -99,12 +99,12 @@ static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
 
 static inline int arch_spin_is_locked(arch_spinlock_t *lock)
 {
-	return !arch_spin_value_unlocked(ACCESS_ONCE(*lock));
+	return !arch_spin_value_unlocked(READ_ONCE(*lock));
 }
 
 static inline int arch_spin_is_contended(arch_spinlock_t *lock)
 {
-	arch_spinlock_t lockval = ACCESS_ONCE(*lock);
+	arch_spinlock_t lockval = READ_ONCE(*lock);
 	return (lockval.next - lockval.owner) > 1;
 }
 #define arch_spin_is_contended	arch_spin_is_contended
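For reference on the arch_spin_is_contended() test above: in a ticket lock, next is the next ticket to hand out and owner is the ticket currently being served, so next - owner counts the CPUs holding or waiting for the lock, and a value greater than 1 means at least one waiter is queued behind the holder. A small sketch of that arithmetic, using a hypothetical standalone struct rather than the kernel's arch_spinlock_t and ignoring ticket wraparound:

#include <assert.h>
#include <stdint.h>

/* hypothetical stand-in for the ticket-lock fields */
struct ticket_sketch {
	uint16_t owner;	/* ticket currently being served */
	uint16_t next;	/* next ticket to hand out */
};

/* contended: someone besides the current holder has taken a ticket */
static int is_contended_sketch(struct ticket_sketch l)
{
	return (l.next - l.owner) > 1;
}

int main(void)
{
	assert(!is_contended_sketch((struct ticket_sketch){ .owner = 3, .next = 3 }));	/* unlocked */
	assert(!is_contended_sketch((struct ticket_sketch){ .owner = 3, .next = 4 }));	/* held, no waiters */
	assert(is_contended_sketch((struct ticket_sketch){ .owner = 3, .next = 6 }));	/* held, waiters queued */
	return 0;
}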