author		Mark Rutland <mark.rutland@arm.com>	2021-05-25 15:02:09 +0100
committer	Peter Zijlstra <peterz@infradead.org>	2021-05-26 13:20:50 +0200
commit		1bdadf46eff6804ace5fa46b6856da4799f12b5c (patch)
tree		35d5e6a754150ea6acbc8c439c51fb1c8ad3355c /lib
parent		f8b6455a9d381fc513efbec0be0c312b96e6eb6b (diff)
locking/atomic: atomic64: support ARCH_ATOMIC
We'd like all architectures to convert to ARCH_ATOMIC, as this will enable new generic functionality, and once all architectures are converted it will be possible to make significant cleanups to the atomic headers.

A number of architectures use asm-generic/atomic64.h, and it's impractical to convert the header and all of these architectures in one go. To make it possible to convert them one-by-one, let's make the asm-generic implementation function as either atomic64_*() or arch_atomic64_*(), depending on whether ARCH_ATOMIC is selected. To do this, the generic implementations are prefixed as generic_atomic64_*(), and preprocessor definitions map atomic64_*()/arch_atomic64_*() onto these as appropriate.

Once all users are moved over to ARCH_ATOMIC, the ifdeffery in the header can be simplified and/or removed entirely.

For existing users (none of which select ARCH_ATOMIC), there should be no functional change as a result of this patch.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20210525140232.53872-11-mark.rutland@arm.com
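The mapping the message describes lives in include/asm-generic/atomic64.h (not shown in this diff, which covers only lib/). As a minimal sketch of the approach, assuming the Kconfig symbol is tested as CONFIG_ARCH_ATOMIC and showing only read/set, not the verbatim header:

	/* Illustrative sketch only; the real header covers the full
	 * atomic64 API, not just read/set. */
	extern s64 generic_atomic64_read(const atomic64_t *v);
	extern void generic_atomic64_set(atomic64_t *v, s64 i);

	#ifdef CONFIG_ARCH_ATOMIC
	/* Converted architectures see the arch_atomic64_*() names. */
	#define arch_atomic64_read	generic_atomic64_read
	#define arch_atomic64_set	generic_atomic64_set
	#else
	/* Unconverted architectures keep the plain atomic64_*() names,
	 * so the rename below is invisible to them. */
	#define atomic64_read		generic_atomic64_read
	#define atomic64_set		generic_atomic64_set
	#endif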
Diffstat (limited to 'lib')
-rw-r--r--	lib/atomic64.c | 36 ++++++++++++++++++------------------
1 file changed, 18 insertions(+), 18 deletions(-)
diff --git a/lib/atomic64.c b/lib/atomic64.c
index e98c85a99787..3df653994177 100644
--- a/lib/atomic64.c
+++ b/lib/atomic64.c
@@ -42,7 +42,7 @@ static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
 	return &atomic64_lock[addr & (NR_LOCKS - 1)].lock;
 }
 
-s64 atomic64_read(const atomic64_t *v)
+s64 generic_atomic64_read(const atomic64_t *v)
 {
 	unsigned long flags;
 	raw_spinlock_t *lock = lock_addr(v);
@@ -53,9 +53,9 @@ s64 atomic64_read(const atomic64_t *v)
 	raw_spin_unlock_irqrestore(lock, flags);
 	return val;
 }
-EXPORT_SYMBOL(atomic64_read);
+EXPORT_SYMBOL(generic_atomic64_read);
 
-void atomic64_set(atomic64_t *v, s64 i)
+void generic_atomic64_set(atomic64_t *v, s64 i)
 {
 	unsigned long flags;
 	raw_spinlock_t *lock = lock_addr(v);
@@ -64,10 +64,10 @@ void atomic64_set(atomic64_t *v, s64 i)
 	v->counter = i;
 	raw_spin_unlock_irqrestore(lock, flags);
 }
-EXPORT_SYMBOL(atomic64_set);
+EXPORT_SYMBOL(generic_atomic64_set);
 
 #define ATOMIC64_OP(op, c_op)						\
-void atomic64_##op(s64 a, atomic64_t *v)				\
+void generic_atomic64_##op(s64 a, atomic64_t *v)			\
 {									\
 	unsigned long flags;						\
 	raw_spinlock_t *lock = lock_addr(v);				\
@@ -76,10 +76,10 @@ void atomic64_##op(s64 a, atomic64_t *v)			\
 	v->counter c_op a;						\
 	raw_spin_unlock_irqrestore(lock, flags);			\
 }									\
-EXPORT_SYMBOL(atomic64_##op);
+EXPORT_SYMBOL(generic_atomic64_##op);
 
 #define ATOMIC64_OP_RETURN(op, c_op)					\
-s64 atomic64_##op##_return(s64 a, atomic64_t *v)			\
+s64 generic_atomic64_##op##_return(s64 a, atomic64_t *v)		\
 {									\
 	unsigned long flags;						\
 	raw_spinlock_t *lock = lock_addr(v);				\
@@ -90,10 +90,10 @@ s64 atomic64_##op##_return(s64 a, atomic64_t *v)		\
 	raw_spin_unlock_irqrestore(lock, flags);			\
 	return val;							\
 }									\
-EXPORT_SYMBOL(atomic64_##op##_return);
+EXPORT_SYMBOL(generic_atomic64_##op##_return);
 
 #define ATOMIC64_FETCH_OP(op, c_op)					\
-s64 atomic64_fetch_##op(s64 a, atomic64_t *v)				\
+s64 generic_atomic64_fetch_##op(s64 a, atomic64_t *v)			\
 {									\
 	unsigned long flags;						\
 	raw_spinlock_t *lock = lock_addr(v);				\
@@ -105,7 +105,7 @@ s64 atomic64_fetch_##op(s64 a, atomic64_t *v)			\
 	raw_spin_unlock_irqrestore(lock, flags);			\
 	return val;							\
 }									\
-EXPORT_SYMBOL(atomic64_fetch_##op);
+EXPORT_SYMBOL(generic_atomic64_fetch_##op);
 
 #define ATOMIC64_OPS(op, c_op)						\
 	ATOMIC64_OP(op, c_op)						\
@@ -130,7 +130,7 @@ ATOMIC64_OPS(xor, ^=)
 #undef ATOMIC64_OP_RETURN
 #undef ATOMIC64_OP
 
-s64 atomic64_dec_if_positive(atomic64_t *v)
+s64 generic_atomic64_dec_if_positive(atomic64_t *v)
 {
 	unsigned long flags;
 	raw_spinlock_t *lock = lock_addr(v);
@@ -143,9 +143,9 @@ s64 atomic64_dec_if_positive(atomic64_t *v)
 	raw_spin_unlock_irqrestore(lock, flags);
 	return val;
 }
-EXPORT_SYMBOL(atomic64_dec_if_positive);
+EXPORT_SYMBOL(generic_atomic64_dec_if_positive);
 
-s64 atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n)
+s64 generic_atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n)
 {
 	unsigned long flags;
 	raw_spinlock_t *lock = lock_addr(v);
@@ -158,9 +158,9 @@ s64 atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n)
 	raw_spin_unlock_irqrestore(lock, flags);
 	return val;
 }
-EXPORT_SYMBOL(atomic64_cmpxchg);
+EXPORT_SYMBOL(generic_atomic64_cmpxchg);
 
-s64 atomic64_xchg(atomic64_t *v, s64 new)
+s64 generic_atomic64_xchg(atomic64_t *v, s64 new)
 {
 	unsigned long flags;
 	raw_spinlock_t *lock = lock_addr(v);
@@ -172,9 +172,9 @@ s64 atomic64_xchg(atomic64_t *v, s64 new)
 	raw_spin_unlock_irqrestore(lock, flags);
 	return val;
 }
-EXPORT_SYMBOL(atomic64_xchg);
+EXPORT_SYMBOL(generic_atomic64_xchg);
 
-s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
+s64 generic_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
 {
 	unsigned long flags;
 	raw_spinlock_t *lock = lock_addr(v);
@@ -188,4 +188,4 @@ s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
 
 	return val;
 }
-EXPORT_SYMBOL(atomic64_fetch_add_unless);
+EXPORT_SYMBOL(generic_atomic64_fetch_add_unless);
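From a caller's point of view, the rename above is invisible on unconverted architectures, since the header maps the old names onto the renamed symbols. A hypothetical illustration (example_counter and example_bump are invented names), assuming an architecture that uses asm-generic/atomic64.h and does not select ARCH_ATOMIC:

	#include <linux/atomic.h>

	static atomic64_t example_counter = ATOMIC64_INIT(0);

	/* atomic64_add() still compiles unchanged; via the header mapping
	 * it resolves to the spinlock-based generic_atomic64_add()
	 * exported above. */
	static void example_bump(void)
	{
		atomic64_add(1, &example_counter);
	}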