author		Mark Rutland <mark.rutland@arm.com>	2018-06-21 13:13:16 +0100
committer	Ingo Molnar <mingo@kernel.org>		2018-06-21 14:25:24 +0200
commit		2b523f170e399b0e1c8eec2c4b5889735b0d2b9b (patch)
tree		bcc61f815e7db246eab1c5283d463bb30a66afa2 /arch/riscv
parent		4f44b4b2b337e100ae655d03c42d140816c8caf9 (diff)
atomics/riscv: Define atomic64_fetch_add_unless()
As a step towards unifying the atomic/atomic64/atomic_long APIs, this patch
converts the arch/riscv implementation of atomic64_add_unless() into an
implementation of atomic64_fetch_add_unless().

A wrapper in <linux/atomic.h> will build atomic64_add_unless() atop of this,
provided it is given a preprocessor definition.

No functional change is intended as a result of this patch.

Acked-by: Palmer Dabbelt <palmer@sifive.com>
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Will Deacon <will.deacon@arm.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Albert Ou <albert@sifive.com>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Palmer Dabbelt <palmer@sifive.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/lkml/20180621121321.4761-14-mark.rutland@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
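For context, the generic wrapper the message refers to has roughly this shape; a minimal sketch, assuming the kernel's usual #ifdef gating on the arch-provided preprocessor definition (illustrative, not the verbatim <linux/atomic.h> source):

/*
 * Sketch of the <linux/atomic.h> wrapper built atop
 * atomic64_fetch_add_unless() (illustrative; the in-tree code may
 * differ in naming and types). The #define emitted by this patch is
 * what lets the generic header see that the arch provides its own
 * implementation.
 */
#ifdef atomic64_fetch_add_unless
static inline bool atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
	/* The add took place iff the old value was not 'u'. */
	return atomic64_fetch_add_unless(v, a, u) != u;
}
#endif

This is why the arch-specific atomic64_add_unless() removed below is no longer needed: its boolean result is derivable from the fetch variant's return value.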
Diffstat (limited to 'arch/riscv')
-rw-r--r--	arch/riscv/include/asm/atomic.h	8
1 file changed, 2 insertions(+), 6 deletions(-)
diff --git a/arch/riscv/include/asm/atomic.h b/arch/riscv/include/asm/atomic.h
index 5f161daefcd2..d959bbaaad41 100644
--- a/arch/riscv/include/asm/atomic.h
+++ b/arch/riscv/include/asm/atomic.h
@@ -352,7 +352,7 @@ static __always_inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
 #define atomic_fetch_add_unless atomic_fetch_add_unless
 
 #ifndef CONFIG_GENERIC_ATOMIC64
-static __always_inline long __atomic64_add_unless(atomic64_t *v, long a, long u)
+static __always_inline long atomic64_fetch_add_unless(atomic64_t *v, long a, long u)
 {
 	long prev, rc;
 
@@ -369,11 +369,7 @@ static __always_inline long __atomic64_add_unless(atomic64_t *v, long a, long u)
 		: "memory");
 	return prev;
 }
-
-static __always_inline int atomic64_add_unless(atomic64_t *v, long a, long u)
-{
-	return __atomic64_add_unless(v, a, u) != u;
-}
+#define atomic64_fetch_add_unless atomic64_fetch_add_unless
 #endif
 
 /*
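To make the new primitive's contract concrete, here is a hedged, userspace-flavoured sketch of the fetch_add_unless() semantics using GCC/Clang __atomic builtins; the function name and types are illustrative, and the real riscv code uses an LR/SC loop in inline assembly rather than a compare-and-swap builtin:

#include <stdbool.h>
#include <stdint.h>

/*
 * Sketch of fetch_add_unless() semantics: atomically add 'a' to *v
 * unless *v equals 'u', returning the value observed before the
 * operation. A CAS loop stands in for the kernel's LR/SC assembly.
 */
static int64_t fetch_add_unless_sketch(int64_t *v, int64_t a, int64_t u)
{
	int64_t old = __atomic_load_n(v, __ATOMIC_RELAXED);

	do {
		if (old == u)
			break;		/* hit the excluded value: no add */
	} while (!__atomic_compare_exchange_n(v, &old, old + a,
					      /* weak */ true,
					      __ATOMIC_SEQ_CST,
					      __ATOMIC_RELAXED));

	return old;	/* caller derives success as (old != u) */
}

The boolean atomic64_add_unless() then falls out as fetch_add_unless(v, a, u) != u, mirroring the generic wrapper sketched earlier.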