author		Nick Piggin <nickpiggin@yahoo.com.au>	2006-01-08 01:02:19 -0800
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-01-08 20:13:48 -0800
commit		a57004e1afb6ee03c509f1b1ec74a000682ab93b
tree		846112d14bc2ea20ebaf069c83909485fc91655c
parent		8382bf2e72d16d0532e351299121ccd3bca0fdd8
[PATCH] atomic: dec_and_lock use atomic primitives
Convert atomic_dec_and_lock to use new atomic primitives.
Signed-off-by: Nick Piggin <npiggin@suse.de>
Cc: "Paul E. McKenney" <paulmck@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
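The fast path this patch adds relies on atomic_add_unless(v, a, u): atomically add a to *v unless *v equals u, and return non-zero when the add was performed. Below is a minimal sketch of that semantic, assuming a generic cmpxchg-based approach; the helper name atomic_add_unless_sketch is illustrative only, not the kernel's per-architecture code.

#include <asm/atomic.h>

/*
 * Illustrative sketch only: add 'a' to *v unless *v == 'u', and return
 * non-zero if the add was performed.  Real kernels provide this helper
 * per architecture; this mirrors a generic cmpxchg-based loop.
 */
static inline int atomic_add_unless_sketch(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c)
		c = old;
	return c != u;
}

Called as atomic_add_unless(atomic, -1, 1), this decrements the counter only when it is not already 1, so the common case never takes the spinlock; a counter at 1 falls through to the locked slow path in the diff below.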
-rw-r--r--	lib/dec_and_lock.c	49
1 file changed, 6 insertions(+), 43 deletions(-)
diff --git a/lib/dec_and_lock.c b/lib/dec_and_lock.c
index 305a9663aee3..a65c31455541 100644
--- a/lib/dec_and_lock.c
+++ b/lib/dec_and_lock.c
@@ -1,47 +1,11 @@
 #include <linux/module.h>
 #include <linux/spinlock.h>
 #include <asm/atomic.h>
-#include <asm/system.h>
 
-#ifdef __HAVE_ARCH_CMPXCHG
 /*
  * This is an implementation of the notion of "decrement a
  * reference count, and return locked if it decremented to zero".
  *
- * This implementation can be used on any architecture that
- * has a cmpxchg, and where atomic->value is an int holding
- * the value of the atomic (i.e. the high bits aren't used
- * for a lock or anything like that).
- */
-int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
-{
-	int counter;
-	int newcount;
-
-	for (;;) {
-		counter = atomic_read(atomic);
-		newcount = counter - 1;
-		if (!newcount)
-			break;		/* do it the slow way */
-
-		newcount = cmpxchg(&atomic->counter, counter, newcount);
-		if (newcount == counter)
-			return 0;
-	}
-
-	spin_lock(lock);
-	if (atomic_dec_and_test(atomic))
-		return 1;
-	spin_unlock(lock);
-	return 0;
-}
-#else
-/*
- * This is an architecture-neutral, but slow,
- * implementation of the notion of "decrement
- * a reference count, and return locked if it
- * decremented to zero".
- *
  * NOTE NOTE NOTE! This is _not_ equivalent to
  *
  *	if (atomic_dec_and_test(&atomic)) {
@@ -52,21 +16,20 @@ int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
  *
  * because the spin-lock and the decrement must be
  * "atomic".
- *
- * This slow version gets the spinlock unconditionally,
- * and releases it if it isn't needed. Architectures
- * are encouraged to come up with better approaches,
- * this is trivially done efficiently using a load-locked
- * store-conditional approach, for example.
  */
 int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
 {
+#ifdef CONFIG_SMP
+	/* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
+	if (atomic_add_unless(atomic, -1, 1))
+		return 0;
+#endif
+	/* Otherwise do it the slow way */
 	spin_lock(lock);
 	if (atomic_dec_and_test(atomic))
 		return 1;
 	spin_unlock(lock);
 	return 0;
 }
-#endif
 
 EXPORT_SYMBOL(_atomic_dec_and_lock);
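For reference, the caller pattern _atomic_dec_and_lock() serves (via the atomic_dec_and_lock() wrapper) is the usual "drop a reference, and on the last put unlink and free under the lock" idiom. A hypothetical sketch follows; struct foo, foo_list_lock and foo_put() are made-up names for illustration, not code from this patch.

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <asm/atomic.h>

/* Hypothetical object and lock, for illustration only. */
struct foo {
	atomic_t		refcount;
	struct list_head	list;
};

static DEFINE_SPINLOCK(foo_list_lock);

static void foo_put(struct foo *f)
{
	/* On the final put this returns true with foo_list_lock held. */
	if (atomic_dec_and_lock(&f->refcount, &foo_list_lock)) {
		list_del(&f->list);
		spin_unlock(&foo_list_lock);
		kfree(f);
	}
}

With this patch applied, a non-final foo_put() on an SMP kernel completes on the atomic_add_unless() fast path without touching foo_list_lock at all.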