| author | Linus Torvalds <torvalds@g5.osdl.org> | 2006-03-21 15:58:17 -0800 |
|---|---|---|
| committer | Linus Torvalds <torvalds@g5.osdl.org> | 2006-03-21 15:58:17 -0800 |
| commit | e952f31bce6e9f64db01f607abc46529ba57ac9e (patch) | |
| tree | b746bcd315d4f86c9ed7617939f29339fc692852 /include/asm-ia64/mutex.h | |
| parent | e0f4ab8a1741193891f096aa63df9ac8672af54c (diff) | |
| parent | 133a58c1fd97022690d53dd58df56579193cbc1d (diff) | |
| download | linux-e952f31bce6e9f64db01f607abc46529ba57ac9e.tar.gz linux-e952f31bce6e9f64db01f607abc46529ba57ac9e.tar.bz2 linux-e952f31bce6e9f64db01f607abc46529ba57ac9e.zip | |
Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6
* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6:
[IA64-SGI] SN2-XP reduce kmalloc wrapper inlining
[IA64] MCA: remove obsolete ifdef
[IA64] MCA: update MCA comm field for user space tasks
[IA64] MCA: print messages in MCA handler
[IA64-SGI] - Eliminate SN pio_phys_xxx macros. Move to assembly
[IA64] use icc defined constant
[IA64] add __builtin_trap definition for icc build
[IA64] clean up asm/intel_intrin.h
[IA64] map ia64_hint definition to intel compiler intrinsic
[IA64] hooks to wait for mmio writes to drain when migrating processes
[IA64-SGI] driver bugfixes and hardware workarounds for CE1.0 asic
[IA64-SGI] Handle SC env. powerdown events
[IA64] Delete MCA/INIT sigdelayed code
[IA64-SGI] sem2mutex ioc4.c
[IA64] implement ia64 specific mutex primitives
[IA64] Fix UP build with BSP removal support.
[IA64] support for cpu0 removal
Diffstat (limited to 'include/asm-ia64/mutex.h')
-rw-r--r-- | include/asm-ia64/mutex.h | 93 |
1 file changed, 88 insertions(+), 5 deletions(-)
```diff
diff --git a/include/asm-ia64/mutex.h b/include/asm-ia64/mutex.h
index 458c1f7fbc18..5a3224f6af38 100644
--- a/include/asm-ia64/mutex.h
+++ b/include/asm-ia64/mutex.h
@@ -1,9 +1,92 @@
 /*
- * Pull in the generic implementation for the mutex fastpath.
+ * ia64 implementation of the mutex fastpath.
  *
- * TODO: implement optimized primitives instead, or leave the generic
- * implementation in place, or pick the atomic_xchg() based generic
- * implementation. (see asm-generic/mutex-xchg.h for details)
+ * Copyright (C) 2006 Ken Chen <kenneth.w.chen@intel.com>
+ *
+ */
+
+#ifndef _ASM_MUTEX_H
+#define _ASM_MUTEX_H
+
+/**
+ * __mutex_fastpath_lock - try to take the lock by moving the count
+ *                         from 1 to a 0 value
+ * @count: pointer of type atomic_t
+ * @fail_fn: function to call if the original value was not 1
+ *
+ * Change the count from 1 to a value lower than 1, and call <fail_fn> if
+ * it wasn't 1 originally. This function MUST leave the value lower than
+ * 1 even when the "1" assertion wasn't true.
+ */
+static inline void
+__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
+{
+	if (unlikely(ia64_fetchadd4_acq(count, -1) != 1))
+		fail_fn(count);
+}
+
+/**
+ * __mutex_fastpath_lock_retval - try to take the lock by moving the count
+ *                                from 1 to a 0 value
+ * @count: pointer of type atomic_t
+ * @fail_fn: function to call if the original value was not 1
+ *
+ * Change the count from 1 to a value lower than 1, and call <fail_fn> if
+ * it wasn't 1 originally. This function returns 0 if the fastpath succeeds,
+ * or anything the slow path function returns.
+ */
+static inline int
+__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
+{
+	if (unlikely(ia64_fetchadd4_acq(count, -1) != 1))
+		return fail_fn(count);
+	return 0;
+}
+
+/**
+ * __mutex_fastpath_unlock - try to promote the count from 0 to 1
+ * @count: pointer of type atomic_t
+ * @fail_fn: function to call if the original value was not 0
+ *
+ * Try to promote the count from 0 to 1. If it wasn't 0, call <fail_fn>.
+ * In the failure case, this function is allowed to either set the value to
+ * 1, or to set it to a value lower than 1.
+ *
+ * If the implementation sets it to a value lower than 1, then the
+ * __mutex_slowpath_needs_to_unlock() macro needs to return 1; it needs
+ * to return 0 otherwise.
+ */
+static inline void
+__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
+{
+	int ret = ia64_fetchadd4_rel(count, 1);
+	if (unlikely(ret < 0))
+		fail_fn(count);
+}
+
+#define __mutex_slowpath_needs_to_unlock()	1
+
+/**
+ * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
+ *
+ * @count: pointer of type atomic_t
+ * @fail_fn: fallback function
+ *
+ * Change the count from 1 to a value lower than 1, and return 0 (failure)
+ * if it wasn't 1 originally, or return 1 (success) otherwise. This function
+ * MUST leave the value lower than 1 even when the "1" assertion wasn't true.
+ * Additionally, if the value was < 0 originally, this function must not set
+ * it to 0 on failure.
+ *
+ * If the architecture has no effective trylock variant, it should call the
+ * <fail_fn> spinlock-based trylock variant unconditionally.
  */
+static inline int
+__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
+{
+	if (likely(cmpxchg_acq(count, 1, 0) == 1))
+		return 1;
+	return 0;
+}
 
-#include <asm-generic/mutex-dec.h>
+#endif
```
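For readers who want to see how these fastpath hooks fit together, below is a minimal userspace sketch of the same counter protocol: count == 1 means unlocked, 0 means locked, and < 0 means locked with possible waiters. It is an illustration only, not the kernel's implementation: the `toy_*` names are hypothetical, C11 `<stdatomic.h>` operations stand in for `ia64_fetchadd4_acq()`, `ia64_fetchadd4_rel()`, and `cmpxchg_acq()`, and the slowpath busy-waits where the real kernel would queue the task and sleep.

```c
#include <stdatomic.h>
#include <stdio.h>

/* Hypothetical toy type standing in for the kernel's struct mutex. */
typedef struct {
	atomic_int count;	/* 1 = unlocked, 0 = locked, <0 = maybe waiters */
} toy_mutex_t;

/* Slowpath stand-in: the kernel would enqueue the task and sleep;
 * this sketch just spins, marking the count negative while waiting. */
static void toy_lock_slowpath(toy_mutex_t *m)
{
	while (atomic_exchange_explicit(&m->count, -1,
					memory_order_acquire) != 1)
		;	/* busy-wait */
}

/* Mirrors __mutex_fastpath_lock(): move the count 1 -> 0 with an
 * acquire-ordered fetch-and-add; fall into the slowpath otherwise. */
static void toy_lock(toy_mutex_t *m)
{
	if (atomic_fetch_sub_explicit(&m->count, 1,
				      memory_order_acquire) != 1)
		toy_lock_slowpath(m);
}

/* Mirrors __mutex_fastpath_unlock(): move the count 0 -> 1 with a
 * release-ordered fetch-and-add; a negative result means there may
 * be waiters, which is what __mutex_slowpath_needs_to_unlock() == 1
 * signals to the generic layer. */
static void toy_unlock(toy_mutex_t *m)
{
	if (atomic_fetch_add_explicit(&m->count, 1,
				      memory_order_release) < 0) {
		atomic_store_explicit(&m->count, 1, memory_order_release);
		/* the kernel would wake a sleeping waiter here */
	}
}

/* Mirrors __mutex_fastpath_trylock(): 1 -> 0 only if currently
 * unlocked; on failure the count is left untouched, which satisfies
 * the "must not set it to 0" rule for negative counts. */
static int toy_trylock(toy_mutex_t *m)
{
	int expected = 1;
	return atomic_compare_exchange_strong_explicit(&m->count,
			&expected, 0,
			memory_order_acquire, memory_order_relaxed);
}

int main(void)
{
	toy_mutex_t m = { .count = 1 };		/* starts unlocked */

	toy_lock(&m);
	printf("trylock while held:   %d\n", toy_trylock(&m));	/* 0 */
	toy_unlock(&m);
	printf("trylock after unlock: %d\n", toy_trylock(&m));	/* 1 */
	toy_unlock(&m);
	return 0;
}
```

The split in the patch between fetchadd and cmpxchg follows from the rules in the kernel-doc comments: lock and unlock may always move the count (a single ia64 fetchadd4 with acquire or release semantics), but trylock must leave the count undisturbed on failure, so it needs the compare-and-exchange.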