author		Tim Chen <tim.c.chen@linux.intel.com>	2014-01-21 15:36:00 -0800
committer	Ingo Molnar <mingo@kernel.org>		2014-01-28 13:13:27 +0100
commit		e72246748ff006ab928bc774e276e6ef5542f9c5 (patch)
tree		b3021f1615d2088ce20fc02bd61e9f2baab72dd1 /include/linux/mutex.h
parent		aff7385b5a16bca6b8d9243f01a9ea5a5b411e1d (diff)
locking/mutexes/mcs: Restructure the MCS lock defines and locking code into its own file
We will need the MCS lock code for doing optimistic spinning for rwsem
and queued rwlock. Extracting the MCS code from mutex.c and putting it
into its own file allows us to reuse this code easily.

We also inline the mcs_spin_lock and mcs_spin_unlock functions
for better efficiency.
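For reference, a minimal sketch of the lock/unlock pair that ends up in
its own header; the node layout and the xchg()/cmpxchg()/ACCESS_ONCE()
helpers shown here follow the kernel conventions of that era and are
meant to illustrate the MCS scheme, not to be a verbatim copy of the
extracted file:

	struct mcs_spinlock {
		struct mcs_spinlock *next;	/* next waiter in the queue */
		int locked;			/* 1 once the lock is handed to us */
	};

	/*
	 * Queue ourselves at the tail; if there was a predecessor, spin on
	 * our own node's "locked" flag until it hands the lock over.
	 */
	static inline void mcs_spin_lock(struct mcs_spinlock **lock,
					 struct mcs_spinlock *node)
	{
		struct mcs_spinlock *prev;

		node->locked = 0;
		node->next = NULL;

		prev = xchg(lock, node);	/* atomically become the tail */
		if (likely(prev == NULL))
			return;			/* queue was empty: lock acquired */

		ACCESS_ONCE(prev->next) = node;	/* link behind the previous tail */
		while (!smp_load_acquire(&node->locked))
			arch_mutex_cpu_relax();	/* wait for the hand-off */
	}

	/*
	 * Hand the lock to the next queued waiter, or clear the tail
	 * pointer if nobody is waiting.
	 */
	static inline void mcs_spin_unlock(struct mcs_spinlock **lock,
					   struct mcs_spinlock *node)
	{
		struct mcs_spinlock *next = ACCESS_ONCE(node->next);

		if (likely(!next)) {
			/* No visible successor: try to release outright. */
			if (likely(cmpxchg(lock, node, NULL) == node))
				return;
			/* A new waiter raced with us; wait for it to link in. */
			while (!(next = ACCESS_ONCE(node->next)))
				arch_mutex_cpu_relax();
		}
		smp_store_release(&next->locked, 1);	/* pass the lock on */
	}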
Note that the smp_load_acquire()/smp_store_release() pair used in
mcs_spin_lock() and mcs_spin_unlock() is not sufficient to form a full
memory barrier across CPUs on many architectures (x86 being an
exception). Callers that absolutely need a full barrier across multiple
CPUs around an mcs_spin_unlock()/mcs_spin_lock() pair should use
smp_mb__after_unlock_lock() after mcs_spin_lock().
Reviewed-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Signed-off-by: Tim Chen <tim.c.chen@linux.intel.com>
Signed-off-by: Davidlohr Bueso <davidlohr@hp.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/1390347360.3138.63.camel@schen9-DESK
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'include/linux/mutex.h')
-rw-r--r--	include/linux/mutex.h	5
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index d3181936c138..c482e1d2cc49 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -46,6 +46,7 @@
  * - detects multi-task circular deadlocks and prints out all affected
  *   locks and tasks (and only those tasks)
  */
+struct mcs_spinlock;
 struct mutex {
 	/* 1: unlocked, 0: locked, negative: locked, possible waiters */
 	atomic_t		count;
@@ -55,7 +56,7 @@ struct mutex {
 	struct task_struct	*owner;
 #endif
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
-	void			*spin_mlock;	/* Spinner MCS lock */
+	struct mcs_spinlock	*mcs_lock;	/* Spinner MCS lock */
 #endif
 #ifdef CONFIG_DEBUG_MUTEXES
 	const char		*name;
@@ -179,4 +180,4 @@ extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
 # define arch_mutex_cpu_relax()	cpu_relax()
 #endif
 
-#endif
+#endif /* __LINUX_MUTEX_H */