path: root/kernel/mutex.c
author    Maarten Lankhorst <maarten.lankhorst@canonical.com>    2013-06-20 13:31:05 +0200
committer Ingo Molnar <mingo@kernel.org>                        2013-06-26 12:10:55 +0200
commit    a41b56efa70e060f650aeb54740aaf52044a1ead (patch)
tree      776921423ea47eeeb4451aa9f66c49535bf64048 /kernel/mutex.c
parent    1e876e3b1a9df25bb04682b0d48aaa7e8ae1fc82 (diff)
arch: Make __mutex_fastpath_lock_retval return whether fastpath succeeded or not
This will allow me to call functions that have multiple arguments if
fastpath fails. This is required to support ticket mutexes, because
they need to be able to pass an extra argument to the fail function.

Originally I duplicated the functions, by adding
__mutex_fastpath_lock_retval_arg. This ended up being just a
duplication of the existing function, so a way to test if fastpath was
called ended up being better.

This also cleaned up the reservation mutex patch some by being able to
call an atomic_set instead of atomic_xchg, and making it easier to
detect if the wrong unlock function was previously used.

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: dri-devel@lists.freedesktop.org
Cc: linaro-mm-sig@lists.linaro.org
Cc: robclark@gmail.com
Cc: rostedt@goodmis.org
Cc: daniel@ffwll.ch
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/20130620113105.4001.83929.stgit@patser
Signed-off-by: Ingo Molnar <mingo@kernel.org>
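[Editor's note: the change to the fastpath helper itself lives in the asm-generic
mutex headers and is outside this diffstat-limited view. A rough sketch of the
atomic-decrement variant, before and after, illustrates the signature change the
commit message describes; treat it as an approximation, not the exact hunk:]

    /* Before: the helper had to invoke a fail function itself, so the
     * slowpath was restricted to a single atomic_t * argument. */
    static inline int
    __mutex_fastpath_lock_retval(atomic_t *count,
                                 int (*fail_fn)(atomic_t *))
    {
            if (unlikely(atomic_dec_return(count) < 0))
                    return fail_fn(count);  /* slowpath chosen by the helper */
            return 0;
    }

    /* After: the helper only reports whether the fastpath succeeded;
     * the caller picks the slowpath and can pass it extra arguments. */
    static inline int
    __mutex_fastpath_lock_retval(atomic_t *count)
    {
            if (unlikely(atomic_dec_return(count) < 0))
                    return -1;              /* fastpath failed, caller decides */
            return 0;
    }

This is what lets kernel/mutex.c below replace the fail_fn callback with a
direct call to the appropriate slowpath after testing the return value.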
Diffstat (limited to 'kernel/mutex.c')
 kernel/mutex.c | 32 ++++++++++++++------------------
 1 file changed, 14 insertions(+), 18 deletions(-)
diff --git a/kernel/mutex.c b/kernel/mutex.c
index ad53a664f113..42f8dda2467b 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -494,10 +494,10 @@ __mutex_unlock_slowpath(atomic_t *lock_count)
* mutex_lock_interruptible() and mutex_trylock().
*/
static noinline int __sched
-__mutex_lock_killable_slowpath(atomic_t *lock_count);
+__mutex_lock_killable_slowpath(struct mutex *lock);
static noinline int __sched
-__mutex_lock_interruptible_slowpath(atomic_t *lock_count);
+__mutex_lock_interruptible_slowpath(struct mutex *lock);
/**
* mutex_lock_interruptible - acquire the mutex, interruptible
@@ -515,12 +515,12 @@ int __sched mutex_lock_interruptible(struct mutex *lock)
int ret;
might_sleep();
- ret = __mutex_fastpath_lock_retval
- (&lock->count, __mutex_lock_interruptible_slowpath);
- if (!ret)
+ ret = __mutex_fastpath_lock_retval(&lock->count);
+ if (likely(!ret)) {
mutex_set_owner(lock);
-
- return ret;
+ return 0;
+ } else
+ return __mutex_lock_interruptible_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock_interruptible);
@@ -530,12 +530,12 @@ int __sched mutex_lock_killable(struct mutex *lock)
int ret;
might_sleep();
- ret = __mutex_fastpath_lock_retval
- (&lock->count, __mutex_lock_killable_slowpath);
- if (!ret)
+ ret = __mutex_fastpath_lock_retval(&lock->count);
+ if (likely(!ret)) {
mutex_set_owner(lock);
-
- return ret;
+ return 0;
+ } else
+ return __mutex_lock_killable_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock_killable);
@@ -548,18 +548,14 @@ __mutex_lock_slowpath(atomic_t *lock_count)
}
static noinline int __sched
-__mutex_lock_killable_slowpath(atomic_t *lock_count)
+__mutex_lock_killable_slowpath(struct mutex *lock)
{
- struct mutex *lock = container_of(lock_count, struct mutex, count);
-
return __mutex_lock_common(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
}
static noinline int __sched
-__mutex_lock_interruptible_slowpath(atomic_t *lock_count)
+__mutex_lock_interruptible_slowpath(struct mutex *lock)
{
- struct mutex *lock = container_of(lock_count, struct mutex, count);
-
return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
}
#endif
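[Editor's note: the payoff arrives in the follow-up reservation/ticket mutex
work. Because the caller now selects the slowpath itself, it can hand extra
state across. The sketch below shows the intended pattern; the names
__ww_mutex_lock_slowpath and the ctx parameter are modeled on the later
ww_mutex patches and are illustrative here, not part of this commit:]

    /* Hypothetical caller: the slowpath takes a second argument, which
     * the old fail_fn(atomic_t *) callback had no way to carry. */
    int __sched ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
    {
            int ret;

            might_sleep();

            ret = __mutex_fastpath_lock_retval(&lock->base.count);
            if (likely(!ret)) {
                    /* fastpath took the lock; record ownership directly */
                    mutex_set_owner(&lock->base);
                    return 0;
            }
            /* fastpath failed: call a multi-argument slowpath */
            return __ww_mutex_lock_slowpath(lock, ctx);
    }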