author	Török Edwin <edwintorok@gmail.com>	2008-11-24 10:17:42 +0200
committer	Ingo Molnar <mingo@elte.hu>	2008-11-24 10:00:28 +0100
commit	7918baa555140989eeee1270f48533987d48fdba (patch)
tree	553b1bbf141ab4c671e49bb81e14ae82b0f37b6f /kernel/mutex.c
parent	e25cf3db560e803292946ef23a30c69e341ce56f (diff)
mutex: __used is needed for function referenced only from inline asm
Impact: fix build failure on llvm-gcc-4.2

According to the gcc manual, the 'used' attribute should be applied to functions referenced only from inline assembly. This fixes a build failure with llvm-gcc-4.2, which deleted __mutex_lock_slowpath and __mutex_unlock_slowpath.

Signed-off-by: Török Edwin <edwintorok@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
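For illustration, a minimal standalone sketch (not kernel code) of what the 'used' attribute does here; it assumes GCC/Clang extended asm on x86-64 Linux, and helper() is a made-up name:

#include <stdio.h>

static int reached;

/*
 * helper() has no C-level callers; its only reference is the "call" inside
 * the asm statement in main(). Without the used attribute, an optimizer that
 * does not look inside the asm text may discard the static function, and the
 * build then fails with an undefined reference - the failure mode this patch
 * fixes for __mutex_lock_slowpath/__mutex_unlock_slowpath. The kernel's
 * __used macro is shorthand for this attribute.
 */
static __attribute__((__used__)) void helper(void)
{
	reached = 1;
}

int main(void)
{
	/*
	 * The call is visible only to the assembler, not to the C optimizer.
	 * A real call site would also list the caller-saved registers as
	 * clobbers; the sketch keeps only "memory".
	 */
	asm volatile("call helper" ::: "memory");
	printf("helper reached: %d\n", reached);
	return 0;
}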
Diffstat (limited to 'kernel/mutex.c')
-rw-r--r--	kernel/mutex.c	8
1 file changed, 4 insertions, 4 deletions
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 39a3816b68d9..4f45d4b658ef 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -59,7 +59,7 @@ EXPORT_SYMBOL(__mutex_init);
* We also put the fastpath first in the kernel image, to make sure the
* branch is predicted by the CPU as default-untaken.
*/
-static void noinline __sched
+static __used noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count);

/***
@@ -96,7 +96,7 @@ void inline __sched mutex_lock(struct mutex *lock)
EXPORT_SYMBOL(mutex_lock);
#endif

-static noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);
+static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);

/***
* mutex_unlock - release the mutex
@@ -268,7 +268,7 @@ __mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
/*
* Release the lock, slowpath:
*/
-static noinline void
+static __used noinline void
__mutex_unlock_slowpath(atomic_t *lock_count)
{
	__mutex_unlock_common_slowpath(lock_count, 1);
@@ -313,7 +313,7 @@ int __sched mutex_lock_killable(struct mutex *lock)
}
EXPORT_SYMBOL(mutex_lock_killable);

-static noinline void __sched
+static __used noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);
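For context on why these slowpath functions end up with no C-visible callers: on x86, the mutex fastpath does its atomic count update in inline assembly and branches to the slowpath with a "call" from that same asm, so the compiler never sees a C call to __mutex_lock_slowpath() or __mutex_unlock_slowpath(). A rough standalone sketch of that shape (illustrative only, not the kernel's __mutex_fastpath_lock(); the names, asm sequence, clobber list and x86-64 Linux assumptions are simplifications):

#include <stdio.h>

#define __used		__attribute__((__used__))
#define noinline	__attribute__((__noinline__))

static int contended;	/* set by the slowpath so main() can report it */

/* Reachable only through the "call" in fastpath_lock()'s asm below. */
static __used noinline void lock_slowpath(int *count)
{
	/* A real slowpath would queue the waiter and sleep; just record it. */
	(void)count;
	contended = 1;
}

static inline void fastpath_lock(int *count)
{
	asm volatile("lock decl (%0)\n\t"	/* atomic 1 -> 0 transition */
		     "jns 1f\n\t"		/* still >= 0: lock acquired */
		     "call lock_slowpath\n"	/* contended: asm-only reference */
		     "1:"
		     : "+D" (count)		/* pointer in %rdi, the ABI's first argument */
		     :
		     : "rax", "rcx", "rdx", "rsi",
		       "r8", "r9", "r10", "r11", "memory", "cc");
}

int main(void)
{
	static int count;	/* 0 means "already held", so the slowpath runs */

	fastpath_lock(&count);
	printf("count=%d contended=%d\n", count, contended);
	return 0;
}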