author     Ingo Molnar <mingo@elte.hu>              2006-01-10 22:07:44 +0100
committer  Linus Torvalds <torvalds@g5.osdl.org>    2006-01-10 13:20:47 -0800
commit     73165b88ffd29813bf73b331eaf90d3521443236 (patch)
tree       224b510df182c5cba7b64fea6202ed9dd414835e
parent     042c904c3e35e95ac911e8a2bf4097099b059e1a (diff)
[PATCH] fix i386 mutex fastpath on FRAME_POINTER && !DEBUG_MUTEXES
Call the mutex slowpath more conservatively - e.g. FRAME_POINTERS can
change the calling convention, in which case a direct branch to the
slowpath becomes illegal. Bug found by Hugh Dickins.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
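Before this patch the fastpath conditionally jumped straight into the
slowpath function (`js "#fail_fn"`). That only works while the stack at
the branch site looks exactly like a fresh call frame; with
CONFIG_FRAME_POINTER the compiler can emit a frame-pointer prologue in
the wrapper, the calling convention changes, and the direct branch
becomes illegal. The fix routes the failure case through a real `call`
placed out of line. Roughly, the code generated for the locking
fastpath now looks like this (an illustrative sketch, not verbatim
compiler output):

        lock decl (%eax)                # try the 1->0 transition
        js   2f                         # went negative: contended
    1:                                  # fastpath done, fall through

        .subsection 1                   # out-of-line lock section
    2:  call __mutex_lock_slowpath      # a real call is safe whatever
        jmp  1b                         # prologue the callee was built with
        .previous

Only the `decl` and a forward branch stay on the hot path; the
contended case pays the extra `call`/`jmp`.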
-rw-r--r--  include/asm-i386/mutex.h  16
-rw-r--r--  kernel/mutex.c             9
2 files changed, 14 insertions(+), 11 deletions(-)
diff --git a/include/asm-i386/mutex.h b/include/asm-i386/mutex.h
index 4e5e3de1b9a6..c657d4b09f0a 100644
--- a/include/asm-i386/mutex.h
+++ b/include/asm-i386/mutex.h
@@ -28,7 +28,13 @@ do { \
\
__asm__ __volatile__( \
LOCK " decl (%%eax) \n" \
- " js "#fail_fn" \n" \
+ " js 2f \n" \
+ "1: \n" \
+ \
+ LOCK_SECTION_START("") \
+ "2: call "#fail_fn" \n" \
+ " jmp 1b \n" \
+ LOCK_SECTION_END \
\
:"=a" (dummy) \
: "a" (count) \
@@ -78,7 +84,13 @@ do { \
\
__asm__ __volatile__( \
LOCK " incl (%%eax) \n" \
- " jle "#fail_fn" \n" \
+ " jle 2f \n" \
+ "1: \n" \
+ \
+ LOCK_SECTION_START("") \
+ "2: call "#fail_fn" \n" \
+ " jmp 1b \n" \
+ LOCK_SECTION_END \
\
:"=a" (dummy) \
: "a" (count) \
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 7eb960661441..d3dcb8b44bac 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -84,12 +84,6 @@ void fastcall __sched mutex_lock(struct mutex *lock)
/*
* The locking fastpath is the 1->0 transition from
* 'unlocked' into 'locked' state.
- *
- * NOTE: if asm/mutex.h is included, then some architectures
- * rely on mutex_lock() having _no other code_ here but this
- * fastpath. That allows the assembly fastpath to do
- * tail-merging optimizations. (If you want to put testcode
- * here, do it under #ifndef CONFIG_MUTEX_DEBUG.)
*/
__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
}
@@ -115,8 +109,6 @@ void fastcall __sched mutex_unlock(struct mutex *lock)
/*
* The unlocking fastpath is the 0->1 transition from 'locked'
* into 'unlocked' state:
- *
- * NOTE: no other code must be here - see mutex_lock() .
*/
__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
}
@@ -261,7 +253,6 @@ __mutex_lock_interruptible_slowpath(atomic_t *lock_count __IP_DECL__);
*/
int fastcall __sched mutex_lock_interruptible(struct mutex *lock)
{
- /* NOTE: no other code must be here - see mutex_lock() */
return __mutex_fastpath_lock_retval
(&lock->count, __mutex_lock_interruptible_slowpath);
}
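The comments deleted above recorded the old constraint: because the
fastpath jumped into the slowpath, whose `ret` then returned directly
to the original caller, the C wrappers had to contain nothing but the
fastpath invocation. With the slowpath now entered through a real
`call` inside the asm, that constraint disappears, and the notes go
with it. Semantically the i386 fastpath still matches the generic C
fallback of the time (asm-generic/mutex-dec.h, reproduced here
approximately):

    #define __mutex_fastpath_lock(count, fail_fn)                     \
    do {                                                              \
            /* 1->0 transition; a negative result means contention */ \
            if (unlikely(atomic_dec_return(count) < 0))               \
                    fail_fn(count);                                   \
    } while (0)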