author     akpm@osdl.org <akpm@osdl.org>    2005-04-07 11:35:46 -0700
committer  Greg KH <gregkh@suse.de>         2005-05-12 10:00:19 -0700
commit     c05aa51378f0b9fe1ce0d65b6c6b886dd32dd383 (patch)
tree       09a041457a31fde98d566a0cd3ad8b9e82d7dc8f
parent     92ef2364e62254d5a36d30ffc7f447ac5358b06e (diff)
[PATCH] rwsem fix
We should merge this backport - it's needed to prevent deadlocks when
dio_complete() does up_read() from IRQ context.  And perhaps other places.

From: David Howells <dhowells@redhat.com>

[PATCH] rwsem: Make rwsems use interrupt disabling spinlocks

The attached patch makes read/write semaphores use interrupt disabling
spinlocks in the slow path, thus rendering the up functions and trylock
functions available for use in interrupt context.  This matches the
regular semaphore behaviour.

I've assumed that the normal down functions must be called with
interrupts enabled (since they might schedule), and used the
irq-disabling spinlock variants that don't save the flags.

Signed-off-by: David Howells <dhowells@redhat.com>
Tested-by: Badari Pulavarty <pbadari@us.ibm.com>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Signed-off-by: Chris Wright <chrisw@osdl.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
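[Editorial aside: the patch alternates between two spinlock variants, and
the choice is systematic.  Paths that may sleep (the down functions) are
only legal with interrupts enabled, so they use spin_lock_irq(), which
re-enables interrupts unconditionally on unlock.  Paths that may now be
entered from IRQ context (up, trylock, wake) must preserve the caller's
interrupt state, so they use spin_lock_irqsave()/spin_unlock_irqrestore().
A minimal sketch of the two patterns, with hypothetical names, not the
patched code itself:

    #include <linux/spinlock.h>

    static spinlock_t demo_wait_lock = SPIN_LOCK_UNLOCKED; /* 2.6.11-era init */

    /* Down-style path: may sleep, so interrupts are known enabled on
     * entry; spin_lock_irq() can re-enable them unconditionally. */
    static void demo_sleeping_path(void)
    {
            spin_lock_irq(&demo_wait_lock);
            /* ... queue this task on the wait list ... */
            spin_unlock_irq(&demo_wait_lock);
            /* ... loop calling schedule() until granted ... */
    }

    /* Up/trylock-style path: may run in IRQ context, so the caller's
     * interrupt state must be saved and restored. */
    static void demo_irq_safe_path(void)
    {
            unsigned long flags;

            spin_lock_irqsave(&demo_wait_lock, flags);
            /* ... grant the lock or wake a waiter ... */
            spin_unlock_irqrestore(&demo_wait_lock, flags);
    }

As the changelog says, the down paths can rely on interrupts being
enabled because they might schedule, so the cheaper non-saving variant
suffices there.]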
-rw-r--r--  lib/rwsem-spinlock.c  42
-rw-r--r--  lib/rwsem.c           16
2 files changed, 36 insertions(+), 22 deletions(-)
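[The deadlock the changelog refers to is the classic IRQ-versus-spinlock
case.  Schematically, on a single CPU with the old non-IRQ-safe locking
(the function names come from the changelog; the sequence itself is an
illustration):

    /*
     * task context                     interrupt context
     * ------------                     -----------------
     * __up_read(sem)
     *   spin_lock(&sem->wait_lock)
     *                                  <IRQ arrives on this CPU>
     *                                  dio_complete()
     *                                    up_read(sem)
     *                                      spin_lock(&sem->wait_lock)
     *                                      ... spins forever: the lock
     *                                      holder cannot resume until
     *                                      the handler returns.
     */

Disabling interrupts while wait_lock is held closes the window: the IRQ
cannot be taken on the CPU that owns the lock, and handlers on other CPUs
merely spin until the short critical section ends.]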
diff --git a/lib/rwsem-spinlock.c b/lib/rwsem-spinlock.c
index 96255f47b1f8..21f0db2c9711 100644
--- a/lib/rwsem-spinlock.c
+++ b/lib/rwsem-spinlock.c
@@ -140,12 +140,12 @@ void fastcall __sched __down_read(struct rw_semaphore *sem)
rwsemtrace(sem, "Entering __down_read");
- spin_lock(&sem->wait_lock);
+ spin_lock_irq(&sem->wait_lock);
if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
/* granted */
sem->activity++;
- spin_unlock(&sem->wait_lock);
+ spin_unlock_irq(&sem->wait_lock);
goto out;
}
@@ -160,7 +160,7 @@ void fastcall __sched __down_read(struct rw_semaphore *sem)
list_add_tail(&waiter.list, &sem->wait_list);
/* we don't need to touch the semaphore struct anymore */
- spin_unlock(&sem->wait_lock);
+ spin_unlock_irq(&sem->wait_lock);
/* wait to be given the lock */
for (;;) {
@@ -181,10 +181,12 @@ void fastcall __sched __down_read(struct rw_semaphore *sem)
*/
int fastcall __down_read_trylock(struct rw_semaphore *sem)
{
+ unsigned long flags;
int ret = 0;
+
rwsemtrace(sem, "Entering __down_read_trylock");
- spin_lock(&sem->wait_lock);
+ spin_lock_irqsave(&sem->wait_lock, flags);
if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
/* granted */
@@ -192,7 +194,7 @@ int fastcall __down_read_trylock(struct rw_semaphore *sem)
ret = 1;
}
- spin_unlock(&sem->wait_lock);
+ spin_unlock_irqrestore(&sem->wait_lock, flags);
rwsemtrace(sem, "Leaving __down_read_trylock");
return ret;
@@ -209,12 +211,12 @@ void fastcall __sched __down_write(struct rw_semaphore *sem)
rwsemtrace(sem, "Entering __down_write");
- spin_lock(&sem->wait_lock);
+ spin_lock_irq(&sem->wait_lock);
if (sem->activity == 0 && list_empty(&sem->wait_list)) {
/* granted */
sem->activity = -1;
- spin_unlock(&sem->wait_lock);
+ spin_unlock_irq(&sem->wait_lock);
goto out;
}
@@ -229,7 +231,7 @@ void fastcall __sched __down_write(struct rw_semaphore *sem)
list_add_tail(&waiter.list, &sem->wait_list);
/* we don't need to touch the semaphore struct anymore */
- spin_unlock(&sem->wait_lock);
+ spin_unlock_irq(&sem->wait_lock);
/* wait to be given the lock */
for (;;) {
@@ -250,10 +252,12 @@ void fastcall __sched __down_write(struct rw_semaphore *sem)
*/
int fastcall __down_write_trylock(struct rw_semaphore *sem)
{
+ unsigned long flags;
int ret = 0;
+
rwsemtrace(sem, "Entering __down_write_trylock");
- spin_lock(&sem->wait_lock);
+ spin_lock_irqsave(&sem->wait_lock, flags);
if (sem->activity == 0 && list_empty(&sem->wait_list)) {
/* granted */
@@ -261,7 +265,7 @@ int fastcall __down_write_trylock(struct rw_semaphore *sem)
ret = 1;
}
- spin_unlock(&sem->wait_lock);
+ spin_unlock_irqrestore(&sem->wait_lock, flags);
rwsemtrace(sem, "Leaving __down_write_trylock");
return ret;
@@ -272,14 +276,16 @@ int fastcall __down_write_trylock(struct rw_semaphore *sem)
*/
void fastcall __up_read(struct rw_semaphore *sem)
{
+ unsigned long flags;
+
rwsemtrace(sem, "Entering __up_read");
- spin_lock(&sem->wait_lock);
+ spin_lock_irqsave(&sem->wait_lock, flags);
if (--sem->activity == 0 && !list_empty(&sem->wait_list))
sem = __rwsem_wake_one_writer(sem);
- spin_unlock(&sem->wait_lock);
+ spin_unlock_irqrestore(&sem->wait_lock, flags);
rwsemtrace(sem, "Leaving __up_read");
}
@@ -289,15 +295,17 @@ void fastcall __up_read(struct rw_semaphore *sem)
*/
void fastcall __up_write(struct rw_semaphore *sem)
{
+ unsigned long flags;
+
rwsemtrace(sem, "Entering __up_write");
- spin_lock(&sem->wait_lock);
+ spin_lock_irqsave(&sem->wait_lock, flags);
sem->activity = 0;
if (!list_empty(&sem->wait_list))
sem = __rwsem_do_wake(sem, 1);
- spin_unlock(&sem->wait_lock);
+ spin_unlock_irqrestore(&sem->wait_lock, flags);
rwsemtrace(sem, "Leaving __up_write");
}
@@ -308,15 +316,17 @@ void fastcall __up_write(struct rw_semaphore *sem)
*/
void fastcall __downgrade_write(struct rw_semaphore *sem)
{
+ unsigned long flags;
+
rwsemtrace(sem, "Entering __downgrade_write");
- spin_lock(&sem->wait_lock);
+ spin_lock_irqsave(&sem->wait_lock, flags);
sem->activity = 1;
if (!list_empty(&sem->wait_list))
sem = __rwsem_do_wake(sem, 0);
- spin_unlock(&sem->wait_lock);
+ spin_unlock_irqrestore(&sem->wait_lock, flags);
rwsemtrace(sem, "Leaving __downgrade_write");
}
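[Note the asymmetry between the two files.  lib/rwsem-spinlock.c is the
generic implementation that runs everything under wait_lock, so every
entry point above changes.  lib/rwsem.c is the atomic-count
implementation: its uncontended fast path is a single atomic operation in
the per-arch headers and never touches wait_lock, so only the contended
slow path and the wakeup helpers below need the IRQ-safe variants.
Roughly, as a hypothetical simplification rather than the real per-arch
code:

    #include <linux/rwsem.h>

    /* stand-in for the arch-specific atomic add-and-return on
     * sem->count; hypothetical name, illustration only */
    extern signed long demo_atomic_inc_return(signed long *count);

    static inline void down_read_sketch(struct rw_semaphore *sem)
    {
            /* a negative result means writers or waiters are present */
            if (demo_atomic_inc_return(&sem->count) < 0)
                    rwsem_down_read_failed(sem); /* slow path: wait_lock */
    }
]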
diff --git a/lib/rwsem.c b/lib/rwsem.c
index 924c14e0bfde..7644089ec8fa 100644
--- a/lib/rwsem.c
+++ b/lib/rwsem.c
@@ -150,7 +150,7 @@ rwsem_down_failed_common(struct rw_semaphore *sem,
set_task_state(tsk, TASK_UNINTERRUPTIBLE);
/* set up my own style of waitqueue */
- spin_lock(&sem->wait_lock);
+ spin_lock_irq(&sem->wait_lock);
waiter->task = tsk;
get_task_struct(tsk);
@@ -163,7 +163,7 @@ rwsem_down_failed_common(struct rw_semaphore *sem,
if (!(count & RWSEM_ACTIVE_MASK))
sem = __rwsem_do_wake(sem, 0);
- spin_unlock(&sem->wait_lock);
+ spin_unlock_irq(&sem->wait_lock);
/* wait to be given the lock */
for (;;) {
@@ -219,15 +219,17 @@ rwsem_down_write_failed(struct rw_semaphore *sem)
*/
struct rw_semaphore fastcall *rwsem_wake(struct rw_semaphore *sem)
{
+ unsigned long flags;
+
rwsemtrace(sem, "Entering rwsem_wake");
- spin_lock(&sem->wait_lock);
+ spin_lock_irqsave(&sem->wait_lock, flags);
/* do nothing if list empty */
if (!list_empty(&sem->wait_list))
sem = __rwsem_do_wake(sem, 0);
- spin_unlock(&sem->wait_lock);
+ spin_unlock_irqrestore(&sem->wait_lock, flags);
rwsemtrace(sem, "Leaving rwsem_wake");
@@ -241,15 +243,17 @@ struct rw_semaphore fastcall *rwsem_wake(struct rw_semaphore *sem)
*/
struct rw_semaphore fastcall *rwsem_downgrade_wake(struct rw_semaphore *sem)
{
+ unsigned long flags;
+
rwsemtrace(sem, "Entering rwsem_downgrade_wake");
- spin_lock(&sem->wait_lock);
+ spin_lock_irqsave(&sem->wait_lock, flags);
/* do nothing if list empty */
if (!list_empty(&sem->wait_list))
sem = __rwsem_do_wake(sem, 1);
- spin_unlock(&sem->wait_lock);
+ spin_unlock_irqrestore(&sem->wait_lock, flags);
rwsemtrace(sem, "Leaving rwsem_downgrade_wake");
return sem;
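[With both implementations converted, a completion path like the
dio_complete() case from the changelog becomes legal.  A hypothetical
driver-style example of what the patch permits (names and structure
invented for illustration, using the 2.6-era handler signature):

    #include <linux/rwsem.h>
    #include <linux/interrupt.h>

    struct demo_request {                  /* hypothetical */
            struct rw_semaphore *isem;     /* read-held across the I/O */
    };

    /* Hypothetical interrupt handler completing an I/O request. */
    static irqreturn_t demo_io_done(int irq, void *dev_id,
                                    struct pt_regs *regs)
    {
            struct demo_request *req = dev_id;

            /* up_read() now takes sem->wait_lock with interrupts
             * disabled (irqsave variant), so releasing a read lock
             * from IRQ context can no longer deadlock against a task
             * holding wait_lock on this CPU. */
            up_read(req->isem);
            return IRQ_HANDLED;
    }
]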