summaryrefslogtreecommitdiffstats
path: root/block/blk-exec.c
diff options
context:
space:
mode:
authorMuthukumar Ratty <muthur@gmail.com>2012-06-29 15:31:49 +0000
committerJames Bottomley <JBottomley@Parallels.com>2012-07-20 08:58:39 +0100
commite81ca6fe85b77109a32489a5db82f575d51dfc98 (patch)
treea881c5b85b07525f28cc546dfde2038cd900cb45 /block/blk-exec.c
parent6548b0e5b875a07e32e924b22a7df3669892c75a (diff)
downloadlinux-e81ca6fe85b77109a32489a5db82f575d51dfc98.tar.gz
linux-e81ca6fe85b77109a32489a5db82f575d51dfc98.tar.bz2
linux-e81ca6fe85b77109a32489a5db82f575d51dfc98.zip
[SCSI] block: Fix blk_execute_rq_nowait() dead queue handling
If the queue is dead blk_execute_rq_nowait() doesn't invoke the done() callback function. That will result in blk_execute_rq() being stuck in wait_for_completion(). Avoid this by initializing rq->end_io to the done() callback before we check the queue state. Also, make sure the queue lock is held around the invocation of the done() callback. Found this through source code review. Signed-off-by: Muthukumar Ratty <muthur@gmail.com> Signed-off-by: Bart Van Assche <bvanassche@acm.org> Reviewed-by: Tejun Heo <tj@kernel.org> Acked-by: Jens Axboe <axboe@kernel.dk> Signed-off-by: James Bottomley <JBottomley@Parallels.com>
Diffstat (limited to 'block/blk-exec.c')
-rw-r--r--block/blk-exec.c11
1 file changed, 8 insertions, 3 deletions
diff --git a/block/blk-exec.c b/block/blk-exec.c
index fb2cbd551621..8b6dc5bd4dd0 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -43,6 +43,9 @@ static void blk_end_sync_rq(struct request *rq, int error)
* Description:
* Insert a fully prepared request at the back of the I/O scheduler queue
* for execution. Don't wait for completion.
+ *
+ * Note:
+ * This function will invoke @done directly if the queue is dead.
*/
void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
struct request *rq, int at_head,
@@ -51,18 +54,20 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
WARN_ON(irqs_disabled());
+
+ rq->rq_disk = bd_disk;
+ rq->end_io = done;
+
spin_lock_irq(q->queue_lock);
if (unlikely(blk_queue_dead(q))) {
- spin_unlock_irq(q->queue_lock);
rq->errors = -ENXIO;
if (rq->end_io)
rq->end_io(rq, rq->errors);
+ spin_unlock_irq(q->queue_lock);
return;
}
- rq->rq_disk = bd_disk;
- rq->end_io = done;
__elv_add_request(q, rq, where);
__blk_run_queue(q);
/* the queue is stopped so it won't be run */