| author | Kiyoshi Ueda <k-ueda@ct.jp.nec.com> | 2007-12-11 17:41:54 -0500 |
|---|---|---|
| committer | Jens Axboe <jens.axboe@oracle.com> | 2008-01-28 10:35:57 +0100 |
| commit | 9e6e39f2c478fff2e9d3430cdfe6730877942ed6 (patch) | |
| tree | b01b289b331ff899393afcd4651fa75aaec19e1c | |
| parent | 3b11313a6c2a42425bf06e92528bda6affd58dec (diff) | |
blk_end_request: changing block layer core (take 4)
This patch converts core parts of the block layer to the blk_end_request
interfaces. The related 'uptodate' arguments are converted to 'error';
a sketch of that mapping follows.
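A minimal sketch of the uptodate -> error convention, mirroring the new
__end_request() body in the diff below; uptodate_to_error() is a
hypothetical helper name used here for illustration only:

```c
/*
 * Hypothetical helper (not part of this patch): positive 'uptodate'
 * means success, zero means a generic failure mapped to -EIO, and
 * negative values are already negative errno codes passed through.
 */
static inline int uptodate_to_error(int uptodate)
{
	if (uptodate > 0)
		return 0;	/* success */
	return uptodate ? uptodate : -EIO;
}
```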
The 'dequeue' argument was originally introduced for
end_dequeued_request(), where no attempt should be made to dequeue the
request because it has already been dequeued. However, the argument is
unnecessary: the same condition can be checked with
list_empty(&rq->queuelist), since a dequeued request has an empty
queuelist and a queued request does not (see the sketch below). The
blk_end_request interfaces already perform this check.
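A minimal sketch of that check, assuming only the standard list helpers;
rq_is_queued() is a hypothetical name, not a helper added by this series:

```c
#include <linux/list.h>
#include <linux/blkdev.h>

/*
 * Hypothetical illustration: a queued request is linked into the queue
 * through rq->queuelist, while dequeueing (blkdev_dequeue_request())
 * removes it with list_del_init(), leaving the list head empty.
 */
static inline int rq_is_queued(struct request *rq)
{
	return !list_empty(&rq->queuelist);
}
```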
As a result of this patch, end_queued_request() and
end_dequeued_request() become identical. A future patch will merge and
rename them and update their callers.
Signed-off-by: Kiyoshi Ueda <k-ueda@ct.jp.nec.com>
Signed-off-by: Jun'ichi Nomura <j-nomura@ce.jp.nec.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
-rw-r--r-- | block/ll_rw_blk.c | 35
1 file changed, 15 insertions(+), 20 deletions(-)
```diff
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 8b2b2509f60e..fb951198c70e 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -347,7 +347,6 @@ unsigned blk_ordered_req_seq(struct request *rq)
 void blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
 {
 	struct request *rq;
-	int uptodate;
 
 	if (error && !q->orderr)
 		q->orderr = error;
@@ -361,15 +360,11 @@ void blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
 	/*
 	 * Okay, sequence complete.
 	 */
-	uptodate = 1;
-	if (q->orderr)
-		uptodate = q->orderr;
-
 	q->ordseq = 0;
 	rq = q->orig_bar_rq;
 
-	end_that_request_first(rq, uptodate, rq->hard_nr_sectors);
-	end_that_request_last(rq, uptodate);
+	if (__blk_end_request(rq, q->orderr, blk_rq_bytes(rq)))
+		BUG();
 }
 
 static void pre_flush_end_io(struct request *rq, int error)
@@ -486,9 +481,9 @@ int blk_do_ordered(struct request_queue *q, struct request **rqp)
 		 * ORDERED_NONE while this request is on it.
 		 */
 		blkdev_dequeue_request(rq);
-		end_that_request_first(rq, -EOPNOTSUPP,
-				       rq->hard_nr_sectors);
-		end_that_request_last(rq, -EOPNOTSUPP);
+		if (__blk_end_request(rq, -EOPNOTSUPP,
+				      blk_rq_bytes(rq)))
+			BUG();
 		*rqp = NULL;
 		return 0;
 	}
@@ -3713,14 +3708,14 @@ void end_that_request_last(struct request *req, int uptodate)
 EXPORT_SYMBOL(end_that_request_last);
 
 static inline void __end_request(struct request *rq, int uptodate,
-				 unsigned int nr_bytes, int dequeue)
+				 unsigned int nr_bytes)
 {
-	if (!end_that_request_chunk(rq, uptodate, nr_bytes)) {
-		if (dequeue)
-			blkdev_dequeue_request(rq);
-		add_disk_randomness(rq->rq_disk);
-		end_that_request_last(rq, uptodate);
-	}
+	int error = 0;
+
+	if (uptodate <= 0)
+		error = uptodate ? uptodate : -EIO;
+
+	__blk_end_request(rq, error, nr_bytes);
 }
 
 /**
@@ -3763,7 +3758,7 @@ EXPORT_SYMBOL_GPL(blk_rq_cur_bytes);
  **/
 void end_queued_request(struct request *rq, int uptodate)
 {
-	__end_request(rq, uptodate, blk_rq_bytes(rq), 1);
+	__end_request(rq, uptodate, blk_rq_bytes(rq));
 }
 EXPORT_SYMBOL(end_queued_request);
 
@@ -3780,7 +3775,7 @@ EXPORT_SYMBOL(end_queued_request);
  **/
 void end_dequeued_request(struct request *rq, int uptodate)
 {
-	__end_request(rq, uptodate, blk_rq_bytes(rq), 0);
+	__end_request(rq, uptodate, blk_rq_bytes(rq));
 }
 EXPORT_SYMBOL(end_dequeued_request);
 
@@ -3806,7 +3801,7 @@ EXPORT_SYMBOL(end_dequeued_request);
  **/
 void end_request(struct request *req, int uptodate)
 {
-	__end_request(req, uptodate, req->hard_cur_sectors << 9, 1);
+	__end_request(req, uptodate, req->hard_cur_sectors << 9);
 }
 EXPORT_SYMBOL(end_request);
```
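For whole-request completion, the converted callers collapse the old
end_that_request_first()/end_that_request_last() pair into a single
__blk_end_request() call. A minimal sketch of that idiom;
finish_whole_request() is a hypothetical wrapper, not a helper from
this patch:

```c
#include <linux/blkdev.h>

/*
 * Hypothetical wrapper illustrating the completion idiom used in the
 * diff above: __blk_end_request() returns nonzero if the request still
 * has bytes left to complete, so when the request's full byte count is
 * passed in, a nonzero return is treated as a bug.
 */
static void finish_whole_request(struct request *rq, int error)
{
	if (__blk_end_request(rq, error, blk_rq_bytes(rq)))
		BUG();
}
```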