author     Jens Axboe <axboe@suse.de>                    2006-07-18 04:14:45 +0200
committer  Jens Axboe <axboe@nelson.home.kernel.dk>      2006-09-30 20:29:34 +0200
commit     51da90fcb6acd580e87280eaf4eb1f788021807d (patch)
tree       fea3d2266942557f0b061aea022bbb0f5e383dec /block
parent     cb78b285c8f9d59b0d4e4f6a54c2977ce1d9b880 (diff)
[PATCH] ll_rw_blk: cleanup __make_request()
- Don't assign variables that are only used once.

- Kill spin_lock() prefetching, it's opportunistic at best.

Signed-off-by: Jens Axboe <axboe@suse.de>
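As an aside, a minimal userspace C sketch of the first cleanup, assuming a hypothetical struct fake_bio and fake_bio_*() accessors in place of the kernel's struct bio helpers: values that are computed once and never reassigned become const locals initialized at their declaration, rather than a block of assignments to variables that are each read only once.

/*
 * Sketch only -- struct fake_bio and the fake_bio_*() helpers are
 * hypothetical stand-ins for struct bio, bio_prio(), bio_sync() and
 * bio_sectors(); this is not kernel code.
 */
#include <stdio.h>

struct fake_bio {
        unsigned short prio;
        int sync;
        int sectors;
};

static unsigned short fake_bio_prio(const struct fake_bio *bio) { return bio->prio; }
static int fake_bio_sync(const struct fake_bio *bio)            { return bio->sync; }
static int fake_bio_sectors(const struct fake_bio *bio)         { return bio->sectors; }

static void fake_make_request(const struct fake_bio *bio)
{
        /*
         * Values that never change after setup become const locals,
         * initialized right at the declaration, instead of a pile of
         * assignments to variables that are each read only once.
         */
        const unsigned short prio = fake_bio_prio(bio);
        const int sync = fake_bio_sync(bio);
        const int nr_sectors = fake_bio_sectors(bio);

        printf("prio=%u sync=%d nr_sectors=%d\n", prio, sync, nr_sectors);
}

int main(void)
{
        struct fake_bio bio = { .prio = 4, .sync = 1, .sectors = 8 };

        fake_make_request(&bio);
        return 0;
}

The const qualifier also documents, to both the reader and the compiler, that the value stays fixed for the rest of the function, which is what makes it safe to drop the intermediate variables in the patch below.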
Diffstat (limited to 'block')
-rw-r--r--  block/ll_rw_blk.c  22
1 file changed, 7 insertions(+), 15 deletions(-)
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index b1ea941f6dc3..e25b4cd2dcd1 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -2885,17 +2885,11 @@ static void init_request_from_bio(struct request *req, struct bio *bio)
 static int __make_request(request_queue_t *q, struct bio *bio)
 {
         struct request *req;
-        int el_ret, rw, nr_sectors, cur_nr_sectors, barrier, err, sync;
-        unsigned short prio;
-        sector_t sector;
+        int el_ret, nr_sectors, barrier, err;
+        const unsigned short prio = bio_prio(bio);
+        const int sync = bio_sync(bio);
 
-        sector = bio->bi_sector;
         nr_sectors = bio_sectors(bio);
-        cur_nr_sectors = bio_cur_sectors(bio);
-        prio = bio_prio(bio);
-
-        rw = bio_data_dir(bio);
-        sync = bio_sync(bio);
 
         /*
          * low level driver can indicate that it wants pages above a
@@ -2904,8 +2898,6 @@ static int __make_request(request_queue_t *q, struct bio *bio)
          */
         blk_queue_bounce(q, &bio);
 
-        spin_lock_prefetch(q->queue_lock);
-
         barrier = bio_barrier(bio);
         if (unlikely(barrier) && (q->next_ordered == QUEUE_ORDERED_NONE)) {
                 err = -EOPNOTSUPP;
@@ -2953,9 +2945,9 @@ static int __make_request(request_queue_t *q, struct bio *bio)
                  * not touch req->buffer either...
                  */
                 req->buffer = bio_data(bio);
-                req->current_nr_sectors = cur_nr_sectors;
-                req->hard_cur_sectors = cur_nr_sectors;
-                req->sector = req->hard_sector = sector;
+                req->current_nr_sectors = bio_cur_sectors(bio);
+                req->hard_cur_sectors = req->current_nr_sectors;
+                req->sector = req->hard_sector = bio->bi_sector;
                 req->nr_sectors = req->hard_nr_sectors += nr_sectors;
                 req->ioprio = ioprio_best(req->ioprio, prio);
                 drive_stat_acct(req, nr_sectors, 0);
@@ -2973,7 +2965,7 @@ get_rq:
          * Grab a free request. This might sleep but can not fail.
          * Returns with the queue unlocked.
          */
-        req = get_request_wait(q, rw, bio);
+        req = get_request_wait(q, bio_data_dir(bio), bio);
 
         /*
          * After dropping the lock and possibly sleeping here, our request