| author | Mike Snitzer <snitzer@redhat.com> | 2016-09-13 12:16:14 -0400 |
|---|---|---|
| committer | Mike Snitzer <snitzer@redhat.com> | 2016-09-15 11:15:50 -0400 |
| commit | fbc39b4ca3bed38c6d62c658af2157d2ec9efa03 (patch) | |
| tree | 2d85e2355285e5a7de97940546e2d10821427e34 /drivers/md/dm-rq.c | |
| parent | a8ac51e4ab97765838ae6a07d6ff7f7bfaaa0ea3 (diff) | |
dm rq: reduce arguments passed to map_request() and dm_requeue_original_request()
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Reviewed-by: Hannes Reinecke <hare@suse.com>
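
The point of the change: struct dm_rq_target_io already carries pointers to both the owning mapped_device and the original request, so map_request() and dm_requeue_original_request() can look them up themselves rather than having every caller pass md and rq alongside the tio. As a reading aid for the tio->md / tio->orig dereferences in the diff below, here is a trimmed-down sketch of that structure; the real definition in drivers/md/dm-rq.h has additional members (error code, map_info, statistics fields), and only the fields this patch relies on are shown:

```c
/*
 * Trimmed-down sketch of struct dm_rq_target_io (see drivers/md/dm-rq.h).
 * Only the members this patch dereferences are listed here.
 */
struct dm_rq_target_io {
	struct mapped_device *md;	/* owning mapped device */
	struct dm_target *ti;		/* target this request maps to */
	struct request *orig;		/* original request from the block layer */
	struct request *clone;		/* clone issued to the underlying device */
	struct kthread_work work;	/* work item for the .request_fn path (map_tio_request) */
	/* ... remaining members omitted ... */
};
```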
Diffstat (limited to 'drivers/md/dm-rq.c')
-rw-r--r-- | drivers/md/dm-rq.c | 22
1 file changed, 11 insertions(+), 11 deletions(-)
```diff
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index dbced7b15931..8eefc0ad7a59 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -349,9 +349,10 @@ static void dm_mq_delay_requeue_request(struct request *rq, unsigned long msecs)
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 
-static void dm_requeue_original_request(struct mapped_device *md,
-					struct request *rq, bool delay_requeue)
+static void dm_requeue_original_request(struct dm_rq_target_io *tio, bool delay_requeue)
 {
+	struct mapped_device *md = tio->md;
+	struct request *rq = tio->orig;
 	int rw = rq_data_dir(rq);
 
 	rq_end_stats(md, rq);
@@ -390,7 +391,7 @@ static void dm_done(struct request *clone, int error, bool mapped)
 			return;
 		else if (r == DM_ENDIO_REQUEUE)
 			/* The target wants to requeue the I/O */
-			dm_requeue_original_request(tio->md, tio->orig, false);
+			dm_requeue_original_request(tio, false);
 		else {
 			DMWARN("unimplemented target endio return value: %d", r);
 			BUG();
@@ -634,11 +635,12 @@ static int dm_old_prep_fn(struct request_queue *q, struct request *rq)
  * DM_MAPIO_REQUEUE : the original request needs to be immediately requeued
  * < 0              : the request was completed due to failure
  */
-static int map_request(struct dm_rq_target_io *tio, struct request *rq,
-		       struct mapped_device *md)
+static int map_request(struct dm_rq_target_io *tio)
 {
 	int r;
 	struct dm_target *ti = tio->ti;
+	struct mapped_device *md = tio->md;
+	struct request *rq = tio->orig;
 	struct request *clone = NULL;
 
 	if (tio->clone) {
@@ -676,7 +678,7 @@ static int map_request(struct dm_rq_target_io *tio, struct request *rq,
 		break;
 	case DM_MAPIO_DELAY_REQUEUE:
 		/* The target wants to requeue the I/O after a delay */
-		dm_requeue_original_request(md, tio->orig, true);
+		dm_requeue_original_request(tio, true);
 		break;
 	default:
 		if (r > 0) {
@@ -727,11 +729,9 @@ static void dm_start_request(struct mapped_device *md, struct request *orig)
 static void map_tio_request(struct kthread_work *work)
 {
 	struct dm_rq_target_io *tio = container_of(work, struct dm_rq_target_io, work);
-	struct request *rq = tio->orig;
-	struct mapped_device *md = tio->md;
 
-	if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE)
-		dm_requeue_original_request(md, rq, false);
+	if (map_request(tio) == DM_MAPIO_REQUEUE)
+		dm_requeue_original_request(tio, false);
 }
 
 ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf)
@@ -917,7 +917,7 @@ static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
 	tio->ti = ti;
 
 	/* Direct call is fine since .queue_rq allows allocations */
-	if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE) {
+	if (map_request(tio) == DM_MAPIO_REQUEUE) {
 		/* Undo dm_start_request() before requeuing */
 		rq_end_stats(md, rq);
 		rq_completed(md, rq_data_dir(rq), false);
```
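
The same argument-reduction pattern applies whenever a per-request context structure already owns the pointers a helper needs. A minimal, self-contained sketch of the before/after shape, using hypothetical simplified types (struct rq_context and the requeue_original_* functions are illustrative names, not kernel code):

```c
#include <stdbool.h>

/* Hypothetical, simplified stand-ins for the kernel types involved. */
struct mapped_device;
struct request;

/* Plays the role of dm_rq_target_io: the context already knows its
 * device and its original request. */
struct rq_context {
	struct mapped_device *md;
	struct request *orig;
};

/* Before: every caller must thread md and rq through the call, even
 * though both are reachable from the context it already holds. */
static void requeue_original_before(struct mapped_device *md,
				    struct request *rq, bool delay_requeue)
{
	/* ... requeue rq on md's queue, optionally after a delay ... */
	(void)md; (void)rq; (void)delay_requeue;
}

/* After: callers hand over only the context; the helper derives the
 * rest itself.  Behaviour is unchanged, the call sites just shrink. */
static void requeue_original_after(struct rq_context *ctx, bool delay_requeue)
{
	struct mapped_device *md = ctx->md;
	struct request *rq = ctx->orig;

	/* ... identical requeue logic using md and rq ... */
	(void)md; (void)rq; (void)delay_requeue;
}
```

Passing only the context means callers can never supply a mismatched md/rq pair, and later patches can consume additional tio fields without touching every call site.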