author		Mike Snitzer <snitzer@redhat.com>	2018-11-08 14:59:41 -0500
committer	Mike Snitzer <snitzer@redhat.com>	2018-12-18 09:02:25 -0500
commit		2adc5c559a0770ef75d1647fbf557c7f56194ef8 (patch)
tree		5d14c5c74c6f62b7462b63725edf724e4d1fcc9a
parent		24113d4878439baf1f23c1a33dfcc340fba66e97 (diff)
dm rq: remove unused arguments from rq_completed()
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
-rw-r--r--	drivers/md/dm-rq.c	14
1 files changed, 5 insertions, 9 deletions
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 4e06be4f0a62..202e9be5aea7 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -128,7 +128,7 @@ static void rq_end_stats(struct mapped_device *md, struct request *orig)
  * the md may be freed in dm_put() at the end of this function.
  * Or do dm_get() before calling this function and dm_put() later.
  */
-static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
+static void rq_completed(struct mapped_device *md)
 {
 	/* nudge anyone waiting on suspend queue */
 	if (unlikely(waitqueue_active(&md->wait)))
@@ -147,7 +147,6 @@ static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
  */
 static void dm_end_request(struct request *clone, blk_status_t error)
 {
-	int rw = rq_data_dir(clone);
 	struct dm_rq_target_io *tio = clone->end_io_data;
 	struct mapped_device *md = tio->md;
 	struct request *rq = tio->orig;
@@ -157,7 +156,7 @@ static void dm_end_request(struct request *clone, blk_status_t error)
 
 	rq_end_stats(md, rq);
 	blk_mq_end_request(rq, error);
-	rq_completed(md, rw, true);
+	rq_completed(md);
 }
 
 static void __dm_mq_kick_requeue_list(struct request_queue *q, unsigned long msecs)
@@ -181,7 +180,6 @@ static void dm_requeue_original_request(struct dm_rq_target_io *tio, bool delay_
 {
 	struct mapped_device *md = tio->md;
 	struct request *rq = tio->orig;
-	int rw = rq_data_dir(rq);
 	unsigned long delay_ms = delay_requeue ? 100 : 0;
 
 	rq_end_stats(md, rq);
@@ -191,7 +189,7 @@ static void dm_requeue_original_request(struct dm_rq_target_io *tio, bool delay_
 	}
 
 	dm_mq_delay_requeue_request(rq, delay_ms);
-	rq_completed(md, rw, false);
+	rq_completed(md);
 }
 
 static void dm_done(struct request *clone, blk_status_t error, bool mapped)
@@ -246,15 +244,13 @@ static void dm_softirq_done(struct request *rq)
 	bool mapped = true;
 	struct dm_rq_target_io *tio = tio_from_request(rq);
 	struct request *clone = tio->clone;
-	int rw;
 
 	if (!clone) {
 		struct mapped_device *md = tio->md;
 
 		rq_end_stats(md, rq);
-		rw = rq_data_dir(rq);
 		blk_mq_end_request(rq, tio->error);
-		rq_completed(md, rw, false);
+		rq_completed(md);
 		return;
 	}
@@ -507,7 +503,7 @@ static blk_status_t dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
 	if (map_request(tio) == DM_MAPIO_REQUEUE) {
 		/* Undo dm_start_request() before requeuing */
 		rq_end_stats(md, rq);
-		rq_completed(md, rq_data_dir(rq), false);
+		rq_completed(md);
 		return BLK_STS_RESOURCE;
 	}
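
For context when reading the hunks above: with the rw and run_queue arguments gone, every call site collapses to rq_completed(md). A minimal sketch of the resulting helper follows, assuming the wake_up()/dm_put() body implied by the visible context lines and the "md may be freed in dm_put()" comment; it is not a verbatim copy of the upstream function.

/* Sketch only: body reconstructed from the diff context shown above. */
static void rq_completed(struct mapped_device *md)
{
	/* nudge anyone waiting on suspend queue */
	if (unlikely(waitqueue_active(&md->wait)))
		wake_up(&md->wait);

	/*
	 * dm_put() stays at the very end: per the comment above the old
	 * signature, the md may be freed once this reference is dropped.
	 */
	dm_put(md);
}

Since the commit subject notes that both removed arguments were unused, each caller simply stops computing rq_data_dir() and passes only the mapped_device, with no behavioral change intended.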