author | Mike Snitzer <snitzer@redhat.com> | 2016-01-31 17:33:32 -0500
---|---|---
committer | Mike Snitzer <snitzer@redhat.com> | 2016-02-22 11:06:22 -0500
commit | c91852ff081561d8793d2927e55f5c5deaea6bff |
tree | 84a5ba9004d9ff5fef7116231766f157d0acbe4f | /drivers/md
parent | 16f122661dbb3dfefc60788b528b54ad702005aa |
dm: optimize dm_request_fn()
DM multipath is the only request-based DM target, and request-based DM only supports
tables with a single, immutable target. Leverage this fact in dm_request_fn().
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
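The core of the change: the per-dispatch dm_get_live_table() / dm_table_find_target() / dm_put_live_table() sequence is replaced by the target pointer cached on the mapped_device (md->immutable_target), with the table lookup kept only as a fallback when no immutable target is set. Below is a minimal userspace sketch of that caching pattern; the struct layouts and the get_live_table()/put_live_table() helpers are simplified, hypothetical stand-ins for the kernel types and the SRCU-protected dm_get_live_table()/dm_put_live_table() pair, not the actual kernel API.

```c
/*
 * Minimal userspace sketch of the fast path introduced by this patch.
 * All types and helpers here are simplified illustrations, not kernel code.
 */
#include <stdio.h>

struct dm_target {
	const char *name;
};

struct dm_table {
	struct dm_target *single_target;
};

struct mapped_device {
	/* Cached when a single-target, immutable table is loaded. */
	struct dm_target *immutable_target;
	struct dm_table *live_table;
};

/* Stand-ins for the SRCU-protected dm_get_live_table()/dm_put_live_table(). */
static struct dm_table *get_live_table(struct mapped_device *md)
{
	return md->live_table;
}

static void put_live_table(struct mapped_device *md)
{
	(void)md;
}

/*
 * Dispatch-path lookup: use the cached immutable target when available,
 * and fall back to the table lookup (the old per-request path) only
 * when it is not.
 */
static struct dm_target *request_target(struct mapped_device *md)
{
	struct dm_target *ti = md->immutable_target;

	if (!ti) {
		struct dm_table *map = get_live_table(md);

		ti = map->single_target;
		put_live_table(md);
	}
	return ti;
}

int main(void)
{
	struct dm_target mpath = { .name = "multipath" };
	struct dm_table table = { .single_target = &mpath };
	struct mapped_device md = {
		.immutable_target = &mpath,	/* fast path: no table lookup */
		.live_table = &table,
	};

	printf("dispatching to target: %s\n", request_target(&md)->name);
	return 0;
}
```

In the real code the payoff is that the common dispatch path no longer takes the SRCU read lock or walks the table for every request, as the diff below shows.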
Diffstat (limited to 'drivers/md')
-rw-r--r-- | drivers/md/dm.c | 47
1 file changed, 17 insertions(+), 30 deletions(-)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 35ca9d065760..621450282d08 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -2071,12 +2071,18 @@ static bool dm_request_peeked_before_merge_deadline(struct mapped_device *md)
 static void dm_request_fn(struct request_queue *q)
 {
 	struct mapped_device *md = q->queuedata;
-	int srcu_idx;
-	struct dm_table *map = dm_get_live_table(md, &srcu_idx);
-	struct dm_target *ti;
+	struct dm_target *ti = md->immutable_target;
 	struct request *rq;
 	struct dm_rq_target_io *tio;
-	sector_t pos;
+	sector_t pos = 0;
+
+	if (unlikely(!ti)) {
+		int srcu_idx;
+		struct dm_table *map = dm_get_live_table(md, &srcu_idx);
+
+		ti = dm_table_find_target(map, pos);
+		dm_put_live_table(md, srcu_idx);
+	}
 
 	/*
 	 * For suspend, check blk_queue_stopped() and increment
@@ -2087,33 +2093,21 @@ static void dm_request_fn(struct request_queue *q)
 	while (!blk_queue_stopped(q)) {
 		rq = blk_peek_request(q);
 		if (!rq)
-			goto out;
+			return;
 
 		/* always use block 0 to find the target for flushes for now */
 		pos = 0;
 		if (!(rq->cmd_flags & REQ_FLUSH))
 			pos = blk_rq_pos(rq);
 
-		ti = dm_table_find_target(map, pos);
-		if (!dm_target_is_valid(ti)) {
-			/*
-			 * Must perform setup, that rq_completed() requires,
-			 * before calling dm_kill_unmapped_request
-			 */
-			DMERR_LIMIT("request attempted access beyond the end of device");
-			dm_start_request(md, rq);
-			dm_kill_unmapped_request(rq, -EIO);
-			continue;
+		if ((dm_request_peeked_before_merge_deadline(md) &&
+		     md_in_flight(md) && rq->bio && rq->bio->bi_vcnt == 1 &&
+		     md->last_rq_pos == pos && md->last_rq_rw == rq_data_dir(rq)) ||
+		    (ti->type->busy && ti->type->busy(ti))) {
+			blk_delay_queue(q, HZ / 100);
+			return;
 		}
 
-		if (dm_request_peeked_before_merge_deadline(md) &&
-		    md_in_flight(md) && rq->bio && rq->bio->bi_vcnt == 1 &&
-		    md->last_rq_pos == pos && md->last_rq_rw == rq_data_dir(rq))
-			goto delay_and_out;
-
-		if (ti->type->busy && ti->type->busy(ti))
-			goto delay_and_out;
-
 		dm_start_request(md, rq);
 
 		tio = tio_from_request(rq);
@@ -2122,13 +2116,6 @@ static void dm_request_fn(struct request_queue *q)
 		queue_kthread_work(&md->kworker, &tio->work);
 		BUG_ON(!irqs_disabled());
 	}
-
-	goto out;
-
-delay_and_out:
-	blk_delay_queue(q, HZ / 100);
-out:
-	dm_put_live_table(md, srcu_idx);
 }
 
 static int dm_any_congested(void *congested_data, int bdi_bits)