author    Linus Torvalds <torvalds@linux-foundation.org>  2020-12-16 12:57:51 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>  2020-12-16 12:57:51 -0800
commit    ac7ac4618cf25e0d5cd8eba83d5f600084b65b9a (patch)
tree      e5d28907ff72690a0463a2238b96202d751a535c /block/blk-merge.c
parent    48aba79bcf6ea05148dc82ad9c40713960b00396 (diff)
parent    fa94ba8a7b22890e6a17b39b9359e114fe18cd59 (diff)
Merge tag 'for-5.11/block-2020-12-14' of git://git.kernel.dk/linux-block
Pull block updates from Jens Axboe:
 "Another series of killing more code than what is being added, again
  thanks to Christoph's relentless cleanups and tech debt tackling.

  This contains:

   - blk-iocost improvements (Baolin Wang)

   - part0 iostat fix (Jeffle Xu)

   - Disable iopoll for split bios (Jeffle Xu)

   - block tracepoint cleanups (Christoph Hellwig)

   - Merging of struct block_device and hd_struct (Christoph Hellwig)

   - Rework/cleanup of how block device sizes are updated (Christoph
     Hellwig)

   - Simplification of gendisk lookup and removal of block device
     aliasing (Christoph Hellwig)

   - Block device ioctl cleanups (Christoph Hellwig)

   - Removal of bdget()/blkdev_get() as exported API (Christoph Hellwig)

   - Disk change rework, avoid ->revalidate_disk() (Christoph Hellwig)

   - sbitmap improvements (Pavel Begunkov)

   - Hybrid polling fix (Pavel Begunkov)

   - bvec iteration improvements (Pavel Begunkov)

   - Zone revalidation fixes (Damien Le Moal)

   - blk-throttle limit fix (Yu Kuai)

   - Various little fixes"

* tag 'for-5.11/block-2020-12-14' of git://git.kernel.dk/linux-block: (126 commits)
  blk-mq: fix msec comment from micro to milli seconds
  blk-mq: update arg in comment of blk_mq_map_queue
  blk-mq: add helper allocating tagset->tags
  Revert "block: Fix a lockdep complaint triggered by request queue flushing"
  nvme-loop: use blk_mq_hctx_set_fq_lock_class to set loop's lock class
  blk-mq: add new API of blk_mq_hctx_set_fq_lock_class
  block: disable iopoll for split bio
  block: Improve blk_revalidate_disk_zones() checks
  sbitmap: simplify wrap check
  sbitmap: replace CAS with atomic and
  sbitmap: remove swap_lock
  sbitmap: optimise sbitmap_deferred_clear()
  blk-mq: skip hybrid polling if iopoll doesn't spin
  blk-iocost: Factor out the base vrate change into a separate function
  blk-iocost: Factor out the active iocgs' state check into a separate function
  blk-iocost: Move the usage ratio calculation to the correct place
  blk-iocost: Remove unnecessary advance declaration
  blk-iocost: Fix some typos in comments
  blktrace: fix up a kerneldoc comment
  block: remove the request_queue to argument request based tracepoints
  ...
Diffstat (limited to 'block/blk-merge.c')
-rw-r--r--  block/blk-merge.c | 18 ++++++++++++------
1 file changed, 12 insertions(+), 6 deletions(-)
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 97b7c2821565..808768f6b174 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -279,6 +279,14 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
return NULL;
split:
*segs = nsegs;
+
+ /*
+ * Bio splitting may cause subtle trouble such as a hang when doing sync
+ * iopoll in the direct IO path. Since the performance gain of iopoll
+ * for big IO can be trivial, disable iopoll when a split is needed.
+ */
+ bio->bi_opf &= ~REQ_HIPRI;
+
return bio_split(bio, sectors, GFP_NOIO, bs);
}
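
Aside on the REQ_HIPRI clearing above: sync polled direct IO busy-polls on the blk_qc_t cookie that submit_bio() returns, but a split bio completes as multiple requests and the cookie identifies only one of them, so the poll loop can wait forever on a completion it will never reap. Below is a minimal sketch of that wait loop, loosely modeled on __blkdev_direct_IO_simple(); the bi_private convention is this sketch's assumption, not the exact fs/block_dev.c code.

	/*
	 * Hypothetical sync polled wait, assuming <linux/blkdev.h> and
	 * <linux/sched.h>, and an endio handler that clears bio->bi_private
	 * once the whole I/O is done. If the bio was split, 'qc' covers only
	 * one fragment, so blk_poll() may spin without ever reaping the
	 * completion this loop waits on; clearing REQ_HIPRI on split avoids
	 * that.
	 */
	static void sync_polled_wait(struct block_device *bdev, struct bio *bio)
	{
		blk_qc_t qc = submit_bio(bio);

		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (!READ_ONCE(bio->bi_private))	/* done */
				break;
			if (!blk_poll(bdev_get_queue(bdev), qc, true))
				io_schedule();
		}
		__set_current_state(TASK_RUNNING);
	}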
@@ -338,7 +346,7 @@ void __blk_queue_split(struct bio **bio, unsigned int *nr_segs)
split->bi_opf |= REQ_NOMERGE;
bio_chain(split, *bio);
- trace_block_split(q, split, (*bio)->bi_iter.bi_sector);
+ trace_block_split(split, (*bio)->bi_iter.bi_sector);
submit_bio_noacct(*bio);
*bio = split;
}
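
The trace_block_split() call losing its request_queue argument is one instance of the tracepoint cleanups named in the pull message: the event can derive the device from the bio itself, so the queue parameter is redundant. Roughly, the event prototype in include/trace/events/block.h changes as follows (a from-memory sketch of the 5.10 to 5.11 transition; check the tree for the exact definition):

	/* v5.10: caller must pass the queue */
	TP_PROTO(struct request_queue *q, struct bio *bio,
		 unsigned int new_sector)

	/* v5.11: queue dropped; the event derives the device from the bio */
	TP_PROTO(struct bio *bio, unsigned int new_sector)

The same pattern covers trace_block_rq_merge(), trace_block_bio_backmerge(), and trace_block_bio_frontmerge() in the hunks below.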
@@ -683,8 +691,6 @@ static void blk_account_io_merge_request(struct request *req)
part_stat_lock();
part_stat_inc(req->part, merges[op_stat_group(req_op(req))]);
part_stat_unlock();
-
- hd_struct_put(req->part);
}
}
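
The deleted hd_struct_put() follows from the merge of struct hd_struct into struct block_device listed in the pull message: the dedicated hd_struct refcounting helpers are removed along with the struct, so this merge-accounting path no longer drops its own partition reference. Roughly, the struct request field changes like this (a from-memory sketch; see each release for the exact definition):

	/* v5.10 */
	struct hd_struct *part;		/* balanced by hd_struct_put() */

	/* v5.11 */
	struct block_device *part;	/* no hd_struct refcounting helpers */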
@@ -801,7 +807,7 @@ static struct request *attempt_merge(struct request_queue *q,
*/
blk_account_io_merge_request(next);
- trace_block_rq_merge(q, next);
+ trace_block_rq_merge(next);
/*
* ownership of bio passed from next to req, return 'next' for
@@ -924,7 +930,7 @@ static enum bio_merge_status bio_attempt_back_merge(struct request *req,
if (!ll_back_merge_fn(req, bio, nr_segs))
return BIO_MERGE_FAILED;
- trace_block_bio_backmerge(req->q, req, bio);
+ trace_block_bio_backmerge(bio);
rq_qos_merge(req->q, req, bio);
if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
@@ -948,7 +954,7 @@ static enum bio_merge_status bio_attempt_front_merge(struct request *req,
if (!ll_front_merge_fn(req, bio, nr_segs))
return BIO_MERGE_FAILED;
- trace_block_bio_frontmerge(req->q, req, bio);
+ trace_block_bio_frontmerge(bio);
rq_qos_merge(req->q, req, bio);
if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)