commit     1081230b748de8f03f37f80c53dfa89feda9b8de (patch)
tree       7238d60e01f0843bad8f03b5d84e4220fbba5e76 /block
parent     df910390e2db07a76c87f258475f6c96253cee6c (diff)
parent     2ca495ac27d245513c11fed70591b1838250e240 (diff)
author     Linus Torvalds <torvalds@linux-foundation.org>  2015-09-02 13:10:25 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2015-09-02 13:10:25 -0700
Merge branch 'for-4.3/core' of git://git.kernel.dk/linux-block
Pull core block updates from Jens Axboe:
"This first core part of the block IO changes contains:
 - Cleanup of the bio IO error signaling from Christoph. We used to
 rely on the uptodate bit and passing around of an error; now we
 store the error in the bio itself (sketched just after this list).
- Improvement of the above from myself, by shrinking the bio size
down again to fit in two cachelines on x86-64.
 - Revert of the earlier max_hw_sectors cap removal, from Jeff Moyer.
 The removal caused performance regressions in various tests, so the
 limit is reinstated and bumped to a more reasonable size.
- Make /sys/block/<dev>/queue/discard_max_bytes writeable, by me.
Most devices have huge trim limits, which can cause nasty latencies
when deleting files. Enable the admin to configure the size down.
We will look into having a more sane default instead of UINT_MAX
sectors.
 - Improvement of the SG gaps logic from Keith Busch, replacing the
 SG_GAPS queue flag with a queue limits mask.
- Enable the block core to handle arbitrarily sized bios, which
enables a nice simplification of bio_add_page() (which is an IO hot
path). From Kent.
- Improvements to the partition io stats accounting, making it
faster. From Ming Lei.
- Also from Ming Lei, a basic fixup for overflow of the sysfs pending
file in blk-mq, as well as a fix for a blk-mq timeout race
condition.
 - Ming Lin has been carrying Kent's above-mentioned patches forward
 for a while, and testing them. Ming also did a few fixes around
 that.
- Sasha Levin found and fixed a use-after-free problem introduced by
the bio->bi_error changes from Christoph.
- Small blk cgroup cleanup from Viresh Kumar"
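
To make the bi_error change concrete, here is a minimal sketch (not part of the merge itself; my_end_io_old/my_end_io_new and the messages are hypothetical) of how a completion callback looks before and after this series:

/* Before this merge: the error arrived as a second argument. */
static void my_end_io_old(struct bio *bio, int error)
{
        if (error)
                pr_err("I/O failed: %d\n", error);
        bio_put(bio);
}

/* After this merge: the error travels in the bio itself. */
static void my_end_io_new(struct bio *bio)
{
        if (bio->bi_error)
                pr_err("I/O failed: %d\n", bio->bi_error);
        bio_put(bio);
}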
* 'for-4.3/core' of git://git.kernel.dk/linux-block: (26 commits)
blk: Fix bio_io_vec index when checking bvec gaps
block: Replace SG_GAPS with new queue limits mask
block: bump BLK_DEF_MAX_SECTORS to 2560
Revert "block: remove artifical max_hw_sectors cap"
blk-mq: fix race between timeout and freeing request
blk-mq: fix buffer overflow when reading sysfs file of 'pending'
Documentation: update notes in biovecs about arbitrarily sized bios
block: remove bio_get_nr_vecs()
fs: use helper bio_add_page() instead of open coding on bi_io_vec
block: kill merge_bvec_fn() completely
md/raid5: get rid of bio_fits_rdev()
md/raid5: split bio for chunk_aligned_read
block: remove split code in blkdev_issue_{discard,write_same}
btrfs: remove bio splitting and merge_bvec_fn() calls
bcache: remove driver private bio splitting code
block: simplify bio_add_page()
block: make generic_make_request handle arbitrarily sized bios
blk-cgroup: Drop unlikely before IS_ERR(_OR_NULL)
block: don't access bio->bi_error after bio_put()
block: shrink struct bio down to 2 cache lines again
...
Diffstat (limited to 'block')
 block/bio-integrity.c     |  11
 block/bio.c               | 212
 block/blk-core.c          |  36
 block/blk-flush.c         |  15
 block/blk-lib.c           |  77
 block/blk-map.c           |   4
 block/blk-merge.c         | 169
 block/blk-mq-sysfs.c      |  25
 block/blk-mq-tag.c        |   4
 block/blk-mq-tag.h        |  12
 block/blk-mq.c            |  26
 block/blk-settings.c      |  44
 block/blk-sysfs.c         |  43
 block/blk.h               |   6
 block/bounce.c            |  29
 block/genhd.c             |   9
 block/partition-generic.c |  12
 17 files changed, 421 insertions(+), 313 deletions(-)
diff --git a/block/bio-integrity.c b/block/bio-integrity.c
index 719b7152aed1..4aecca79374a 100644
--- a/block/bio-integrity.c
+++ b/block/bio-integrity.c
@@ -355,13 +355,12 @@ static void bio_integrity_verify_fn(struct work_struct *work)
 		container_of(work, struct bio_integrity_payload, bip_work);
 	struct bio *bio = bip->bip_bio;
 	struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
-	int error;
 
-	error = bio_integrity_process(bio, bi->verify_fn);
+	bio->bi_error = bio_integrity_process(bio, bi->verify_fn);
 
 	/* Restore original bio completion handler */
 	bio->bi_end_io = bip->bip_end_io;
-	bio_endio(bio, error);
+	bio_endio(bio);
 }
 
 /**
@@ -376,7 +375,7 @@ static void bio_integrity_verify_fn(struct work_struct *work)
  * in process context.	This function postpones completion
  * accordingly.
  */
-void bio_integrity_endio(struct bio *bio, int error)
+void bio_integrity_endio(struct bio *bio)
 {
 	struct bio_integrity_payload *bip = bio_integrity(bio);
 
@@ -386,9 +385,9 @@ void bio_integrity_endio(struct bio *bio, int error)
 	 * integrity metadata. Restore original bio end_io handler
 	 * and run it.
 	 */
-	if (error) {
+	if (bio->bi_error) {
 		bio->bi_end_io = bip->bip_end_io;
-		bio_endio(bio, error);
+		bio_endio(bio);
 		return;
 	}
diff --git a/block/bio.c b/block/bio.c
index d6e5ba3399f0..515b5434fe2d 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -269,7 +269,6 @@ static void bio_free(struct bio *bio)
 void bio_init(struct bio *bio)
 {
 	memset(bio, 0, sizeof(*bio));
-	bio->bi_flags = 1 << BIO_UPTODATE;
 	atomic_set(&bio->__bi_remaining, 1);
 	atomic_set(&bio->__bi_cnt, 1);
 }
@@ -292,14 +291,17 @@ void bio_reset(struct bio *bio)
 	__bio_free(bio);
 
 	memset(bio, 0, BIO_RESET_BYTES);
-	bio->bi_flags = flags | (1 << BIO_UPTODATE);
+	bio->bi_flags = flags;
 	atomic_set(&bio->__bi_remaining, 1);
 }
 EXPORT_SYMBOL(bio_reset);
 
-static void bio_chain_endio(struct bio *bio, int error)
+static void bio_chain_endio(struct bio *bio)
 {
-	bio_endio(bio->bi_private, error);
+	struct bio *parent = bio->bi_private;
+
+	parent->bi_error = bio->bi_error;
+	bio_endio(parent);
 	bio_put(bio);
 }
 
@@ -309,7 +311,7 @@ static void bio_chain_endio(struct bio *bio, int error)
  */
 static inline void bio_inc_remaining(struct bio *bio)
 {
-	bio->bi_flags |= (1 << BIO_CHAIN);
+	bio_set_flag(bio, BIO_CHAIN);
 	smp_mb__before_atomic();
 	atomic_inc(&bio->__bi_remaining);
 }
@@ -493,7 +495,7 @@ struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
 		if (unlikely(!bvl))
 			goto err_free;
 
-		bio->bi_flags |= 1 << BIO_OWNS_VEC;
+		bio_set_flag(bio, BIO_OWNS_VEC);
 	} else if (nr_iovecs) {
 		bvl = bio->bi_inline_vecs;
 	}
@@ -578,7 +580,7 @@ void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
 	 * so we don't set nor calculate new physical/hw segment counts here
 	 */
 	bio->bi_bdev = bio_src->bi_bdev;
-	bio->bi_flags |= 1 << BIO_CLONED;
+	bio_set_flag(bio, BIO_CLONED);
 	bio->bi_rw = bio_src->bi_rw;
 	bio->bi_iter = bio_src->bi_iter;
 	bio->bi_io_vec = bio_src->bi_io_vec;
@@ -692,31 +694,22 @@ integrity_clone:
 EXPORT_SYMBOL(bio_clone_bioset);
 
 /**
- * bio_get_nr_vecs - return approx number of vecs
- * @bdev: I/O target
+ * bio_add_pc_page - attempt to add page to bio
+ * @q: the target queue
+ * @bio: destination bio
+ * @page: page to add
+ * @len: vec entry length
+ * @offset: vec entry offset
  *
- * Return the approximate number of pages we can send to this target.
- * There's no guarantee that you will be able to fit this number of pages
- * into a bio, it does not account for dynamic restrictions that vary
- * on offset.
+ * Attempt to add a page to the bio_vec maplist. This can fail for a
+ * number of reasons, such as the bio being full or target block device
+ * limitations. The target block device must allow bio's up to PAGE_SIZE,
+ * so it is always possible to add a single page to an empty bio.
+ *
+ * This should only be used by REQ_PC bios.
  */
-int bio_get_nr_vecs(struct block_device *bdev)
-{
-	struct request_queue *q = bdev_get_queue(bdev);
-	int nr_pages;
-
-	nr_pages = min_t(unsigned,
-		     queue_max_segments(q),
-		     queue_max_sectors(q) / (PAGE_SIZE >> 9) + 1);
-
-	return min_t(unsigned, nr_pages, BIO_MAX_PAGES);
-
-}
-EXPORT_SYMBOL(bio_get_nr_vecs);
-
-static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
-			  *page, unsigned int len, unsigned int offset,
-			  unsigned int max_sectors)
+int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page
+		    *page, unsigned int len, unsigned int offset)
 {
 	int retried_segments = 0;
 	struct bio_vec *bvec;
@@ -727,7 +720,7 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
 	if (unlikely(bio_flagged(bio, BIO_CLONED)))
 		return 0;
 
-	if (((bio->bi_iter.bi_size + len) >> 9) > max_sectors)
+	if (((bio->bi_iter.bi_size + len) >> 9) > queue_max_hw_sectors(q))
 		return 0;
 
 	/*
@@ -740,28 +733,7 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
 
 		if (page == prev->bv_page &&
 		    offset == prev->bv_offset + prev->bv_len) {
-			unsigned int prev_bv_len = prev->bv_len;
 			prev->bv_len += len;
-
-			if (q->merge_bvec_fn) {
-				struct bvec_merge_data bvm = {
-					/* prev_bvec is already charged in
-					   bi_size, discharge it in order to
-					   simulate merging updated prev_bvec
-					   as new bvec. */
-					.bi_bdev = bio->bi_bdev,
-					.bi_sector = bio->bi_iter.bi_sector,
-					.bi_size = bio->bi_iter.bi_size -
-						prev_bv_len,
-					.bi_rw = bio->bi_rw,
-				};
-
-				if (q->merge_bvec_fn(q, &bvm, prev) < prev->bv_len) {
-					prev->bv_len -= len;
-					return 0;
-				}
-			}
-
 			bio->bi_iter.bi_size += len;
 			goto done;
 		}
@@ -770,8 +742,7 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
 		 * If the queue doesn't support SG gaps and adding this
 		 * offset would create a gap, disallow it.
 		 */
-		if (q->queue_flags & (1 << QUEUE_FLAG_SG_GAPS) &&
-		    bvec_gap_to_prev(prev, offset))
+		if (bvec_gap_to_prev(q, prev, offset))
 			return 0;
 	}
 
@@ -804,30 +775,9 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
 		blk_recount_segments(q, bio);
 	}
 
-	/*
-	 * if queue has other restrictions (eg varying max sector size
-	 * depending on offset), it can specify a merge_bvec_fn in the
-	 * queue to get further control
-	 */
-	if (q->merge_bvec_fn) {
-		struct bvec_merge_data bvm = {
-			.bi_bdev = bio->bi_bdev,
-			.bi_sector = bio->bi_iter.bi_sector,
-			.bi_size = bio->bi_iter.bi_size - len,
-			.bi_rw = bio->bi_rw,
-		};
-
-		/*
-		 * merge_bvec_fn() returns number of bytes it can accept
-		 * at this offset
-		 */
-		if (q->merge_bvec_fn(q, &bvm, bvec) < bvec->bv_len)
-			goto failed;
-	}
-
 	/* If we may be able to merge these biovecs, force a recount */
 	if (bio->bi_vcnt > 1 && (BIOVEC_PHYS_MERGEABLE(bvec-1, bvec)))
-		bio->bi_flags &= ~(1 << BIO_SEG_VALID);
+		bio_clear_flag(bio, BIO_SEG_VALID);
 
  done:
 	return len;
@@ -841,28 +791,6 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
 	blk_recount_segments(q, bio);
 	return 0;
 }
-
-/**
- * bio_add_pc_page - attempt to add page to bio
- * @q: the target queue
- * @bio: destination bio
- * @page: page to add
- * @len: vec entry length
- * @offset: vec entry offset
- *
- * Attempt to add a page to the bio_vec maplist. This can fail for a
- * number of reasons, such as the bio being full or target block device
- * limitations. The target block device must allow bio's up to PAGE_SIZE,
- * so it is always possible to add a single page to an empty bio.
- *
- * This should only be used by REQ_PC bios.
- */
-int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page *page,
-		    unsigned int len, unsigned int offset)
-{
-	return __bio_add_page(q, bio, page, len, offset,
-			      queue_max_hw_sectors(q));
-}
 EXPORT_SYMBOL(bio_add_pc_page);
 
 /**
@@ -872,22 +800,47 @@ EXPORT_SYMBOL(bio_add_pc_page);
  * @len: vec entry length
  * @offset: vec entry offset
  *
- * Attempt to add a page to the bio_vec maplist. This can fail for a
- * number of reasons, such as the bio being full or target block device
- * limitations. The target block device must allow bio's up to PAGE_SIZE,
- * so it is always possible to add a single page to an empty bio.
+ * Attempt to add a page to the bio_vec maplist. This will only fail
+ * if either bio->bi_vcnt == bio->bi_max_vecs or it's a cloned bio.
 */
-int bio_add_page(struct bio *bio, struct page *page, unsigned int len,
-		 unsigned int offset)
+int bio_add_page(struct bio *bio, struct page *page,
+		 unsigned int len, unsigned int offset)
 {
-	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
-	unsigned int max_sectors;
+	struct bio_vec *bv;
+
+	/*
+	 * cloned bio must not modify vec list
+	 */
+	if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
+		return 0;
 
-	max_sectors = blk_max_size_offset(q, bio->bi_iter.bi_sector);
-	if ((max_sectors < (len >> 9)) && !bio->bi_iter.bi_size)
-		max_sectors = len >> 9;
+	/*
+	 * For filesystems with a blocksize smaller than the pagesize
+	 * we will often be called with the same page as last time and
+	 * a consecutive offset.  Optimize this special case.
+	 */
+	if (bio->bi_vcnt > 0) {
+		bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
 
-	return __bio_add_page(q, bio, page, len, offset, max_sectors);
+		if (page == bv->bv_page &&
+		    offset == bv->bv_offset + bv->bv_len) {
+			bv->bv_len += len;
+			goto done;
+		}
+	}
+
+	if (bio->bi_vcnt >= bio->bi_max_vecs)
+		return 0;
+
+	bv		= &bio->bi_io_vec[bio->bi_vcnt];
+	bv->bv_page	= page;
+	bv->bv_len	= len;
+	bv->bv_offset	= offset;
+
+	bio->bi_vcnt++;
+done:
+	bio->bi_iter.bi_size += len;
+	return len;
 }
 EXPORT_SYMBOL(bio_add_page);
 
@@ -896,11 +849,11 @@ struct submit_bio_ret {
 	int error;
 };
 
-static void submit_bio_wait_endio(struct bio *bio, int error)
+static void submit_bio_wait_endio(struct bio *bio)
 {
 	struct submit_bio_ret *ret = bio->bi_private;
 
-	ret->error = error;
+	ret->error = bio->bi_error;
 	complete(&ret->event);
 }
 
@@ -1388,7 +1341,7 @@ struct bio *bio_map_user_iov(struct request_queue *q,
 	if (iter->type & WRITE)
 		bio->bi_rw |= REQ_WRITE;
 
-	bio->bi_flags |= (1 << BIO_USER_MAPPED);
+	bio_set_flag(bio, BIO_USER_MAPPED);
 
 	/*
 	 * subtle -- if __bio_map_user() ended up bouncing a bio,
@@ -1445,7 +1398,7 @@ void bio_unmap_user(struct bio *bio)
 }
 EXPORT_SYMBOL(bio_unmap_user);
 
-static void bio_map_kern_endio(struct bio *bio, int err)
+static void bio_map_kern_endio(struct bio *bio)
 {
 	bio_put(bio);
 }
@@ -1501,13 +1454,13 @@ struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len,
 }
 EXPORT_SYMBOL(bio_map_kern);
 
-static void bio_copy_kern_endio(struct bio *bio, int err)
+static void bio_copy_kern_endio(struct bio *bio)
 {
 	bio_free_pages(bio);
 	bio_put(bio);
 }
 
-static void bio_copy_kern_endio_read(struct bio *bio, int err)
+static void bio_copy_kern_endio_read(struct bio *bio)
 {
 	char *p = bio->bi_private;
 	struct bio_vec *bvec;
@@ -1518,7 +1471,7 @@ static void bio_copy_kern_endio_read(struct bio *bio, int err)
 		p += bvec->bv_len;
 	}
 
-	bio_copy_kern_endio(bio, err);
+	bio_copy_kern_endio(bio);
 }
 
 /**
@@ -1768,7 +1721,7 @@ static inline bool bio_remaining_done(struct bio *bio)
 	BUG_ON(atomic_read(&bio->__bi_remaining) <= 0);
 
 	if (atomic_dec_and_test(&bio->__bi_remaining)) {
-		clear_bit(BIO_CHAIN, &bio->bi_flags);
+		bio_clear_flag(bio, BIO_CHAIN);
 		return true;
 	}
 
@@ -1778,25 +1731,15 @@ static inline bool bio_remaining_done(struct bio *bio)
 /**
  * bio_endio - end I/O on a bio
  * @bio:	bio
- * @error:	error, if any
  *
  * Description:
- *   bio_endio() will end I/O on the whole bio. bio_endio() is the
- *   preferred way to end I/O on a bio, it takes care of clearing
- *   BIO_UPTODATE on error. @error is 0 on success, and and one of the
- *   established -Exxxx (-EIO, for instance) error values in case
- *   something went wrong. No one should call bi_end_io() directly on a
- *   bio unless they own it and thus know that it has an end_io
- *   function.
+ *   bio_endio() will end I/O on the whole bio. bio_endio() is the preferred
+ *   way to end I/O on a bio. No one should call bi_end_io() directly on a
+ *   bio unless they own it and thus know that it has an end_io function.
 **/
-void bio_endio(struct bio *bio, int error)
+void bio_endio(struct bio *bio)
 {
 	while (bio) {
-		if (error)
-			clear_bit(BIO_UPTODATE, &bio->bi_flags);
-		else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
-			error = -EIO;
-
 		if (unlikely(!bio_remaining_done(bio)))
 			break;
 
@@ -1810,11 +1753,12 @@ void bio_endio(struct bio *bio, int error)
 		 */
 		if (bio->bi_end_io == bio_chain_endio) {
 			struct bio *parent = bio->bi_private;
+			parent->bi_error = bio->bi_error;
 			bio_put(bio);
 			bio = parent;
 		} else {
 			if (bio->bi_end_io)
-				bio->bi_end_io(bio, error);
+				bio->bi_end_io(bio);
 			bio = NULL;
 		}
 	}
@@ -1882,7 +1826,7 @@ void bio_trim(struct bio *bio, int offset, int size)
 	if (offset == 0 && size == bio->bi_iter.bi_size)
 		return;
 
-	clear_bit(BIO_SEG_VALID, &bio->bi_flags);
+	bio_clear_flag(bio, BIO_SEG_VALID);
 
 	bio_advance(bio, offset << 9);
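
A minimal sketch of the simplified bio_add_page() contract above: with merge_bvec_fn gone and queue limits enforced by splitting, the call can only fail when the vec table is full or the bio is cloned. A caller that sizes the bio for one page per vec therefore needs no retry path (pages/nr_pages here are hypothetical):

        struct bio *bio = bio_alloc(GFP_NOIO, nr_pages);
        int i;

        for (i = 0; i < nr_pages; i++) {
                /*
                 * Cannot fail: the bio has nr_pages free vecs, is not
                 * cloned, and device limits are now handled by the
                 * splitter below generic_make_request(), not here.
                 */
                if (!bio_add_page(bio, pages[i], PAGE_SIZE, 0))
                        BUG();  /* would be a sizing bug, not a device limit */
        }

diff --git a/block/blk-core.c b/block/blk-core.c
index 627ed0c593fb..60912e983f16 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -143,18 +143,16 @@ static void req_bio_endio(struct request *rq, struct bio *bio,
 			  unsigned int nbytes, int error)
 {
 	if (error)
-		clear_bit(BIO_UPTODATE, &bio->bi_flags);
-	else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
-		error = -EIO;
+		bio->bi_error = error;
 
 	if (unlikely(rq->cmd_flags & REQ_QUIET))
-		set_bit(BIO_QUIET, &bio->bi_flags);
+		bio_set_flag(bio, BIO_QUIET);
 
 	bio_advance(bio, nbytes);
 
 	/* don't actually finish bio if it's part of flush sequence */
 	if (bio->bi_iter.bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ))
-		bio_endio(bio, error);
+		bio_endio(bio);
 }
 
 void blk_dump_rq_flags(struct request *rq, char *msg)
@@ -645,6 +643,10 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 	if (q->id < 0)
 		goto fail_q;
 
+	q->bio_split = bioset_create(BIO_POOL_SIZE, 0);
+	if (!q->bio_split)
+		goto fail_id;
+
 	q->backing_dev_info.ra_pages =
 			(VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
 	q->backing_dev_info.capabilities = BDI_CAP_CGROUP_WRITEBACK;
@@ -653,7 +655,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 
 	err = bdi_init(&q->backing_dev_info);
 	if (err)
-		goto fail_id;
+		goto fail_split;
 
 	setup_timer(&q->backing_dev_info.laptop_mode_wb_timer,
 		    laptop_mode_timer_fn, (unsigned long) q);
@@ -695,6 +697,8 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 
 fail_bdi:
 	bdi_destroy(&q->backing_dev_info);
+fail_split:
+	bioset_free(q->bio_split);
 fail_id:
 	ida_simple_remove(&blk_queue_ida, q->id);
 fail_q:
@@ -1612,6 +1616,8 @@ static void blk_queue_bio(struct request_queue *q, struct bio *bio)
 	struct request *req;
 	unsigned int request_count = 0;
 
+	blk_queue_split(q, &bio, q->bio_split);
+
 	/*
 	 * low level driver can indicate that it wants pages above a
 	 * certain limit bounced to low memory (ie for highmem, or even
@@ -1620,7 +1626,8 @@ static void blk_queue_bio(struct request_queue *q, struct bio *bio)
 	blk_queue_bounce(q, &bio);
 
 	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
-		bio_endio(bio, -EIO);
+		bio->bi_error = -EIO;
+		bio_endio(bio);
 		return;
 	}
 
@@ -1673,7 +1680,8 @@ get_rq:
 	 */
 	req = get_request(q, rw_flags, bio, GFP_NOIO);
 	if (IS_ERR(req)) {
-		bio_endio(bio, PTR_ERR(req));	/* @q is dead */
+		bio->bi_error = PTR_ERR(req);
+		bio_endio(bio);
 		goto out_unlock;
 	}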
@@ -1832,15 +1840,6 @@ generic_make_request_checks(struct bio *bio)
 		goto end_io;
 	}
 
-	if (likely(bio_is_rw(bio) &&
-		   nr_sectors > queue_max_hw_sectors(q))) {
-		printk(KERN_ERR "bio too big device %s (%u > %u)\n",
-		       bdevname(bio->bi_bdev, b),
-		       bio_sectors(bio),
-		       queue_max_hw_sectors(q));
-		goto end_io;
-	}
-
 	part = bio->bi_bdev->bd_part;
 	if (should_fail_request(part, bio->bi_iter.bi_size) ||
 	    should_fail_request(&part_to_disk(part)->part0,
@@ -1896,7 +1895,8 @@ generic_make_request_checks(struct bio *bio)
 	return true;
 
 end_io:
-	bio_endio(bio, err);
+	bio->bi_error = err;
+	bio_endio(bio);
 	return false;
 }
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 20badd7b9d1b..9c423e53324a 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -73,6 +73,7 @@
 
 #include "blk.h"
 #include "blk-mq.h"
+#include "blk-mq-tag.h"
 
 /* FLUSH/FUA sequences */
 enum {
@@ -226,7 +227,12 @@ static void flush_end_io(struct request *flush_rq, int error)
 	struct blk_flush_queue *fq = blk_get_flush_queue(q, flush_rq->mq_ctx);
 
 	if (q->mq_ops) {
+		struct blk_mq_hw_ctx *hctx;
+
+		/* release the tag's ownership to the req cloned from */
 		spin_lock_irqsave(&fq->mq_flush_lock, flags);
+		hctx = q->mq_ops->map_queue(q, flush_rq->mq_ctx->cpu);
+		blk_mq_tag_set_rq(hctx, flush_rq->tag, fq->orig_rq);
 		flush_rq->tag = -1;
 	}
 
@@ -308,11 +314,18 @@ static bool blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq)
 
 	/*
 	 * Borrow tag from the first request since they can't
-	 * be in flight at the same time.
+	 * be in flight at the same time. And acquire the tag's
+	 * ownership for flush req.
 	 */
 	if (q->mq_ops) {
+		struct blk_mq_hw_ctx *hctx;
+
 		flush_rq->mq_ctx = first_rq->mq_ctx;
 		flush_rq->tag = first_rq->tag;
+		fq->orig_rq = first_rq;
+
+		hctx = q->mq_ops->map_queue(q, first_rq->mq_ctx->cpu);
+		blk_mq_tag_set_rq(hctx, first_rq->tag, flush_rq);
 	}
 
 	flush_rq->cmd_type = REQ_TYPE_FS;
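
The blk-core hunk above calls blk_queue_split() at the top of blk_queue_bio(); a bio-based driver with its own make_request function would follow the same pattern once it stops relying on merge_bvec_fn. A hedged sketch with a hypothetical driver:

static void my_make_request(struct request_queue *q, struct bio *bio)
{
        /*
         * Let the core chop an arbitrarily sized bio down to what this
         * driver can handle; *bio is replaced by the front split and
         * the remainder is resubmitted via generic_make_request().
         */
        blk_queue_split(q, &bio, q->bio_split);

        /* ... process the (now suitably sized) bio ... */
}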
diff --git a/block/blk-lib.c b/block/blk-lib.c
index 7688ee3f5d72..bd40292e5009 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -11,21 +11,28 @@
 
 struct bio_batch {
 	atomic_t		done;
-	unsigned long		flags;
+	int			error;
 	struct completion	*wait;
 };
 
-static void bio_batch_end_io(struct bio *bio, int err)
+static void bio_batch_end_io(struct bio *bio)
 {
 	struct bio_batch *bb = bio->bi_private;
 
-	if (err && (err != -EOPNOTSUPP))
-		clear_bit(BIO_UPTODATE, &bb->flags);
+	if (bio->bi_error && bio->bi_error != -EOPNOTSUPP)
+		bb->error = bio->bi_error;
 	if (atomic_dec_and_test(&bb->done))
 		complete(bb->wait);
 	bio_put(bio);
 }
 
+/*
+ * Ensure that max discard sectors doesn't overflow bi_size and hopefully
+ * it is of the proper granularity as long as the granularity is a power
+ * of two.
+ */
+#define MAX_BIO_SECTORS ((1U << 31) >> 9)
+
 /**
  * blkdev_issue_discard - queue a discard
  * @bdev:	blockdev to issue discard for
@@ -43,8 +50,6 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 	DECLARE_COMPLETION_ONSTACK(wait);
 	struct request_queue *q = bdev_get_queue(bdev);
 	int type = REQ_WRITE | REQ_DISCARD;
-	unsigned int max_discard_sectors, granularity;
-	int alignment;
 	struct bio_batch bb;
 	struct bio *bio;
 	int ret = 0;
@@ -56,21 +61,6 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 	if (!blk_queue_discard(q))
 		return -EOPNOTSUPP;
 
-	/* Zero-sector (unknown) and one-sector granularities are the same. */
-	granularity = max(q->limits.discard_granularity >> 9, 1U);
-	alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;
-
-	/*
-	 * Ensure that max_discard_sectors is of the proper
-	 * granularity, so that requests stay aligned after a split.
-	 */
-	max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
-	max_discard_sectors -= max_discard_sectors % granularity;
-	if (unlikely(!max_discard_sectors)) {
-		/* Avoid infinite loop below. Being cautious never hurts. */
-		return -EOPNOTSUPP;
-	}
-
 	if (flags & BLKDEV_DISCARD_SECURE) {
 		if (!blk_queue_secdiscard(q))
 			return -EOPNOTSUPP;
@@ -78,13 +68,13 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 	}
 
 	atomic_set(&bb.done, 1);
-	bb.flags = 1 << BIO_UPTODATE;
+	bb.error = 0;
 	bb.wait = &wait;
 
 	blk_start_plug(&plug);
 	while (nr_sects) {
 		unsigned int req_sects;
-		sector_t end_sect, tmp;
+		sector_t end_sect;
 
 		bio = bio_alloc(gfp_mask, 1);
 		if (!bio) {
@@ -92,21 +82,8 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 			break;
 		}
 
-		req_sects = min_t(sector_t, nr_sects, max_discard_sectors);
-
-		/*
-		 * If splitting a request, and the next starting sector would be
-		 * misaligned, stop the discard at the previous aligned sector.
-		 */
+		req_sects = min_t(sector_t, nr_sects, MAX_BIO_SECTORS);
 		end_sect = sector + req_sects;
-		tmp = end_sect;
-		if (req_sects < nr_sects &&
-		    sector_div(tmp, granularity) != alignment) {
-			end_sect = end_sect - alignment;
-			sector_div(end_sect, granularity);
-			end_sect = end_sect * granularity + alignment;
-			req_sects = end_sect - sector;
-		}
 
 		bio->bi_iter.bi_sector = sector;
 		bio->bi_end_io = bio_batch_end_io;
@@ -134,9 +111,8 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 	if (!atomic_dec_and_test(&bb.done))
 		wait_for_completion_io(&wait);
 
-	if (!test_bit(BIO_UPTODATE, &bb.flags))
-		ret = -EIO;
-
+	if (bb.error)
+		return bb.error;
 	return ret;
 }
 EXPORT_SYMBOL(blkdev_issue_discard);
@@ -166,13 +142,11 @@ int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
 	if (!q)
 		return -ENXIO;
 
-	max_write_same_sectors = q->limits.max_write_same_sectors;
-
-	if (max_write_same_sectors == 0)
-		return -EOPNOTSUPP;
+	/* Ensure that max_write_same_sectors doesn't overflow bi_size */
+	max_write_same_sectors = UINT_MAX >> 9;
 
 	atomic_set(&bb.done, 1);
-	bb.flags = 1 << BIO_UPTODATE;
+	bb.error = 0;
 	bb.wait = &wait;
 
 	while (nr_sects) {
@@ -208,9 +182,8 @@ int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
 	if (!atomic_dec_and_test(&bb.done))
 		wait_for_completion_io(&wait);
 
-	if (!test_bit(BIO_UPTODATE, &bb.flags))
-		ret = -ENOTSUPP;
-
+	if (bb.error)
+		return bb.error;
 	return ret;
 }
 EXPORT_SYMBOL(blkdev_issue_write_same);
@@ -236,7 +209,7 @@ static int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
 	DECLARE_COMPLETION_ONSTACK(wait);
 
 	atomic_set(&bb.done, 1);
-	bb.flags = 1 << BIO_UPTODATE;
+	bb.error = 0;
 	bb.wait = &wait;
 
 	ret = 0;
@@ -270,10 +243,8 @@ static int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
 	if (!atomic_dec_and_test(&bb.done))
 		wait_for_completion_io(&wait);
 
-	if (!test_bit(BIO_UPTODATE, &bb.flags))
-		/* One of bios in the batch was completed with error.*/
-		ret = -EIO;
-
+	if (bb.error)
+		return bb.error;
 	return ret;
 }
diff --git a/block/blk-map.c b/block/blk-map.c
index da310a105429..233841644c9d 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -94,7 +94,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
 		return PTR_ERR(bio);
 
 	if (map_data && map_data->null_mapped)
-		bio->bi_flags |= (1 << BIO_NULL_MAPPED);
+		bio_set_flag(bio, BIO_NULL_MAPPED);
 
 	if (bio->bi_iter.bi_size != iter->count) {
 		/*
@@ -103,7 +103,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
 		 * normal IO completion path
 		 */
 		bio_get(bio);
-		bio_endio(bio, 0);
+		bio_endio(bio);
 		__blk_rq_unmap_user(bio);
 		return -EINVAL;
 	}
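
With the granularity/alignment juggling moved out of blkdev_issue_discard() (blk-lib.c above) and into blk_bio_discard_split() below, a caller simply issues the full range and lets the splitter align it. A sketch with hypothetical variables:

        /*
         * Discard a whole range in one call; splitting to the device's
         * discard granularity now happens below generic_make_request().
         */
        int err = blkdev_issue_discard(bdev, start_sector, nr_sectors,
                                       GFP_KERNEL, 0);
        if (err)
                pr_warn("discard failed: %d\n", err);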
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 30a0d9f89017..b2625271a572 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -9,12 +9,146 @@
 
 #include "blk.h"
 
+static struct bio *blk_bio_discard_split(struct request_queue *q,
+					 struct bio *bio,
+					 struct bio_set *bs)
+{
+	unsigned int max_discard_sectors, granularity;
+	int alignment;
+	sector_t tmp;
+	unsigned split_sectors;
+
+	/* Zero-sector (unknown) and one-sector granularities are the same. */
+	granularity = max(q->limits.discard_granularity >> 9, 1U);
+
+	max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
+	max_discard_sectors -= max_discard_sectors % granularity;
+
+	if (unlikely(!max_discard_sectors)) {
+		/* XXX: warn */
+		return NULL;
+	}
+
+	if (bio_sectors(bio) <= max_discard_sectors)
+		return NULL;
+
+	split_sectors = max_discard_sectors;
+
+	/*
+	 * If the next starting sector would be misaligned, stop the discard at
+	 * the previous aligned sector.
+	 */
+	alignment = (q->limits.discard_alignment >> 9) % granularity;
+
+	tmp = bio->bi_iter.bi_sector + split_sectors - alignment;
+	tmp = sector_div(tmp, granularity);
+
+	if (split_sectors > tmp)
+		split_sectors -= tmp;
+
+	return bio_split(bio, split_sectors, GFP_NOIO, bs);
+}
+
+static struct bio *blk_bio_write_same_split(struct request_queue *q,
+					    struct bio *bio,
+					    struct bio_set *bs)
+{
+	if (!q->limits.max_write_same_sectors)
+		return NULL;
+
+	if (bio_sectors(bio) <= q->limits.max_write_same_sectors)
+		return NULL;
+
+	return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs);
+}
+
+static struct bio *blk_bio_segment_split(struct request_queue *q,
+					 struct bio *bio,
+					 struct bio_set *bs)
+{
+	struct bio *split;
+	struct bio_vec bv, bvprv;
+	struct bvec_iter iter;
+	unsigned seg_size = 0, nsegs = 0, sectors = 0;
+	int prev = 0;
+
+	bio_for_each_segment(bv, bio, iter) {
+		sectors += bv.bv_len >> 9;
+
+		if (sectors > queue_max_sectors(q))
+			goto split;
+
+		/*
+		 * If the queue doesn't support SG gaps and adding this
+		 * offset would create a gap, disallow it.
+		 */
+		if (prev && bvec_gap_to_prev(q, &bvprv, bv.bv_offset))
+			goto split;
+
+		if (prev && blk_queue_cluster(q)) {
+			if (seg_size + bv.bv_len > queue_max_segment_size(q))
+				goto new_segment;
+			if (!BIOVEC_PHYS_MERGEABLE(&bvprv, &bv))
+				goto new_segment;
+			if (!BIOVEC_SEG_BOUNDARY(q, &bvprv, &bv))
+				goto new_segment;
+
+			seg_size += bv.bv_len;
+			bvprv = bv;
+			prev = 1;
+			continue;
+		}
+new_segment:
+		if (nsegs == queue_max_segments(q))
+			goto split;
+
+		nsegs++;
+		bvprv = bv;
+		prev = 1;
+		seg_size = bv.bv_len;
+	}
+
+	return NULL;
+split:
+	split = bio_clone_bioset(bio, GFP_NOIO, bs);
+
+	split->bi_iter.bi_size -= iter.bi_size;
+	bio->bi_iter = iter;
+
+	if (bio_integrity(bio)) {
+		bio_integrity_advance(bio, split->bi_iter.bi_size);
+		bio_integrity_trim(split, 0, bio_sectors(split));
+	}
+
+	return split;
+}
+
+void blk_queue_split(struct request_queue *q, struct bio **bio,
+		     struct bio_set *bs)
+{
+	struct bio *split;
+
+	if ((*bio)->bi_rw & REQ_DISCARD)
+		split = blk_bio_discard_split(q, *bio, bs);
+	else if ((*bio)->bi_rw & REQ_WRITE_SAME)
+		split = blk_bio_write_same_split(q, *bio, bs);
+	else
+		split = blk_bio_segment_split(q, *bio, q->bio_split);
+
+	if (split) {
+		bio_chain(split, *bio);
+		generic_make_request(*bio);
+		*bio = split;
+	}
+}
+EXPORT_SYMBOL(blk_queue_split);
+
 static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
 					     struct bio *bio,
 					     bool no_sg_merge)
 {
 	struct bio_vec bv, bvprv = { NULL };
-	int cluster, high, highprv = 1;
+	int cluster, prev = 0;
 	unsigned int seg_size, nr_phys_segs;
 	struct bio *fbio, *bbio;
 	struct bvec_iter iter;
@@ -36,7 +170,6 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
 	cluster = blk_queue_cluster(q);
 	seg_size = 0;
 	nr_phys_segs = 0;
-	high = 0;
 	for_each_bio(bio) {
 		bio_for_each_segment(bv, bio, iter) {
 			/*
@@ -46,13 +179,7 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
 			if (no_sg_merge)
 				goto new_segment;
 
-			/*
-			 * the trick here is making sure that a high page is
-			 * never considered part of another segment, since
-			 * that might change with the bounce page.
-			 */
-			high = page_to_pfn(bv.bv_page) > queue_bounce_pfn(q);
-			if (!high && !highprv && cluster) {
+			if (prev && cluster) {
 				if (seg_size + bv.bv_len
 				    > queue_max_segment_size(q))
 					goto new_segment;
@@ -72,8 +199,8 @@ new_segment:
 
 			nr_phys_segs++;
 			bvprv = bv;
+			prev = 1;
 			seg_size = bv.bv_len;
-			highprv = high;
 		}
 		bbio = bio;
 	}
@@ -116,7 +243,7 @@ void blk_recount_segments(struct request_queue *q, struct bio *bio)
 		bio->bi_next = nxt;
 	}
 
-	bio->bi_flags |= (1 << BIO_SEG_VALID);
+	bio_set_flag(bio, BIO_SEG_VALID);
 }
 EXPORT_SYMBOL(blk_recount_segments);
 
@@ -356,12 +483,12 @@ static bool req_no_special_merge(struct request *req)
 	return !q->mq_ops && req->special;
 }
 
-static int req_gap_to_prev(struct request *req, struct request *next)
+static int req_gap_to_prev(struct request *req, struct bio *next)
 {
 	struct bio *prev = req->biotail;
 
-	return bvec_gap_to_prev(&prev->bi_io_vec[prev->bi_vcnt - 1],
-				next->bio->bi_io_vec[0].bv_offset);
+	return bvec_gap_to_prev(req->q, &prev->bi_io_vec[prev->bi_vcnt - 1],
+				next->bi_io_vec[0].bv_offset);
 }
 
 static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
@@ -378,8 +505,7 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
 	if (req_no_special_merge(req) || req_no_special_merge(next))
 		return 0;
 
-	if (test_bit(QUEUE_FLAG_SG_GAPS, &q->queue_flags) &&
-	    req_gap_to_prev(req, next))
+	if (req_gap_to_prev(req, next->bio))
 		return 0;
 
 	/*
@@ -564,8 +690,6 @@ int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
 
 bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
 {
-	struct request_queue *q = rq->q;
-
 	if (!rq_mergeable(rq) || !bio_mergeable(bio))
 		return false;
 
@@ -590,13 +714,8 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
 		return false;
 
 	/* Only check gaps if the bio carries data */
-	if (q->queue_flags & (1 << QUEUE_FLAG_SG_GAPS) && bio_has_data(bio)) {
-		struct bio_vec *bprev;
-
-		bprev = &rq->biotail->bi_io_vec[rq->biotail->bi_vcnt - 1];
-		if (bvec_gap_to_prev(bprev, bio->bi_io_vec[0].bv_offset))
-			return false;
-	}
+	if (bio_has_data(bio) && req_gap_to_prev(rq, bio))
+		return false;
 
 	return true;
 }
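
The bvec_gap_to_prev() calls used throughout blk-merge.c now take the queue and key off its virt_boundary_mask rather than the binary SG_GAPS flag. As a hedged sketch, the helper in include/linux/blkdev.h looks approximately like this as of this series:

static inline bool bvec_gap_to_prev(struct request_queue *q,
				    struct bio_vec *prev, unsigned int offset)
{
	if (!queue_virt_boundary(q))
		return false;
	/* a gap exists if the next vec doesn't start at offset 0, or the
	 * previous vec doesn't end on the boundary */
	return offset ||
	       ((prev->bv_offset + prev->bv_len) & queue_virt_boundary(q));
}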
diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
index b79685e06b70..279c5d674edf 100644
--- a/block/blk-mq-sysfs.c
+++ b/block/blk-mq-sysfs.c
@@ -141,15 +141,26 @@ static ssize_t blk_mq_sysfs_completed_show(struct blk_mq_ctx *ctx, char *page)
 
 static ssize_t sysfs_list_show(char *page, struct list_head *list, char *msg)
 {
-	char *start_page = page;
 	struct request *rq;
+	int len = snprintf(page, PAGE_SIZE - 1, "%s:\n", msg);
+
+	list_for_each_entry(rq, list, queuelist) {
+		const int rq_len = 2 * sizeof(rq) + 2;
+
+		/* if the output will be truncated */
+		if (PAGE_SIZE - 1 < len + rq_len) {
+			/* backspacing if it can't hold '\t...\n' */
+			if (PAGE_SIZE - 1 < len + 5)
+				len -= rq_len;
+			len += snprintf(page + len, PAGE_SIZE - 1 - len,
+					"\t...\n");
+			break;
+		}
+		len += snprintf(page + len, PAGE_SIZE - 1 - len,
+				"\t%p\n", rq);
+	}
 
-	page += sprintf(page, "%s:\n", msg);
-
-	list_for_each_entry(rq, list, queuelist)
-		page += sprintf(page, "\t%p\n", rq);
-
-	return page - start_page;
+	return len;
 }
 
 static ssize_t blk_mq_sysfs_rq_list_show(struct blk_mq_ctx *ctx, char *page)
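
The sysfs_list_show() rewrite above is an instance of a general pattern: accumulate into a PAGE_SIZE sysfs buffer with snprintf() and stop with an ellipsis before overflowing. A stand-alone sketch of the same idea, with a hypothetical item list:

static ssize_t format_items(char *page, const char **items, int n)
{
	int i, len = 0;

	for (i = 0; i < n; i++) {
		/* check the running length before committing another entry;
		 * snprintf() never writes past the buffer */
		if (len + 32 > PAGE_SIZE - 1) {
			len += snprintf(page + len, PAGE_SIZE - 1 - len,
					"\t...\n");
			break;
		}
		len += snprintf(page + len, PAGE_SIZE - 1 - len,
				"\t%s\n", items[i]);
	}
	return len;
}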
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 9b6e28830b82..9115c6d59948 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -429,7 +429,7 @@ static void bt_for_each(struct blk_mq_hw_ctx *hctx,
 		for (bit = find_first_bit(&bm->word, bm->depth);
 		     bit < bm->depth;
 		     bit = find_next_bit(&bm->word, bm->depth, bit + 1)) {
-			rq = blk_mq_tag_to_rq(hctx->tags, off + bit);
+			rq = hctx->tags->rqs[off + bit];
 			if (rq->q == hctx->queue)
 				fn(hctx, rq, data, reserved);
 		}
@@ -453,7 +453,7 @@ static void bt_tags_for_each(struct blk_mq_tags *tags,
 		for (bit = find_first_bit(&bm->word, bm->depth);
 		     bit < bm->depth;
 		     bit = find_next_bit(&bm->word, bm->depth, bit + 1)) {
-			rq = blk_mq_tag_to_rq(tags, off + bit);
+			rq = tags->rqs[off + bit];
 			fn(rq, data, reserved);
 		}
diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h
index 75893a34237d..9eb2cf4f01cb 100644
--- a/block/blk-mq-tag.h
+++ b/block/blk-mq-tag.h
@@ -89,4 +89,16 @@ static inline void blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
 	__blk_mq_tag_idle(hctx);
 }
 
+/*
+ * This helper should only be used for flush request to share tag
+ * with the request cloned from, and both the two requests can't be
+ * in flight at the same time. The caller has to make sure the tag
+ * can't be freed.
+ */
+static inline void blk_mq_tag_set_rq(struct blk_mq_hw_ctx *hctx,
+		unsigned int tag, struct request *rq)
+{
+	hctx->tags->rqs[tag] = rq;
+}
+
 #endif
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 7d842db59699..f2d67b4047a0 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -559,23 +559,9 @@ void blk_mq_abort_requeue_list(struct request_queue *q)
 }
 EXPORT_SYMBOL(blk_mq_abort_requeue_list);
 
-static inline bool is_flush_request(struct request *rq,
-		struct blk_flush_queue *fq, unsigned int tag)
-{
-	return ((rq->cmd_flags & REQ_FLUSH_SEQ) &&
-			fq->flush_rq->tag == tag);
-}
-
 struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
 {
-	struct request *rq = tags->rqs[tag];
-	/* mq_ctx of flush rq is always cloned from the corresponding req */
-	struct blk_flush_queue *fq = blk_get_flush_queue(rq->q, rq->mq_ctx);
-
-	if (!is_flush_request(rq, fq, tag))
-		return rq;
-
-	return fq->flush_rq;
+	return tags->rqs[tag];
 }
 EXPORT_SYMBOL(blk_mq_tag_to_rq);
 
@@ -1199,7 +1185,7 @@ static struct request *blk_mq_map_request(struct request_queue *q,
 	struct blk_mq_alloc_data alloc_data;
 
 	if (unlikely(blk_mq_queue_enter(q, GFP_KERNEL))) {
-		bio_endio(bio, -EIO);
+		bio_io_error(bio);
 		return NULL;
 	}
 
@@ -1283,10 +1269,12 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
 	blk_queue_bounce(q, &bio);
 
 	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
-		bio_endio(bio, -EIO);
+		bio_io_error(bio);
 		return;
 	}
 
+	blk_queue_split(q, &bio, q->bio_split);
+
 	if (!is_flush_fua && !blk_queue_nomerges(q) &&
 	    blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
 		return;
@@ -1368,10 +1356,12 @@ static void blk_sq_make_request(struct request_queue *q, struct bio *bio)
 	blk_queue_bounce(q, &bio);
 
 	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
-		bio_endio(bio, -EIO);
+		bio_io_error(bio);
 		return;
 	}
 
+	blk_queue_split(q, &bio, q->bio_split);
+
 	if (!is_flush_fua && !blk_queue_nomerges(q) &&
 	    blk_attempt_plug_merge(q, bio, &request_count, NULL))
 		return;
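
Taken together, the blk-mq-tag.h helper and the blk-flush.c hunks earlier implement a hand-over of a single tag between a request and the flush request cloned from it, which is what lets blk_mq_tag_to_rq() above shrink to a plain array lookup. In outline (a condensation of the diff, not new code):

	/* blk_kick_flush(): the flush request borrows first_rq's tag... */
	flush_rq->tag = first_rq->tag;
	fq->orig_rq = first_rq;
	blk_mq_tag_set_rq(hctx, first_rq->tag, flush_rq);   /* tag -> flush_rq */

	/* ...and flush_end_io() gives it back before the tag can be reused: */
	blk_mq_tag_set_rq(hctx, flush_rq->tag, fq->orig_rq); /* tag -> orig rq */
	flush_rq->tag = -1;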
diff --git a/block/blk-settings.c b/block/blk-settings.c
index e0057d035200..7d8f129a1516 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -53,28 +53,6 @@ void blk_queue_unprep_rq(struct request_queue *q, unprep_rq_fn *ufn)
 }
 EXPORT_SYMBOL(blk_queue_unprep_rq);
 
-/**
- * blk_queue_merge_bvec - set a merge_bvec function for queue
- * @q:		queue
- * @mbfn:	merge_bvec_fn
- *
- * Usually queues have static limitations on the max sectors or segments that
- * we can put in a request. Stacking drivers may have some settings that
- * are dynamic, and thus we have to query the queue whether it is ok to
- * add a new bio_vec to a bio at a given offset or not. If the block device
- * has such limitations, it needs to register a merge_bvec_fn to control
- * the size of bio's sent to it. Note that a block device *must* allow a
- * single page to be added to an empty bio. The block device driver may want
- * to use the bio_split() function to deal with these bio's. By default
- * no merge_bvec_fn is defined for a queue, and only the fixed limits are
- * honored.
- */
-void blk_queue_merge_bvec(struct request_queue *q, merge_bvec_fn *mbfn)
-{
-	q->merge_bvec_fn = mbfn;
-}
-EXPORT_SYMBOL(blk_queue_merge_bvec);
-
 void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn)
 {
 	q->softirq_done_fn = fn;
@@ -111,11 +89,13 @@ void blk_set_default_limits(struct queue_limits *lim)
 	lim->max_segments = BLK_MAX_SEGMENTS;
 	lim->max_integrity_segments = 0;
 	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
+	lim->virt_boundary_mask = 0;
 	lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
 	lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
 	lim->chunk_sectors = 0;
 	lim->max_write_same_sectors = 0;
 	lim->max_discard_sectors = 0;
+	lim->max_hw_discard_sectors = 0;
 	lim->discard_granularity = 0;
 	lim->discard_alignment = 0;
 	lim->discard_misaligned = 0;
@@ -257,7 +237,9 @@ void blk_limits_max_hw_sectors(struct queue_limits *limits, unsigned int max_hw_
 		       __func__, max_hw_sectors);
 	}
 
-	limits->max_sectors = limits->max_hw_sectors = max_hw_sectors;
+	limits->max_hw_sectors = max_hw_sectors;
+	limits->max_sectors = min_t(unsigned int, max_hw_sectors,
+				    BLK_DEF_MAX_SECTORS);
 }
 EXPORT_SYMBOL(blk_limits_max_hw_sectors);
 
@@ -303,6 +285,7 @@ EXPORT_SYMBOL(blk_queue_chunk_sectors);
 void blk_queue_max_discard_sectors(struct request_queue *q,
 		unsigned int max_discard_sectors)
 {
+	q->limits.max_hw_discard_sectors = max_discard_sectors;
 	q->limits.max_discard_sectors = max_discard_sectors;
 }
 EXPORT_SYMBOL(blk_queue_max_discard_sectors);
@@ -550,6 +533,8 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 
 	t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
 					    b->seg_boundary_mask);
+	t->virt_boundary_mask = min_not_zero(t->virt_boundary_mask,
+					    b->virt_boundary_mask);
 
 	t->max_segments = min_not_zero(t->max_segments, b->max_segments);
 	t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
@@ -641,6 +626,8 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 
 	t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
 					      b->max_discard_sectors);
+	t->max_hw_discard_sectors = min_not_zero(t->max_hw_discard_sectors,
+						 b->max_hw_discard_sectors);
 	t->discard_granularity = max(t->discard_granularity,
 				     b->discard_granularity);
 	t->discard_alignment = lcm_not_zero(t->discard_alignment, alignment) %
@@ -788,6 +775,17 @@ void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
 EXPORT_SYMBOL(blk_queue_segment_boundary);
 
 /**
+ * blk_queue_virt_boundary - set boundary rules for bio merging
+ * @q:  the request queue for the device
+ * @mask:  the memory boundary mask
+ **/
+void blk_queue_virt_boundary(struct request_queue *q, unsigned long mask)
+{
+	q->limits.virt_boundary_mask = mask;
+}
+EXPORT_SYMBOL(blk_queue_virt_boundary);
+
+/**
 * blk_queue_dma_alignment - set dma length and memory alignment
 * @q:     the request queue for the device
 * @mask:  alignment mask
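
A device with NVMe-style PRP constraints (every scatter element after the first must start page-aligned) would express that through the new limit instead of the old SG_GAPS flag. A hedged sketch of a hypothetical driver's queue setup:

	/*
	 * Segments must not straddle a PAGE_SIZE virtual boundary: a bvec
	 * that ends, or a following bvec that starts, off a page multiple
	 * now forces a split or blocks a merge.
	 */
	blk_queue_virt_boundary(q, PAGE_SIZE - 1);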
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 6264b382d4d1..3e44a9da2a13 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -145,12 +145,43 @@ static ssize_t queue_discard_granularity_show(struct request_queue *q, char *pag
 	return queue_var_show(q->limits.discard_granularity, page);
 }
 
+static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page)
+{
+	unsigned long long val;
+
+	val = q->limits.max_hw_discard_sectors << 9;
+	return sprintf(page, "%llu\n", val);
+}
+
 static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
 {
 	return sprintf(page, "%llu\n",
		       (unsigned long long)q->limits.max_discard_sectors << 9);
 }
 
+static ssize_t queue_discard_max_store(struct request_queue *q,
+				       const char *page, size_t count)
+{
+	unsigned long max_discard;
+	ssize_t ret = queue_var_store(&max_discard, page, count);
+
+	if (ret < 0)
+		return ret;
+
+	if (max_discard & (q->limits.discard_granularity - 1))
+		return -EINVAL;
+
+	max_discard >>= 9;
+	if (max_discard > UINT_MAX)
+		return -EINVAL;
+
+	if (max_discard > q->limits.max_hw_discard_sectors)
+		max_discard = q->limits.max_hw_discard_sectors;
+
+	q->limits.max_discard_sectors = max_discard;
+	return ret;
+}
+
 static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
 {
 	return queue_var_show(queue_discard_zeroes_data(q), page);
@@ -360,9 +391,15 @@ static struct queue_sysfs_entry queue_discard_granularity_entry = {
 	.show = queue_discard_granularity_show,
 };
 
+static struct queue_sysfs_entry queue_discard_max_hw_entry = {
+	.attr = {.name = "discard_max_hw_bytes", .mode = S_IRUGO },
+	.show = queue_discard_max_hw_show,
+};
+
 static struct queue_sysfs_entry queue_discard_max_entry = {
-	.attr = {.name = "discard_max_bytes", .mode = S_IRUGO },
+	.attr = {.name = "discard_max_bytes", .mode = S_IRUGO | S_IWUSR },
 	.show = queue_discard_max_show,
+	.store = queue_discard_max_store,
 };
 
 static struct queue_sysfs_entry queue_discard_zeroes_data_entry = {
@@ -421,6 +458,7 @@ static struct attribute *default_attrs[] = {
 	&queue_io_opt_entry.attr,
 	&queue_discard_granularity_entry.attr,
 	&queue_discard_max_entry.attr,
+	&queue_discard_max_hw_entry.attr,
 	&queue_discard_zeroes_data_entry.attr,
 	&queue_write_same_max_entry.attr,
 	&queue_nonrot_entry.attr,
@@ -523,6 +561,9 @@ static void blk_release_queue(struct kobject *kobj)
 
 	blk_trace_shutdown(q);
 
+	if (q->bio_split)
+		bioset_free(q->bio_split);
+
 	ida_simple_remove(&blk_queue_ida, q->id);
 	call_rcu(&q->rcu_head, blk_free_queue_rcu);
 }
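
From user space, the newly writable attribute above can be used to clamp discard sizes. A hedged sketch (the device name is hypothetical; per the store function, the value must be a multiple of discard_granularity, and values above discard_max_hw_bytes are silently clamped):

#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/block/sda/queue/discard_max_bytes", O_WRONLY);
	if (fd < 0)
		return 1;
	/* 64 MB: trades raw discard throughput for bounded latency */
	if (write(fd, "67108864", 8) != 8)
		return 1;
	return close(fd);
}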
diff --git a/block/blk.h b/block/blk.h
index 026d9594142b..838188b35a83 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -22,6 +22,12 @@ struct blk_flush_queue {
 	struct list_head	flush_queue[2];
 	struct list_head	flush_data_in_flight;
 	struct request		*flush_rq;
+
+	/*
+	 * flush_rq shares tag with this rq, both can't be active
+	 * at the same time
+	 */
+	struct request		*orig_rq;
 	spinlock_t		mq_flush_lock;
 };
diff --git a/block/bounce.c b/block/bounce.c
index b17311227c12..2c310ea007ee 100644
--- a/block/bounce.c
+++ b/block/bounce.c
@@ -123,7 +123,7 @@ static void copy_to_high_bio_irq(struct bio *to, struct bio *from)
 	}
 }
 
-static void bounce_end_io(struct bio *bio, mempool_t *pool, int err)
+static void bounce_end_io(struct bio *bio, mempool_t *pool)
 {
 	struct bio *bio_orig = bio->bi_private;
 	struct bio_vec *bvec, *org_vec;
@@ -141,39 +141,40 @@ static void bounce_end_io(struct bio *bio, mempool_t *pool, int err)
 		mempool_free(bvec->bv_page, pool);
 	}
 
-	bio_endio(bio_orig, err);
+	bio_orig->bi_error = bio->bi_error;
+	bio_endio(bio_orig);
 	bio_put(bio);
 }
 
-static void bounce_end_io_write(struct bio *bio, int err)
+static void bounce_end_io_write(struct bio *bio)
 {
-	bounce_end_io(bio, page_pool, err);
+	bounce_end_io(bio, page_pool);
 }
 
-static void bounce_end_io_write_isa(struct bio *bio, int err)
+static void bounce_end_io_write_isa(struct bio *bio)
 {
-	bounce_end_io(bio, isa_page_pool, err);
+	bounce_end_io(bio, isa_page_pool);
 }
 
-static void __bounce_end_io_read(struct bio *bio, mempool_t *pool, int err)
+static void __bounce_end_io_read(struct bio *bio, mempool_t *pool)
 {
 	struct bio *bio_orig = bio->bi_private;
 
-	if (test_bit(BIO_UPTODATE, &bio->bi_flags))
+	if (!bio->bi_error)
 		copy_to_high_bio_irq(bio_orig, bio);
 
-	bounce_end_io(bio, pool, err);
+	bounce_end_io(bio, pool);
 }
 
-static void bounce_end_io_read(struct bio *bio, int err)
+static void bounce_end_io_read(struct bio *bio)
 {
-	__bounce_end_io_read(bio, page_pool, err);
+	__bounce_end_io_read(bio, page_pool);
 }
 
-static void bounce_end_io_read_isa(struct bio *bio, int err)
+static void bounce_end_io_read_isa(struct bio *bio)
 {
-	__bounce_end_io_read(bio, isa_page_pool, err);
+	__bounce_end_io_read(bio, isa_page_pool);
 }
 
 #ifdef CONFIG_NEED_BOUNCE_POOL
@@ -185,7 +186,7 @@ static int must_snapshot_stable_pages(struct request_queue *q, struct bio *bio)
 	if (!bdi_cap_stable_pages_required(&q->backing_dev_info))
 		return 0;
 
-	return test_bit(BIO_SNAP_STABLE, &bio->bi_flags);
+	return bio_flagged(bio, BIO_SNAP_STABLE);
 }
 #else
 static int must_snapshot_stable_pages(struct request_queue *q, struct bio *bio)
diff --git a/block/genhd.c b/block/genhd.c
index 59a1395eedac..0c706f33a599 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -1110,8 +1110,7 @@ static void disk_release(struct device *dev)
 	disk_release_events(disk);
 	kfree(disk->random);
 	disk_replace_part_tbl(disk, NULL);
-	free_part_stats(&disk->part0);
-	free_part_info(&disk->part0);
+	hd_free_part(&disk->part0);
 	if (disk->queue)
 		blk_put_queue(disk->queue);
 	kfree(disk);
@@ -1285,7 +1284,11 @@ struct gendisk *alloc_disk_node(int minors, int node_id)
 		 * converted to make use of bd_mutex and sequence counters.
 		 */
 		seqcount_init(&disk->part0.nr_sects_seq);
-		hd_ref_init(&disk->part0);
+		if (hd_ref_init(&disk->part0)) {
+			hd_free_part(&disk->part0);
+			kfree(disk);
+			return NULL;
+		}
 
 		disk->minors = minors;
 		rand_initialize_disk(disk);
diff --git a/block/partition-generic.c b/block/partition-generic.c
index 0d9e5f97f0a8..e7711133284e 100644
--- a/block/partition-generic.c
+++ b/block/partition-generic.c
@@ -212,8 +212,7 @@ static void part_release(struct device *dev)
 {
 	struct hd_struct *p = dev_to_part(dev);
 	blk_free_devt(dev->devt);
-	free_part_stats(p);
-	free_part_info(p);
+	hd_free_part(p);
 	kfree(p);
 }
 
@@ -233,8 +232,9 @@ static void delete_partition_rcu_cb(struct rcu_head *head)
 	put_device(part_to_dev(part));
 }
 
-void __delete_partition(struct hd_struct *part)
+void __delete_partition(struct percpu_ref *ref)
 {
+	struct hd_struct *part = container_of(ref, struct hd_struct, ref);
 	call_rcu(&part->rcu_head, delete_partition_rcu_cb);
 }
 
@@ -255,7 +255,7 @@ void delete_partition(struct gendisk *disk, int partno)
 	kobject_put(part->holder_dir);
 	device_del(part_to_dev(part));
 
-	hd_struct_put(part);
+	hd_struct_kill(part);
 }
 
 static ssize_t whole_disk_show(struct device *dev,
@@ -356,8 +356,8 @@ struct hd_struct *add_partition(struct gendisk *disk, int partno,
 	if (!dev_get_uevent_suppress(ddev))
 		kobject_uevent(&pdev->kobj, KOBJ_ADD);
 
-	hd_ref_init(p);
-	return p;
+	if (!hd_ref_init(p))
+		return p;
 
 out_free_info:
 	free_part_info(p);
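
For context on the genhd.c/partition-generic.c hunks: the per-partition refcount becomes a percpu_ref in this series (part of the faster partition io stats accounting), which is why hd_ref_init() can now fail and why teardown goes through a kill primitive. The general pattern, as a sketch of percpu_ref usage with a hypothetical object rather than the exact block-layer code:

#include <linux/percpu-refcount.h>

struct my_obj {
	struct percpu_ref	ref;
	struct rcu_head		rcu_head;
};

static void my_release(struct percpu_ref *ref)
{
	struct my_obj *obj = container_of(ref, struct my_obj, ref);

	/* last reference dropped: free via RCU, as __delete_partition does */
	kfree_rcu(obj, rcu_head);
}

	/* init allocates percpu counters and so can fail; callers must
	 * handle an error (cf. the alloc_disk_node hunk above) */
	if (percpu_ref_init(&obj->ref, my_release, 0, GFP_KERNEL))
		return -ENOMEM;

	/* ... hot path: percpu_ref_get()/percpu_ref_put(), no atomics ... */

	/* teardown: switch to atomic mode and drop the base reference */
	percpu_ref_kill(&obj->ref);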