From c2b4bb8cb3741c0bacf3683e4c1ecd04c977ada3 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sun, 23 Aug 2020 11:10:42 +0200 Subject: block: fix locking for struct block_device size updates Two different callers use two different mutexes for updating the block device size, which obviously doesn't help to actually protect against concurrent updates from the different callers. In addition one of the locks, bd_mutex is rather prone to deadlocks with other parts of the block stack that use it for high level synchronization. Switch to using a new spinlock protecting just the size updates, as that is all we need, and make sure everyone does the update through the proper helper. This fixes a bug reported with the nvme revalidating disks during a hot removal operation, which can currently deadlock on bd_mutex. Reported-by: Xianting Tian Signed-off-by: Christoph Hellwig Reviewed-by: Sagi Grimberg Signed-off-by: Jens Axboe --- block/partitions/core.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'block') diff --git a/block/partitions/core.c b/block/partitions/core.c index 5b4869c08fb3..b1c0b50ca92d 100644 --- a/block/partitions/core.c +++ b/block/partitions/core.c @@ -592,8 +592,8 @@ int bdev_resize_partition(struct block_device *bdev, int partno, if (partition_overlaps(bdev->bd_disk, start, length, partno)) goto out_unlock; - part_nr_sects_write(part, (sector_t)length); - i_size_write(bdevp->bd_inode, length << SECTOR_SHIFT); + part_nr_sects_write(part, length); + bd_set_nr_sectors(bdevp, length); ret = 0; out_unlock: -- cgit v1.2.3 From f3256075ba49d80835b601bfbff350a2140b2924 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 27 Aug 2020 17:37:45 +0200 Subject: block: remove the BIO_NULL_MAPPED flag We can simply use a boolean flag in the bio_map_data data structure instead. Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-map.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'block') diff --git a/block/blk-map.c b/block/blk-map.c index 6e804892d5ec..51e6195f878d 100644 --- a/block/blk-map.c +++ b/block/blk-map.c @@ -12,7 +12,8 @@ #include "blk.h" struct bio_map_data { - int is_our_pages; + bool is_our_pages : 1; + bool is_null_mapped : 1; struct iov_iter iter; struct iovec iov[]; }; @@ -108,7 +109,7 @@ static int bio_uncopy_user(struct bio *bio) struct bio_map_data *bmd = bio->bi_private; int ret = 0; - if (!bio_flagged(bio, BIO_NULL_MAPPED)) { + if (!bmd || !bmd->is_null_mapped) { /* * if we're in a workqueue, the request is orphaned, so * don't copy into a random user address space, just free @@ -158,7 +159,7 @@ static struct bio *bio_copy_user_iov(struct request_queue *q, * The caller provided iov might point to an on-stack or otherwise * shortlived one. */ - bmd->is_our_pages = map_data ? 0 : 1; + bmd->is_our_pages = !map_data; nr_pages = DIV_ROUND_UP(offset + len, PAGE_SIZE); if (nr_pages > BIO_MAX_PAGES) @@ -234,7 +235,7 @@ static struct bio *bio_copy_user_iov(struct request_queue *q, bio->bi_private = bmd; if (map_data && map_data->null_mapped) - bio_set_flag(bio, BIO_NULL_MAPPED); + bmd->is_null_mapped = true; return bio; cleanup: if (!map_data) -- cgit v1.2.3 From 7b63c052a580d7d5ed83a6e2a327e85881fa679a Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 27 Aug 2020 17:37:46 +0200 Subject: block: remove __blk_rq_unmap_user Open code __blk_rq_unmap_user in the two callers. Both never pass a NULL bio, and one of them can use an existing local variable instead of the bio flag. 
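For context, an editorial sketch that is not part of any patch in this series: the map/unmap helpers being reworked here back the SCSI/NVMe passthrough paths, where a caller maps user memory into a request, executes it, and hands the original bio back to blk_rq_unmap_user(). The function name and error handling below are illustrative assumptions; the block-layer calls are the era's real API.

static int do_passthrough_read(struct request_queue *q, struct gendisk *disk,
                               void __user *ubuf, unsigned long len)
{
        struct request *rq;
        struct bio *bio;
        int ret;

        rq = blk_get_request(q, REQ_OP_DRV_IN, 0);
        if (IS_ERR(rq))
                return PTR_ERR(rq);

        ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
        if (ret)
                goto out_put;

        /* remember the original bio: the unmap below must see it */
        bio = rq->bio;

        blk_execute_rq(q, disk, rq, 0);

        /*
         * Copies data back or releases pinned pages, depending on whether
         * the bio was bounce-copied or directly user-mapped.
         */
        ret = blk_rq_unmap_user(bio);
out_put:
        blk_put_request(rq);
        return ret;
}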
Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-map.c | 29 +++++++++++------------------ 1 file changed, 11 insertions(+), 18 deletions(-) (limited to 'block') diff --git a/block/blk-map.c b/block/blk-map.c index 51e6195f878d..10de4809edf9 100644 --- a/block/blk-map.c +++ b/block/blk-map.c @@ -558,20 +558,6 @@ int blk_rq_append_bio(struct request *rq, struct bio **bio) } EXPORT_SYMBOL(blk_rq_append_bio); -static int __blk_rq_unmap_user(struct bio *bio) -{ - int ret = 0; - - if (bio) { - if (bio_flagged(bio, BIO_USER_MAPPED)) - bio_unmap_user(bio); - else - ret = bio_uncopy_user(bio); - } - - return ret; -} - static int __blk_rq_map_user_iov(struct request *rq, struct rq_map_data *map_data, struct iov_iter *iter, gfp_t gfp_mask, bool copy) @@ -599,7 +585,10 @@ static int __blk_rq_map_user_iov(struct request *rq, */ ret = blk_rq_append_bio(rq, &bio); if (ret) { - __blk_rq_unmap_user(orig_bio); + if (copy) + bio_uncopy_user(orig_bio); + else + bio_unmap_user(orig_bio); return ret; } bio_get(bio); @@ -701,9 +690,13 @@ int blk_rq_unmap_user(struct bio *bio) if (unlikely(bio_flagged(bio, BIO_BOUNCED))) mapped_bio = bio->bi_private; - ret2 = __blk_rq_unmap_user(mapped_bio); - if (ret2 && !ret) - ret = ret2; + if (bio_flagged(mapped_bio, BIO_USER_MAPPED)) { + bio_unmap_user(mapped_bio); + } else { + ret2 = bio_uncopy_user(mapped_bio); + if (ret2 && !ret) + ret = ret2; + } mapped_bio = bio; bio = bio->bi_next; -- cgit v1.2.3 From 7589ad6729d6a707c6ce97693106d42cbb121b42 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 27 Aug 2020 17:37:47 +0200 Subject: block: remove __blk_rq_map_user_iov Just duplicate a small amount of code in the low-level map into the bio and copy to the bio routines, leading to much easier to follow and maintain code, and better shared error handling. Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-map.c | 144 +++++++++++++++++++++----------------------------------- 1 file changed, 54 insertions(+), 90 deletions(-) (limited to 'block') diff --git a/block/blk-map.c b/block/blk-map.c index 10de4809edf9..427962ac2f67 100644 --- a/block/blk-map.c +++ b/block/blk-map.c @@ -127,24 +127,12 @@ static int bio_uncopy_user(struct bio *bio) return ret; } -/** - * bio_copy_user_iov - copy user data to bio - * @q: destination block queue - * @map_data: pointer to the rq_map_data holding pages (if necessary) - * @iter: iovec iterator - * @gfp_mask: memory allocation flags - * - * Prepares and returns a bio for indirect user io, bouncing data - * to/from kernel pages as necessary. Must be paired with - * call bio_uncopy_user() on io completion. - */ -static struct bio *bio_copy_user_iov(struct request_queue *q, - struct rq_map_data *map_data, struct iov_iter *iter, - gfp_t gfp_mask) +static int bio_copy_user_iov(struct request *rq, struct rq_map_data *map_data, + struct iov_iter *iter, gfp_t gfp_mask) { struct bio_map_data *bmd; struct page *page; - struct bio *bio; + struct bio *bio, *bounce_bio; int i = 0, ret; int nr_pages; unsigned int len = iter->count; @@ -152,7 +140,7 @@ static struct bio *bio_copy_user_iov(struct request_queue *q, bmd = bio_alloc_map_data(iter, gfp_mask); if (!bmd) - return ERR_PTR(-ENOMEM); + return -ENOMEM; /* * We need to do a deep copy of the iov_iter including the iovecs. 
@@ -169,8 +157,7 @@ static struct bio *bio_copy_user_iov(struct request_queue *q, bio = bio_kmalloc(gfp_mask, nr_pages); if (!bio) goto out_bmd; - - ret = 0; + bio->bi_opf |= req_op(rq); if (map_data) { nr_pages = 1 << map_data->page_order; @@ -187,7 +174,7 @@ static struct bio *bio_copy_user_iov(struct request_queue *q, if (map_data) { if (i == map_data->nr_entries * nr_pages) { ret = -ENOMEM; - break; + goto cleanup; } page = map_data->pages[i / nr_pages]; @@ -195,14 +182,14 @@ static struct bio *bio_copy_user_iov(struct request_queue *q, i++; } else { - page = alloc_page(q->bounce_gfp | gfp_mask); + page = alloc_page(rq->q->bounce_gfp | gfp_mask); if (!page) { ret = -ENOMEM; - break; + goto cleanup; } } - if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes) { + if (bio_add_pc_page(rq->q, bio, page, bytes, offset) < bytes) { if (!map_data) __free_page(page); break; @@ -212,9 +199,6 @@ static struct bio *bio_copy_user_iov(struct request_queue *q, offset = 0; } - if (ret) - goto cleanup; - if (map_data) map_data->offset += bio->bi_iter.bi_size; @@ -236,39 +220,42 @@ static struct bio *bio_copy_user_iov(struct request_queue *q, bio->bi_private = bmd; if (map_data && map_data->null_mapped) bmd->is_null_mapped = true; - return bio; + + bounce_bio = bio; + ret = blk_rq_append_bio(rq, &bounce_bio); + if (ret) + goto cleanup; + + /* + * We link the bounce buffer in and could have to traverse it later, so + * we have to get a ref to prevent it from being freed + */ + bio_get(bounce_bio); + return 0; cleanup: if (!map_data) bio_free_pages(bio); bio_put(bio); out_bmd: kfree(bmd); - return ERR_PTR(ret); + return ret; } -/** - * bio_map_user_iov - map user iovec into bio - * @q: the struct request_queue for the bio - * @iter: iovec iterator - * @gfp_mask: memory allocation flags - * - * Map the user space address into a bio suitable for io to a block - * device. Returns an error pointer in case of error. - */ -static struct bio *bio_map_user_iov(struct request_queue *q, - struct iov_iter *iter, gfp_t gfp_mask) +static int bio_map_user_iov(struct request *rq, struct iov_iter *iter, + gfp_t gfp_mask) { - unsigned int max_sectors = queue_max_hw_sectors(q); - int j; - struct bio *bio; + unsigned int max_sectors = queue_max_hw_sectors(rq->q); + struct bio *bio, *bounce_bio; int ret; + int j; if (!iov_iter_count(iter)) - return ERR_PTR(-EINVAL); + return -EINVAL; bio = bio_kmalloc(gfp_mask, iov_iter_npages(iter, BIO_MAX_PAGES)); if (!bio) - return ERR_PTR(-ENOMEM); + return -ENOMEM; + bio->bi_opf |= req_op(rq); while (iov_iter_count(iter)) { struct page **pages; @@ -284,7 +271,7 @@ static struct bio *bio_map_user_iov(struct request_queue *q, npages = DIV_ROUND_UP(offs + bytes, PAGE_SIZE); - if (unlikely(offs & queue_dma_alignment(q))) { + if (unlikely(offs & queue_dma_alignment(rq->q))) { ret = -EINVAL; j = 0; } else { @@ -296,7 +283,7 @@ static struct bio *bio_map_user_iov(struct request_queue *q, if (n > bytes) n = bytes; - if (!bio_add_hw_page(q, bio, page, n, offs, + if (!bio_add_hw_page(rq->q, bio, page, n, offs, max_sectors, &same_page)) { if (same_page) put_page(page); @@ -323,18 +310,30 @@ static struct bio *bio_map_user_iov(struct request_queue *q, bio_set_flag(bio, BIO_USER_MAPPED); /* - * subtle -- if bio_map_user_iov() ended up bouncing a bio, - * it would normally disappear when its bi_end_io is run. - * however, we need it for the unmap, so grab an extra - * reference to it + * Subtle: if we end up needing to bounce a bio, it would normally + * disappear when its bi_end_io is run. 
However, we need the original + * bio for the unmap, so grab an extra reference to it */ bio_get(bio); - return bio; + bounce_bio = bio; + ret = blk_rq_append_bio(rq, &bounce_bio); + if (ret) + goto out_put_orig; + + /* + * We link the bounce buffer in and could have to traverse it + * later, so we have to get a ref to prevent it from being freed + */ + bio_get(bounce_bio); + return 0; + + out_put_orig: + bio_put(bio); out_unmap: bio_release_pages(bio, false); bio_put(bio); - return ERR_PTR(ret); + return ret; } /** @@ -558,44 +557,6 @@ int blk_rq_append_bio(struct request *rq, struct bio **bio) } EXPORT_SYMBOL(blk_rq_append_bio); -static int __blk_rq_map_user_iov(struct request *rq, - struct rq_map_data *map_data, struct iov_iter *iter, - gfp_t gfp_mask, bool copy) -{ - struct request_queue *q = rq->q; - struct bio *bio, *orig_bio; - int ret; - - if (copy) - bio = bio_copy_user_iov(q, map_data, iter, gfp_mask); - else - bio = bio_map_user_iov(q, iter, gfp_mask); - - if (IS_ERR(bio)) - return PTR_ERR(bio); - - bio->bi_opf &= ~REQ_OP_MASK; - bio->bi_opf |= req_op(rq); - - orig_bio = bio; - - /* - * We link the bounce buffer in and could have to traverse it - * later so we have to get a ref to prevent it from being freed - */ - ret = blk_rq_append_bio(rq, &bio); - if (ret) { - if (copy) - bio_uncopy_user(orig_bio); - else - bio_unmap_user(orig_bio); - return ret; - } - bio_get(bio); - - return 0; -} - /** * blk_rq_map_user_iov - map user data to a request, for passthrough requests * @q: request queue where request should be inserted @@ -639,7 +600,10 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq, i = *iter; do { - ret =__blk_rq_map_user_iov(rq, map_data, &i, gfp_mask, copy); + if (copy) + ret = bio_copy_user_iov(rq, map_data, &i, gfp_mask); + else + ret = bio_map_user_iov(rq, &i, gfp_mask); if (ret) goto unmap_rq; if (!bio) -- cgit v1.2.3 From 3310eebafe6f9a872c1f757b3d822dafae9c0cd8 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 27 Aug 2020 17:37:48 +0200 Subject: block: remove the BIO_USER_MAPPED flag Just check if there is private data, in which case the bio must have originated from bio_copy_user_iov. Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-map.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) (limited to 'block') diff --git a/block/blk-map.c b/block/blk-map.c index 427962ac2f67..be118926ccf4 100644 --- a/block/blk-map.c +++ b/block/blk-map.c @@ -109,7 +109,7 @@ static int bio_uncopy_user(struct bio *bio) struct bio_map_data *bmd = bio->bi_private; int ret = 0; - if (!bmd || !bmd->is_null_mapped) { + if (!bmd->is_null_mapped) { /* * if we're in a workqueue, the request is orphaned, so * don't copy into a random user address space, just free @@ -307,8 +307,6 @@ static int bio_map_user_iov(struct request *rq, struct iov_iter *iter, break; } - bio_set_flag(bio, BIO_USER_MAPPED); - /* * Subtle: if we end up needing to bounce a bio, it would normally * disappear when its bi_end_io is run. 
However, we need the original @@ -654,12 +652,12 @@ int blk_rq_unmap_user(struct bio *bio) if (unlikely(bio_flagged(bio, BIO_BOUNCED))) mapped_bio = bio->bi_private; - if (bio_flagged(mapped_bio, BIO_USER_MAPPED)) { - bio_unmap_user(mapped_bio); - } else { + if (bio->bi_private) { ret2 = bio_uncopy_user(mapped_bio); if (ret2 && !ret) ret = ret2; + } else { + bio_unmap_user(mapped_bio); } mapped_bio = bio; -- cgit v1.2.3 From 339b5a25c28c1d85afd0ce882f57962ad550bc6a Mon Sep 17 00:00:00 2001 From: Danny Lin Date: Sat, 29 Aug 2020 02:13:53 -0700 Subject: blk-wbt: Remove obsolete multiqueue I/O scheduling comment This comment was added before the multiqueue I/O scheduler framework was introduced; multiqueue has support for I/O scheduling now, so this obsolete comment can be removed. Signed-off-by: Danny Lin Signed-off-by: Jens Axboe --- block/Kconfig | 2 -- 1 file changed, 2 deletions(-) (limited to 'block') diff --git a/block/Kconfig b/block/Kconfig index bbad5e8bbffe..a2297edfdde8 100644 --- a/block/Kconfig +++ b/block/Kconfig @@ -161,8 +161,6 @@ config BLK_WBT_MQ depends on BLK_WBT help Enable writeback throttling by default on multiqueue devices. - Multiqueue currently doesn't have support for IO scheduling, - enabling this option is recommended. config BLK_DEBUG_FS bool "Block layer debugging information in debugfs" -- cgit v1.2.3 From 8e756373d7c8eb6f8876411bed45e4b1c736eb53 Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Fri, 28 Aug 2020 10:52:54 +0800 Subject: block: Move bio merge related functions into blk-merge.c It's better to move bio merge related functions into blk-merge.c, which contains all merge related functions. Reviewed-by: Christoph Hellwig Signed-off-by: Baolin Wang Signed-off-by: Jens Axboe --- block/blk-core.c | 156 ----------------------------------------------------- block/blk-merge.c | 157 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 157 insertions(+), 156 deletions(-) (limited to 'block') diff --git a/block/blk-core.c b/block/blk-core.c index 10c08ac50697..11661f5b64d6 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -643,162 +643,6 @@ void blk_put_request(struct request *req) } EXPORT_SYMBOL(blk_put_request); -static void blk_account_io_merge_bio(struct request *req) -{ - if (!blk_do_io_stat(req)) - return; - - part_stat_lock(); - part_stat_inc(req->part, merges[op_stat_group(req_op(req))]); - part_stat_unlock(); -} - -bool bio_attempt_back_merge(struct request *req, struct bio *bio, - unsigned int nr_segs) -{ - const int ff = bio->bi_opf & REQ_FAILFAST_MASK; - - if (!ll_back_merge_fn(req, bio, nr_segs)) - return false; - - trace_block_bio_backmerge(req->q, req, bio); - rq_qos_merge(req->q, req, bio); - - if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff) - blk_rq_set_mixed_merge(req); - - req->biotail->bi_next = bio; - req->biotail = bio; - req->__data_len += bio->bi_iter.bi_size; - - bio_crypt_free_ctx(bio); - - blk_account_io_merge_bio(req); - return true; -} - -bool bio_attempt_front_merge(struct request *req, struct bio *bio, - unsigned int nr_segs) -{ - const int ff = bio->bi_opf & REQ_FAILFAST_MASK; - - if (!ll_front_merge_fn(req, bio, nr_segs)) - return false; - - trace_block_bio_frontmerge(req->q, req, bio); - rq_qos_merge(req->q, req, bio); - - if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff) - blk_rq_set_mixed_merge(req); - - bio->bi_next = req->bio; - req->bio = bio; - - req->__sector = bio->bi_iter.bi_sector; - req->__data_len += bio->bi_iter.bi_size; - - bio_crypt_do_front_merge(req, bio); - - 
blk_account_io_merge_bio(req); - return true; -} - -bool bio_attempt_discard_merge(struct request_queue *q, struct request *req, - struct bio *bio) -{ - unsigned short segments = blk_rq_nr_discard_segments(req); - - if (segments >= queue_max_discard_segments(q)) - goto no_merge; - if (blk_rq_sectors(req) + bio_sectors(bio) > - blk_rq_get_max_sectors(req, blk_rq_pos(req))) - goto no_merge; - - rq_qos_merge(q, req, bio); - - req->biotail->bi_next = bio; - req->biotail = bio; - req->__data_len += bio->bi_iter.bi_size; - req->nr_phys_segments = segments + 1; - - blk_account_io_merge_bio(req); - return true; -no_merge: - req_set_nomerge(q, req); - return false; -} - -/** - * blk_attempt_plug_merge - try to merge with %current's plugged list - * @q: request_queue new bio is being queued at - * @bio: new bio being queued - * @nr_segs: number of segments in @bio - * @same_queue_rq: pointer to &struct request that gets filled in when - * another request associated with @q is found on the plug list - * (optional, may be %NULL) - * - * Determine whether @bio being queued on @q can be merged with a request - * on %current's plugged list. Returns %true if merge was successful, - * otherwise %false. - * - * Plugging coalesces IOs from the same issuer for the same purpose without - * going through @q->queue_lock. As such it's more of an issuing mechanism - * than scheduling, and the request, while may have elvpriv data, is not - * added on the elevator at this point. In addition, we don't have - * reliable access to the elevator outside queue lock. Only check basic - * merging parameters without querying the elevator. - * - * Caller must ensure !blk_queue_nomerges(q) beforehand. - */ -bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio, - unsigned int nr_segs, struct request **same_queue_rq) -{ - struct blk_plug *plug; - struct request *rq; - struct list_head *plug_list; - - plug = blk_mq_plug(q, bio); - if (!plug) - return false; - - plug_list = &plug->mq_list; - - list_for_each_entry_reverse(rq, plug_list, queuelist) { - bool merged = false; - - if (rq->q == q && same_queue_rq) { - /* - * Only blk-mq multiple hardware queues case checks the - * rq in the same queue, there should be only one such - * rq in a queue - **/ - *same_queue_rq = rq; - } - - if (rq->q != q || !blk_rq_merge_ok(rq, bio)) - continue; - - switch (blk_try_merge(rq, bio)) { - case ELEVATOR_BACK_MERGE: - merged = bio_attempt_back_merge(rq, bio, nr_segs); - break; - case ELEVATOR_FRONT_MERGE: - merged = bio_attempt_front_merge(rq, bio, nr_segs); - break; - case ELEVATOR_DISCARD_MERGE: - merged = bio_attempt_discard_merge(q, rq, bio); - break; - default: - break; - } - - if (merged) - return true; - } - - return false; -} - static void handle_bad_sector(struct bio *bio, sector_t maxsector) { char b[BDEVNAME_SIZE]; diff --git a/block/blk-merge.c b/block/blk-merge.c index f685d633bcc9..3aa2de57b57f 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c @@ -11,6 +11,7 @@ #include #include "blk.h" +#include "blk-rq-qos.h" static inline bool bio_will_gap(struct request_queue *q, struct request *prev_rq, struct bio *prev, struct bio *next) @@ -895,3 +896,159 @@ enum elv_merge blk_try_merge(struct request *rq, struct bio *bio) return ELEVATOR_FRONT_MERGE; return ELEVATOR_NO_MERGE; } + +static void blk_account_io_merge_bio(struct request *req) +{ + if (!blk_do_io_stat(req)) + return; + + part_stat_lock(); + part_stat_inc(req->part, merges[op_stat_group(req_op(req))]); + part_stat_unlock(); +} + +bool 
bio_attempt_back_merge(struct request *req, struct bio *bio, + unsigned int nr_segs) +{ + const int ff = bio->bi_opf & REQ_FAILFAST_MASK; + + if (!ll_back_merge_fn(req, bio, nr_segs)) + return false; + + trace_block_bio_backmerge(req->q, req, bio); + rq_qos_merge(req->q, req, bio); + + if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff) + blk_rq_set_mixed_merge(req); + + req->biotail->bi_next = bio; + req->biotail = bio; + req->__data_len += bio->bi_iter.bi_size; + + bio_crypt_free_ctx(bio); + + blk_account_io_merge_bio(req); + return true; +} + +bool bio_attempt_front_merge(struct request *req, struct bio *bio, + unsigned int nr_segs) +{ + const int ff = bio->bi_opf & REQ_FAILFAST_MASK; + + if (!ll_front_merge_fn(req, bio, nr_segs)) + return false; + + trace_block_bio_frontmerge(req->q, req, bio); + rq_qos_merge(req->q, req, bio); + + if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff) + blk_rq_set_mixed_merge(req); + + bio->bi_next = req->bio; + req->bio = bio; + + req->__sector = bio->bi_iter.bi_sector; + req->__data_len += bio->bi_iter.bi_size; + + bio_crypt_do_front_merge(req, bio); + + blk_account_io_merge_bio(req); + return true; +} + +bool bio_attempt_discard_merge(struct request_queue *q, struct request *req, + struct bio *bio) +{ + unsigned short segments = blk_rq_nr_discard_segments(req); + + if (segments >= queue_max_discard_segments(q)) + goto no_merge; + if (blk_rq_sectors(req) + bio_sectors(bio) > + blk_rq_get_max_sectors(req, blk_rq_pos(req))) + goto no_merge; + + rq_qos_merge(q, req, bio); + + req->biotail->bi_next = bio; + req->biotail = bio; + req->__data_len += bio->bi_iter.bi_size; + req->nr_phys_segments = segments + 1; + + blk_account_io_merge_bio(req); + return true; +no_merge: + req_set_nomerge(q, req); + return false; +} + +/** + * blk_attempt_plug_merge - try to merge with %current's plugged list + * @q: request_queue new bio is being queued at + * @bio: new bio being queued + * @nr_segs: number of segments in @bio + * @same_queue_rq: pointer to &struct request that gets filled in when + * another request associated with @q is found on the plug list + * (optional, may be %NULL) + * + * Determine whether @bio being queued on @q can be merged with a request + * on %current's plugged list. Returns %true if merge was successful, + * otherwise %false. + * + * Plugging coalesces IOs from the same issuer for the same purpose without + * going through @q->queue_lock. As such it's more of an issuing mechanism + * than scheduling, and the request, while may have elvpriv data, is not + * added on the elevator at this point. In addition, we don't have + * reliable access to the elevator outside queue lock. Only check basic + * merging parameters without querying the elevator. + * + * Caller must ensure !blk_queue_nomerges(q) beforehand. 
+ */ +bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio, + unsigned int nr_segs, struct request **same_queue_rq) +{ + struct blk_plug *plug; + struct request *rq; + struct list_head *plug_list; + + plug = blk_mq_plug(q, bio); + if (!plug) + return false; + + plug_list = &plug->mq_list; + + list_for_each_entry_reverse(rq, plug_list, queuelist) { + bool merged = false; + + if (rq->q == q && same_queue_rq) { + /* + * Only blk-mq multiple hardware queues case checks the + * rq in the same queue, there should be only one such + * rq in a queue + **/ + *same_queue_rq = rq; + } + + if (rq->q != q || !blk_rq_merge_ok(rq, bio)) + continue; + + switch (blk_try_merge(rq, bio)) { + case ELEVATOR_BACK_MERGE: + merged = bio_attempt_back_merge(rq, bio, nr_segs); + break; + case ELEVATOR_FRONT_MERGE: + merged = bio_attempt_front_merge(rq, bio, nr_segs); + break; + case ELEVATOR_DISCARD_MERGE: + merged = bio_attempt_discard_merge(q, rq, bio); + break; + default: + break; + } + + if (merged) + return true; + } + + return false; +} -- cgit v1.2.3 From bdc6a287bc98e8f32bf52c9cb2d1bdf75975f5a0 Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Fri, 28 Aug 2020 10:52:55 +0800 Subject: block: Move blk_mq_bio_list_merge() into blk-merge.c Move the blk_mq_bio_list_merge() into blk-merge.c and rename it as a generic name. Reviewed-by: Christoph Hellwig Signed-off-by: Baolin Wang Signed-off-by: Jens Axboe --- block/blk-merge.c | 44 ++++++++++++++++++++++++++++++++++++++++++++ block/blk-mq-sched.c | 46 +--------------------------------------------- block/blk.h | 2 ++ block/kyber-iosched.c | 2 +- 4 files changed, 48 insertions(+), 46 deletions(-) (limited to 'block') diff --git a/block/blk-merge.c b/block/blk-merge.c index 3aa2de57b57f..b09e9fc44236 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c @@ -1052,3 +1052,47 @@ bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio, return false; } + +/* + * Iterate list of requests and see if we can merge this bio with any + * of them. + */ +bool blk_bio_list_merge(struct request_queue *q, struct list_head *list, + struct bio *bio, unsigned int nr_segs) +{ + struct request *rq; + int checked = 8; + + list_for_each_entry_reverse(rq, list, queuelist) { + bool merged = false; + + if (!checked--) + break; + + if (!blk_rq_merge_ok(rq, bio)) + continue; + + switch (blk_try_merge(rq, bio)) { + case ELEVATOR_BACK_MERGE: + if (blk_mq_sched_allow_merge(q, rq, bio)) + merged = bio_attempt_back_merge(rq, bio, + nr_segs); + break; + case ELEVATOR_FRONT_MERGE: + if (blk_mq_sched_allow_merge(q, rq, bio)) + merged = bio_attempt_front_merge(rq, bio, + nr_segs); + break; + case ELEVATOR_DISCARD_MERGE: + merged = bio_attempt_discard_merge(q, rq, bio); + break; + default: + continue; + } + + return merged; + } + + return false; +} +EXPORT_SYMBOL_GPL(blk_bio_list_merge); diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c index d2790e5b06d1..82acff96c093 100644 --- a/block/blk-mq-sched.c +++ b/block/blk-mq-sched.c @@ -391,50 +391,6 @@ bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio, } EXPORT_SYMBOL_GPL(blk_mq_sched_try_merge); -/* - * Iterate list of requests and see if we can merge this bio with any - * of them. 
- */ -bool blk_mq_bio_list_merge(struct request_queue *q, struct list_head *list, - struct bio *bio, unsigned int nr_segs) -{ - struct request *rq; - int checked = 8; - - list_for_each_entry_reverse(rq, list, queuelist) { - bool merged = false; - - if (!checked--) - break; - - if (!blk_rq_merge_ok(rq, bio)) - continue; - - switch (blk_try_merge(rq, bio)) { - case ELEVATOR_BACK_MERGE: - if (blk_mq_sched_allow_merge(q, rq, bio)) - merged = bio_attempt_back_merge(rq, bio, - nr_segs); - break; - case ELEVATOR_FRONT_MERGE: - if (blk_mq_sched_allow_merge(q, rq, bio)) - merged = bio_attempt_front_merge(rq, bio, - nr_segs); - break; - case ELEVATOR_DISCARD_MERGE: - merged = bio_attempt_discard_merge(q, rq, bio); - break; - default: - continue; - } - - return merged; - } - - return false; -} -EXPORT_SYMBOL_GPL(blk_mq_bio_list_merge); - /* * Reverse check our software queue for entries that we could potentially * merge with. Currently includes a hand-wavy stop count of 8, to not spend @@ -449,7 +405,7 @@ static bool blk_mq_attempt_merge(struct request_queue *q, lockdep_assert_held(&ctx->lock); - if (blk_mq_bio_list_merge(q, &ctx->rq_lists[type], bio, nr_segs)) { + if (blk_bio_list_merge(q, &ctx->rq_lists[type], bio, nr_segs)) { ctx->rq_merged++; return true; } diff --git a/block/blk.h b/block/blk.h index 49e2928a1632..d6152d20d4e2 100644 --- a/block/blk.h +++ b/block/blk.h @@ -177,6 +177,8 @@ bool bio_attempt_discard_merge(struct request_queue *q, struct request *req, struct bio *bio); bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio, unsigned int nr_segs, struct request **same_queue_rq); +bool blk_bio_list_merge(struct request_queue *q, struct list_head *list, + struct bio *bio, unsigned int nr_segs); void blk_account_io_start(struct request *req); void blk_account_io_done(struct request *req, u64 now); diff --git a/block/kyber-iosched.c b/block/kyber-iosched.c index a38c5ab103d1..6d4ba0e9688e 100644 --- a/block/kyber-iosched.c +++ b/block/kyber-iosched.c @@ -573,7 +573,7 @@ static bool kyber_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio, bool merged; spin_lock(&kcq->lock); - merged = blk_mq_bio_list_merge(hctx->queue, rq_list, bio, nr_segs); + merged = blk_bio_list_merge(hctx->queue, rq_list, bio, nr_segs); spin_unlock(&kcq->lock); return merged; -- cgit v1.2.3 From 7d7ca7c5269becab86c9f595309c8e90ce268967 Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Fri, 28 Aug 2020 10:52:56 +0800 Subject: block: Add a new helper to attempt to merge a bio There are lots of duplicated code when trying to merge a bio from plug list and sw queue, we can introduce a new helper to attempt to merge a bio, which can simplify the blk_bio_list_merge() and blk_attempt_plug_merge(). 
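As an editorial aside on the design (the annotations are the editor's reading of the patch below, not upstream documentation): a plain bool cannot tell the two scan loops whether to keep looking at other requests or to stop altogether, which is why the new helper returns a tri-state result.

enum bio_merge_status {
        BIO_MERGE_OK,           /* bio merged into this rq: caller is done */
        BIO_MERGE_NONE,         /* rq was not a merge candidate: keep scanning */
        BIO_MERGE_FAILED,       /* rq was a candidate but the merge failed: stop */
};

With that, blk_attempt_plug_merge() and blk_bio_list_merge() can share a single blk_attempt_bio_merge() helper and simply switch on its result, as the diff below shows.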
Reviewed-by: Christoph Hellwig Signed-off-by: Baolin Wang Signed-off-by: Jens Axboe --- block/blk-merge.c | 104 ++++++++++++++++++++++++++------------------------- block/blk-mq-sched.c | 6 +-- block/blk.h | 21 ++++++++--- 3 files changed, 71 insertions(+), 60 deletions(-) (limited to 'block') diff --git a/block/blk-merge.c b/block/blk-merge.c index b09e9fc44236..80c974484a3f 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c @@ -907,13 +907,14 @@ static void blk_account_io_merge_bio(struct request *req) part_stat_unlock(); } -bool bio_attempt_back_merge(struct request *req, struct bio *bio, - unsigned int nr_segs) +enum bio_merge_status bio_attempt_back_merge(struct request *req, + struct bio *bio, + unsigned int nr_segs) { const int ff = bio->bi_opf & REQ_FAILFAST_MASK; if (!ll_back_merge_fn(req, bio, nr_segs)) - return false; + return BIO_MERGE_FAILED; trace_block_bio_backmerge(req->q, req, bio); rq_qos_merge(req->q, req, bio); @@ -928,16 +929,17 @@ bool bio_attempt_back_merge(struct request *req, struct bio *bio, bio_crypt_free_ctx(bio); blk_account_io_merge_bio(req); - return true; + return BIO_MERGE_OK; } -bool bio_attempt_front_merge(struct request *req, struct bio *bio, - unsigned int nr_segs) +enum bio_merge_status bio_attempt_front_merge(struct request *req, + struct bio *bio, + unsigned int nr_segs) { const int ff = bio->bi_opf & REQ_FAILFAST_MASK; if (!ll_front_merge_fn(req, bio, nr_segs)) - return false; + return BIO_MERGE_FAILED; trace_block_bio_frontmerge(req->q, req, bio); rq_qos_merge(req->q, req, bio); @@ -954,11 +956,12 @@ bool bio_attempt_front_merge(struct request *req, struct bio *bio, bio_crypt_do_front_merge(req, bio); blk_account_io_merge_bio(req); - return true; + return BIO_MERGE_OK; } -bool bio_attempt_discard_merge(struct request_queue *q, struct request *req, - struct bio *bio) +enum bio_merge_status bio_attempt_discard_merge(struct request_queue *q, + struct request *req, + struct bio *bio) { unsigned short segments = blk_rq_nr_discard_segments(req); @@ -976,10 +979,39 @@ bool bio_attempt_discard_merge(struct request_queue *q, struct request *req, req->nr_phys_segments = segments + 1; blk_account_io_merge_bio(req); - return true; + return BIO_MERGE_OK; no_merge: req_set_nomerge(q, req); - return false; + return BIO_MERGE_FAILED; +} + +static enum bio_merge_status blk_attempt_bio_merge(struct request_queue *q, + struct request *rq, + struct bio *bio, + unsigned int nr_segs, + bool sched_allow_merge) +{ + if (!blk_rq_merge_ok(rq, bio)) + return BIO_MERGE_NONE; + + switch (blk_try_merge(rq, bio)) { + case ELEVATOR_BACK_MERGE: + if (!sched_allow_merge || + (sched_allow_merge && blk_mq_sched_allow_merge(q, rq, bio))) + return bio_attempt_back_merge(rq, bio, nr_segs); + break; + case ELEVATOR_FRONT_MERGE: + if (!sched_allow_merge || + (sched_allow_merge && blk_mq_sched_allow_merge(q, rq, bio))) + return bio_attempt_front_merge(rq, bio, nr_segs); + break; + case ELEVATOR_DISCARD_MERGE: + return bio_attempt_discard_merge(q, rq, bio); + default: + return BIO_MERGE_NONE; + } + + return BIO_MERGE_FAILED; } /** @@ -1018,8 +1050,6 @@ bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio, plug_list = &plug->mq_list; list_for_each_entry_reverse(rq, plug_list, queuelist) { - bool merged = false; - if (rq->q == q && same_queue_rq) { /* * Only blk-mq multiple hardware queues case checks the @@ -1029,24 +1059,11 @@ bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio, *same_queue_rq = rq; } - if (rq->q != q || !blk_rq_merge_ok(rq, bio)) + if 
(rq->q != q) continue; - switch (blk_try_merge(rq, bio)) { - case ELEVATOR_BACK_MERGE: - merged = bio_attempt_back_merge(rq, bio, nr_segs); - break; - case ELEVATOR_FRONT_MERGE: - merged = bio_attempt_front_merge(rq, bio, nr_segs); - break; - case ELEVATOR_DISCARD_MERGE: - merged = bio_attempt_discard_merge(q, rq, bio); - break; - default: - break; - } - - if (merged) + if (blk_attempt_bio_merge(q, rq, bio, nr_segs, false) == + BIO_MERGE_OK) return true; } @@ -1064,33 +1081,18 @@ bool blk_bio_list_merge(struct request_queue *q, struct list_head *list, int checked = 8; list_for_each_entry_reverse(rq, list, queuelist) { - bool merged = false; - if (!checked--) break; - if (!blk_rq_merge_ok(rq, bio)) - continue; - - switch (blk_try_merge(rq, bio)) { - case ELEVATOR_BACK_MERGE: - if (blk_mq_sched_allow_merge(q, rq, bio)) - merged = bio_attempt_back_merge(rq, bio, - nr_segs); - break; - case ELEVATOR_FRONT_MERGE: - if (blk_mq_sched_allow_merge(q, rq, bio)) - merged = bio_attempt_front_merge(rq, bio, - nr_segs); - break; - case ELEVATOR_DISCARD_MERGE: - merged = bio_attempt_discard_merge(q, rq, bio); - break; - default: + switch (blk_attempt_bio_merge(q, rq, bio, nr_segs, true)) { + case BIO_MERGE_NONE: continue; + case BIO_MERGE_OK: + return true; + case BIO_MERGE_FAILED: + return false; } - return merged; } return false; diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c index 82acff96c093..94db0c9d59b8 100644 --- a/block/blk-mq-sched.c +++ b/block/blk-mq-sched.c @@ -368,7 +368,7 @@ bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio, case ELEVATOR_BACK_MERGE: if (!blk_mq_sched_allow_merge(q, rq, bio)) return false; - if (!bio_attempt_back_merge(rq, bio, nr_segs)) + if (bio_attempt_back_merge(rq, bio, nr_segs) != BIO_MERGE_OK) return false; *merged_request = attempt_back_merge(q, rq); if (!*merged_request) @@ -377,14 +377,14 @@ bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio, case ELEVATOR_FRONT_MERGE: if (!blk_mq_sched_allow_merge(q, rq, bio)) return false; - if (!bio_attempt_front_merge(rq, bio, nr_segs)) + if (bio_attempt_front_merge(rq, bio, nr_segs) != BIO_MERGE_OK) return false; *merged_request = attempt_front_merge(q, rq); if (!*merged_request) elv_merged_request(q, rq, ELEVATOR_FRONT_MERGE); return true; case ELEVATOR_DISCARD_MERGE: - return bio_attempt_discard_merge(q, rq, bio); + return bio_attempt_discard_merge(q, rq, bio) == BIO_MERGE_OK; default: return false; } diff --git a/block/blk.h b/block/blk.h index d6152d20d4e2..a180443ac13f 100644 --- a/block/blk.h +++ b/block/blk.h @@ -29,6 +29,12 @@ struct blk_flush_queue { spinlock_t mq_flush_lock; }; +enum bio_merge_status { + BIO_MERGE_OK, + BIO_MERGE_NONE, + BIO_MERGE_FAILED, +}; + extern struct kmem_cache *blk_requestq_cachep; extern struct kobj_type blk_queue_ktype; extern struct ida blk_queue_ida; @@ -169,12 +175,15 @@ static inline void blk_integrity_del(struct gendisk *disk) unsigned long blk_rq_timeout(unsigned long timeout); void blk_add_timer(struct request *req); -bool bio_attempt_front_merge(struct request *req, struct bio *bio, - unsigned int nr_segs); -bool bio_attempt_back_merge(struct request *req, struct bio *bio, - unsigned int nr_segs); -bool bio_attempt_discard_merge(struct request_queue *q, struct request *req, - struct bio *bio); +enum bio_merge_status bio_attempt_front_merge(struct request *req, + struct bio *bio, + unsigned int nr_segs); +enum bio_merge_status bio_attempt_back_merge(struct request *req, + struct bio *bio, + unsigned int nr_segs); +enum 
bio_merge_status bio_attempt_discard_merge(struct request_queue *q, + struct request *req, + struct bio *bio); bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio, unsigned int nr_segs, struct request **same_queue_rq); bool blk_bio_list_merge(struct request_queue *q, struct list_head *list, -- cgit v1.2.3 From cdfcef9ee87745d9511ad8825e1d2b8b861884da Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Fri, 28 Aug 2020 10:52:57 +0800 Subject: block: Remove blk_mq_attempt_merge() function The small blk_mq_attempt_merge() function is only called by __blk_mq_sched_bio_merge(), just open code it. Reviewed-by: Christoph Hellwig Signed-off-by: Baolin Wang Signed-off-by: Jens Axboe --- block/blk-mq-sched.c | 44 ++++++++++++++++---------------------------- 1 file changed, 16 insertions(+), 28 deletions(-) (limited to 'block') diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c index 94db0c9d59b8..205d9716f7a5 100644 --- a/block/blk-mq-sched.c +++ b/block/blk-mq-sched.c @@ -391,28 +391,6 @@ bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio, } EXPORT_SYMBOL_GPL(blk_mq_sched_try_merge); -/* - * Reverse check our software queue for entries that we could potentially - * merge with. Currently includes a hand-wavy stop count of 8, to not spend - * too much time checking for merges. - */ -static bool blk_mq_attempt_merge(struct request_queue *q, - struct blk_mq_hw_ctx *hctx, - struct blk_mq_ctx *ctx, struct bio *bio, - unsigned int nr_segs) -{ - enum hctx_type type = hctx->type; - - lockdep_assert_held(&ctx->lock); - - if (blk_bio_list_merge(q, &ctx->rq_lists[type], bio, nr_segs)) { - ctx->rq_merged++; - return true; - } - - return false; -} - bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio, unsigned int nr_segs) { @@ -426,14 +404,24 @@ bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio, return e->type->ops.bio_merge(hctx, bio, nr_segs); type = hctx->type; - if ((hctx->flags & BLK_MQ_F_SHOULD_MERGE) && - !list_empty_careful(&ctx->rq_lists[type])) { - /* default per sw-queue merge */ - spin_lock(&ctx->lock); - ret = blk_mq_attempt_merge(q, hctx, ctx, bio, nr_segs); - spin_unlock(&ctx->lock); + if (!(hctx->flags & BLK_MQ_F_SHOULD_MERGE) || + list_empty_careful(&ctx->rq_lists[type])) + return false; + + /* default per sw-queue merge */ + spin_lock(&ctx->lock); + /* + * Reverse check our software queue for entries that we could + * potentially merge with. Currently includes a hand-wavy stop + * count of 8, to not spend too much time checking for merges. + */ + if (blk_bio_list_merge(q, &ctx->rq_lists[type], bio, nr_segs)) { + ctx->rq_merged++; + ret = true; } + spin_unlock(&ctx->lock); + return ret; } -- cgit v1.2.3 From e44a6a2359808e0b97ff894569748ad2593b06f9 Mon Sep 17 00:00:00 2001 From: Xianting Tian Date: Thu, 27 Aug 2020 14:34:17 +0800 Subject: blk-mq: use BLK_MQ_NO_TAG for no tag Replace various magic -1 constants for tags with BLK_MQ_NO_TAG. 
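A brief editorial illustration of why the named sentinel reads better than a raw -1; the helper below is hypothetical and not part of the patch, it only shows the intent that BLK_MQ_NO_TAG makes explicit at every comparison site.

/* hypothetical helper, assuming BLK_MQ_NO_TAG as used in the patch below */
static inline bool rq_has_driver_tag(struct request *rq)
{
        return rq->tag != BLK_MQ_NO_TAG;
}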
Signed-off-by: Xianting Tian Signed-off-by: Jens Axboe --- block/blk-core.c | 4 ++-- block/blk-mq-sched.c | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'block') diff --git a/block/blk-core.c b/block/blk-core.c index 11661f5b64d6..062efdedc994 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -116,8 +116,8 @@ void blk_rq_init(struct request_queue *q, struct request *rq) rq->__sector = (sector_t) -1; INIT_HLIST_NODE(&rq->hash); RB_CLEAR_NODE(&rq->rb_node); - rq->tag = -1; - rq->internal_tag = -1; + rq->tag = BLK_MQ_NO_TAG; + rq->internal_tag = BLK_MQ_NO_TAG; rq->start_time_ns = ktime_get_ns(); rq->part = NULL; refcount_set(&rq->ref, 1); diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c index 205d9716f7a5..501a85ceaccb 100644 --- a/block/blk-mq-sched.c +++ b/block/blk-mq-sched.c @@ -475,7 +475,7 @@ void blk_mq_sched_insert_request(struct request *rq, bool at_head, goto run; } - WARN_ON(e && (rq->tag != -1)); + WARN_ON(e && (rq->tag != BLK_MQ_NO_TAG)); if (blk_mq_sched_bypass_insert(hctx, !!e, rq)) { /* -- cgit v1.2.3 From 7b8917f5e29c377be1db5680249fe30e038cb3eb Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 31 Aug 2020 20:02:33 +0200 Subject: block: remove the alignment_offset field from struct hd_struct The alignment offset is only used in slow path callers, so just calculate it on the fly. Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/partitions/core.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'block') diff --git a/block/partitions/core.c b/block/partitions/core.c index b1c0b50ca92d..94c2fb39e602 100644 --- a/block/partitions/core.c +++ b/block/partitions/core.c @@ -199,7 +199,10 @@ static ssize_t part_alignment_offset_show(struct device *dev, struct device_attribute *attr, char *buf) { struct hd_struct *p = dev_to_part(dev); - return sprintf(buf, "%llu\n", (unsigned long long)p->alignment_offset); + + return sprintf(buf, "%u\n", + queue_limit_alignment_offset(&part_to_disk(p)->queue->limits, + p->start_sect)); } static ssize_t part_discard_alignment_show(struct device *dev, @@ -405,8 +408,6 @@ static struct hd_struct *add_partition(struct gendisk *disk, int partno, pdev = part_to_dev(p); p->start_sect = start; - p->alignment_offset = - queue_limit_alignment_offset(&disk->queue->limits, start); p->discard_alignment = queue_limit_discard_alignment(&disk->queue->limits, start); p->nr_sects = len; -- cgit v1.2.3 From 7cf34d97ab45203b975396393ded9d3867dfa8bf Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 31 Aug 2020 20:02:34 +0200 Subject: block: remove the discard_alignment field from struct hd_struct The alignment offset is only used in slow path callers, so just calculate it on the fly. 
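An editorial sketch of the "derive on read" idea behind both alignment patches (the accessor name is hypothetical): the cached hd_struct field was a pure function of the queue limits and the partition start, so a slow path such as a sysfs ->show() can simply recompute it, and there is no cached copy to go stale if the limits change after the partition was scanned.

/* hypothetical accessor, assuming the helpers used in the two patches */
static unsigned int part_discard_alignment(struct hd_struct *p)
{
        struct request_queue *q = part_to_disk(p)->queue;

        return queue_limit_discard_alignment(&q->limits, p->start_sect);
}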
Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/partitions/core.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'block') diff --git a/block/partitions/core.c b/block/partitions/core.c index 94c2fb39e602..e596a85bba1e 100644 --- a/block/partitions/core.c +++ b/block/partitions/core.c @@ -209,7 +209,10 @@ static ssize_t part_discard_alignment_show(struct device *dev, struct device_attribute *attr, char *buf) { struct hd_struct *p = dev_to_part(dev); - return sprintf(buf, "%u\n", p->discard_alignment); + + return sprintf(buf, "%u\n", + queue_limit_discard_alignment(&part_to_disk(p)->queue->limits, + p->start_sect)); } static DEVICE_ATTR(partition, 0444, part_partition_show, NULL); @@ -408,8 +411,6 @@ static struct hd_struct *add_partition(struct gendisk *disk, int partno, pdev = part_to_dev(p); p->start_sect = start; - p->discard_alignment = - queue_limit_discard_alignment(&disk->queue->limits, start); p->nr_sects = len; p->partno = partno; p->policy = get_disk_ro(disk); -- cgit v1.2.3 From f93af2a494e1d28377065a2320d11da98110a970 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 31 Aug 2020 20:02:37 +0200 Subject: block: cleanup __alloc_disk_node Use early returns and goto-based unwinding to simplify the flow a bit. Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/genhd.c | 73 +++++++++++++++++++++++++++++++---------------------------- 1 file changed, 38 insertions(+), 35 deletions(-) (limited to 'block') diff --git a/block/genhd.c b/block/genhd.c index 99c64641c314..055ce9cf1835 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -1729,45 +1729,48 @@ struct gendisk *__alloc_disk_node(int minors, int node_id) } disk = kzalloc_node(sizeof(struct gendisk), GFP_KERNEL, node_id); - if (disk) { - disk->part0.dkstats = alloc_percpu(struct disk_stats); - if (!disk->part0.dkstats) { - kfree(disk); - return NULL; - } - init_rwsem(&disk->lookup_sem); - disk->node_id = node_id; - if (disk_expand_part_tbl(disk, 0)) { - free_percpu(disk->part0.dkstats); - kfree(disk); - return NULL; - } - ptbl = rcu_dereference_protected(disk->part_tbl, 1); - rcu_assign_pointer(ptbl->part[0], &disk->part0); + if (!disk) + return NULL; - /* - * set_capacity() and get_capacity() currently don't use - * seqcounter to read/update the part0->nr_sects. Still init - * the counter as we can read the sectors in IO submission - * patch using seqence counters. - * - * TODO: Ideally set_capacity() and get_capacity() should be - * converted to make use of bd_mutex and sequence counters. - */ - hd_sects_seq_init(&disk->part0); - if (hd_ref_init(&disk->part0)) { - hd_free_part(&disk->part0); - kfree(disk); - return NULL; - } + disk->part0.dkstats = alloc_percpu(struct disk_stats); + if (!disk->part0.dkstats) + goto out_free_disk; - disk->minors = minors; - rand_initialize_disk(disk); - disk_to_dev(disk)->class = &block_class; - disk_to_dev(disk)->type = &disk_type; - device_initialize(disk_to_dev(disk)); + init_rwsem(&disk->lookup_sem); + disk->node_id = node_id; + if (disk_expand_part_tbl(disk, 0)) { + free_percpu(disk->part0.dkstats); + goto out_free_disk; } + + ptbl = rcu_dereference_protected(disk->part_tbl, 1); + rcu_assign_pointer(ptbl->part[0], &disk->part0); + + /* + * set_capacity() and get_capacity() currently don't use + * seqcounter to read/update the part0->nr_sects. Still init + * the counter as we can read the sectors in IO submission + * patch using seqence counters. 
+ * + * TODO: Ideally set_capacity() and get_capacity() should be + * converted to make use of bd_mutex and sequence counters. + */ + hd_sects_seq_init(&disk->part0); + if (hd_ref_init(&disk->part0)) + goto out_free_part0; + + disk->minors = minors; + rand_initialize_disk(disk); + disk_to_dev(disk)->class = &block_class; + disk_to_dev(disk)->type = &disk_type; + device_initialize(disk_to_dev(disk)); return disk; + +out_free_part0: + hd_free_part(&disk->part0); +out_free_disk: + kfree(disk); + return NULL; } EXPORT_SYMBOL(__alloc_disk_node); -- cgit v1.2.3 From 8328eb28369a7dbfab6ff26366dbe8094425acc4 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 31 Aug 2020 20:02:38 +0200 Subject: block: remove the disk argument to delete_partition We can trivially derive the gendisk from the hd_struct. Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk.h | 2 +- block/genhd.c | 2 +- block/partitions/core.c | 9 +++++---- 3 files changed, 7 insertions(+), 6 deletions(-) (limited to 'block') diff --git a/block/blk.h b/block/blk.h index a180443ac13f..c08762e10b04 100644 --- a/block/blk.h +++ b/block/blk.h @@ -361,7 +361,7 @@ char *disk_name(struct gendisk *hd, int partno, char *buf); #define ADDPART_FLAG_NONE 0 #define ADDPART_FLAG_RAID 1 #define ADDPART_FLAG_WHOLEDISK 2 -void delete_partition(struct gendisk *disk, struct hd_struct *part); +void delete_partition(struct hd_struct *part); int bdev_add_partition(struct block_device *bdev, int partno, sector_t start, sector_t length); int bdev_del_partition(struct block_device *bdev, int partno); diff --git a/block/genhd.c b/block/genhd.c index 055ce9cf1835..2055b5bf637a 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -913,7 +913,7 @@ void del_gendisk(struct gendisk *disk) DISK_PITER_INCL_EMPTY | DISK_PITER_REVERSE); while ((part = disk_part_iter_next(&piter))) { invalidate_partition(disk, part->partno); - delete_partition(disk, part); + delete_partition(part); } disk_part_iter_exit(&piter); diff --git a/block/partitions/core.c b/block/partitions/core.c index e596a85bba1e..dd6811422a87 100644 --- a/block/partitions/core.c +++ b/block/partitions/core.c @@ -324,8 +324,9 @@ int hd_ref_init(struct hd_struct *part) * Must be called either with bd_mutex held, before a disk can be opened or * after all disk users are gone. 
*/ -void delete_partition(struct gendisk *disk, struct hd_struct *part) +void delete_partition(struct hd_struct *part) { + struct gendisk *disk = part_to_disk(part); struct disk_part_tbl *ptbl = rcu_dereference_protected(disk->part_tbl, 1); @@ -333,7 +334,7 @@ void delete_partition(struct gendisk *disk, struct hd_struct *part) * ->part_tbl is referenced in this part's release handler, so * we have to hold the disk device */ - get_device(disk_to_dev(part_to_disk(part))); + get_device(disk_to_dev(disk)); rcu_assign_pointer(ptbl->part[part->partno], NULL); kobject_put(part->holder_dir); device_del(part_to_dev(part)); @@ -556,7 +557,7 @@ int bdev_del_partition(struct block_device *bdev, int partno) sync_blockdev(bdevp); invalidate_bdev(bdevp); - delete_partition(bdev->bd_disk, part); + delete_partition(part); ret = 0; out_unlock: mutex_unlock(&bdev->bd_mutex); @@ -636,7 +637,7 @@ int blk_drop_partitions(struct block_device *bdev) disk_part_iter_init(&piter, bdev->bd_disk, DISK_PITER_INCL_EMPTY); while ((part = disk_part_iter_next(&piter))) - delete_partition(bdev->bd_disk, part); + delete_partition(part); disk_part_iter_exit(&piter); return 0; -- cgit v1.2.3 From 1f06959bd2c96342dbac8e29994dd4f69deb956e Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 31 Aug 2020 20:02:39 +0200 Subject: block: remove the unused q argument to part_in_flight and part_in_flight_rw Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/genhd.c | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) (limited to 'block') diff --git a/block/genhd.c b/block/genhd.c index 2055b5bf637a..5fc6d82e6c68 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -110,8 +110,7 @@ static void part_stat_read_all(struct hd_struct *part, struct disk_stats *stat) } } -static unsigned int part_in_flight(struct request_queue *q, - struct hd_struct *part) +static unsigned int part_in_flight(struct hd_struct *part) { unsigned int inflight = 0; int cpu; @@ -126,8 +125,7 @@ static unsigned int part_in_flight(struct request_queue *q, return inflight; } -static void part_in_flight_rw(struct request_queue *q, struct hd_struct *part, - unsigned int inflight[2]) +static void part_in_flight_rw(struct hd_struct *part, unsigned int inflight[2]) { int cpu; @@ -1301,7 +1299,7 @@ ssize_t part_stat_show(struct device *dev, if (queue_is_mq(q)) inflight = blk_mq_in_flight(q, p); else - inflight = part_in_flight(q, p); + inflight = part_in_flight(p); return sprintf(buf, "%8lu %8lu %8llu %8u " @@ -1343,7 +1341,7 @@ ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr, if (queue_is_mq(q)) blk_mq_in_flight_rw(q, p, inflight); else - part_in_flight_rw(q, p, inflight); + part_in_flight_rw(p, inflight); return sprintf(buf, "%8u %8u\n", inflight[0], inflight[1]); } @@ -1623,7 +1621,7 @@ static int diskstats_show(struct seq_file *seqf, void *v) if (queue_is_mq(gp->queue)) inflight = blk_mq_in_flight(gp->queue, hd); else - inflight = part_in_flight(gp->queue, hd); + inflight = part_in_flight(hd); seq_printf(seqf, "%4d %7d %s " "%lu %lu %lu %u " -- cgit v1.2.3 From 5e124f74325d8181b3275a704e87e44246c484f6 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 1 Sep 2020 14:52:33 -0400 Subject: blk-iocost: use local[64]_t for percpu stat blk-iocost has been reading percpu stat counters from remote cpus which on some archs can lead to torn reads in really rare occassions. Use local[64]_t for those counters. 
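For illustration, an editorial sketch with made-up names rather than code from the patch: the local_t/local64_t pattern adopted here keeps updates cheap on the owning CPU while letting a remote summing path read each counter without torn values; local64_t stays a full 64 bits even on 32-bit architectures.

#include <asm/local.h>
#include <asm/local64.h>
#include <linux/percpu.h>

struct my_pcpu_stat {
        local_t         nr_events;      /* native-word counter, owner-CPU updates */
        local64_t       bytes;          /* 64 bits even on 32-bit archs */
};

static struct my_pcpu_stat __percpu *my_stats;

static int my_stats_init(void)
{
        int cpu;

        my_stats = alloc_percpu(struct my_pcpu_stat);
        if (!my_stats)
                return -ENOMEM;

        /* mirror the patch: initialize the counters explicitly per CPU */
        for_each_possible_cpu(cpu) {
                struct my_pcpu_stat *s = per_cpu_ptr(my_stats, cpu);

                local_set(&s->nr_events, 0);
                local64_set(&s->bytes, 0);
        }
        return 0;
}

static void my_account_io(unsigned int len)
{
        /* pin to the local CPU so the cheap local_* ops are safe */
        struct my_pcpu_stat *s = get_cpu_ptr(my_stats);

        local_inc(&s->nr_events);
        local64_add(len, &s->bytes);
        put_cpu_ptr(my_stats);
}

static u64 my_read_total_bytes(void)
{
        u64 sum = 0;
        int cpu;

        /* remote reads: local64_read() never returns a torn value */
        for_each_possible_cpu(cpu)
                sum += local64_read(&per_cpu_ptr(my_stats, cpu)->bytes);
        return sum;
}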
Signed-off-by: Tejun Heo Signed-off-by: Jens Axboe --- block/blk-iocost.c | 37 +++++++++++++++++++++++++++---------- 1 file changed, 27 insertions(+), 10 deletions(-) (limited to 'block') diff --git a/block/blk-iocost.c b/block/blk-iocost.c index d37b55db2409..e2266e7692b4 100644 --- a/block/blk-iocost.c +++ b/block/blk-iocost.c @@ -179,6 +179,8 @@ #include #include #include +#include +#include #include "blk-rq-qos.h" #include "blk-stat.h" #include "blk-wbt.h" @@ -373,8 +375,8 @@ struct ioc_params { }; struct ioc_missed { - u32 nr_met; - u32 nr_missed; + local_t nr_met; + local_t nr_missed; u32 last_met; u32 last_missed; }; @@ -382,7 +384,7 @@ struct ioc_missed { struct ioc_pcpu_stat { struct ioc_missed missed[2]; - u64 rq_wait_ns; + local64_t rq_wait_ns; u64 last_rq_wait_ns; }; @@ -1278,8 +1280,8 @@ static void ioc_lat_stat(struct ioc *ioc, u32 *missed_ppm_ar, u32 *rq_wait_pct_p u64 this_rq_wait_ns; for (rw = READ; rw <= WRITE; rw++) { - u32 this_met = READ_ONCE(stat->missed[rw].nr_met); - u32 this_missed = READ_ONCE(stat->missed[rw].nr_missed); + u32 this_met = local_read(&stat->missed[rw].nr_met); + u32 this_missed = local_read(&stat->missed[rw].nr_missed); nr_met[rw] += this_met - stat->missed[rw].last_met; nr_missed[rw] += this_missed - stat->missed[rw].last_missed; @@ -1287,7 +1289,7 @@ static void ioc_lat_stat(struct ioc *ioc, u32 *missed_ppm_ar, u32 *rq_wait_pct_p stat->missed[rw].last_missed = this_missed; } - this_rq_wait_ns = READ_ONCE(stat->rq_wait_ns); + this_rq_wait_ns = local64_read(&stat->rq_wait_ns); rq_wait_ns += this_rq_wait_ns - stat->last_rq_wait_ns; stat->last_rq_wait_ns = this_rq_wait_ns; } @@ -1908,6 +1910,7 @@ static void ioc_rqos_done_bio(struct rq_qos *rqos, struct bio *bio) static void ioc_rqos_done(struct rq_qos *rqos, struct request *rq) { struct ioc *ioc = rqos_to_ioc(rqos); + struct ioc_pcpu_stat *ccs; u64 on_q_ns, rq_wait_ns, size_nsec; int pidx, rw; @@ -1931,13 +1934,17 @@ static void ioc_rqos_done(struct rq_qos *rqos, struct request *rq) rq_wait_ns = rq->start_time_ns - rq->alloc_time_ns; size_nsec = div64_u64(calc_size_vtime_cost(rq, ioc), VTIME_PER_NSEC); + ccs = get_cpu_ptr(ioc->pcpu_stat); + if (on_q_ns <= size_nsec || on_q_ns - size_nsec <= ioc->params.qos[pidx] * NSEC_PER_USEC) - this_cpu_inc(ioc->pcpu_stat->missed[rw].nr_met); + local_inc(&ccs->missed[rw].nr_met); else - this_cpu_inc(ioc->pcpu_stat->missed[rw].nr_missed); + local_inc(&ccs->missed[rw].nr_missed); + + local64_add(rq_wait_ns, &ccs->rq_wait_ns); - this_cpu_add(ioc->pcpu_stat->rq_wait_ns, rq_wait_ns); + put_cpu_ptr(ccs); } static void ioc_rqos_queue_depth_changed(struct rq_qos *rqos) @@ -1977,7 +1984,7 @@ static int blk_iocost_init(struct request_queue *q) { struct ioc *ioc; struct rq_qos *rqos; - int ret; + int i, cpu, ret; ioc = kzalloc(sizeof(*ioc), GFP_KERNEL); if (!ioc) @@ -1989,6 +1996,16 @@ static int blk_iocost_init(struct request_queue *q) return -ENOMEM; } + for_each_possible_cpu(cpu) { + struct ioc_pcpu_stat *ccs = per_cpu_ptr(ioc->pcpu_stat, cpu); + + for (i = 0; i < ARRAY_SIZE(ccs->missed); i++) { + local_set(&ccs->missed[i].nr_met, 0); + local_set(&ccs->missed[i].nr_missed, 0); + } + local64_set(&ccs->rq_wait_ns, 0); + } + rqos = &ioc->rqos; rqos->id = RQ_QOS_COST; rqos->ops = &ioc_rqos_ops; -- cgit v1.2.3 From 00410f1b09fe7c9a12bde07f0bb4b978a3367f3a Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 1 Sep 2020 14:52:34 -0400 Subject: blk-iocost: rename propagate_active_weights() to propagate_weights() It already propagates two weights - active and inuse - and there will 
be another soon. Let's drop the confusing misnomers. Rename [__]propagate_active_weights() to [__]propagate_weights() and commit_active_weights() to commit_weights(). This is pure rename. Signed-off-by: Tejun Heo Signed-off-by: Jens Axboe --- block/blk-iocost.c | 40 ++++++++++++++++++++-------------------- 1 file changed, 20 insertions(+), 20 deletions(-) (limited to 'block') diff --git a/block/blk-iocost.c b/block/blk-iocost.c index e2266e7692b4..78e6919153d8 100644 --- a/block/blk-iocost.c +++ b/block/blk-iocost.c @@ -479,7 +479,7 @@ struct ioc_gq { atomic64_t active_period; struct list_head active_list; - /* see __propagate_active_weight() and current_hweight() for details */ + /* see __propagate_weights() and current_hweight() for details */ u64 child_active_sum; u64 child_inuse_sum; int hweight_gen; @@ -890,7 +890,7 @@ static void ioc_start_period(struct ioc *ioc, struct ioc_now *now) * Update @iocg's `active` and `inuse` to @active and @inuse, update level * weight sums and propagate upwards accordingly. */ -static void __propagate_active_weight(struct ioc_gq *iocg, u32 active, u32 inuse) +static void __propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse) { struct ioc *ioc = iocg->ioc; int lvl; @@ -935,7 +935,7 @@ static void __propagate_active_weight(struct ioc_gq *iocg, u32 active, u32 inuse ioc->weights_updated = true; } -static void commit_active_weights(struct ioc *ioc) +static void commit_weights(struct ioc *ioc) { lockdep_assert_held(&ioc->lock); @@ -947,10 +947,10 @@ static void commit_active_weights(struct ioc *ioc) } } -static void propagate_active_weight(struct ioc_gq *iocg, u32 active, u32 inuse) +static void propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse) { - __propagate_active_weight(iocg, active, inuse); - commit_active_weights(iocg->ioc); + __propagate_weights(iocg, active, inuse); + commit_weights(iocg->ioc); } static void current_hweight(struct ioc_gq *iocg, u32 *hw_activep, u32 *hw_inusep) @@ -966,9 +966,9 @@ static void current_hweight(struct ioc_gq *iocg, u32 *hw_activep, u32 *hw_inusep goto out; /* - * Paired with wmb in commit_active_weights(). If we saw the - * updated hweight_gen, all the weight updates from - * __propagate_active_weight() are visible too. + * Paired with wmb in commit_weights(). If we saw the updated + * hweight_gen, all the weight updates from __propagate_weights() are + * visible too. * * We can race with weight updates during calculation and get it * wrong. 
However, hweight_gen would have changed and a future @@ -1018,7 +1018,7 @@ static void weight_updated(struct ioc_gq *iocg) weight = iocg->cfg_weight ?: iocc->dfl_weight; if (weight != iocg->weight && iocg->active) - propagate_active_weight(iocg, weight, + propagate_weights(iocg, weight, DIV64_U64_ROUND_UP(iocg->inuse * weight, iocg->weight)); iocg->weight = weight; } @@ -1090,8 +1090,8 @@ static bool iocg_activate(struct ioc_gq *iocg, struct ioc_now *now) */ iocg->hweight_gen = atomic_read(&ioc->hweight_gen) - 1; list_add(&iocg->active_list, &ioc->active_iocgs); - propagate_active_weight(iocg, iocg->weight, - iocg->last_inuse ?: iocg->weight); + propagate_weights(iocg, iocg->weight, + iocg->last_inuse ?: iocg->weight); TRACE_IOCG_PATH(iocg_activate, iocg, now, last_period, cur_period, vtime); @@ -1384,13 +1384,13 @@ static void ioc_timer_fn(struct timer_list *timer) } else if (iocg_is_idle(iocg)) { /* no waiter and idle, deactivate */ iocg->last_inuse = iocg->inuse; - __propagate_active_weight(iocg, 0, 0); + __propagate_weights(iocg, 0, 0); list_del_init(&iocg->active_list); } spin_unlock(&iocg->waitq.lock); } - commit_active_weights(ioc); + commit_weights(ioc); /* calc usages and see whether some weights need to be moved around */ list_for_each_entry(iocg, &ioc->active_iocgs, active_list) { @@ -1483,8 +1483,8 @@ static void ioc_timer_fn(struct timer_list *timer) TRACE_IOCG_PATH(inuse_takeback, iocg, &now, iocg->inuse, new_inuse, hw_inuse, new_hwi); - __propagate_active_weight(iocg, iocg->weight, - new_inuse); + __propagate_weights(iocg, iocg->weight, + new_inuse); } } else { /* genuninely out of vtime */ @@ -1524,11 +1524,11 @@ static void ioc_timer_fn(struct timer_list *timer) TRACE_IOCG_PATH(inuse_giveaway, iocg, &now, iocg->inuse, new_inuse, hw_inuse, new_hwi); - __propagate_active_weight(iocg, iocg->weight, new_inuse); + __propagate_weights(iocg, iocg->weight, new_inuse); } } skip_surplus_transfers: - commit_active_weights(ioc); + commit_weights(ioc); /* * If q is getting clogged or we're missing too much, we're issuing @@ -1753,7 +1753,7 @@ static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio) TRACE_IOCG_PATH(inuse_reset, iocg, &now, iocg->inuse, iocg->weight, hw_inuse, hw_active); spin_lock_irq(&ioc->lock); - propagate_active_weight(iocg, iocg->weight, iocg->weight); + propagate_weights(iocg, iocg->weight, iocg->weight); spin_unlock_irq(&ioc->lock); current_hweight(iocg, &hw_active, &hw_inuse); } @@ -2114,7 +2114,7 @@ static void ioc_pd_free(struct blkg_policy_data *pd) if (ioc) { spin_lock_irqsave(&ioc->lock, flags); if (!list_empty(&iocg->active_list)) { - propagate_active_weight(iocg, 0, 0); + propagate_weights(iocg, 0, 0); list_del_init(&iocg->active_list); } spin_unlock_irqrestore(&ioc->lock, flags); -- cgit v1.2.3 From db84a72af6be422abf2089a5896293590dda5066 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 1 Sep 2020 14:52:35 -0400 Subject: blk-iocost: clamp inuse and skip noops in __propagate_weights() __propagate_weights() currently expects the callers to clamp inuse within [1, active], which is needlessly fragile. The inuse adjustment logic is going to be revamped, in preparation, let's make __propagate_weights() clamp inuse on entry. Also, make it avoid weight updates altogether if neither active or inuse is changed. 
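For a concrete feel of the new entry behavior, here is a rough sketch with made-up example values (the actual hunk follows below):

    /* assuming iocg->active is 100 at the time of the calls */
    __propagate_weights(iocg, 100, 0);      /* inuse is clamped up to 1 */
    __propagate_weights(iocg, 100, 250);    /* inuse is clamped down to 100 (== active) */
    /*
     * A call whose active and inuse already match the iocg's recorded
     * values now returns early without touching the per-level sums.
     */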
Signed-off-by: Tejun Heo Signed-off-by: Jens Axboe --- block/blk-iocost.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'block') diff --git a/block/blk-iocost.c b/block/blk-iocost.c index 78e6919153d8..8dfe73dde2a8 100644 --- a/block/blk-iocost.c +++ b/block/blk-iocost.c @@ -897,7 +897,10 @@ static void __propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse) lockdep_assert_held(&ioc->lock); - inuse = min(active, inuse); + inuse = clamp_t(u32, inuse, 1, active); + + if (active == iocg->active && inuse == iocg->inuse) + return; for (lvl = iocg->level - 1; lvl >= 0; lvl--) { struct ioc_gq *parent = iocg->ancestors[lvl]; -- cgit v1.2.3 From 6ef20f787b0aa337dc901d42efa44854e09c87e1 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 1 Sep 2020 14:52:36 -0400 Subject: blk-iocost: move iocg_kick_delay() above iocg_kick_waitq() We'll make iocg_kick_waitq() call iocg_kick_delay(). Reorder them in preparation. This is pure code reorganization. Signed-off-by: Tejun Heo Signed-off-by: Jens Axboe --- block/blk-iocost.c | 120 ++++++++++++++++++++++++++--------------------------- 1 file changed, 60 insertions(+), 60 deletions(-) (limited to 'block') diff --git a/block/blk-iocost.c b/block/blk-iocost.c index 8dfe73dde2a8..ac22d761a350 100644 --- a/block/blk-iocost.c +++ b/block/blk-iocost.c @@ -1115,6 +1115,66 @@ fail_unlock: return false; } +static bool iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now) +{ + struct ioc *ioc = iocg->ioc; + struct blkcg_gq *blkg = iocg_to_blkg(iocg); + u64 vtime = atomic64_read(&iocg->vtime); + u64 vmargin = ioc->margin_us * now->vrate; + u64 margin_ns = ioc->margin_us * NSEC_PER_USEC; + u64 delta_ns, expires, oexpires; + u32 hw_inuse; + + lockdep_assert_held(&iocg->waitq.lock); + + /* debt-adjust vtime */ + current_hweight(iocg, NULL, &hw_inuse); + vtime += abs_cost_to_cost(iocg->abs_vdebt, hw_inuse); + + /* + * Clear or maintain depending on the overage. Non-zero vdebt is what + * guarantees that @iocg is online and future iocg_kick_delay() will + * clear use_delay. Don't leave it on when there's no vdebt. 
+ */ + if (!iocg->abs_vdebt || time_before_eq64(vtime, now->vnow)) { + blkcg_clear_delay(blkg); + return false; + } + if (!atomic_read(&blkg->use_delay) && + time_before_eq64(vtime, now->vnow + vmargin)) + return false; + + /* use delay */ + delta_ns = DIV64_U64_ROUND_UP(vtime - now->vnow, + now->vrate) * NSEC_PER_USEC; + blkcg_set_delay(blkg, delta_ns); + expires = now->now_ns + delta_ns; + + /* if already active and close enough, don't bother */ + oexpires = ktime_to_ns(hrtimer_get_softexpires(&iocg->delay_timer)); + if (hrtimer_is_queued(&iocg->delay_timer) && + abs(oexpires - expires) <= margin_ns / 4) + return true; + + hrtimer_start_range_ns(&iocg->delay_timer, ns_to_ktime(expires), + margin_ns / 4, HRTIMER_MODE_ABS); + return true; +} + +static enum hrtimer_restart iocg_delay_timer_fn(struct hrtimer *timer) +{ + struct ioc_gq *iocg = container_of(timer, struct ioc_gq, delay_timer); + struct ioc_now now; + unsigned long flags; + + spin_lock_irqsave(&iocg->waitq.lock, flags); + ioc_now(iocg->ioc, &now); + iocg_kick_delay(iocg, &now); + spin_unlock_irqrestore(&iocg->waitq.lock, flags); + + return HRTIMER_NORESTART; +} + static int iocg_wake_fn(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key) { @@ -1211,66 +1271,6 @@ static enum hrtimer_restart iocg_waitq_timer_fn(struct hrtimer *timer) return HRTIMER_NORESTART; } -static bool iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now) -{ - struct ioc *ioc = iocg->ioc; - struct blkcg_gq *blkg = iocg_to_blkg(iocg); - u64 vtime = atomic64_read(&iocg->vtime); - u64 vmargin = ioc->margin_us * now->vrate; - u64 margin_ns = ioc->margin_us * NSEC_PER_USEC; - u64 delta_ns, expires, oexpires; - u32 hw_inuse; - - lockdep_assert_held(&iocg->waitq.lock); - - /* debt-adjust vtime */ - current_hweight(iocg, NULL, &hw_inuse); - vtime += abs_cost_to_cost(iocg->abs_vdebt, hw_inuse); - - /* - * Clear or maintain depending on the overage. Non-zero vdebt is what - * guarantees that @iocg is online and future iocg_kick_delay() will - * clear use_delay. Don't leave it on when there's no vdebt. 
- */ - if (!iocg->abs_vdebt || time_before_eq64(vtime, now->vnow)) { - blkcg_clear_delay(blkg); - return false; - } - if (!atomic_read(&blkg->use_delay) && - time_before_eq64(vtime, now->vnow + vmargin)) - return false; - - /* use delay */ - delta_ns = DIV64_U64_ROUND_UP(vtime - now->vnow, - now->vrate) * NSEC_PER_USEC; - blkcg_set_delay(blkg, delta_ns); - expires = now->now_ns + delta_ns; - - /* if already active and close enough, don't bother */ - oexpires = ktime_to_ns(hrtimer_get_softexpires(&iocg->delay_timer)); - if (hrtimer_is_queued(&iocg->delay_timer) && - abs(oexpires - expires) <= margin_ns / 4) - return true; - - hrtimer_start_range_ns(&iocg->delay_timer, ns_to_ktime(expires), - margin_ns / 4, HRTIMER_MODE_ABS); - return true; -} - -static enum hrtimer_restart iocg_delay_timer_fn(struct hrtimer *timer) -{ - struct ioc_gq *iocg = container_of(timer, struct ioc_gq, delay_timer); - struct ioc_now now; - unsigned long flags; - - spin_lock_irqsave(&iocg->waitq.lock, flags); - ioc_now(iocg->ioc, &now); - iocg_kick_delay(iocg, &now); - spin_unlock_irqrestore(&iocg->waitq.lock, flags); - - return HRTIMER_NORESTART; -} - static void ioc_lat_stat(struct ioc *ioc, u32 *missed_ppm_ar, u32 *rq_wait_pct_p) { u32 nr_met[2] = { }; -- cgit v1.2.3 From 7b84b49e381a8538626f200785dd918a402c62d7 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 1 Sep 2020 14:52:37 -0400 Subject: blk-iocost: make iocg_kick_waitq() call iocg_kick_delay() after paying debt iocg_kick_waitq() is the function which pays debt and iocg_kick_delay() updates the actual delay status accordingly. If iocg_kick_delay() is not called after iocg_kick_delay() updated debt, unnecessarily large delays can be applied temporarily. Let's make sure such conditions don't occur by making iocg_kick_waitq() always call iocg_kick_delay() after paying debt. Signed-off-by: Tejun Heo Signed-off-by: Jens Axboe --- block/blk-iocost.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'block') diff --git a/block/blk-iocost.c b/block/blk-iocost.c index ac22d761a350..b2b8dfbeee5a 100644 --- a/block/blk-iocost.c +++ b/block/blk-iocost.c @@ -1226,6 +1226,8 @@ static void iocg_kick_waitq(struct ioc_gq *iocg, struct ioc_now *now) atomic64_add(delta, &iocg->vtime); atomic64_add(delta, &iocg->done_vtime); iocg->abs_vdebt -= abs_delta; + + iocg_kick_delay(iocg, now); } /* @@ -1383,7 +1385,6 @@ static void ioc_timer_fn(struct timer_list *timer) if (waitqueue_active(&iocg->waitq) || iocg->abs_vdebt) { /* might be oversleeping vtime / hweight changes, kick */ iocg_kick_waitq(iocg, &now); - iocg_kick_delay(iocg, &now); } else if (iocg_is_idle(iocg)) { /* no waiter and idle, deactivate */ iocg->last_inuse = iocg->inuse; -- cgit v1.2.3 From fe20cdb516377e6ac6300254d5ca0dd7d8621ea8 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 1 Sep 2020 14:52:38 -0400 Subject: blk-iocost: s/HWEIGHT_WHOLE/WEIGHT_ONE/g We're gonna use HWEIGHT_WHOLE for regular weights too. Let's rename it to WEIGHT_ONE. Pure rename. Signed-off-by: Tejun Heo Signed-off-by: Jens Axboe --- block/blk-iocost.c | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) (limited to 'block') diff --git a/block/blk-iocost.c b/block/blk-iocost.c index b2b8dfbeee5a..5e6d56eec1c9 100644 --- a/block/blk-iocost.c +++ b/block/blk-iocost.c @@ -68,7 +68,7 @@ * gets 300/(100+300) or 75% share, and A0 and A1 equally splits the rest, * 12.5% each. The distribution mechanism only cares about these flattened * shares. 
They're called hweights (hierarchical weights) and always add - * upto 1 (HWEIGHT_WHOLE). + * upto 1 (WEIGHT_ONE). * * A given cgroup's vtime runs slower in inverse proportion to its hweight. * For example, with 12.5% weight, A0's time runs 8 times slower (100/12.5) @@ -246,7 +246,7 @@ enum { MIN_VALID_USAGES = 2, /* 1/64k is granular enough and can easily be handled w/ u32 */ - HWEIGHT_WHOLE = 1 << 16, + WEIGHT_ONE = 1 << 16, /* * As vtime is used to calculate the cost of each IO, it needs to @@ -285,8 +285,8 @@ enum { * donate the surplus. */ SURPLUS_SCALE_PCT = 125, /* * 125% */ - SURPLUS_SCALE_ABS = HWEIGHT_WHOLE / 50, /* + 2% */ - SURPLUS_MIN_ADJ_DELTA = HWEIGHT_WHOLE / 33, /* 3% */ + SURPLUS_SCALE_ABS = WEIGHT_ONE / 50, /* + 2% */ + SURPLUS_MIN_ADJ_DELTA = WEIGHT_ONE / 33, /* 3% */ /* switch iff the conditions are met for longer than this */ AUTOP_CYCLE_NSEC = 10LLU * NSEC_PER_SEC, @@ -491,7 +491,7 @@ struct ioc_gq { struct hrtimer waitq_timer; struct hrtimer delay_timer; - /* usage is recorded as fractions of HWEIGHT_WHOLE */ + /* usage is recorded as fractions of WEIGHT_ONE */ int usage_idx; u32 usages[NR_USAGE_SLOTS]; @@ -658,7 +658,7 @@ static struct ioc_cgrp *blkcg_to_iocc(struct blkcg *blkcg) */ static u64 abs_cost_to_cost(u64 abs_cost, u32 hw_inuse) { - return DIV64_U64_ROUND_UP(abs_cost * HWEIGHT_WHOLE, hw_inuse); + return DIV64_U64_ROUND_UP(abs_cost * WEIGHT_ONE, hw_inuse); } /* @@ -666,7 +666,7 @@ static u64 abs_cost_to_cost(u64 abs_cost, u32 hw_inuse) */ static u64 cost_to_abs_cost(u64 cost, u32 hw_inuse) { - return DIV64_U64_ROUND_UP(cost * hw_inuse, HWEIGHT_WHOLE); + return DIV64_U64_ROUND_UP(cost * hw_inuse, WEIGHT_ONE); } static void iocg_commit_bio(struct ioc_gq *iocg, struct bio *bio, u64 cost) @@ -980,7 +980,7 @@ static void current_hweight(struct ioc_gq *iocg, u32 *hw_activep, u32 *hw_inusep */ smp_rmb(); - hwa = hwi = HWEIGHT_WHOLE; + hwa = hwi = WEIGHT_ONE; for (lvl = 0; lvl <= iocg->level - 1; lvl++) { struct ioc_gq *parent = iocg->ancestors[lvl]; struct ioc_gq *child = iocg->ancestors[lvl + 1]; @@ -2088,8 +2088,8 @@ static void ioc_pd_init(struct blkg_policy_data *pd) atomic64_set(&iocg->done_vtime, now.vnow); atomic64_set(&iocg->active_period, atomic64_read(&ioc->cur_period)); INIT_LIST_HEAD(&iocg->active_list); - iocg->hweight_active = HWEIGHT_WHOLE; - iocg->hweight_inuse = HWEIGHT_WHOLE; + iocg->hweight_active = WEIGHT_ONE; + iocg->hweight_inuse = WEIGHT_ONE; init_waitqueue_head(&iocg->waitq); hrtimer_init(&iocg->waitq_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); -- cgit v1.2.3 From bd0adb91a68b2fbf384ace5287bb46f135e8d889 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 1 Sep 2020 14:52:39 -0400 Subject: blk-iocost: use WEIGHT_ONE based fixed point number for weights To improve weight donations, we want to able to scale inuse with a greater accuracy and down below 1. Let's make non-hierarchical weights to use WEIGHT_ONE based fixed point numbers too like hierarchical ones. This doesn't cause any behavior changes yet. 
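Roughly, for an example configured weight of 100, the fixed-point representation works as follows (sketch only, mirroring the hunks below):

    /* WEIGHT_ONE is 1 << 16, so the stored value becomes */
    iocg->cfg_weight = 100 * WEIGHT_ONE;    /* 6553600 */
    /* and the cgroup interface divides it back down on output */
    seq_printf(sf, "%s %u\n", dname, iocg->cfg_weight / WEIGHT_ONE);    /* prints 100 */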
Signed-off-by: Tejun Heo Signed-off-by: Jens Axboe --- block/blk-iocost.c | 25 +++++++++++++------------ 1 file changed, 13 insertions(+), 12 deletions(-) (limited to 'block') diff --git a/block/blk-iocost.c b/block/blk-iocost.c index 5e6d56eec1c9..00c5a3ad2b5b 100644 --- a/block/blk-iocost.c +++ b/block/blk-iocost.c @@ -984,8 +984,8 @@ static void current_hweight(struct ioc_gq *iocg, u32 *hw_activep, u32 *hw_inusep for (lvl = 0; lvl <= iocg->level - 1; lvl++) { struct ioc_gq *parent = iocg->ancestors[lvl]; struct ioc_gq *child = iocg->ancestors[lvl + 1]; - u32 active_sum = READ_ONCE(parent->child_active_sum); - u32 inuse_sum = READ_ONCE(parent->child_inuse_sum); + u64 active_sum = READ_ONCE(parent->child_active_sum); + u64 inuse_sum = READ_ONCE(parent->child_inuse_sum); u32 active = READ_ONCE(child->active); u32 inuse = READ_ONCE(child->inuse); @@ -993,11 +993,11 @@ static void current_hweight(struct ioc_gq *iocg, u32 *hw_activep, u32 *hw_inusep if (!active_sum || !inuse_sum) continue; - active_sum = max(active, active_sum); - hwa = hwa * active / active_sum; /* max 16bits * 10000 */ + active_sum = max_t(u64, active, active_sum); + hwa = div64_u64((u64)hwa * active, active_sum); - inuse_sum = max(inuse, inuse_sum); - hwi = hwi * inuse / inuse_sum; /* max 16bits * 10000 */ + inuse_sum = max_t(u64, inuse, inuse_sum); + hwi = div64_u64((u64)hwi * inuse, inuse_sum); } iocg->hweight_active = max_t(u32, hwa, 1); @@ -1022,7 +1022,8 @@ static void weight_updated(struct ioc_gq *iocg) weight = iocg->cfg_weight ?: iocc->dfl_weight; if (weight != iocg->weight && iocg->active) propagate_weights(iocg, weight, - DIV64_U64_ROUND_UP(iocg->inuse * weight, iocg->weight)); + DIV64_U64_ROUND_UP((u64)iocg->inuse * weight, + iocg->weight)); iocg->weight = weight; } @@ -2050,7 +2051,7 @@ static struct blkcg_policy_data *ioc_cpd_alloc(gfp_t gfp) if (!iocc) return NULL; - iocc->dfl_weight = CGROUP_WEIGHT_DFL; + iocc->dfl_weight = CGROUP_WEIGHT_DFL * WEIGHT_ONE; return &iocc->cpd; } @@ -2136,7 +2137,7 @@ static u64 ioc_weight_prfill(struct seq_file *sf, struct blkg_policy_data *pd, struct ioc_gq *iocg = pd_to_iocg(pd); if (dname && iocg->cfg_weight) - seq_printf(sf, "%s %u\n", dname, iocg->cfg_weight); + seq_printf(sf, "%s %u\n", dname, iocg->cfg_weight / WEIGHT_ONE); return 0; } @@ -2146,7 +2147,7 @@ static int ioc_weight_show(struct seq_file *sf, void *v) struct blkcg *blkcg = css_to_blkcg(seq_css(sf)); struct ioc_cgrp *iocc = blkcg_to_iocc(blkcg); - seq_printf(sf, "default %u\n", iocc->dfl_weight); + seq_printf(sf, "default %u\n", iocc->dfl_weight / WEIGHT_ONE); blkcg_print_blkgs(sf, blkcg, ioc_weight_prfill, &blkcg_policy_iocost, seq_cft(sf)->private, false); return 0; @@ -2172,7 +2173,7 @@ static ssize_t ioc_weight_write(struct kernfs_open_file *of, char *buf, return -EINVAL; spin_lock(&blkcg->lock); - iocc->dfl_weight = v; + iocc->dfl_weight = v * WEIGHT_ONE; hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) { struct ioc_gq *iocg = blkg_to_iocg(blkg); @@ -2203,7 +2204,7 @@ static ssize_t ioc_weight_write(struct kernfs_open_file *of, char *buf, } spin_lock(&iocg->ioc->lock); - iocg->cfg_weight = v; + iocg->cfg_weight = v * WEIGHT_ONE; weight_updated(iocg); spin_unlock(&iocg->ioc->lock); -- cgit v1.2.3 From ce95570acf741ea306baddcb43aba0b59b920a21 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 1 Sep 2020 14:52:40 -0400 Subject: blk-iocost: make ioc_now->now and ioc->period_at 64bit They are in microseconds and wrap in around 1.2 hours with u32. 
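(For reference, 2^32 microseconds is about 4,295 seconds, i.e. roughly 71.6 minutes or 1.19 hours.)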
While unlikely, confusions from wraparounds are still possible. We aren't saving anything meaningful by keeping these u32. Let's make them u64. Signed-off-by: Tejun Heo Signed-off-by: Jens Axboe --- block/blk-iocost.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'block') diff --git a/block/blk-iocost.c b/block/blk-iocost.c index 00c5a3ad2b5b..dc72cd965837 100644 --- a/block/blk-iocost.c +++ b/block/blk-iocost.c @@ -409,7 +409,7 @@ struct ioc { atomic64_t vtime_rate; seqcount_spinlock_t period_seqcount; - u32 period_at; /* wallclock starttime */ + u64 period_at; /* wallclock starttime */ u64 period_at_vtime; /* vtime starttime */ atomic64_t cur_period; /* inc'd each period */ @@ -508,7 +508,7 @@ struct ioc_cgrp { struct ioc_now { u64 now_ns; - u32 now; + u64 now; u64 vnow; u64 vrate; }; -- cgit v1.2.3 From 7ca5b2e60bfa4aa5b8d52e9c3e2a757c581bec1d Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 1 Sep 2020 14:52:41 -0400 Subject: blk-iocost: streamline vtime margin and timer slack handling The margin handling was pretty inconsistent. * ioc->margin_us and ioc->inuse_margin_vtime were used as vtime margin thresholds. However, the two are in different units with the former requiring conversion to vtime on use. * iocg_kick_waitq() was using a quarter of WAITQ_TIMER_MARGIN_PCT of period_us as the timer slack - ~1.2%. While iocg_kick_delay() was using a quarter of ioc->margin_us - ~12.5%. There aren't strong reasons to use different values for the two. This patch cleans up margin and timer slack handling: * vtime margins are now recorded in ioc->margins.{min, max} on period duration changes and used consistently. * Timer slack is now 1% of period_us and recorded in ioc->timer_slack_ns and used consistently for iocg_kick_waitq() and iocg_kick_delay(). The only functional change is shortening of timer slack. No meaningful visible change is expected. Signed-off-by: Tejun Heo Signed-off-by: Jens Axboe --- block/blk-iocost.c | 67 +++++++++++++++++++++++++++++++----------------------- 1 file changed, 38 insertions(+), 29 deletions(-) (limited to 'block') diff --git a/block/blk-iocost.c b/block/blk-iocost.c index dc72cd965837..f36988657594 100644 --- a/block/blk-iocost.c +++ b/block/blk-iocost.c @@ -221,11 +221,11 @@ enum { * serves as its IO credit buffer. Surplus weight adjustment is * immediately canceled if the vtime margin runs below 10%. 
*/ - MARGIN_PCT = 50, - INUSE_MARGIN_PCT = 10, + MARGIN_MIN_PCT = 10, + MARGIN_MAX_PCT = 50, - /* Have some play in waitq timer operations */ - WAITQ_TIMER_MARGIN_PCT = 5, + /* Have some play in timer operations */ + TIMER_SLACK_PCT = 1, /* * vtime can wrap well within a reasonable uptime when vrate is @@ -374,6 +374,11 @@ struct ioc_params { u32 too_slow_vrate_pct; }; +struct ioc_margins { + s64 min; + s64 max; +}; + struct ioc_missed { local_t nr_met; local_t nr_missed; @@ -395,8 +400,9 @@ struct ioc { bool enabled; struct ioc_params params; + struct ioc_margins margins; u32 period_us; - u32 margin_us; + u32 timer_slack_ns; u64 vrate_min; u64 vrate_max; @@ -415,7 +421,6 @@ struct ioc { atomic64_t cur_period; /* inc'd each period */ int busy_level; /* saturation history */ - u64 inuse_margin_vtime; bool weights_updated; atomic_t hweight_gen; /* for lazy hweights */ @@ -678,6 +683,16 @@ static void iocg_commit_bio(struct ioc_gq *iocg, struct bio *bio, u64 cost) #define CREATE_TRACE_POINTS #include +static void ioc_refresh_margins(struct ioc *ioc) +{ + struct ioc_margins *margins = &ioc->margins; + u32 period_us = ioc->period_us; + u64 vrate = atomic64_read(&ioc->vtime_rate); + + margins->min = (period_us * MARGIN_MIN_PCT / 100) * vrate; + margins->max = (period_us * MARGIN_MAX_PCT / 100) * vrate; +} + /* latency Qos params changed, update period_us and all the dependent params */ static void ioc_refresh_period_us(struct ioc *ioc) { @@ -711,9 +726,10 @@ static void ioc_refresh_period_us(struct ioc *ioc) /* calculate dependent params */ ioc->period_us = period_us; - ioc->margin_us = period_us * MARGIN_PCT / 100; - ioc->inuse_margin_vtime = DIV64_U64_ROUND_UP( - period_us * VTIME_PER_USEC * INUSE_MARGIN_PCT, 100); + ioc->timer_slack_ns = div64_u64( + (u64)period_us * NSEC_PER_USEC * TIMER_SLACK_PCT, + 100); + ioc_refresh_margins(ioc); } static int ioc_autop_idx(struct ioc *ioc) @@ -1031,7 +1047,7 @@ static bool iocg_activate(struct ioc_gq *iocg, struct ioc_now *now) { struct ioc *ioc = iocg->ioc; u64 last_period, cur_period, max_period_delta; - u64 vtime, vmargin, vmin; + u64 vtime, vmin; int i; /* @@ -1077,8 +1093,7 @@ static bool iocg_activate(struct ioc_gq *iocg, struct ioc_now *now) */ max_period_delta = DIV64_U64_ROUND_UP(VTIME_VALID_DUR, ioc->period_us); vtime = atomic64_read(&iocg->vtime); - vmargin = ioc->margin_us * now->vrate; - vmin = now->vnow - vmargin; + vmin = now->vnow - ioc->margins.max; if (last_period + max_period_delta < cur_period || time_before64(vtime, vmin)) { @@ -1121,8 +1136,6 @@ static bool iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now) struct ioc *ioc = iocg->ioc; struct blkcg_gq *blkg = iocg_to_blkg(iocg); u64 vtime = atomic64_read(&iocg->vtime); - u64 vmargin = ioc->margin_us * now->vrate; - u64 margin_ns = ioc->margin_us * NSEC_PER_USEC; u64 delta_ns, expires, oexpires; u32 hw_inuse; @@ -1142,7 +1155,7 @@ static bool iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now) return false; } if (!atomic_read(&blkg->use_delay) && - time_before_eq64(vtime, now->vnow + vmargin)) + time_before_eq64(vtime, now->vnow + ioc->margins.max)) return false; /* use delay */ @@ -1154,11 +1167,11 @@ static bool iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now) /* if already active and close enough, don't bother */ oexpires = ktime_to_ns(hrtimer_get_softexpires(&iocg->delay_timer)); if (hrtimer_is_queued(&iocg->delay_timer) && - abs(oexpires - expires) <= margin_ns / 4) + abs(oexpires - expires) <= ioc->timer_slack_ns) return true; 
hrtimer_start_range_ns(&iocg->delay_timer, ns_to_ktime(expires), - margin_ns / 4, HRTIMER_MODE_ABS); + ioc->timer_slack_ns, HRTIMER_MODE_ABS); return true; } @@ -1206,8 +1219,6 @@ static void iocg_kick_waitq(struct ioc_gq *iocg, struct ioc_now *now) { struct ioc *ioc = iocg->ioc; struct iocg_wake_ctx ctx = { .iocg = iocg }; - u64 margin_ns = (u64)(ioc->period_us * - WAITQ_TIMER_MARGIN_PCT / 100) * NSEC_PER_USEC; u64 vdebt, vshortage, expires, oexpires; s64 vbudget; u32 hw_inuse; @@ -1243,20 +1254,20 @@ static void iocg_kick_waitq(struct ioc_gq *iocg, struct ioc_now *now) if (WARN_ON_ONCE(ctx.vbudget >= 0)) return; - /* determine next wakeup, add a quarter margin to guarantee chunking */ + /* determine next wakeup, add a timer margin to guarantee chunking */ vshortage = -ctx.vbudget; expires = now->now_ns + DIV64_U64_ROUND_UP(vshortage, now->vrate) * NSEC_PER_USEC; - expires += margin_ns / 4; + expires += ioc->timer_slack_ns; /* if already active and close enough, don't bother */ oexpires = ktime_to_ns(hrtimer_get_softexpires(&iocg->waitq_timer)); if (hrtimer_is_queued(&iocg->waitq_timer) && - abs(oexpires - expires) <= margin_ns / 4) + abs(oexpires - expires) <= ioc->timer_slack_ns) return; hrtimer_start_range_ns(&iocg->waitq_timer, ns_to_ktime(expires), - margin_ns / 4, HRTIMER_MODE_ABS); + ioc->timer_slack_ns, HRTIMER_MODE_ABS); } static enum hrtimer_restart iocg_waitq_timer_fn(struct hrtimer *timer) @@ -1399,7 +1410,7 @@ static void ioc_timer_fn(struct timer_list *timer) /* calc usages and see whether some weights need to be moved around */ list_for_each_entry(iocg, &ioc->active_iocgs, active_list) { - u64 vdone, vtime, vusage, vmargin, vmin; + u64 vdone, vtime, vusage, vmin; u32 hw_active, hw_inuse, usage; /* @@ -1450,8 +1461,7 @@ static void ioc_timer_fn(struct timer_list *timer) } /* see whether there's surplus vtime */ - vmargin = ioc->margin_us * now.vrate; - vmin = now.vnow - vmargin; + vmin = now.vnow - ioc->margins.max; iocg->has_surplus = false; @@ -1623,8 +1633,7 @@ skip_surplus_transfers: nr_surpluses); atomic64_set(&ioc->vtime_rate, vrate); - ioc->inuse_margin_vtime = DIV64_U64_ROUND_UP( - ioc->period_us * vrate * INUSE_MARGIN_PCT, 100); + ioc_refresh_margins(ioc); } else if (ioc->busy_level != prev_busy_level || nr_lagging) { trace_iocost_ioc_vrate_adj(ioc, atomic64_read(&ioc->vtime_rate), missed_ppm, rq_wait_pct, nr_lagging, @@ -1754,7 +1763,7 @@ static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio) current_hweight(iocg, &hw_active, &hw_inuse); if (hw_inuse < hw_active && - time_after_eq64(vtime + ioc->inuse_margin_vtime, now.vnow)) { + time_after_eq64(vtime + ioc->margins.min, now.vnow)) { TRACE_IOCG_PATH(inuse_reset, iocg, &now, iocg->inuse, iocg->weight, hw_inuse, hw_active); spin_lock_irq(&ioc->lock); -- cgit v1.2.3 From da437b95db83106319d337a5859e6dfb464b085a Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 1 Sep 2020 14:52:42 -0400 Subject: blk-iocost: grab ioc->lock for debt handling Currently, debt handling requires only iocg->waitq.lock. In the future, we want to adjust and propagate inuse changes depending on debt status. Let's grab ioc->lock in debt handling paths in preparation. * Because ioc->lock nests outside iocg->waitq.lock, the decision to grab ioc->lock needs to be made before entering the critical sections. * Add and use iocg_[un]lock() which handles the conditional double locking. * Add @pay_debt to iocg_kick_waitq() so that debt payment happens only when the caller grabbed both locks. 
This patch is prepatory and the comments contain references to future changes. Signed-off-by: Tejun Heo Signed-off-by: Jens Axboe --- block/blk-iocost.c | 92 +++++++++++++++++++++++++++++++++++++++++++----------- 1 file changed, 73 insertions(+), 19 deletions(-) (limited to 'block') diff --git a/block/blk-iocost.c b/block/blk-iocost.c index f36988657594..23b173e34591 100644 --- a/block/blk-iocost.c +++ b/block/blk-iocost.c @@ -680,6 +680,26 @@ static void iocg_commit_bio(struct ioc_gq *iocg, struct bio *bio, u64 cost) atomic64_add(cost, &iocg->vtime); } +static void iocg_lock(struct ioc_gq *iocg, bool lock_ioc, unsigned long *flags) +{ + if (lock_ioc) { + spin_lock_irqsave(&iocg->ioc->lock, *flags); + spin_lock(&iocg->waitq.lock); + } else { + spin_lock_irqsave(&iocg->waitq.lock, *flags); + } +} + +static void iocg_unlock(struct ioc_gq *iocg, bool unlock_ioc, unsigned long *flags) +{ + if (unlock_ioc) { + spin_unlock(&iocg->waitq.lock); + spin_unlock_irqrestore(&iocg->ioc->lock, *flags); + } else { + spin_unlock_irqrestore(&iocg->waitq.lock, *flags); + } +} + #define CREATE_TRACE_POINTS #include @@ -1215,11 +1235,17 @@ static int iocg_wake_fn(struct wait_queue_entry *wq_entry, unsigned mode, return 0; } -static void iocg_kick_waitq(struct ioc_gq *iocg, struct ioc_now *now) +/* + * Calculate the accumulated budget, pay debt if @pay_debt and wake up waiters + * accordingly. When @pay_debt is %true, the caller must be holding ioc->lock in + * addition to iocg->waitq.lock. + */ +static void iocg_kick_waitq(struct ioc_gq *iocg, bool pay_debt, + struct ioc_now *now) { struct ioc *ioc = iocg->ioc; struct iocg_wake_ctx ctx = { .iocg = iocg }; - u64 vdebt, vshortage, expires, oexpires; + u64 vshortage, expires, oexpires; s64 vbudget; u32 hw_inuse; @@ -1229,25 +1255,39 @@ static void iocg_kick_waitq(struct ioc_gq *iocg, struct ioc_now *now) vbudget = now->vnow - atomic64_read(&iocg->vtime); /* pay off debt */ - vdebt = abs_cost_to_cost(iocg->abs_vdebt, hw_inuse); - if (vdebt && vbudget > 0) { + if (pay_debt && iocg->abs_vdebt && vbudget > 0) { + u64 vdebt = abs_cost_to_cost(iocg->abs_vdebt, hw_inuse); u64 delta = min_t(u64, vbudget, vdebt); u64 abs_delta = min(cost_to_abs_cost(delta, hw_inuse), iocg->abs_vdebt); + lockdep_assert_held(&ioc->lock); + atomic64_add(delta, &iocg->vtime); atomic64_add(delta, &iocg->done_vtime); iocg->abs_vdebt -= abs_delta; + vbudget -= vdebt; iocg_kick_delay(iocg, now); } + /* + * Debt can still be outstanding if we haven't paid all yet or the + * caller raced and called without @pay_debt. Shouldn't wake up waiters + * under debt. Make sure @vbudget reflects the outstanding amount and is + * not positive. + */ + if (iocg->abs_vdebt) { + s64 vdebt = abs_cost_to_cost(iocg->abs_vdebt, hw_inuse); + vbudget = min_t(s64, 0, vbudget - vdebt); + } + /* * Wake up the ones which are due and see how much vtime we'll need * for the next one. 
*/ ctx.hw_inuse = hw_inuse; - ctx.vbudget = vbudget - vdebt; + ctx.vbudget = vbudget; __wake_up_locked_key(&iocg->waitq, TASK_NORMAL, &ctx); if (!waitqueue_active(&iocg->waitq)) return; @@ -1273,14 +1313,15 @@ static void iocg_kick_waitq(struct ioc_gq *iocg, struct ioc_now *now) static enum hrtimer_restart iocg_waitq_timer_fn(struct hrtimer *timer) { struct ioc_gq *iocg = container_of(timer, struct ioc_gq, waitq_timer); + bool pay_debt = READ_ONCE(iocg->abs_vdebt); struct ioc_now now; unsigned long flags; ioc_now(iocg->ioc, &now); - spin_lock_irqsave(&iocg->waitq.lock, flags); - iocg_kick_waitq(iocg, &now); - spin_unlock_irqrestore(&iocg->waitq.lock, flags); + iocg_lock(iocg, pay_debt, &flags); + iocg_kick_waitq(iocg, pay_debt, &now); + iocg_unlock(iocg, pay_debt, &flags); return HRTIMER_NORESTART; } @@ -1396,7 +1437,7 @@ static void ioc_timer_fn(struct timer_list *timer) if (waitqueue_active(&iocg->waitq) || iocg->abs_vdebt) { /* might be oversleeping vtime / hweight changes, kick */ - iocg_kick_waitq(iocg, &now); + iocg_kick_waitq(iocg, true, &now); } else if (iocg_is_idle(iocg)) { /* no waiter and idle, deactivate */ iocg->last_inuse = iocg->inuse; @@ -1743,6 +1784,8 @@ static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio) struct iocg_wait wait; u32 hw_active, hw_inuse; u64 abs_cost, cost, vtime; + bool use_debt, ioc_locked; + unsigned long flags; /* bypass IOs if disabled or for root cgroup */ if (!ioc->enabled || !iocg->level) @@ -1786,15 +1829,26 @@ static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio) } /* - * We activated above but w/o any synchronization. Deactivation is - * synchronized with waitq.lock and we won't get deactivated as long - * as we're waiting or has debt, so we're good if we're activated - * here. In the unlikely case that we aren't, just issue the IO. + * We're over budget. This can be handled in two ways. IOs which may + * cause priority inversions are punted to @ioc->aux_iocg and charged as + * debt. Otherwise, the issuer is blocked on @iocg->waitq. Debt handling + * requires @ioc->lock, waitq handling @iocg->waitq.lock. Determine + * whether debt handling is needed and acquire locks accordingly. */ - spin_lock_irq(&iocg->waitq.lock); + use_debt = bio_issue_as_root_blkg(bio) || fatal_signal_pending(current); + ioc_locked = use_debt || READ_ONCE(iocg->abs_vdebt); + iocg_lock(iocg, ioc_locked, &flags); + + /* + * @iocg must stay activated for debt and waitq handling. Deactivation + * is synchronized against both ioc->lock and waitq.lock and we won't + * get deactivated as long as we're waiting or has debt, so we're good + * if we're activated here. In the unlikely cases that we aren't, just + * issue the IO. + */ if (unlikely(list_empty(&iocg->active_list))) { - spin_unlock_irq(&iocg->waitq.lock); + iocg_unlock(iocg, ioc_locked, &flags); iocg_commit_bio(iocg, bio, cost); return; } @@ -1816,12 +1870,12 @@ static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio) * clear them and leave @iocg inactive w/ dangling use_delay heavily * penalizing the cgroup and its descendants. 
*/ - if (bio_issue_as_root_blkg(bio) || fatal_signal_pending(current)) { + if (use_debt) { iocg->abs_vdebt += abs_cost; if (iocg_kick_delay(iocg, &now)) blkcg_schedule_throttle(rqos->q, (bio->bi_opf & REQ_SWAP) == REQ_SWAP); - spin_unlock_irq(&iocg->waitq.lock); + iocg_unlock(iocg, ioc_locked, &flags); return; } @@ -1845,9 +1899,9 @@ static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio) wait.committed = false; /* will be set true by waker */ __add_wait_queue_entry_tail(&iocg->waitq, &wait.wait); - iocg_kick_waitq(iocg, &now); + iocg_kick_waitq(iocg, ioc_locked, &now); - spin_unlock_irq(&iocg->waitq.lock); + iocg_unlock(iocg, ioc_locked, &flags); while (true) { set_current_state(TASK_UNINTERRUPTIBLE); -- cgit v1.2.3 From 97eb19751f155903489f8d42ea658ace2c59faf7 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 1 Sep 2020 14:52:43 -0400 Subject: blk-iocost: add absolute usage stat Currently, iocost doesn't collect or expose any statistics punting off all monitoring duties to drgn based iocost_monitor.py. While it works for some scenarios, there are some usability and data availability challenges. For example, accurate per-cgroup usage information can't be tracked by vtime progression at all and the number available in iocg->usages[] are really short-term snapshots used for control heuristics with possibly significant errors. This patch implements per-cgroup absolute usage stat counter and exposes it through io.stat along with the current vrate. Usage stat collection and flushing employ the same method as cgroup rstat on the active iocg's and the only hot path overhead is preemption toggling and adding to a percpu counter. Signed-off-by: Tejun Heo Signed-off-by: Jens Axboe --- block/blk-iocost.c | 155 ++++++++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 149 insertions(+), 6 deletions(-) (limited to 'block') diff --git a/block/blk-iocost.c b/block/blk-iocost.c index 23b173e34591..f30f9b37fcf0 100644 --- a/block/blk-iocost.c +++ b/block/blk-iocost.c @@ -431,6 +431,14 @@ struct ioc { bool user_cost_model:1; }; +struct iocg_pcpu_stat { + local64_t abs_vusage; +}; + +struct iocg_stat { + u64 usage_us; +}; + /* per device-cgroup pair */ struct ioc_gq { struct blkg_policy_data pd; @@ -492,10 +500,19 @@ struct ioc_gq { u32 hweight_inuse; bool has_surplus; + struct list_head walk_list; + struct wait_queue_head waitq; struct hrtimer waitq_timer; struct hrtimer delay_timer; + /* statistics */ + struct iocg_pcpu_stat __percpu *pcpu_stat; + struct iocg_stat local_stat; + struct iocg_stat desc_stat; + struct iocg_stat last_stat; + u64 last_stat_abs_vusage; + /* usage is recorded as fractions of WEIGHT_ONE */ int usage_idx; u32 usages[NR_USAGE_SLOTS]; @@ -674,10 +691,17 @@ static u64 cost_to_abs_cost(u64 cost, u32 hw_inuse) return DIV64_U64_ROUND_UP(cost * hw_inuse, WEIGHT_ONE); } -static void iocg_commit_bio(struct ioc_gq *iocg, struct bio *bio, u64 cost) +static void iocg_commit_bio(struct ioc_gq *iocg, struct bio *bio, + u64 abs_cost, u64 cost) { + struct iocg_pcpu_stat *gcs; + bio->bi_iocost_cost = cost; atomic64_add(cost, &iocg->vtime); + + gcs = get_cpu_ptr(iocg->pcpu_stat); + local64_add(abs_cost, &gcs->abs_vusage); + put_cpu_ptr(gcs); } static void iocg_lock(struct ioc_gq *iocg, bool lock_ioc, unsigned long *flags) @@ -1221,7 +1245,7 @@ static int iocg_wake_fn(struct wait_queue_entry *wq_entry, unsigned mode, if (ctx->vbudget < 0) return -1; - iocg_commit_bio(ctx->iocg, wait->bio, cost); + iocg_commit_bio(ctx->iocg, wait->bio, wait->abs_cost, cost); /* * 
autoremove_wake_function() removes the wait entry only when it @@ -1382,6 +1406,87 @@ static bool iocg_is_idle(struct ioc_gq *iocg) return true; } +/* + * Call this function on the target leaf @iocg's to build pre-order traversal + * list of all the ancestors in @inner_walk. The inner nodes are linked through + * ->walk_list and the caller is responsible for dissolving the list after use. + */ +static void iocg_build_inner_walk(struct ioc_gq *iocg, + struct list_head *inner_walk) +{ + int lvl; + + WARN_ON_ONCE(!list_empty(&iocg->walk_list)); + + /* find the first ancestor which hasn't been visited yet */ + for (lvl = iocg->level - 1; lvl >= 0; lvl--) { + if (!list_empty(&iocg->ancestors[lvl]->walk_list)) + break; + } + + /* walk down and visit the inner nodes to get pre-order traversal */ + while (++lvl <= iocg->level - 1) { + struct ioc_gq *inner = iocg->ancestors[lvl]; + + /* record traversal order */ + list_add_tail(&inner->walk_list, inner_walk); + } +} + +/* collect per-cpu counters and propagate the deltas to the parent */ +static void iocg_flush_stat_one(struct ioc_gq *iocg, struct ioc_now *now) +{ + struct iocg_stat new_stat; + u64 abs_vusage = 0; + u64 vusage_delta; + int cpu; + + lockdep_assert_held(&iocg->ioc->lock); + + /* collect per-cpu counters */ + for_each_possible_cpu(cpu) { + abs_vusage += local64_read( + per_cpu_ptr(&iocg->pcpu_stat->abs_vusage, cpu)); + } + vusage_delta = abs_vusage - iocg->last_stat_abs_vusage; + iocg->last_stat_abs_vusage = abs_vusage; + + iocg->local_stat.usage_us += div64_u64(vusage_delta, now->vrate); + + new_stat.usage_us = + iocg->local_stat.usage_us + iocg->desc_stat.usage_us; + + /* propagate the deltas to the parent */ + if (iocg->level > 0) { + struct iocg_stat *parent_stat = + &iocg->ancestors[iocg->level - 1]->desc_stat; + + parent_stat->usage_us += + new_stat.usage_us - iocg->last_stat.usage_us; + } + + iocg->last_stat = new_stat; +} + +/* get stat counters ready for reading on all active iocgs */ +static void iocg_flush_stat(struct list_head *target_iocgs, struct ioc_now *now) +{ + LIST_HEAD(inner_walk); + struct ioc_gq *iocg, *tiocg; + + /* flush leaves and build inner node walk list */ + list_for_each_entry(iocg, target_iocgs, active_list) { + iocg_flush_stat_one(iocg, now); + iocg_build_inner_walk(iocg, &inner_walk); + } + + /* keep flushing upwards by walking the inner list backwards */ + list_for_each_entry_safe_reverse(iocg, tiocg, &inner_walk, walk_list) { + iocg_flush_stat_one(iocg, now); + list_del_init(&iocg->walk_list); + } +} + /* returns usage with margin added if surplus is large enough */ static u32 surplus_adjusted_hweight_inuse(u32 usage, u32 hw_inuse) { @@ -1422,6 +1527,8 @@ static void ioc_timer_fn(struct timer_list *timer) return; } + iocg_flush_stat(&ioc->active_iocgs, &now); + /* * Waiters determine the sleep durations based on the vrate they * saw at the time of sleep. 
If vrate has increased, some waiters @@ -1824,7 +1931,7 @@ static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio) */ if (!waitqueue_active(&iocg->waitq) && !iocg->abs_vdebt && time_before_eq64(vtime + cost, now.vnow)) { - iocg_commit_bio(iocg, bio, cost); + iocg_commit_bio(iocg, bio, abs_cost, cost); return; } @@ -1849,7 +1956,7 @@ static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio) */ if (unlikely(list_empty(&iocg->active_list))) { iocg_unlock(iocg, ioc_locked, &flags); - iocg_commit_bio(iocg, bio, cost); + iocg_commit_bio(iocg, bio, abs_cost, cost); return; } @@ -1948,7 +2055,7 @@ static void ioc_rqos_merge(struct rq_qos *rqos, struct request *rq, */ if (rq->bio && rq->bio->bi_iocost_cost && time_before_eq64(atomic64_read(&iocg->vtime) + cost, now.vnow)) { - iocg_commit_bio(iocg, bio, cost); + iocg_commit_bio(iocg, bio, abs_cost, cost); return; } @@ -1962,7 +2069,7 @@ static void ioc_rqos_merge(struct rq_qos *rqos, struct request *rq, iocg->abs_vdebt += abs_cost; iocg_kick_delay(iocg, &now); } else { - iocg_commit_bio(iocg, bio, cost); + iocg_commit_bio(iocg, bio, abs_cost, cost); } spin_unlock_irqrestore(&iocg->waitq.lock, flags); } @@ -2133,6 +2240,12 @@ static struct blkg_policy_data *ioc_pd_alloc(gfp_t gfp, struct request_queue *q, if (!iocg) return NULL; + iocg->pcpu_stat = alloc_percpu_gfp(struct iocg_pcpu_stat, gfp); + if (!iocg->pcpu_stat) { + kfree(iocg); + return NULL; + } + return &iocg->pd; } @@ -2152,6 +2265,7 @@ static void ioc_pd_init(struct blkg_policy_data *pd) atomic64_set(&iocg->done_vtime, now.vnow); atomic64_set(&iocg->active_period, atomic64_read(&ioc->cur_period)); INIT_LIST_HEAD(&iocg->active_list); + INIT_LIST_HEAD(&iocg->walk_list); iocg->hweight_active = WEIGHT_ONE; iocg->hweight_inuse = WEIGHT_ONE; @@ -2181,18 +2295,46 @@ static void ioc_pd_free(struct blkg_policy_data *pd) if (ioc) { spin_lock_irqsave(&ioc->lock, flags); + if (!list_empty(&iocg->active_list)) { propagate_weights(iocg, 0, 0); list_del_init(&iocg->active_list); } + + WARN_ON_ONCE(!list_empty(&iocg->walk_list)); + spin_unlock_irqrestore(&ioc->lock, flags); hrtimer_cancel(&iocg->waitq_timer); hrtimer_cancel(&iocg->delay_timer); } + free_percpu(iocg->pcpu_stat); kfree(iocg); } +static size_t ioc_pd_stat(struct blkg_policy_data *pd, char *buf, size_t size) +{ + struct ioc_gq *iocg = pd_to_iocg(pd); + struct ioc *ioc = iocg->ioc; + size_t pos = 0; + + if (!ioc->enabled) + return 0; + + if (iocg->level == 0) { + unsigned vp10k = DIV64_U64_ROUND_CLOSEST( + atomic64_read(&ioc->vtime_rate) * 10000, + VTIME_PER_USEC); + pos += scnprintf(buf + pos, size - pos, " cost.vrate=%u.%02u", + vp10k / 100, vp10k % 100); + } + + pos += scnprintf(buf + pos, size - pos, " cost.usage=%llu", + iocg->last_stat.usage_us); + + return pos; +} + static u64 ioc_weight_prfill(struct seq_file *sf, struct blkg_policy_data *pd, int off) { @@ -2606,6 +2748,7 @@ static struct blkcg_policy blkcg_policy_iocost = { .pd_alloc_fn = ioc_pd_alloc, .pd_init_fn = ioc_pd_init, .pd_free_fn = ioc_pd_free, + .pd_stat_fn = ioc_pd_stat, }; static int __init ioc_init(void) -- cgit v1.2.3 From 1aa50d020c7148f5f0bde15ca80fe6f91a8c5a4e Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 1 Sep 2020 14:52:44 -0400 Subject: blk-iocost: calculate iocg->usages[] from iocg->local_stat.usage_us Currently, iocg->usages[] which are used to guide inuse adjustments are calculated from vtime deltas. 
This, however, assumes that the hierarchical inuse weight at the time of calculation held for the entire period, which often isn't true and can lead to significant errors. Now that we have absolute usage information collected, we can derive iocg->usages[] from iocg->local_stat.usage_us so that inuse adjustment decisions are made based on actual absolute usage. The calculated usage is clamped between 1 and WEIGHT_ONE and WEIGHT_ONE is also used to signal saturation regardless of the current hierarchical inuse weight. Signed-off-by: Tejun Heo Signed-off-by: Jens Axboe --- block/blk-iocost.c | 72 ++++++++++++++++++++++++++++++++++-------------------- 1 file changed, 45 insertions(+), 27 deletions(-) (limited to 'block') diff --git a/block/blk-iocost.c b/block/blk-iocost.c index f30f9b37fcf0..2496674bbbf4 100644 --- a/block/blk-iocost.c +++ b/block/blk-iocost.c @@ -476,14 +476,10 @@ struct ioc_gq { * `vtime_done` is the same but progressed on completion rather * than issue. The delta behind `vtime` represents the cost of * currently in-flight IOs. - * - * `last_vtime` is used to remember `vtime` at the end of the last - * period to calculate utilization. */ atomic64_t vtime; atomic64_t done_vtime; u64 abs_vdebt; - u64 last_vtime; /* * The period this iocg was last active in. Used for deactivation @@ -506,6 +502,9 @@ struct ioc_gq { struct hrtimer waitq_timer; struct hrtimer delay_timer; + /* timestamp at the latest activation */ + u64 activated_at; + /* statistics */ struct iocg_pcpu_stat __percpu *pcpu_stat; struct iocg_stat local_stat; @@ -514,6 +513,7 @@ struct ioc_gq { u64 last_stat_abs_vusage; /* usage is recorded as fractions of WEIGHT_ONE */ + u32 usage_delta_us; int usage_idx; u32 usages[NR_USAGE_SLOTS]; @@ -1159,7 +1159,7 @@ static bool iocg_activate(struct ioc_gq *iocg, struct ioc_now *now) TRACE_IOCG_PATH(iocg_activate, iocg, now, last_period, cur_period, vtime); - iocg->last_vtime = vtime; + iocg->activated_at = now->now; if (ioc->running == IOC_IDLE) { ioc->running = IOC_RUNNING; @@ -1451,7 +1451,8 @@ static void iocg_flush_stat_one(struct ioc_gq *iocg, struct ioc_now *now) vusage_delta = abs_vusage - iocg->last_stat_abs_vusage; iocg->last_stat_abs_vusage = abs_vusage; - iocg->local_stat.usage_us += div64_u64(vusage_delta, now->vrate); + iocg->usage_delta_us = div64_u64(vusage_delta, now->vrate); + iocg->local_stat.usage_us += iocg->usage_delta_us; new_stat.usage_us = iocg->local_stat.usage_us + iocg->desc_stat.usage_us; @@ -1558,8 +1559,9 @@ static void ioc_timer_fn(struct timer_list *timer) /* calc usages and see whether some weights need to be moved around */ list_for_each_entry(iocg, &ioc->active_iocgs, active_list) { - u64 vdone, vtime, vusage, vmin; + u64 vdone, vtime, usage_us, vmin; u32 hw_active, hw_inuse, usage; + int uidx; /* * Collect unused and wind vtime closer to vnow to prevent @@ -1583,27 +1585,44 @@ static void ioc_timer_fn(struct timer_list *timer) time_before64(vdone, now.vnow - period_vtime)) nr_lagging++; - if (waitqueue_active(&iocg->waitq)) - vusage = now.vnow - iocg->last_vtime; - else if (time_before64(iocg->last_vtime, vtime)) - vusage = vtime - iocg->last_vtime; - else - vusage = 0; - - iocg->last_vtime += vusage; /* - * Factor in in-flight vtime into vusage to avoid - * high-latency completions appearing as idle. This should - * be done after the above ->last_time adjustment. + * Determine absolute usage factoring in pending and in-flight + * IOs to avoid stalls and high-latency completions appearing as + * idle. 
*/ - vusage = max(vusage, vtime - vdone); - - /* calculate hweight based usage ratio and record */ - if (vusage) { - usage = DIV64_U64_ROUND_UP(vusage * hw_inuse, - period_vtime); - iocg->usage_idx = (iocg->usage_idx + 1) % NR_USAGE_SLOTS; - iocg->usages[iocg->usage_idx] = usage; + usage_us = iocg->usage_delta_us; + if (waitqueue_active(&iocg->waitq) && time_before64(vtime, now.vnow)) + usage_us += DIV64_U64_ROUND_UP( + cost_to_abs_cost(now.vnow - vtime, hw_inuse), + now.vrate); + if (vdone != vtime) { + u64 inflight_us = DIV64_U64_ROUND_UP( + cost_to_abs_cost(vtime - vdone, hw_inuse), + now.vrate); + usage_us = max(usage_us, inflight_us); + } + + /* convert to hweight based usage ratio and record */ + uidx = (iocg->usage_idx + 1) % NR_USAGE_SLOTS; + + if (time_after64(vtime, now.vnow - ioc->margins.min)) { + iocg->usage_idx = uidx; + iocg->usages[uidx] = WEIGHT_ONE; + } else if (usage_us) { + u64 started_at, dur; + + if (time_after64(iocg->activated_at, ioc->period_at)) + started_at = iocg->activated_at; + else + started_at = ioc->period_at; + + dur = max_t(u64, now.now - started_at, 1); + usage = clamp_t(u32, + DIV64_U64_ROUND_UP(usage_us * WEIGHT_ONE, dur), + 1, WEIGHT_ONE); + + iocg->usage_idx = uidx; + iocg->usages[uidx] = usage; } else { usage = 0; } @@ -1620,7 +1639,6 @@ static void ioc_timer_fn(struct timer_list *timer) /* throw away surplus vtime */ atomic64_add(delta, &iocg->vtime); atomic64_add(delta, &iocg->done_vtime); - iocg->last_vtime += delta; /* if usage is sufficiently low, maybe it can donate */ if (surplus_adjusted_hweight_inuse(usage, hw_inuse)) { iocg->has_surplus = true; -- cgit v1.2.3 From 8692d2db8e01f1a5718f9b67c219b823a64b2088 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 1 Sep 2020 14:52:45 -0400 Subject: blk-iocost: replace iocg->has_surplus with ->surplus_list Instead of marking iocgs with surplus with a flag and filtering for them while walking all active iocgs, build a surpluses list. This doesn't make much difference now but will help implementing improved donation logic which will iterate iocgs with surplus multiple times. 
Signed-off-by: Tejun Heo Signed-off-by: Jens Axboe --- block/blk-iocost.c | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) (limited to 'block') diff --git a/block/blk-iocost.c b/block/blk-iocost.c index 2496674bbbf4..c1cd66cfa2a8 100644 --- a/block/blk-iocost.c +++ b/block/blk-iocost.c @@ -494,9 +494,9 @@ struct ioc_gq { int hweight_gen; u32 hweight_active; u32 hweight_inuse; - bool has_surplus; struct list_head walk_list; + struct list_head surplus_list; struct wait_queue_head waitq; struct hrtimer waitq_timer; @@ -1507,6 +1507,7 @@ static void ioc_timer_fn(struct timer_list *timer) struct ioc *ioc = container_of(timer, struct ioc, timer); struct ioc_gq *iocg, *tiocg; struct ioc_now now; + LIST_HEAD(surpluses); int nr_surpluses = 0, nr_shortages = 0, nr_lagging = 0; u32 ppm_rthr = MILLION - ioc->params.qos[QOS_RPPM]; u32 ppm_wthr = MILLION - ioc->params.qos[QOS_WPPM]; @@ -1630,8 +1631,7 @@ static void ioc_timer_fn(struct timer_list *timer) /* see whether there's surplus vtime */ vmin = now.vnow - ioc->margins.max; - iocg->has_surplus = false; - + WARN_ON_ONCE(!list_empty(&iocg->surplus_list)); if (!waitqueue_active(&iocg->waitq) && time_before64(vtime, vmin)) { u64 delta = vmin - vtime; @@ -1641,7 +1641,7 @@ static void ioc_timer_fn(struct timer_list *timer) atomic64_add(delta, &iocg->done_vtime); /* if usage is sufficiently low, maybe it can donate */ if (surplus_adjusted_hweight_inuse(usage, hw_inuse)) { - iocg->has_surplus = true; + list_add(&iocg->surplus_list, &surpluses); nr_surpluses++; } } else if (hw_inuse < hw_active) { @@ -1677,13 +1677,10 @@ static void ioc_timer_fn(struct timer_list *timer) goto skip_surplus_transfers; /* there are both shortages and surpluses, transfer surpluses */ - list_for_each_entry(iocg, &ioc->active_iocgs, active_list) { + list_for_each_entry(iocg, &surpluses, surplus_list) { u32 usage, hw_active, hw_inuse, new_hwi, new_inuse; int nr_valid = 0; - if (!iocg->has_surplus) - continue; - /* base the decision on max historical usage */ for (i = 0, usage = 0; i < NR_USAGE_SLOTS; i++) { if (iocg->usages[i]) { @@ -1711,6 +1708,10 @@ static void ioc_timer_fn(struct timer_list *timer) skip_surplus_transfers: commit_weights(ioc); + /* surplus list should be dissolved after use */ + list_for_each_entry_safe(iocg, tiocg, &surpluses, surplus_list) + list_del_init(&iocg->surplus_list); + /* * If q is getting clogged or we're missing too much, we're issuing * too much IO and should lower vtime rate. If we're not missing @@ -2284,6 +2285,7 @@ static void ioc_pd_init(struct blkg_policy_data *pd) atomic64_set(&iocg->active_period, atomic64_read(&ioc->cur_period)); INIT_LIST_HEAD(&iocg->active_list); INIT_LIST_HEAD(&iocg->walk_list); + INIT_LIST_HEAD(&iocg->surplus_list); iocg->hweight_active = WEIGHT_ONE; iocg->hweight_inuse = WEIGHT_ONE; @@ -2320,6 +2322,7 @@ static void ioc_pd_free(struct blkg_policy_data *pd) } WARN_ON_ONCE(!list_empty(&iocg->walk_list)); + WARN_ON_ONCE(!list_empty(&iocg->surplus_list)); spin_unlock_irqrestore(&ioc->lock, flags); -- cgit v1.2.3 From 065655c862fedf4b04e1b28b83ca6f338d81cf0b Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 1 Sep 2020 14:52:46 -0400 Subject: blk-iocost: decouple vrate adjustment from surplus transfers Budget donations are inaccurate and could take multiple periods to converge. To prevent triggering vrate adjustments while surplus transfers were catching up, vrate adjustment was suppressed if donations were increasing, which was indicated by non-zero nr_surpluses. 
This entangling won't be necessary with the scheduled rewrite of donation mechanism which will make it precise and immediate. Let's decouple the two in preparation. Signed-off-by: Tejun Heo Signed-off-by: Jens Axboe --- block/blk-iocost.c | 19 +++++++------------ 1 file changed, 7 insertions(+), 12 deletions(-) (limited to 'block') diff --git a/block/blk-iocost.c b/block/blk-iocost.c index c1cd66cfa2a8..a3889a8b0a33 100644 --- a/block/blk-iocost.c +++ b/block/blk-iocost.c @@ -1508,7 +1508,7 @@ static void ioc_timer_fn(struct timer_list *timer) struct ioc_gq *iocg, *tiocg; struct ioc_now now; LIST_HEAD(surpluses); - int nr_surpluses = 0, nr_shortages = 0, nr_lagging = 0; + int nr_shortages = 0, nr_lagging = 0; u32 ppm_rthr = MILLION - ioc->params.qos[QOS_RPPM]; u32 ppm_wthr = MILLION - ioc->params.qos[QOS_WPPM]; u32 missed_ppm[2], rq_wait_pct; @@ -1640,10 +1640,8 @@ static void ioc_timer_fn(struct timer_list *timer) atomic64_add(delta, &iocg->vtime); atomic64_add(delta, &iocg->done_vtime); /* if usage is sufficiently low, maybe it can donate */ - if (surplus_adjusted_hweight_inuse(usage, hw_inuse)) { + if (surplus_adjusted_hweight_inuse(usage, hw_inuse)) list_add(&iocg->surplus_list, &surpluses); - nr_surpluses++; - } } else if (hw_inuse < hw_active) { u32 new_hwi, new_inuse; @@ -1673,7 +1671,7 @@ static void ioc_timer_fn(struct timer_list *timer) } } - if (!nr_shortages || !nr_surpluses) + if (!nr_shortages || list_empty(&surpluses)) goto skip_surplus_transfers; /* there are both shortages and surpluses, transfer surpluses */ @@ -1738,11 +1736,9 @@ skip_surplus_transfers: /* * If there are IOs spanning multiple periods, wait - * them out before pushing the device harder. If - * there are surpluses, let redistribution work it - * out first. + * them out before pushing the device harder. */ - if (!nr_lagging && !nr_surpluses) + if (!nr_lagging) ioc->busy_level--; } else { /* @@ -1796,15 +1792,14 @@ skip_surplus_transfers: } trace_iocost_ioc_vrate_adj(ioc, vrate, missed_ppm, rq_wait_pct, - nr_lagging, nr_shortages, - nr_surpluses); + nr_lagging, nr_shortages); atomic64_set(&ioc->vtime_rate, vrate); ioc_refresh_margins(ioc); } else if (ioc->busy_level != prev_busy_level || nr_lagging) { trace_iocost_ioc_vrate_adj(ioc, atomic64_read(&ioc->vtime_rate), missed_ppm, rq_wait_pct, nr_lagging, - nr_shortages, nr_surpluses); + nr_shortages); } ioc_refresh_params(ioc, false); -- cgit v1.2.3 From 93f7d2db80e4aea2731619d7b907a029e0d14259 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 1 Sep 2020 14:52:47 -0400 Subject: blk-iocost: restructure surplus donation logic The way the surplus donation logic is structured isn't great. There are two separate paths for starting/increasing donations and decreasing them making the logic harder to follow and is prone to unnecessary behavior differences. In preparation for improved donation handling, this patch restructures the code so that * All donors - new, increasing and decreasing - are funneled through the same code path. * The target donation calculation is factored into hweight_after_donation() which is called once from the same spot for all possible donors. * Actual inuse adjustment is factored into trasnfer_surpluses(). This change introduces a few behavior differences - e.g. donation amount reduction now uses the max usage of the recent three periods just like new and increasing donations, and inuse now gets adjusted upwards the same way it gets downwards. 
These differences are unlikely to have severely negative implications and the whole logic will be revamped soon. This patch also removes two tracepoints. The existing TPs don't quite fit the new implementation. A later patch will update and reinstate them. Signed-off-by: Tejun Heo Signed-off-by: Jens Axboe --- block/blk-iocost.c | 179 ++++++++++++++++++++++++++++++----------------------- 1 file changed, 103 insertions(+), 76 deletions(-) (limited to 'block') diff --git a/block/blk-iocost.c b/block/blk-iocost.c index a3889a8b0a33..61b008d0801f 100644 --- a/block/blk-iocost.c +++ b/block/blk-iocost.c @@ -494,6 +494,7 @@ struct ioc_gq { int hweight_gen; u32 hweight_active; u32 hweight_inuse; + u32 hweight_after_donation; struct list_head walk_list; struct list_head surplus_list; @@ -1070,6 +1071,32 @@ out: *hw_inusep = iocg->hweight_inuse; } +/* + * Calculate the hweight_inuse @iocg would get with max @inuse assuming all the + * other weights stay unchanged. + */ +static u32 current_hweight_max(struct ioc_gq *iocg) +{ + u32 hwm = WEIGHT_ONE; + u32 inuse = iocg->active; + u64 child_inuse_sum; + int lvl; + + lockdep_assert_held(&iocg->ioc->lock); + + for (lvl = iocg->level - 1; lvl >= 0; lvl--) { + struct ioc_gq *parent = iocg->ancestors[lvl]; + struct ioc_gq *child = iocg->ancestors[lvl + 1]; + + child_inuse_sum = parent->child_inuse_sum + inuse - child->inuse; + hwm = div64_u64((u64)hwm * inuse, child_inuse_sum); + inuse = DIV64_U64_ROUND_UP(parent->active * child_inuse_sum, + parent->child_active_sum); + } + + return max_t(u32, hwm, 1); +} + static void weight_updated(struct ioc_gq *iocg) { struct ioc *ioc = iocg->ioc; @@ -1488,20 +1515,58 @@ static void iocg_flush_stat(struct list_head *target_iocgs, struct ioc_now *now) } } -/* returns usage with margin added if surplus is large enough */ -static u32 surplus_adjusted_hweight_inuse(u32 usage, u32 hw_inuse) +/* + * Determine what @iocg's hweight_inuse should be after donating unused + * capacity. @hwm is the upper bound and used to signal no donation. This + * function also throws away @iocg's excess budget. 
+ */ +static u32 hweight_after_donation(struct ioc_gq *iocg, u32 hwm, u32 usage, + struct ioc_now *now) { + struct ioc *ioc = iocg->ioc; + u64 vtime = atomic64_read(&iocg->vtime); + s64 excess; + + /* see whether minimum margin requirement is met */ + if (waitqueue_active(&iocg->waitq) || + time_after64(vtime, now->vnow - ioc->margins.min)) + return hwm; + + /* throw away excess above max */ + excess = now->vnow - vtime - ioc->margins.max; + if (excess > 0) { + atomic64_add(excess, &iocg->vtime); + atomic64_add(excess, &iocg->done_vtime); + vtime += excess; + } + /* add margin */ usage = DIV_ROUND_UP(usage * SURPLUS_SCALE_PCT, 100); usage += SURPLUS_SCALE_ABS; /* don't bother if the surplus is too small */ - if (usage + SURPLUS_MIN_ADJ_DELTA > hw_inuse) - return 0; + if (usage + SURPLUS_MIN_ADJ_DELTA > hwm) + return hwm; return usage; } +static void transfer_surpluses(struct list_head *surpluses, struct ioc_now *now) +{ + struct ioc_gq *iocg; + + list_for_each_entry(iocg, surpluses, surplus_list) { + u32 old_hwi, new_hwi, new_inuse; + + current_hweight(iocg, NULL, &old_hwi); + new_hwi = iocg->hweight_after_donation; + + new_inuse = DIV64_U64_ROUND_UP((u64)iocg->inuse * new_hwi, + old_hwi); + __propagate_weights(iocg, iocg->weight, new_inuse); + } +} + static void ioc_timer_fn(struct timer_list *timer) { struct ioc *ioc = container_of(timer, struct ioc, timer); @@ -1560,9 +1625,9 @@ static void ioc_timer_fn(struct timer_list *timer) /* calc usages and see whether some weights need to be moved around */ list_for_each_entry(iocg, &ioc->active_iocgs, active_list) { - u64 vdone, vtime, usage_us, vmin; + u64 vdone, vtime, usage_us; u32 hw_active, hw_inuse, usage; - int uidx; + int uidx, nr_valid; /* * Collect unused and wind vtime closer to vnow to prevent @@ -1618,92 +1683,54 @@ static void ioc_timer_fn(struct timer_list *timer) started_at = ioc->period_at; dur = max_t(u64, now.now - started_at, 1); - usage = clamp_t(u32, + + iocg->usage_idx = uidx; + iocg->usages[uidx] = clamp_t(u32, DIV64_U64_ROUND_UP(usage_us * WEIGHT_ONE, dur), 1, WEIGHT_ONE); + } - iocg->usage_idx = uidx; - iocg->usages[uidx] = usage; - } else { - usage = 0; + /* base the decision on max historical usage */ + for (i = 0, usage = 0, nr_valid = 0; i < NR_USAGE_SLOTS; i++) { + if (iocg->usages[i]) { + usage = max(usage, iocg->usages[i]); + nr_valid++; + } } + if (nr_valid < MIN_VALID_USAGES) + usage = WEIGHT_ONE; /* see whether there's surplus vtime */ - vmin = now.vnow - ioc->margins.max; - WARN_ON_ONCE(!list_empty(&iocg->surplus_list)); - if (!waitqueue_active(&iocg->waitq) && - time_before64(vtime, vmin)) { - u64 delta = vmin - vtime; - - /* throw away surplus vtime */ - atomic64_add(delta, &iocg->vtime); - atomic64_add(delta, &iocg->done_vtime); - /* if usage is sufficiently low, maybe it can donate */ - if (surplus_adjusted_hweight_inuse(usage, hw_inuse)) - list_add(&iocg->surplus_list, &surpluses); - } else if (hw_inuse < hw_active) { - u32 new_hwi, new_inuse; + if (hw_inuse < hw_active || + (!waitqueue_active(&iocg->waitq) && + time_before64(vtime, now.vnow - ioc->margins.max))) { + u32 hwm, new_hwi; - /* was donating but might need to take back some */ - if (waitqueue_active(&iocg->waitq)) { - new_hwi = hw_active; + /* + * Already donating or accumulated enough to start. + * Determine the donation amount. 
+ */ + hwm = current_hweight_max(iocg); + new_hwi = hweight_after_donation(iocg, hwm, usage, + &now); + if (new_hwi < hwm) { + iocg->hweight_after_donation = new_hwi; + list_add(&iocg->surplus_list, &surpluses); } else { - new_hwi = max(hw_inuse, - usage * SURPLUS_SCALE_PCT / 100 + - SURPLUS_SCALE_ABS); - } - - new_inuse = div64_u64((u64)iocg->inuse * new_hwi, - hw_inuse); - new_inuse = clamp_t(u32, new_inuse, 1, iocg->active); - - if (new_inuse > iocg->inuse) { - TRACE_IOCG_PATH(inuse_takeback, iocg, &now, - iocg->inuse, new_inuse, - hw_inuse, new_hwi); - __propagate_weights(iocg, iocg->weight, - new_inuse); + __propagate_weights(iocg, iocg->active, + iocg->active); + nr_shortages++; } } else { - /* genuninely out of vtime */ + /* genuinely short on vtime */ nr_shortages++; } } - if (!nr_shortages || list_empty(&surpluses)) - goto skip_surplus_transfers; + if (!list_empty(&surpluses) && nr_shortages) + transfer_surpluses(&surpluses, &now); - /* there are both shortages and surpluses, transfer surpluses */ - list_for_each_entry(iocg, &surpluses, surplus_list) { - u32 usage, hw_active, hw_inuse, new_hwi, new_inuse; - int nr_valid = 0; - - /* base the decision on max historical usage */ - for (i = 0, usage = 0; i < NR_USAGE_SLOTS; i++) { - if (iocg->usages[i]) { - usage = max(usage, iocg->usages[i]); - nr_valid++; - } - } - if (nr_valid < MIN_VALID_USAGES) - continue; - - current_hweight(iocg, &hw_active, &hw_inuse); - new_hwi = surplus_adjusted_hweight_inuse(usage, hw_inuse); - if (!new_hwi) - continue; - - new_inuse = DIV64_U64_ROUND_UP((u64)iocg->inuse * new_hwi, - hw_inuse); - if (new_inuse < iocg->inuse) { - TRACE_IOCG_PATH(inuse_giveaway, iocg, &now, - iocg->inuse, new_inuse, - hw_inuse, new_hwi); - __propagate_weights(iocg, iocg->weight, new_inuse); - } - } -skip_surplus_transfers: commit_weights(ioc); /* surplus list should be dissolved after use */ -- cgit v1.2.3 From e08d02aa5fc9978ed42240bd80d3fb5dd28ecac0 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 1 Sep 2020 14:52:48 -0400 Subject: blk-iocost: implement Andy's method for donation weight updates iocost implements work conservation by reducing iocg->inuse and propagating the adjustment upwards proportionally. However, while I knew the target absolute hierarchical proportion - adjusted hweight_inuse, I couldn't figure out how to determine the iocg->inuse adjustment to achieve that and approximated the adjustment by scaling iocg->inuse using the proportion of the needed hweight_inuse changes. When nested, these scalings aren't accurate even when adjusting a single node as the donating node also receives the benefit of the donated portion. When multiple nodes are donating as they often do, they can be wildly wrong. iocost employed various safety nets to combat the inaccuracies. There are ample buffers in determining how much to donate, the adjustments are conservative and gradual. While it can achieve a reasonable level of work conservation in simple scenarios, the inaccuracies can easily add up leading to significant loss of total work. This in turn makes it difficult to closely cap vrate as vrate adjustment is needed to compensate for the loss of work. The combination of inaccurate donation calculations and vrate adjustments can lead to wide fluctuations and clunky overall behaviors. Andy Newell devised a method to calculate the needed ->inuse updates to achieve the target hweight_inuse's. 
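A hypothetical single-level example shows the inaccuracy the old scaling suffered from: siblings A and B each have inuse 100, so A's hweight_inuse is 100 / (100 + 100) = 50%. If A should end up at 25%, scaling inuse by the hweight ratio gives 100 * 25 / 50 = 50, but with inuse 50 A's actual share is 50 / (50 + 100) = 33%, not 25%, because A's own donation also shrinks the sibling sum it is measured against. Hitting 25% exactly requires inuse of about 33 (33 / 133 is roughly 25%), and once the adjustment is propagated up a nested hierarchy these errors compound at every level.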
The method is compatible with the proportional inuse adjustment propagation which allows all hot path operations to be local to each iocg. To roughly summarize, Andy's method divides the tree into donating and non-donating parts, calculates global donation rate which is used to determine the target hweight_inuse for each node, and then derives per-level proportions. There's non-trivial amount of math involved. Please refer to the following pdfs for detailed descriptions. https://drive.google.com/file/d/1PsJwxPFtjUnwOY1QJ5AeICCcsL7BM3bo https://drive.google.com/file/d/1vONz1-fzVO7oY5DXXsLjSxEtYYQbOvsE https://drive.google.com/file/d/1WcrltBOSPN0qXVdBgnKm4mdp9FhuEFQN This patch implements Andy's method in transfer_surpluses(). This makes the donation calculations accurate per cycle and enables further improvements in other parts of the donation logic. Signed-off-by: Tejun Heo Cc: Andy Newell Signed-off-by: Jens Axboe --- block/blk-iocost.c | 252 +++++++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 244 insertions(+), 8 deletions(-) (limited to 'block') diff --git a/block/blk-iocost.c b/block/blk-iocost.c index 61b008d0801f..ecc23b827e5d 100644 --- a/block/blk-iocost.c +++ b/block/blk-iocost.c @@ -491,9 +491,11 @@ struct ioc_gq { /* see __propagate_weights() and current_hweight() for details */ u64 child_active_sum; u64 child_inuse_sum; + u64 child_adjusted_sum; int hweight_gen; u32 hweight_active; u32 hweight_inuse; + u32 hweight_donating; u32 hweight_after_donation; struct list_head walk_list; @@ -1551,20 +1553,252 @@ static u32 hweight_after_donation(struct ioc_gq *iocg, u32 hwm, u32 usage, return usage; } +/* + * For work-conservation, an iocg which isn't using all of its share should + * donate the leftover to other iocgs. There are two ways to achieve this - 1. + * bumping up vrate accordingly 2. lowering the donating iocg's inuse weight. + * + * #1 is mathematically simpler but has the drawback of requiring synchronous + * global hweight_inuse updates when idle iocg's get activated or inuse weights + * change due to donation snapbacks as it has the possibility of grossly + * overshooting what's allowed by the model and vrate. + * + * #2 is inherently safe with local operations. The donating iocg can easily + * snap back to higher weights when needed without worrying about impacts on + * other nodes as the impacts will be inherently correct. This also makes idle + * iocg activations safe. The only effect activations have is decreasing + * hweight_inuse of others, the right solution to which is for those iocgs to + * snap back to higher weights. + * + * So, we go with #2. The challenge is calculating how each donating iocg's + * inuse should be adjusted to achieve the target donation amounts. This is done + * using Andy's method described in the following pdf. + * + * https://drive.google.com/file/d/1PsJwxPFtjUnwOY1QJ5AeICCcsL7BM3bo + * + * Given the weights and target after-donation hweight_inuse values, Andy's + * method determines how the proportional distribution should look like at each + * sibling level to maintain the relative relationship between all non-donating + * pairs. To roughly summarize, it divides the tree into donating and + * non-donating parts, calculates global donation rate which is used to + * determine the target hweight_inuse for each node, and then derives per-level + * proportions. 
+ * + * The following pdf shows that global distribution calculated this way can be + * achieved by scaling inuse weights of donating leaves and propagating the + * adjustments upwards proportionally. + * + * https://drive.google.com/file/d/1vONz1-fzVO7oY5DXXsLjSxEtYYQbOvsE + * + * Combining the above two, we can determine how each leaf iocg's inuse should + * be adjusted to achieve the target donation. + * + * https://drive.google.com/file/d/1WcrltBOSPN0qXVdBgnKm4mdp9FhuEFQN + * + * The inline comments use symbols from the last pdf. + * + * b is the sum of the absolute budgets in the subtree. 1 for the root node. + * f is the sum of the absolute budgets of non-donating nodes in the subtree. + * t is the sum of the absolute budgets of donating nodes in the subtree. + * w is the weight of the node. w = w_f + w_t + * w_f is the non-donating portion of w. w_f = w * f / b + * w_b is the donating portion of w. w_t = w * t / b + * s is the sum of all sibling weights. s = Sum(w) for siblings + * s_f and s_t are the non-donating and donating portions of s. + * + * Subscript p denotes the parent's counterpart and ' the adjusted value - e.g. + * w_pt is the donating portion of the parent's weight and w'_pt the same value + * after adjustments. Subscript r denotes the root node's values. + */ static void transfer_surpluses(struct list_head *surpluses, struct ioc_now *now) { - struct ioc_gq *iocg; + LIST_HEAD(over_hwa); + LIST_HEAD(inner_walk); + struct ioc_gq *iocg, *tiocg, *root_iocg; + u32 after_sum, over_sum, over_target, gamma; + + /* + * It's pretty unlikely but possible for the total sum of + * hweight_after_donation's to be higher than WEIGHT_ONE, which will + * confuse the following calculations. If such condition is detected, + * scale down everyone over its full share equally to keep the sum below + * WEIGHT_ONE. + */ + after_sum = 0; + over_sum = 0; + list_for_each_entry(iocg, surpluses, surplus_list) { + u32 hwa; + + current_hweight(iocg, &hwa, NULL); + after_sum += iocg->hweight_after_donation; + + if (iocg->hweight_after_donation > hwa) { + over_sum += iocg->hweight_after_donation; + list_add(&iocg->walk_list, &over_hwa); + } + } + + if (after_sum >= WEIGHT_ONE) { + /* + * The delta should be deducted from the over_sum, calculate + * target over_sum value. + */ + u32 over_delta = after_sum - (WEIGHT_ONE - 1); + WARN_ON_ONCE(over_sum <= over_delta); + over_target = over_sum - over_delta; + } else { + over_target = 0; + } + + list_for_each_entry_safe(iocg, tiocg, &over_hwa, walk_list) { + if (over_target) + iocg->hweight_after_donation = + div_u64((u64)iocg->hweight_after_donation * + over_target, over_sum); + list_del_init(&iocg->walk_list); + } + + /* + * Build pre-order inner node walk list and prepare for donation + * adjustment calculations. + */ + list_for_each_entry(iocg, surpluses, surplus_list) { + iocg_build_inner_walk(iocg, &inner_walk); + } + + root_iocg = list_first_entry(&inner_walk, struct ioc_gq, walk_list); + WARN_ON_ONCE(root_iocg->level > 0); + list_for_each_entry(iocg, &inner_walk, walk_list) { + iocg->child_adjusted_sum = 0; + iocg->hweight_donating = 0; + iocg->hweight_after_donation = 0; + } + + /* + * Propagate the donating budget (b_t) and after donation budget (b'_t) + * up the hierarchy. 
+ */ list_for_each_entry(iocg, surpluses, surplus_list) { - u32 old_hwi, new_hwi, new_inuse; + struct ioc_gq *parent = iocg->ancestors[iocg->level - 1]; + + parent->hweight_donating += iocg->hweight_donating; + parent->hweight_after_donation += iocg->hweight_after_donation; + } - current_hweight(iocg, NULL, &old_hwi); - new_hwi = iocg->hweight_after_donation; + list_for_each_entry_reverse(iocg, &inner_walk, walk_list) { + if (iocg->level > 0) { + struct ioc_gq *parent = iocg->ancestors[iocg->level - 1]; - new_inuse = DIV64_U64_ROUND_UP((u64)iocg->inuse * new_hwi, - old_hwi); - __propagate_weights(iocg, iocg->weight, new_inuse); + parent->hweight_donating += iocg->hweight_donating; + parent->hweight_after_donation += iocg->hweight_after_donation; + } + } + + /* + * Calculate inner hwa's (b) and make sure the donation values are + * within the accepted ranges as we're doing low res calculations with + * roundups. + */ + list_for_each_entry(iocg, &inner_walk, walk_list) { + if (iocg->level) { + struct ioc_gq *parent = iocg->ancestors[iocg->level - 1]; + + iocg->hweight_active = DIV64_U64_ROUND_UP( + (u64)parent->hweight_active * iocg->active, + parent->child_active_sum); + + } + + iocg->hweight_donating = min(iocg->hweight_donating, + iocg->hweight_active); + iocg->hweight_after_donation = min(iocg->hweight_after_donation, + iocg->hweight_donating - 1); + if (WARN_ON_ONCE(iocg->hweight_active <= 1 || + iocg->hweight_donating <= 1 || + iocg->hweight_after_donation == 0)) { + pr_warn("iocg: invalid donation weights in "); + pr_cont_cgroup_path(iocg_to_blkg(iocg)->blkcg->css.cgroup); + pr_cont(": active=%u donating=%u after=%u\n", + iocg->hweight_active, iocg->hweight_donating, + iocg->hweight_after_donation); + } } + + /* + * Calculate the global donation rate (gamma) - the rate to adjust + * non-donating budgets by. No need to use 64bit multiplication here as + * the first operand is guaranteed to be smaller than WEIGHT_ONE + * (1<<16). + * + * gamma = (1 - t_r') / (1 - t_r) + */ + gamma = DIV_ROUND_UP( + (WEIGHT_ONE - root_iocg->hweight_after_donation) * WEIGHT_ONE, + WEIGHT_ONE - root_iocg->hweight_donating); + + /* + * Calculate adjusted hwi, child_adjusted_sum and inuse for the inner + * nodes. 
+ */ + list_for_each_entry(iocg, &inner_walk, walk_list) { + struct ioc_gq *parent; + u32 inuse, wpt, wptp; + u64 st, sf; + + if (iocg->level == 0) { + /* adjusted weight sum for 1st level: s' = s * b_pf / b'_pf */ + iocg->child_adjusted_sum = DIV64_U64_ROUND_UP( + iocg->child_active_sum * (WEIGHT_ONE - iocg->hweight_donating), + WEIGHT_ONE - iocg->hweight_after_donation); + continue; + } + + parent = iocg->ancestors[iocg->level - 1]; + + /* b' = gamma * b_f + b_t' */ + iocg->hweight_inuse = DIV64_U64_ROUND_UP( + (u64)gamma * (iocg->hweight_active - iocg->hweight_donating), + WEIGHT_ONE) + iocg->hweight_after_donation; + + /* w' = s' * b' / b'_p */ + inuse = DIV64_U64_ROUND_UP( + (u64)parent->child_adjusted_sum * iocg->hweight_inuse, + parent->hweight_inuse); + + /* adjusted weight sum for children: s' = s_f + s_t * w'_pt / w_pt */ + st = DIV64_U64_ROUND_UP( + iocg->child_active_sum * iocg->hweight_donating, + iocg->hweight_active); + sf = iocg->child_active_sum - st; + wpt = DIV64_U64_ROUND_UP( + (u64)iocg->active * iocg->hweight_donating, + iocg->hweight_active); + wptp = DIV64_U64_ROUND_UP( + (u64)inuse * iocg->hweight_after_donation, + iocg->hweight_inuse); + + iocg->child_adjusted_sum = sf + DIV64_U64_ROUND_UP(st * wptp, wpt); + } + + /* + * All inner nodes now have ->hweight_inuse and ->child_adjusted_sum and + * we can finally determine leaf adjustments. + */ + list_for_each_entry(iocg, surpluses, surplus_list) { + struct ioc_gq *parent = iocg->ancestors[iocg->level - 1]; + u32 inuse; + + /* w' = s' * b' / b'_p, note that b' == b'_t for donating leaves */ + inuse = DIV64_U64_ROUND_UP( + parent->child_adjusted_sum * iocg->hweight_after_donation, + parent->hweight_inuse); + __propagate_weights(iocg, iocg->active, inuse); + } + + /* walk list should be dissolved after use */ + list_for_each_entry_safe(iocg, tiocg, &inner_walk, walk_list) + list_del_init(&iocg->walk_list); } static void ioc_timer_fn(struct timer_list *timer) @@ -1705,16 +1939,18 @@ static void ioc_timer_fn(struct timer_list *timer) if (hw_inuse < hw_active || (!waitqueue_active(&iocg->waitq) && time_before64(vtime, now.vnow - ioc->margins.max))) { - u32 hwm, new_hwi; + u32 hwa, hwm, new_hwi; /* * Already donating or accumulated enough to start. * Determine the donation amount. */ + current_hweight(iocg, &hwa, NULL); hwm = current_hweight_max(iocg); new_hwi = hweight_after_donation(iocg, hwm, usage, &now); if (new_hwi < hwm) { + iocg->hweight_donating = hwa; iocg->hweight_after_donation = new_hwi; list_add(&iocg->surplus_list, &surpluses); } else { -- cgit v1.2.3 From f1de2439ec43b74764f2a26e3a310b24407e3bde Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 1 Sep 2020 14:52:49 -0400 Subject: blk-iocost: revamp donation amount determination iocost has various safety nets to combat inuse adjustment calculation inaccuracies. With Andy's method implemented in transfer_surpluses(), inuse adjustment calculations are now accurate and we can make donation amount determinations accurate too. * Stop keeping track of past usage history and using the maximum. Act on the immediate usage information. * Remove donation constraints defined by SURPLUS_* constants. Donate whatever isn't used. * Determine the donation amount so that the iocg will end up with MARGIN_TARGET_PCT budget at the end of the coming period assuming the same usage as the previous period. TARGET is set at 50% of period, which is the previous maximum. This provides smooth convergence for most repetitive IO patterns. * Apply donation logic early at 20% budget. 
There's no risk in doing so as the calculation is based on the delta between the current budget and the target budget at the end of the coming period. * Remove preemptive iocg activation for zero cost IOs. As donation can reach near zero now, the mere activation doesn't provide any protection anymore. In the unlikely case that this becomes a problem, the right solution is assigning appropriate costs for such IOs. This significantly improves the donation determination logic while also simplifying it. Now all donations are immediate, exact and smooth. Signed-off-by: Tejun Heo Cc: Andy Newell Signed-off-by: Jens Axboe --- block/blk-iocost.c | 133 ++++++++++++++++++++--------------------------------- 1 file changed, 51 insertions(+), 82 deletions(-) (limited to 'block') diff --git a/block/blk-iocost.c b/block/blk-iocost.c index ecc23b827e5d..694f1487208a 100644 --- a/block/blk-iocost.c +++ b/block/blk-iocost.c @@ -217,12 +217,14 @@ enum { MAX_PERIOD = USEC_PER_SEC, /* - * A cgroup's vtime can run 50% behind the device vtime, which + * iocg->vtime is targeted at 50% behind the device vtime, which * serves as its IO credit buffer. Surplus weight adjustment is * immediately canceled if the vtime margin runs below 10%. */ MARGIN_MIN_PCT = 10, - MARGIN_MAX_PCT = 50, + MARGIN_LOW_PCT = 20, + MARGIN_TARGET_PCT = 50, + MARGIN_MAX_PCT = 100, /* Have some play in timer operations */ TIMER_SLACK_PCT = 1, @@ -234,17 +236,6 @@ enum { */ VTIME_VALID_DUR = 300 * USEC_PER_SEC, - /* - * Remember the past three non-zero usages and use the max for - * surplus calculation. Three slots guarantee that we remember one - * full period usage from the last active stretch even after - * partial deactivation and re-activation periods. Don't start - * giving away weight before collecting two data points to prevent - * hweight adjustments based on one partial activation period. - */ - NR_USAGE_SLOTS = 3, - MIN_VALID_USAGES = 2, - /* 1/64k is granular enough and can easily be handled w/ u32 */ WEIGHT_ONE = 1 << 16, @@ -280,14 +271,6 @@ enum { /* don't let cmds which take a very long time pin lagging for too long */ MAX_LAGGING_PERIODS = 10, - /* - * If usage% * 1.25 + 2% is lower than hweight% by more than 3%, - * donate the surplus. 
- */ - SURPLUS_SCALE_PCT = 125, /* * 125% */ - SURPLUS_SCALE_ABS = WEIGHT_ONE / 50, /* + 2% */ - SURPLUS_MIN_ADJ_DELTA = WEIGHT_ONE / 33, /* 3% */ - /* switch iff the conditions are met for longer than this */ AUTOP_CYCLE_NSEC = 10LLU * NSEC_PER_SEC, @@ -376,6 +359,8 @@ struct ioc_params { struct ioc_margins { s64 min; + s64 low; + s64 target; s64 max; }; @@ -514,11 +499,7 @@ struct ioc_gq { struct iocg_stat desc_stat; struct iocg_stat last_stat; u64 last_stat_abs_vusage; - - /* usage is recorded as fractions of WEIGHT_ONE */ - u32 usage_delta_us; - int usage_idx; - u32 usages[NR_USAGE_SLOTS]; + u64 usage_delta_us; /* this iocg's depth in the hierarchy and ancestors including self */ int level; @@ -737,6 +718,8 @@ static void ioc_refresh_margins(struct ioc *ioc) u64 vrate = atomic64_read(&ioc->vtime_rate); margins->min = (period_us * MARGIN_MIN_PCT / 100) * vrate; + margins->low = (period_us * MARGIN_LOW_PCT / 100) * vrate; + margins->target = (period_us * MARGIN_TARGET_PCT / 100) * vrate; margins->max = (period_us * MARGIN_MAX_PCT / 100) * vrate; } @@ -1228,7 +1211,7 @@ static bool iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now) return false; } if (!atomic_read(&blkg->use_delay) && - time_before_eq64(vtime, now->vnow + ioc->margins.max)) + time_before_eq64(vtime, now->vnow + ioc->margins.target)) return false; /* use delay */ @@ -1527,7 +1510,7 @@ static u32 hweight_after_donation(struct ioc_gq *iocg, u32 hwm, u32 usage, { struct ioc *ioc = iocg->ioc; u64 vtime = atomic64_read(&iocg->vtime); - s64 excess; + s64 excess, delta, target, new_hwi; /* see whether minimum margin requirement is met */ if (waitqueue_active(&iocg->waitq) || @@ -1542,15 +1525,28 @@ static u32 hweight_after_donation(struct ioc_gq *iocg, u32 hwm, u32 usage, vtime += excess; } - /* add margin */ - usage = DIV_ROUND_UP(usage * SURPLUS_SCALE_PCT, 100); - usage += SURPLUS_SCALE_ABS; - - /* don't bother if the surplus is too small */ - if (usage + SURPLUS_MIN_ADJ_DELTA > hwm) - return hwm; + /* + * Let's say the distance between iocg's and device's vtimes as a + * fraction of period duration is delta. Assuming that the iocg will + * consume the usage determined above, we want to determine new_hwi so + * that delta equals MARGIN_TARGET at the end of the next period. + * + * We need to execute usage worth of IOs while spending the sum of the + * new budget (1 - MARGIN_TARGET) and the leftover from the last period + * (delta): + * + * usage = (1 - MARGIN_TARGET + delta) * new_hwi + * + * Therefore, the new_hwi is: + * + * new_hwi = usage / (1 - MARGIN_TARGET + delta) + */ + delta = div64_s64(WEIGHT_ONE * (now->vnow - vtime), + now->vnow - ioc->period_at_vtime); + target = WEIGHT_ONE * MARGIN_TARGET_PCT / 100; + new_hwi = div64_s64(WEIGHT_ONE * usage, WEIGHT_ONE - target + delta); - return usage; + return clamp_t(s64, new_hwi, 1, hwm); } /* @@ -1812,7 +1808,7 @@ static void ioc_timer_fn(struct timer_list *timer) u32 ppm_wthr = MILLION - ioc->params.qos[QOS_WPPM]; u32 missed_ppm[2], rq_wait_pct; u64 period_vtime; - int prev_busy_level, i; + int prev_busy_level; /* how were the latencies during the period? 
*/ ioc_lat_stat(ioc, missed_ppm, &rq_wait_pct); @@ -1857,11 +1853,10 @@ static void ioc_timer_fn(struct timer_list *timer) } commit_weights(ioc); - /* calc usages and see whether some weights need to be moved around */ + /* calc usage and see whether some weights need to be moved around */ list_for_each_entry(iocg, &ioc->active_iocgs, active_list) { - u64 vdone, vtime, usage_us; - u32 hw_active, hw_inuse, usage; - int uidx, nr_valid; + u64 vdone, vtime, usage_us, usage_dur; + u32 usage, hw_active, hw_inuse; /* * Collect unused and wind vtime closer to vnow to prevent @@ -1886,15 +1881,11 @@ static void ioc_timer_fn(struct timer_list *timer) nr_lagging++; /* - * Determine absolute usage factoring in pending and in-flight - * IOs to avoid stalls and high-latency completions appearing as - * idle. + * Determine absolute usage factoring in in-flight IOs to avoid + * high-latency completions appearing as idle. */ usage_us = iocg->usage_delta_us; - if (waitqueue_active(&iocg->waitq) && time_before64(vtime, now.vnow)) - usage_us += DIV64_U64_ROUND_UP( - cost_to_abs_cost(now.vnow - vtime, hw_inuse), - now.vrate); + if (vdone != vtime) { u64 inflight_us = DIV64_U64_ROUND_UP( cost_to_abs_cost(vtime - vdone, hw_inuse), @@ -1902,43 +1893,22 @@ static void ioc_timer_fn(struct timer_list *timer) usage_us = max(usage_us, inflight_us); } - /* convert to hweight based usage ratio and record */ - uidx = (iocg->usage_idx + 1) % NR_USAGE_SLOTS; - - if (time_after64(vtime, now.vnow - ioc->margins.min)) { - iocg->usage_idx = uidx; - iocg->usages[uidx] = WEIGHT_ONE; - } else if (usage_us) { - u64 started_at, dur; - - if (time_after64(iocg->activated_at, ioc->period_at)) - started_at = iocg->activated_at; - else - started_at = ioc->period_at; - - dur = max_t(u64, now.now - started_at, 1); + /* convert to hweight based usage ratio */ + if (time_after64(iocg->activated_at, ioc->period_at)) + usage_dur = max_t(u64, now.now - iocg->activated_at, 1); + else + usage_dur = max_t(u64, now.now - ioc->period_at, 1); - iocg->usage_idx = uidx; - iocg->usages[uidx] = clamp_t(u32, - DIV64_U64_ROUND_UP(usage_us * WEIGHT_ONE, dur), + usage = clamp_t(u32, + DIV64_U64_ROUND_UP(usage_us * WEIGHT_ONE, + usage_dur), 1, WEIGHT_ONE); - } - - /* base the decision on max historical usage */ - for (i = 0, usage = 0, nr_valid = 0; i < NR_USAGE_SLOTS; i++) { - if (iocg->usages[i]) { - usage = max(usage, iocg->usages[i]); - nr_valid++; - } - } - if (nr_valid < MIN_VALID_USAGES) - usage = WEIGHT_ONE; /* see whether there's surplus vtime */ WARN_ON_ONCE(!list_empty(&iocg->surplus_list)); if (hw_inuse < hw_active || (!waitqueue_active(&iocg->waitq) && - time_before64(vtime, now.vnow - ioc->margins.max))) { + time_before64(vtime, now.vnow - ioc->margins.low))) { u32 hwa, hwm, new_hwi; /* @@ -2175,15 +2145,14 @@ static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio) if (!ioc->enabled || !iocg->level) return; - /* always activate so that even 0 cost IOs get protected to some level */ - if (!iocg_activate(iocg, &now)) - return; - /* calculate the absolute vtime cost */ abs_cost = calc_vtime_cost(bio, iocg, false); if (!abs_cost) return; + if (!iocg_activate(iocg, &now)) + return; + iocg->cursor = bio_end_sector(bio); vtime = atomic64_read(&iocg->vtime); -- cgit v1.2.3 From b0853ab4a238c54b8f97ca7dde1ae156e2bbd5e4 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 1 Sep 2020 14:52:50 -0400 Subject: blk-iocost: revamp in-period donation snapbacks When the margin drops below the minimum on a donating iocg, donation is immediately 
canceled in full. There are a couple shortcomings with the current behavior. * It's abrupt. A small temporary budget deficit can lead to a wide swing in weight allocation and a large surplus. * It's open coded in the issue path but not implemented for the merge path. A series of merges at a low inuse can make the iocg incur debts and stall incorrectly. This patch reimplements in-period donation snapbacks so that * inuse adjustment and cost calculations are factored into adjust_inuse_and_calc_cost() which is called from both the issue and merge paths. * Snapbacks are more gradual. It occurs in quarter steps. * A snapback triggers if the margin goes below the low threshold and is lower than the budget at the time of the last adjustment. * For the above, __propagate_weights() stores the margin in iocg->saved_margin. Move iocg->last_inuse storing together into __propagate_weights() for consistency. * Full snapback is guaranteed when there are waiters. * With precise donation and gradual snapbacks, inuse adjustments are now a lot more effective and the value of scaling inuse on weight changes isn't clear. Removed inuse scaling from weight_update(). Signed-off-by: Tejun Heo Signed-off-by: Jens Axboe --- block/blk-iocost.c | 133 ++++++++++++++++++++++++++++++++++++++--------------- 1 file changed, 96 insertions(+), 37 deletions(-) (limited to 'block') diff --git a/block/blk-iocost.c b/block/blk-iocost.c index 694f1487208a..d09b4011449c 100644 --- a/block/blk-iocost.c +++ b/block/blk-iocost.c @@ -226,6 +226,8 @@ enum { MARGIN_TARGET_PCT = 50, MARGIN_MAX_PCT = 100, + INUSE_ADJ_STEP_PCT = 25, + /* Have some play in timer operations */ TIMER_SLACK_PCT = 1, @@ -443,12 +445,17 @@ struct ioc_gq { * * `last_inuse` remembers `inuse` while an iocg is idle to persist * surplus adjustments. + * + * `inuse` may be adjusted dynamically during period. `saved_*` are used + * to determine and track adjustments. */ u32 cfg_weight; u32 weight; u32 active; u32 inuse; + u32 last_inuse; + s64 saved_margin; sector_t cursor; /* to detect randio */ @@ -934,9 +941,11 @@ static void ioc_start_period(struct ioc *ioc, struct ioc_now *now) /* * Update @iocg's `active` and `inuse` to @active and @inuse, update level - * weight sums and propagate upwards accordingly. + * weight sums and propagate upwards accordingly. If @save, the current margin + * is saved to be used as reference for later inuse in-period adjustments. 
*/ -static void __propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse) +static void __propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse, + bool save, struct ioc_now *now) { struct ioc *ioc = iocg->ioc; int lvl; @@ -945,6 +954,10 @@ static void __propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse) inuse = clamp_t(u32, inuse, 1, active); + iocg->last_inuse = iocg->inuse; + if (save) + iocg->saved_margin = now->vnow - atomic64_read(&iocg->vtime); + if (active == iocg->active && inuse == iocg->inuse) return; @@ -996,9 +1009,10 @@ static void commit_weights(struct ioc *ioc) } } -static void propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse) +static void propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse, + bool save, struct ioc_now *now) { - __propagate_weights(iocg, active, inuse); + __propagate_weights(iocg, active, inuse, save, now); commit_weights(iocg->ioc); } @@ -1082,7 +1096,7 @@ static u32 current_hweight_max(struct ioc_gq *iocg) return max_t(u32, hwm, 1); } -static void weight_updated(struct ioc_gq *iocg) +static void weight_updated(struct ioc_gq *iocg, struct ioc_now *now) { struct ioc *ioc = iocg->ioc; struct blkcg_gq *blkg = iocg_to_blkg(iocg); @@ -1093,9 +1107,7 @@ static void weight_updated(struct ioc_gq *iocg) weight = iocg->cfg_weight ?: iocc->dfl_weight; if (weight != iocg->weight && iocg->active) - propagate_weights(iocg, weight, - DIV64_U64_ROUND_UP((u64)iocg->inuse * weight, - iocg->weight)); + propagate_weights(iocg, weight, iocg->inuse, true, now); iocg->weight = weight; } @@ -1165,8 +1177,9 @@ static bool iocg_activate(struct ioc_gq *iocg, struct ioc_now *now) */ iocg->hweight_gen = atomic_read(&ioc->hweight_gen) - 1; list_add(&iocg->active_list, &ioc->active_iocgs); + propagate_weights(iocg, iocg->weight, - iocg->last_inuse ?: iocg->weight); + iocg->last_inuse ?: iocg->weight, true, now); TRACE_IOCG_PATH(iocg_activate, iocg, now, last_period, cur_period, vtime); @@ -1789,7 +1802,7 @@ static void transfer_surpluses(struct list_head *surpluses, struct ioc_now *now) inuse = DIV64_U64_ROUND_UP( parent->child_adjusted_sum * iocg->hweight_after_donation, parent->hweight_inuse); - __propagate_weights(iocg, iocg->active, inuse); + __propagate_weights(iocg, iocg->active, inuse, true, now); } /* walk list should be dissolved after use */ @@ -1844,8 +1857,7 @@ static void ioc_timer_fn(struct timer_list *timer) iocg_kick_waitq(iocg, true, &now); } else if (iocg_is_idle(iocg)) { /* no waiter and idle, deactivate */ - iocg->last_inuse = iocg->inuse; - __propagate_weights(iocg, 0, 0); + __propagate_weights(iocg, 0, 0, false, &now); list_del_init(&iocg->active_list); } @@ -1925,7 +1937,7 @@ static void ioc_timer_fn(struct timer_list *timer) list_add(&iocg->surplus_list, &surpluses); } else { __propagate_weights(iocg, iocg->active, - iocg->active); + iocg->active, true, &now); nr_shortages++; } } else { @@ -2055,6 +2067,50 @@ static void ioc_timer_fn(struct timer_list *timer) spin_unlock_irq(&ioc->lock); } +static u64 adjust_inuse_and_calc_cost(struct ioc_gq *iocg, u64 vtime, + u64 abs_cost, struct ioc_now *now) +{ + struct ioc *ioc = iocg->ioc; + struct ioc_margins *margins = &ioc->margins; + u32 adj_step = DIV_ROUND_UP(iocg->active * INUSE_ADJ_STEP_PCT, 100); + u32 hwi; + s64 margin; + u64 cost, new_inuse; + + current_hweight(iocg, NULL, &hwi); + cost = abs_cost_to_cost(abs_cost, hwi); + margin = now->vnow - vtime - cost; + + /* + * We only increase inuse during period and do so iff the margin has + * deteriorated since the previous 
adjustment. + */ + if (margin >= iocg->saved_margin || margin >= margins->low || + iocg->inuse == iocg->active) + return cost; + + spin_lock_irq(&ioc->lock); + + /* we own inuse only when @iocg is in the normal active state */ + if (list_empty(&iocg->active_list)) { + spin_unlock_irq(&ioc->lock); + return cost; + } + + /* bump up inuse till @abs_cost fits in the existing budget */ + new_inuse = iocg->inuse; + do { + new_inuse = new_inuse + adj_step; + propagate_weights(iocg, iocg->active, new_inuse, true, now); + current_hweight(iocg, NULL, &hwi); + cost = abs_cost_to_cost(abs_cost, hwi); + } while (time_after64(vtime + cost, now->vnow) && + iocg->inuse != iocg->active); + + spin_unlock_irq(&ioc->lock); + return cost; +} + static void calc_vtime_cost_builtin(struct bio *bio, struct ioc_gq *iocg, bool is_merge, u64 *costp) { @@ -2136,7 +2192,6 @@ static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio) struct ioc_gq *iocg = blkg_to_iocg(blkg); struct ioc_now now; struct iocg_wait wait; - u32 hw_active, hw_inuse; u64 abs_cost, cost, vtime; bool use_debt, ioc_locked; unsigned long flags; @@ -2154,21 +2209,8 @@ static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio) return; iocg->cursor = bio_end_sector(bio); - vtime = atomic64_read(&iocg->vtime); - current_hweight(iocg, &hw_active, &hw_inuse); - - if (hw_inuse < hw_active && - time_after_eq64(vtime + ioc->margins.min, now.vnow)) { - TRACE_IOCG_PATH(inuse_reset, iocg, &now, - iocg->inuse, iocg->weight, hw_inuse, hw_active); - spin_lock_irq(&ioc->lock); - propagate_weights(iocg, iocg->weight, iocg->weight); - spin_unlock_irq(&ioc->lock); - current_hweight(iocg, &hw_active, &hw_inuse); - } - - cost = abs_cost_to_cost(abs_cost, hw_inuse); + cost = adjust_inuse_and_calc_cost(iocg, vtime, abs_cost, &now); /* * If no one's waiting and within budget, issue right away. The @@ -2190,7 +2232,7 @@ static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio) */ use_debt = bio_issue_as_root_blkg(bio) || fatal_signal_pending(current); ioc_locked = use_debt || READ_ONCE(iocg->abs_vdebt); - +retry_lock: iocg_lock(iocg, ioc_locked, &flags); /* @@ -2232,6 +2274,17 @@ static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio) return; } + /* guarantee that iocgs w/ waiters have maximum inuse */ + if (iocg->inuse != iocg->active) { + if (!ioc_locked) { + iocg_unlock(iocg, false, &flags); + ioc_locked = true; + goto retry_lock; + } + propagate_weights(iocg, iocg->active, iocg->active, true, + &now); + } + /* * Append self to the waitq and schedule the wakeup timer if we're * the first waiter. 
The timer duration is calculated based on the @@ -2274,8 +2327,7 @@ static void ioc_rqos_merge(struct rq_qos *rqos, struct request *rq, struct ioc *ioc = iocg->ioc; sector_t bio_end = bio_end_sector(bio); struct ioc_now now; - u32 hw_inuse; - u64 abs_cost, cost; + u64 vtime, abs_cost, cost; unsigned long flags; /* bypass if disabled or for root cgroup */ @@ -2287,8 +2339,9 @@ static void ioc_rqos_merge(struct rq_qos *rqos, struct request *rq, return; ioc_now(ioc, &now); - current_hweight(iocg, NULL, &hw_inuse); - cost = abs_cost_to_cost(abs_cost, hw_inuse); + + vtime = atomic64_read(&iocg->vtime); + cost = adjust_inuse_and_calc_cost(iocg, vtime, abs_cost, &now); /* update cursor if backmerging into the request at the cursor */ if (blk_rq_pos(rq) < bio_end && @@ -2530,7 +2583,7 @@ static void ioc_pd_init(struct blkg_policy_data *pd) } spin_lock_irqsave(&ioc->lock, flags); - weight_updated(iocg); + weight_updated(iocg, &now); spin_unlock_irqrestore(&ioc->lock, flags); } @@ -2544,7 +2597,10 @@ static void ioc_pd_free(struct blkg_policy_data *pd) spin_lock_irqsave(&ioc->lock, flags); if (!list_empty(&iocg->active_list)) { - propagate_weights(iocg, 0, 0); + struct ioc_now now; + + ioc_now(ioc, &now); + propagate_weights(iocg, 0, 0, false, &now); list_del_init(&iocg->active_list); } @@ -2612,6 +2668,7 @@ static ssize_t ioc_weight_write(struct kernfs_open_file *of, char *buf, struct blkcg *blkcg = css_to_blkcg(of_css(of)); struct ioc_cgrp *iocc = blkcg_to_iocc(blkcg); struct blkg_conf_ctx ctx; + struct ioc_now now; struct ioc_gq *iocg; u32 v; int ret; @@ -2632,7 +2689,8 @@ static ssize_t ioc_weight_write(struct kernfs_open_file *of, char *buf, if (iocg) { spin_lock_irq(&iocg->ioc->lock); - weight_updated(iocg); + ioc_now(iocg->ioc, &now); + weight_updated(iocg, &now); spin_unlock_irq(&iocg->ioc->lock); } } @@ -2658,7 +2716,8 @@ static ssize_t ioc_weight_write(struct kernfs_open_file *of, char *buf, spin_lock(&iocg->ioc->lock); iocg->cfg_weight = v * WEIGHT_ONE; - weight_updated(iocg); + ioc_now(iocg->ioc, &now); + weight_updated(iocg, &now); spin_unlock(&iocg->ioc->lock); blkg_conf_finish(&ctx); -- cgit v1.2.3 From c421a3eb2e27402c14131a14390551ae6cdb15fa Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 1 Sep 2020 14:52:51 -0400 Subject: blk-iocost: revamp debt handling Debt handling had several issues. * How much inuse a debtor carries wasn't clearly defined. inuse would be driven down over time from not issuing IOs but it'd be better to clamp it to minimum immediately once in debt. * How much can be paid off was determined by hweight_inuse. As inuse was driven down, the payment amount would fall together regardless of the debtor's active weight. This means that the debtors were punished harshly. * ioc_rqos_merge() wasn't calling blkcg_schedule_throttle() after iocg_kick_delay(). This patch revamps debt handling so that * Debt handling owns inuse for iocgs in debt and keeps them at zero. * Payment amount is determined by hweight_active. This is more deterministic and safer than hweight_inuse but still far from ideal in that it doesn't factor in possible donations from other iocgs for debt payments. This likely needs further improvements in the future. * iocg_rqos_merge() now calls blkcg_schedule_throttle() as necessary. 
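As a rough worked example of the second point (numbers hypothetical): the absolute debt a period's budget of vbudget vtime can retire is cost_to_abs_cost(vbudget, hw) = vbudget * hw / WEIGHT_ONE. A debtor entitled to hw_active = 25% but clamped to minimum inuse has hw_inuse near zero, so sizing payments with hw_inuse retires almost nothing and the debtor can stay blocked indefinitely, while sizing them with hw_active retires a quarter of vbudget's worth of absolute cost every period no matter how far inuse has been driven down, matching the iocg_kick_waitq() changes in the diff below.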
Signed-off-by: Tejun Heo Cc: Andy Newell Signed-off-by: Jens Axboe --- block/blk-iocost.c | 117 ++++++++++++++++++++++++++++++++++++++++++----------- 1 file changed, 93 insertions(+), 24 deletions(-) (limited to 'block') diff --git a/block/blk-iocost.c b/block/blk-iocost.c index d09b4011449c..d2b69d87f3e7 100644 --- a/block/blk-iocost.c +++ b/block/blk-iocost.c @@ -1206,13 +1206,13 @@ static bool iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now) struct blkcg_gq *blkg = iocg_to_blkg(iocg); u64 vtime = atomic64_read(&iocg->vtime); u64 delta_ns, expires, oexpires; - u32 hw_inuse; + u32 hwa; lockdep_assert_held(&iocg->waitq.lock); /* debt-adjust vtime */ - current_hweight(iocg, NULL, &hw_inuse); - vtime += abs_cost_to_cost(iocg->abs_vdebt, hw_inuse); + current_hweight(iocg, &hwa, NULL); + vtime += abs_cost_to_cost(iocg->abs_vdebt, hwa); /* * Clear or maintain depending on the overage. Non-zero vdebt is what @@ -1258,6 +1258,47 @@ static enum hrtimer_restart iocg_delay_timer_fn(struct hrtimer *timer) return HRTIMER_NORESTART; } +static void iocg_incur_debt(struct ioc_gq *iocg, u64 abs_cost, + struct ioc_now *now) +{ + struct iocg_pcpu_stat *gcs; + + lockdep_assert_held(&iocg->ioc->lock); + lockdep_assert_held(&iocg->waitq.lock); + WARN_ON_ONCE(list_empty(&iocg->active_list)); + + /* + * Once in debt, debt handling owns inuse. @iocg stays at the minimum + * inuse donating all of it share to others until its debt is paid off. + */ + if (!iocg->abs_vdebt && abs_cost) + propagate_weights(iocg, iocg->active, 0, false, now); + + iocg->abs_vdebt += abs_cost; + + gcs = get_cpu_ptr(iocg->pcpu_stat); + local64_add(abs_cost, &gcs->abs_vusage); + put_cpu_ptr(gcs); +} + +static void iocg_pay_debt(struct ioc_gq *iocg, u64 abs_vpay, + struct ioc_now *now) +{ + lockdep_assert_held(&iocg->ioc->lock); + lockdep_assert_held(&iocg->waitq.lock); + + /* make sure that nobody messed with @iocg */ + WARN_ON_ONCE(list_empty(&iocg->active_list)); + WARN_ON_ONCE(iocg->inuse > 1); + + iocg->abs_vdebt -= min(abs_vpay, iocg->abs_vdebt); + + /* if debt is paid in full, restore inuse */ + if (!iocg->abs_vdebt) + propagate_weights(iocg, iocg->active, iocg->last_inuse, + false, now); +} + static int iocg_wake_fn(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key) { @@ -1296,26 +1337,25 @@ static void iocg_kick_waitq(struct ioc_gq *iocg, bool pay_debt, struct iocg_wake_ctx ctx = { .iocg = iocg }; u64 vshortage, expires, oexpires; s64 vbudget; - u32 hw_inuse; + u32 hwa; lockdep_assert_held(&iocg->waitq.lock); - current_hweight(iocg, NULL, &hw_inuse); + current_hweight(iocg, &hwa, NULL); vbudget = now->vnow - atomic64_read(&iocg->vtime); /* pay off debt */ if (pay_debt && iocg->abs_vdebt && vbudget > 0) { - u64 vdebt = abs_cost_to_cost(iocg->abs_vdebt, hw_inuse); - u64 delta = min_t(u64, vbudget, vdebt); - u64 abs_delta = min(cost_to_abs_cost(delta, hw_inuse), - iocg->abs_vdebt); + u64 abs_vbudget = cost_to_abs_cost(vbudget, hwa); + u64 abs_vpay = min_t(u64, abs_vbudget, iocg->abs_vdebt); + u64 vpay = abs_cost_to_cost(abs_vpay, hwa); lockdep_assert_held(&ioc->lock); - atomic64_add(delta, &iocg->vtime); - atomic64_add(delta, &iocg->done_vtime); - iocg->abs_vdebt -= abs_delta; - vbudget -= vdebt; + atomic64_add(vpay, &iocg->vtime); + atomic64_add(vpay, &iocg->done_vtime); + iocg_pay_debt(iocg, abs_vpay, now); + vbudget -= vpay; iocg_kick_delay(iocg, now); } @@ -1327,17 +1367,20 @@ static void iocg_kick_waitq(struct ioc_gq *iocg, bool pay_debt, * not positive. 
*/ if (iocg->abs_vdebt) { - s64 vdebt = abs_cost_to_cost(iocg->abs_vdebt, hw_inuse); + s64 vdebt = abs_cost_to_cost(iocg->abs_vdebt, hwa); vbudget = min_t(s64, 0, vbudget - vdebt); } /* - * Wake up the ones which are due and see how much vtime we'll need - * for the next one. + * Wake up the ones which are due and see how much vtime we'll need for + * the next one. As paying off debt restores hw_inuse, it must be read + * after the above debt payment. */ - ctx.hw_inuse = hw_inuse; ctx.vbudget = vbudget; + current_hweight(iocg, NULL, &ctx.hw_inuse); + __wake_up_locked_key(&iocg->waitq, TASK_NORMAL, &ctx); + if (!waitqueue_active(&iocg->waitq)) return; if (WARN_ON_ONCE(ctx.vbudget >= 0)) @@ -1525,6 +1568,10 @@ static u32 hweight_after_donation(struct ioc_gq *iocg, u32 hwm, u32 usage, u64 vtime = atomic64_read(&iocg->vtime); s64 excess, delta, target, new_hwi; + /* debt handling owns inuse for debtors */ + if (iocg->abs_vdebt) + return 1; + /* see whether minimum margin requirement is met */ if (waitqueue_active(&iocg->waitq) || time_after64(vtime, now->vnow - ioc->margins.min)) @@ -1798,6 +1845,18 @@ static void transfer_surpluses(struct list_head *surpluses, struct ioc_now *now) struct ioc_gq *parent = iocg->ancestors[iocg->level - 1]; u32 inuse; + /* + * In-debt iocgs participated in the donation calculation with + * the minimum target hweight_inuse. Configuring inuse + * accordingly would work fine but debt handling expects + * @iocg->inuse stay at the minimum and we don't wanna + * interfere. + */ + if (iocg->abs_vdebt) { + WARN_ON_ONCE(iocg->inuse > 1); + continue; + } + /* w' = s' * b' / b'_p, note that b' == b'_t for donating leaves */ inuse = DIV64_U64_ROUND_UP( parent->child_adjusted_sum * iocg->hweight_after_donation, @@ -2081,6 +2140,10 @@ static u64 adjust_inuse_and_calc_cost(struct ioc_gq *iocg, u64 vtime, cost = abs_cost_to_cost(abs_cost, hwi); margin = now->vnow - vtime - cost; + /* debt handling owns inuse for debtors */ + if (iocg->abs_vdebt) + return cost; + /* * We only increase inuse during period and do so iff the margin has * deteriorated since the previous adjustment. @@ -2092,7 +2155,7 @@ static u64 adjust_inuse_and_calc_cost(struct ioc_gq *iocg, u64 vtime, spin_lock_irq(&ioc->lock); /* we own inuse only when @iocg is in the normal active state */ - if (list_empty(&iocg->active_list)) { + if (iocg->abs_vdebt || list_empty(&iocg->active_list)) { spin_unlock_irq(&ioc->lock); return cost; } @@ -2266,7 +2329,7 @@ retry_lock: * penalizing the cgroup and its descendants. */ if (use_debt) { - iocg->abs_vdebt += abs_cost; + iocg_incur_debt(iocg, abs_cost, &now); if (iocg_kick_delay(iocg, &now)) blkcg_schedule_throttle(rqos->q, (bio->bi_opf & REQ_SWAP) == REQ_SWAP); @@ -2275,7 +2338,7 @@ retry_lock: } /* guarantee that iocgs w/ waiters have maximum inuse */ - if (iocg->inuse != iocg->active) { + if (!iocg->abs_vdebt && iocg->inuse != iocg->active) { if (!ioc_locked) { iocg_unlock(iocg, false, &flags); ioc_locked = true; @@ -2363,14 +2426,20 @@ static void ioc_rqos_merge(struct rq_qos *rqos, struct request *rq, * be for the vast majority of cases. See debt handling in * ioc_rqos_throttle() for details. 
*/ - spin_lock_irqsave(&iocg->waitq.lock, flags); + spin_lock_irqsave(&ioc->lock, flags); + spin_lock(&iocg->waitq.lock); + if (likely(!list_empty(&iocg->active_list))) { - iocg->abs_vdebt += abs_cost; - iocg_kick_delay(iocg, &now); + iocg_incur_debt(iocg, abs_cost, &now); + if (iocg_kick_delay(iocg, &now)) + blkcg_schedule_throttle(rqos->q, + (bio->bi_opf & REQ_SWAP) == REQ_SWAP); } else { iocg_commit_bio(iocg, bio, abs_cost, cost); } - spin_unlock_irqrestore(&iocg->waitq.lock, flags); + + spin_unlock(&iocg->waitq.lock); + spin_unlock_irqrestore(&ioc->lock, flags); } static void ioc_rqos_done_bio(struct rq_qos *rqos, struct bio *bio) -- cgit v1.2.3 From 5160a5a53c0c4ae3708959d9465ea43ad5d90542 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 1 Sep 2020 14:52:52 -0400 Subject: blk-iocost: implement delay adjustment hysteresis Curently, iocost syncs the delay duration to the outstanding debt amount, which seemed enough to protect the system from anon memory hogs. However, that was mostly because the delay calcuation was using hweight_inuse which quickly converges towards zero under debt for delay duration calculation, often pusnishing debtors overly harshly for longer than deserved. The previous patch fixed the delay calcuation and now the protection against anonymous memory hogs isn't enough because the effect of delay is indirect and non-linear and a huge amount of future debt can accumulate abruptly while unthrottled. This patch implements delay hysteresis so that delay is decayed exponentially over time instead of getting cleared immediately as debt is paid off. While the overall behavior is similar to the blk-cgroup implementation used by blk-iolatency, a lot of the details are different and due to the empirical nature of the mechanism, it's challenging to adapt the mechanism for one controller without negatively impacting the other. As the delay is gradually decayed now, there's no point in running it from its own hrtimer. Periodic updates are now performed from ioc_timer_fn() and the dedicated hrtimer is removed. Signed-off-by: Tejun Heo Cc: Josef Bacik Signed-off-by: Jens Axboe --- block/blk-cgroup.c | 23 ++++++++--- block/blk-iocost.c | 119 +++++++++++++++++++++++++++++++---------------------- 2 files changed, 86 insertions(+), 56 deletions(-) (limited to 'block') diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index c195365c9817..d33dd6be1d9c 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -1613,16 +1613,24 @@ static void blkcg_scale_delay(struct blkcg_gq *blkg, u64 now) static void blkcg_maybe_throttle_blkg(struct blkcg_gq *blkg, bool use_memdelay) { unsigned long pflags; + bool clamp; u64 now = ktime_to_ns(ktime_get()); u64 exp; u64 delay_nsec = 0; int tok; while (blkg->parent) { - if (atomic_read(&blkg->use_delay)) { + int use_delay = atomic_read(&blkg->use_delay); + + if (use_delay) { + u64 this_delay; + blkcg_scale_delay(blkg, now); - delay_nsec = max_t(u64, delay_nsec, - atomic64_read(&blkg->delay_nsec)); + this_delay = atomic64_read(&blkg->delay_nsec); + if (this_delay > delay_nsec) { + delay_nsec = this_delay; + clamp = use_delay > 0; + } } blkg = blkg->parent; } @@ -1634,10 +1642,13 @@ static void blkcg_maybe_throttle_blkg(struct blkcg_gq *blkg, bool use_memdelay) * Let's not sleep for all eternity if we've amassed a huge delay. * Swapping or metadata IO can accumulate 10's of seconds worth of * delay, and we want userspace to be able to do _something_ so cap the - * delays at 1 second. 
If there's 10's of seconds worth of delay then - * the tasks will be delayed for 1 second for every syscall. + * delays at 0.25s. If there's 10's of seconds worth of delay then the + * tasks will be delayed for 0.25 second for every syscall. If + * blkcg_set_delay() was used as indicated by negative use_delay, the + * caller is responsible for regulating the range. */ - delay_nsec = min_t(u64, delay_nsec, 250 * NSEC_PER_MSEC); + if (clamp) + delay_nsec = min_t(u64, delay_nsec, 250 * NSEC_PER_MSEC); if (use_memdelay) psi_memstall_enter(&pflags); diff --git a/block/blk-iocost.c b/block/blk-iocost.c index d2b69d87f3e7..9cb8f29f01f5 100644 --- a/block/blk-iocost.c +++ b/block/blk-iocost.c @@ -270,6 +270,31 @@ enum { /* unbusy hysterisis */ UNBUSY_THR_PCT = 75, + /* + * The effect of delay is indirect and non-linear and a huge amount of + * future debt can accumulate abruptly while unthrottled. Linearly scale + * up delay as debt is going up and then let it decay exponentially. + * This gives us quick ramp ups while delay is accumulating and long + * tails which can help reducing the frequency of debt explosions on + * unthrottle. The parameters are experimentally determined. + * + * The delay mechanism provides adequate protection and behavior in many + * cases. However, this is far from ideal and falls shorts on both + * fronts. The debtors are often throttled too harshly costing a + * significant level of fairness and possibly total work while the + * protection against their impacts on the system can be choppy and + * unreliable. + * + * The shortcoming primarily stems from the fact that, unlike for page + * cache, the kernel doesn't have well-defined back-pressure propagation + * mechanism and policies for anonymous memory. Fully addressing this + * issue will likely require substantial improvements in the area. + */ + MIN_DELAY_THR_PCT = 500, + MAX_DELAY_THR_PCT = 25000, + MIN_DELAY = 250, + MAX_DELAY = 250 * USEC_PER_MSEC, + /* don't let cmds which take a very long time pin lagging for too long */ MAX_LAGGING_PERIODS = 10, @@ -473,6 +498,10 @@ struct ioc_gq { atomic64_t done_vtime; u64 abs_vdebt; + /* current delay in effect and when it started */ + u64 delay; + u64 delay_at; + /* * The period this iocg was last active in. Used for deactivation * and invalidating `vtime`. 
@@ -495,7 +524,6 @@ struct ioc_gq { struct wait_queue_head waitq; struct hrtimer waitq_timer; - struct hrtimer delay_timer; /* timestamp at the latest activation */ u64 activated_at; @@ -1204,58 +1232,50 @@ static bool iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now) { struct ioc *ioc = iocg->ioc; struct blkcg_gq *blkg = iocg_to_blkg(iocg); - u64 vtime = atomic64_read(&iocg->vtime); - u64 delta_ns, expires, oexpires; + u64 tdelta, delay, new_delay; + s64 vover, vover_pct; u32 hwa; lockdep_assert_held(&iocg->waitq.lock); - /* debt-adjust vtime */ + /* calculate the current delay in effect - 1/2 every second */ + tdelta = now->now - iocg->delay_at; + if (iocg->delay) + delay = iocg->delay >> div64_u64(tdelta, USEC_PER_SEC); + else + delay = 0; + + /* calculate the new delay from the debt amount */ current_hweight(iocg, &hwa, NULL); - vtime += abs_cost_to_cost(iocg->abs_vdebt, hwa); + vover = atomic64_read(&iocg->vtime) + + abs_cost_to_cost(iocg->abs_vdebt, hwa) - now->vnow; + vover_pct = div64_s64(100 * vover, ioc->period_us * now->vrate); + + if (vover_pct <= MIN_DELAY_THR_PCT) + new_delay = 0; + else if (vover_pct >= MAX_DELAY_THR_PCT) + new_delay = MAX_DELAY; + else + new_delay = MIN_DELAY + + div_u64((MAX_DELAY - MIN_DELAY) * + (vover_pct - MIN_DELAY_THR_PCT), + MAX_DELAY_THR_PCT - MIN_DELAY_THR_PCT); - /* - * Clear or maintain depending on the overage. Non-zero vdebt is what - * guarantees that @iocg is online and future iocg_kick_delay() will - * clear use_delay. Don't leave it on when there's no vdebt. - */ - if (!iocg->abs_vdebt || time_before_eq64(vtime, now->vnow)) { - blkcg_clear_delay(blkg); - return false; + /* pick the higher one and apply */ + if (new_delay > delay) { + iocg->delay = new_delay; + iocg->delay_at = now->now; + delay = new_delay; } - if (!atomic_read(&blkg->use_delay) && - time_before_eq64(vtime, now->vnow + ioc->margins.target)) - return false; - - /* use delay */ - delta_ns = DIV64_U64_ROUND_UP(vtime - now->vnow, - now->vrate) * NSEC_PER_USEC; - blkcg_set_delay(blkg, delta_ns); - expires = now->now_ns + delta_ns; - /* if already active and close enough, don't bother */ - oexpires = ktime_to_ns(hrtimer_get_softexpires(&iocg->delay_timer)); - if (hrtimer_is_queued(&iocg->delay_timer) && - abs(oexpires - expires) <= ioc->timer_slack_ns) + if (delay >= MIN_DELAY) { + blkcg_set_delay(blkg, delay * NSEC_PER_USEC); return true; - - hrtimer_start_range_ns(&iocg->delay_timer, ns_to_ktime(expires), - ioc->timer_slack_ns, HRTIMER_MODE_ABS); - return true; -} - -static enum hrtimer_restart iocg_delay_timer_fn(struct hrtimer *timer) -{ - struct ioc_gq *iocg = container_of(timer, struct ioc_gq, delay_timer); - struct ioc_now now; - unsigned long flags; - - spin_lock_irqsave(&iocg->waitq.lock, flags); - ioc_now(iocg->ioc, &now); - iocg_kick_delay(iocg, &now); - spin_unlock_irqrestore(&iocg->waitq.lock, flags); - - return HRTIMER_NORESTART; + } else { + iocg->delay = 0; + blkcg_clear_delay(blkg); + return false; + } } static void iocg_incur_debt(struct ioc_gq *iocg, u64 abs_cost, @@ -1356,9 +1376,10 @@ static void iocg_kick_waitq(struct ioc_gq *iocg, bool pay_debt, atomic64_add(vpay, &iocg->done_vtime); iocg_pay_debt(iocg, abs_vpay, now); vbudget -= vpay; + } + if (iocg->abs_vdebt || iocg->delay) iocg_kick_delay(iocg, now); - } /* * Debt can still be outstanding if we haven't paid all yet or the @@ -1906,12 +1927,13 @@ static void ioc_timer_fn(struct timer_list *timer) */ list_for_each_entry_safe(iocg, tiocg, &ioc->active_iocgs, active_list) { if 
(!waitqueue_active(&iocg->waitq) && !iocg->abs_vdebt && - !iocg_is_idle(iocg)) + !iocg->delay && !iocg_is_idle(iocg)) continue; spin_lock(&iocg->waitq.lock); - if (waitqueue_active(&iocg->waitq) || iocg->abs_vdebt) { + if (waitqueue_active(&iocg->waitq) || iocg->abs_vdebt || + iocg->delay) { /* might be oversleeping vtime / hweight changes, kick */ iocg_kick_waitq(iocg, true, &now); } else if (iocg_is_idle(iocg)) { @@ -2641,8 +2663,6 @@ static void ioc_pd_init(struct blkg_policy_data *pd) init_waitqueue_head(&iocg->waitq); hrtimer_init(&iocg->waitq_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); iocg->waitq_timer.function = iocg_waitq_timer_fn; - hrtimer_init(&iocg->delay_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); - iocg->delay_timer.function = iocg_delay_timer_fn; iocg->level = blkg->blkcg->css.cgroup->level; @@ -2679,7 +2699,6 @@ static void ioc_pd_free(struct blkg_policy_data *pd) spin_unlock_irqrestore(&ioc->lock, flags); hrtimer_cancel(&iocg->waitq_timer); - hrtimer_cancel(&iocg->delay_timer); } free_percpu(iocg->pcpu_stat); kfree(iocg); -- cgit v1.2.3 From dda1315f18536cb94e293a3ae0ccb5c4df53e5e4 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 1 Sep 2020 14:52:53 -0400 Subject: blk-iocost: halve debts if device stays idle A low weight iocg can amass a large amount of debt, for example, when anonymous memory gets reclaimed aggressively. If the system has a lot of memory paired with a slow IO device, the debt can span multiple seconds or more. If there are no other subsequent IO issuers, the in-debt iocg may end up blocked paying its debt while the IO device is idle. This patch implements a mechanism to protect against such pathological cases. If the device has been sufficiently idle for a substantial amount of time, the debts are halved. The criteria are on the conservative side as we want to resolve the rare extreme cases without impacting regular operation by forgiving debts too readily. Signed-off-by: Tejun Heo Signed-off-by: Jens Axboe --- block/blk-iocost.c | 49 ++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 48 insertions(+), 1 deletion(-) (limited to 'block') diff --git a/block/blk-iocost.c b/block/blk-iocost.c index 9cb8f29f01f5..2a95a081cf44 100644 --- a/block/blk-iocost.c +++ b/block/blk-iocost.c @@ -295,6 +295,13 @@ enum { MIN_DELAY = 250, MAX_DELAY = 250 * USEC_PER_MSEC, + /* + * Halve debts if total usage keeps staying under 25% w/o any shortages + * for over 100ms. 
+ */ + DEBT_BUSY_USAGE_PCT = 25, + DEBT_REDUCTION_IDLE_DUR = 100 * USEC_PER_MSEC, + /* don't let cmds which take a very long time pin lagging for too long */ MAX_LAGGING_PERIODS = 10, @@ -436,6 +443,9 @@ struct ioc { bool weights_updated; atomic_t hweight_gen; /* for lazy hweights */ + /* the last time debt cancel condition wasn't met */ + u64 debt_busy_at; + u64 autop_too_fast_at; u64 autop_too_slow_at; int autop_idx; @@ -1216,6 +1226,7 @@ static bool iocg_activate(struct ioc_gq *iocg, struct ioc_now *now) if (ioc->running == IOC_IDLE) { ioc->running = IOC_RUNNING; + ioc->debt_busy_at = now->now; ioc_start_period(ioc, now); } @@ -1896,7 +1907,8 @@ static void ioc_timer_fn(struct timer_list *timer) struct ioc_gq *iocg, *tiocg; struct ioc_now now; LIST_HEAD(surpluses); - int nr_shortages = 0, nr_lagging = 0; + int nr_debtors = 0, nr_shortages = 0, nr_lagging = 0; + u64 usage_us_sum = 0; u32 ppm_rthr = MILLION - ioc->params.qos[QOS_RPPM]; u32 ppm_wthr = MILLION - ioc->params.qos[QOS_WPPM]; u32 missed_ppm[2], rq_wait_pct; @@ -1936,6 +1948,8 @@ static void ioc_timer_fn(struct timer_list *timer) iocg->delay) { /* might be oversleeping vtime / hweight changes, kick */ iocg_kick_waitq(iocg, true, &now); + if (iocg->abs_vdebt) + nr_debtors++; } else if (iocg_is_idle(iocg)) { /* no waiter and idle, deactivate */ __propagate_weights(iocg, 0, 0, false, &now); @@ -1978,6 +1992,7 @@ static void ioc_timer_fn(struct timer_list *timer) * high-latency completions appearing as idle. */ usage_us = iocg->usage_delta_us; + usage_us_sum += usage_us; if (vdone != vtime) { u64 inflight_us = DIV64_U64_ROUND_UP( @@ -2036,6 +2051,38 @@ static void ioc_timer_fn(struct timer_list *timer) list_for_each_entry_safe(iocg, tiocg, &surpluses, surplus_list) list_del_init(&iocg->surplus_list); + /* + * A low weight iocg can amass a large amount of debt, for example, when + * anonymous memory gets reclaimed aggressively. If the system has a lot + * of memory paired with a slow IO device, the debt can span multiple + * seconds or more. If there are no other subsequent IO issuers, the + * in-debt iocg may end up blocked paying its debt while the IO device + * is idle. + * + * The following protects against such pathological cases. If the device + * has been sufficiently idle for a substantial amount of time, the + * debts are halved. The criteria are on the conservative side as we + * want to resolve the rare extreme cases without impacting regular + * operation by forgiving debts too readily. + */ + if (nr_shortages || + div64_u64(100 * usage_us_sum, now.now - ioc->period_at) >= + DEBT_BUSY_USAGE_PCT) + ioc->debt_busy_at = now.now; + + if (nr_debtors && + now.now - ioc->debt_busy_at >= DEBT_REDUCTION_IDLE_DUR) { + list_for_each_entry(iocg, &ioc->active_iocgs, active_list) { + if (iocg->abs_vdebt) { + spin_lock(&iocg->waitq.lock); + iocg->abs_vdebt /= 2; + iocg_kick_waitq(iocg, true, &now); + spin_unlock(&iocg->waitq.lock); + } + } + ioc->debt_busy_at = now.now; + } + /* * If q is getting clogged or we're missing too much, we're issuing * too much IO and should lower vtime rate. If we're not missing -- cgit v1.2.3 From ac33e91e2daca40fcad66c68712276da7b40f169 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 1 Sep 2020 14:52:54 -0400 Subject: blk-iocost: implement vtime loss compensation When an iocg accumulates too much vtime or gets deactivated, we throw away some vtime, which lowers the overall device utilization. 
As the exact amount which is being thrown away is known, we can compensate by accelerating the vrate accordingly so that the extra vtime generated in the current period matches what got lost. This significantly improves work conservation when involving high weight cgroups with intermittent and bursty IO patterns. Signed-off-by: Tejun Heo Signed-off-by: Jens Axboe --- block/blk-iocost.c | 132 ++++++++++++++++++++++++++++++++++++----------------- 1 file changed, 90 insertions(+), 42 deletions(-) (limited to 'block') diff --git a/block/blk-iocost.c b/block/blk-iocost.c index 2a95a081cf44..0270a504e6b5 100644 --- a/block/blk-iocost.c +++ b/block/blk-iocost.c @@ -224,20 +224,12 @@ enum { MARGIN_MIN_PCT = 10, MARGIN_LOW_PCT = 20, MARGIN_TARGET_PCT = 50, - MARGIN_MAX_PCT = 100, INUSE_ADJ_STEP_PCT = 25, /* Have some play in timer operations */ TIMER_SLACK_PCT = 1, - /* - * vtime can wrap well within a reasonable uptime when vrate is - * consistently raised. Don't trust recorded cgroup vtime if the - * period counter indicates that it's older than 5mins. - */ - VTIME_VALID_DUR = 300 * USEC_PER_SEC, - /* 1/64k is granular enough and can easily be handled w/ u32 */ WEIGHT_ONE = 1 << 16, @@ -395,7 +387,6 @@ struct ioc_margins { s64 min; s64 low; s64 target; - s64 max; }; struct ioc_missed { @@ -432,6 +423,8 @@ struct ioc { enum ioc_running running; atomic64_t vtime_rate; + u64 vtime_base_rate; + s64 vtime_err; seqcount_spinlock_t period_seqcount; u64 period_at; /* wallclock starttime */ @@ -760,12 +753,11 @@ static void ioc_refresh_margins(struct ioc *ioc) { struct ioc_margins *margins = &ioc->margins; u32 period_us = ioc->period_us; - u64 vrate = atomic64_read(&ioc->vtime_rate); + u64 vrate = ioc->vtime_base_rate; margins->min = (period_us * MARGIN_MIN_PCT / 100) * vrate; margins->low = (period_us * MARGIN_LOW_PCT / 100) * vrate; margins->target = (period_us * MARGIN_TARGET_PCT / 100) * vrate; - margins->max = (period_us * MARGIN_MAX_PCT / 100) * vrate; } /* latency Qos params changed, update period_us and all the dependent params */ @@ -831,8 +823,7 @@ static int ioc_autop_idx(struct ioc *ioc) return idx; /* step up/down based on the vrate */ - vrate_pct = div64_u64(atomic64_read(&ioc->vtime_rate) * 100, - VTIME_PER_USEC); + vrate_pct = div64_u64(ioc->vtime_base_rate * 100, VTIME_PER_USEC); now_ns = ktime_get_ns(); if (p->too_fast_vrate_pct && p->too_fast_vrate_pct <= vrate_pct) { @@ -940,6 +931,43 @@ static bool ioc_refresh_params(struct ioc *ioc, bool force) return true; } +/* + * When an iocg accumulates too much vtime or gets deactivated, we throw away + * some vtime, which lowers the overall device utilization. As the exact amount + * which is being thrown away is known, we can compensate by accelerating the + * vrate accordingly so that the extra vtime generated in the current period + * matches what got lost. + */ +static void ioc_refresh_vrate(struct ioc *ioc, struct ioc_now *now) +{ + s64 pleft = ioc->period_at + ioc->period_us - now->now; + s64 vperiod = ioc->period_us * ioc->vtime_base_rate; + s64 vcomp, vcomp_min, vcomp_max; + + lockdep_assert_held(&ioc->lock); + + /* we need some time left in this period */ + if (pleft <= 0) + goto done; + + /* + * Calculate how much vrate should be adjusted to offset the error. + * Limit the amount of adjustment and deduct the adjusted amount from + * the error. 
+ */ + vcomp = -div64_s64(ioc->vtime_err, pleft); + vcomp_min = -(ioc->vtime_base_rate >> 1); + vcomp_max = ioc->vtime_base_rate; + vcomp = clamp(vcomp, vcomp_min, vcomp_max); + + ioc->vtime_err += vcomp * pleft; + + atomic64_set(&ioc->vtime_rate, ioc->vtime_base_rate + vcomp); +done: + /* bound how much error can accumulate */ + ioc->vtime_err = clamp(ioc->vtime_err, -vperiod, vperiod); +} + /* take a snapshot of the current [v]time and vrate */ static void ioc_now(struct ioc *ioc, struct ioc_now *now) { @@ -1152,8 +1180,8 @@ static void weight_updated(struct ioc_gq *iocg, struct ioc_now *now) static bool iocg_activate(struct ioc_gq *iocg, struct ioc_now *now) { struct ioc *ioc = iocg->ioc; - u64 last_period, cur_period, max_period_delta; - u64 vtime, vmin; + u64 last_period, cur_period; + u64 vtime, vtarget; int i; /* @@ -1192,21 +1220,15 @@ static bool iocg_activate(struct ioc_gq *iocg, struct ioc_now *now) goto fail_unlock; /* - * vtime may wrap when vrate is raised substantially due to - * underestimated IO costs. Look at the period and ignore its - * vtime if the iocg has been idle for too long. Also, cap the - * budget it can start with to the margin. + * Always start with the target budget. On deactivation, we throw away + * anything above it. */ - max_period_delta = DIV64_U64_ROUND_UP(VTIME_VALID_DUR, ioc->period_us); + vtarget = now->vnow - ioc->margins.target; vtime = atomic64_read(&iocg->vtime); - vmin = now->vnow - ioc->margins.max; - if (last_period + max_period_delta < cur_period || - time_before64(vtime, vmin)) { - atomic64_add(vmin - vtime, &iocg->vtime); - atomic64_add(vmin - vtime, &iocg->done_vtime); - vtime = vmin; - } + atomic64_add(vtarget - vtime, &iocg->vtime); + atomic64_add(vtarget - vtime, &iocg->done_vtime); + vtime = vtarget; /* * Activate, propagate weight and start period timer if not @@ -1260,7 +1282,8 @@ static bool iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now) current_hweight(iocg, &hwa, NULL); vover = atomic64_read(&iocg->vtime) + abs_cost_to_cost(iocg->abs_vdebt, hwa) - now->vnow; - vover_pct = div64_s64(100 * vover, ioc->period_us * now->vrate); + vover_pct = div64_s64(100 * vover, + ioc->period_us * ioc->vtime_base_rate); if (vover_pct <= MIN_DELAY_THR_PCT) new_delay = 0; @@ -1421,7 +1444,8 @@ static void iocg_kick_waitq(struct ioc_gq *iocg, bool pay_debt, /* determine next wakeup, add a timer margin to guarantee chunking */ vshortage = -ctx.vbudget; expires = now->now_ns + - DIV64_U64_ROUND_UP(vshortage, now->vrate) * NSEC_PER_USEC; + DIV64_U64_ROUND_UP(vshortage, ioc->vtime_base_rate) * + NSEC_PER_USEC; expires += ioc->timer_slack_ns; /* if already active and close enough, don't bother */ @@ -1536,6 +1560,7 @@ static void iocg_build_inner_walk(struct ioc_gq *iocg, /* collect per-cpu counters and propagate the deltas to the parent */ static void iocg_flush_stat_one(struct ioc_gq *iocg, struct ioc_now *now) { + struct ioc *ioc = iocg->ioc; struct iocg_stat new_stat; u64 abs_vusage = 0; u64 vusage_delta; @@ -1551,7 +1576,7 @@ static void iocg_flush_stat_one(struct ioc_gq *iocg, struct ioc_now *now) vusage_delta = abs_vusage - iocg->last_stat_abs_vusage; iocg->last_stat_abs_vusage = abs_vusage; - iocg->usage_delta_us = div64_u64(vusage_delta, now->vrate); + iocg->usage_delta_us = div64_u64(vusage_delta, ioc->vtime_base_rate); iocg->local_stat.usage_us += iocg->usage_delta_us; new_stat.usage_us = @@ -1593,8 +1618,8 @@ static void iocg_flush_stat(struct list_head *target_iocgs, struct ioc_now *now) * capacity. 
@hwm is the upper bound and used to signal no donation. This * function also throws away @iocg's excess budget. */ -static u32 hweight_after_donation(struct ioc_gq *iocg, u32 hwm, u32 usage, - struct ioc_now *now) +static u32 hweight_after_donation(struct ioc_gq *iocg, u32 old_hwi, u32 hwm, + u32 usage, struct ioc_now *now) { struct ioc *ioc = iocg->ioc; u64 vtime = atomic64_read(&iocg->vtime); @@ -1609,12 +1634,13 @@ static u32 hweight_after_donation(struct ioc_gq *iocg, u32 hwm, u32 usage, time_after64(vtime, now->vnow - ioc->margins.min)) return hwm; - /* throw away excess above max */ - excess = now->vnow - vtime - ioc->margins.max; + /* throw away excess above target */ + excess = now->vnow - vtime - ioc->margins.target; if (excess > 0) { atomic64_add(excess, &iocg->vtime); atomic64_add(excess, &iocg->done_vtime); vtime += excess; + ioc->vtime_err -= div64_u64(excess * old_hwi, WEIGHT_ONE); } /* @@ -1952,6 +1978,24 @@ static void ioc_timer_fn(struct timer_list *timer) nr_debtors++; } else if (iocg_is_idle(iocg)) { /* no waiter and idle, deactivate */ + u64 vtime = atomic64_read(&iocg->vtime); + s64 excess; + + /* + * @iocg has been inactive for a full duration and will + * have a high budget. Account anything above target as + * error and throw away. On reactivation, it'll start + * with the target budget. + */ + excess = now.vnow - vtime - ioc->margins.target; + if (excess > 0) { + u32 old_hwi; + + current_hweight(iocg, NULL, &old_hwi); + ioc->vtime_err -= div64_u64(excess * old_hwi, + WEIGHT_ONE); + } + __propagate_weights(iocg, 0, 0, false, &now); list_del_init(&iocg->active_list); } @@ -1997,7 +2041,7 @@ static void ioc_timer_fn(struct timer_list *timer) if (vdone != vtime) { u64 inflight_us = DIV64_U64_ROUND_UP( cost_to_abs_cost(vtime - vdone, hw_inuse), - now.vrate); + ioc->vtime_base_rate); usage_us = max(usage_us, inflight_us); } @@ -2017,16 +2061,16 @@ static void ioc_timer_fn(struct timer_list *timer) if (hw_inuse < hw_active || (!waitqueue_active(&iocg->waitq) && time_before64(vtime, now.vnow - ioc->margins.low))) { - u32 hwa, hwm, new_hwi; + u32 hwa, old_hwi, hwm, new_hwi; /* * Already donating or accumulated enough to start. * Determine the donation amount. 
*/ - current_hweight(iocg, &hwa, NULL); + current_hweight(iocg, &hwa, &old_hwi); hwm = current_hweight_max(iocg); - new_hwi = hweight_after_donation(iocg, hwm, usage, - &now); + new_hwi = hweight_after_donation(iocg, old_hwi, hwm, + usage, &now); if (new_hwi < hwm) { iocg->hweight_donating = hwa; iocg->hweight_after_donation = new_hwi; @@ -2130,7 +2174,7 @@ static void ioc_timer_fn(struct timer_list *timer) ioc->busy_level = clamp(ioc->busy_level, -1000, 1000); if (ioc->busy_level > 0 || (ioc->busy_level < 0 && !nr_lagging)) { - u64 vrate = atomic64_read(&ioc->vtime_rate); + u64 vrate = ioc->vtime_base_rate; u64 vrate_min = ioc->vrate_min, vrate_max = ioc->vrate_max; /* rq_wait signal is always reliable, ignore user vrate_min */ @@ -2167,7 +2211,7 @@ static void ioc_timer_fn(struct timer_list *timer) trace_iocost_ioc_vrate_adj(ioc, vrate, missed_ppm, rq_wait_pct, nr_lagging, nr_shortages); - atomic64_set(&ioc->vtime_rate, vrate); + ioc->vtime_base_rate = vrate; ioc_refresh_margins(ioc); } else if (ioc->busy_level != prev_busy_level || nr_lagging) { trace_iocost_ioc_vrate_adj(ioc, atomic64_read(&ioc->vtime_rate), @@ -2188,8 +2232,11 @@ static void ioc_timer_fn(struct timer_list *timer) ioc_start_period(ioc, &now); } else { ioc->busy_level = 0; + ioc->vtime_err = 0; ioc->running = IOC_IDLE; } + + ioc_refresh_vrate(ioc, &now); } spin_unlock_irq(&ioc->lock); @@ -2628,6 +2675,7 @@ static int blk_iocost_init(struct request_queue *q) INIT_LIST_HEAD(&ioc->active_iocgs); ioc->running = IOC_IDLE; + ioc->vtime_base_rate = VTIME_PER_USEC; atomic64_set(&ioc->vtime_rate, VTIME_PER_USEC); seqcount_spinlock_init(&ioc->period_seqcount, &ioc->lock); ioc->period_at = ktime_to_us(ktime_get()); @@ -2762,7 +2810,7 @@ static size_t ioc_pd_stat(struct blkg_policy_data *pd, char *buf, size_t size) if (iocg->level == 0) { unsigned vp10k = DIV64_U64_ROUND_CLOSEST( - atomic64_read(&ioc->vtime_rate) * 10000, + ioc->vtime_base_rate * 10000, VTIME_PER_USEC); pos += scnprintf(buf + pos, size - pos, " cost.vrate=%u.%02u", vp10k / 100, vp10k % 100); -- cgit v1.2.3 From 046037551721e8831f6718ac2149887f6bb1f802 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 1 Sep 2020 14:52:55 -0400 Subject: blk-iocost: restore inuse update tracepoints Update and restore the inuse update tracepoints. 
Signed-off-by: Tejun Heo Signed-off-by: Jens Axboe --- block/blk-iocost.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) (limited to 'block') diff --git a/block/blk-iocost.c b/block/blk-iocost.c index 0270a504e6b5..9366527d8c12 100644 --- a/block/blk-iocost.c +++ b/block/blk-iocost.c @@ -1919,6 +1919,12 @@ static void transfer_surpluses(struct list_head *surpluses, struct ioc_now *now) inuse = DIV64_U64_ROUND_UP( parent->child_adjusted_sum * iocg->hweight_after_donation, parent->hweight_inuse); + + TRACE_IOCG_PATH(inuse_transfer, iocg, now, + iocg->inuse, inuse, + iocg->hweight_inuse, + iocg->hweight_after_donation); + __propagate_weights(iocg, iocg->active, inuse, true, now); } @@ -2076,6 +2082,10 @@ static void ioc_timer_fn(struct timer_list *timer) iocg->hweight_after_donation = new_hwi; list_add(&iocg->surplus_list, &surpluses); } else { + TRACE_IOCG_PATH(inuse_shortage, iocg, &now, + iocg->inuse, iocg->active, + iocg->hweight_inuse, new_hwi); + __propagate_weights(iocg, iocg->active, iocg->active, true, &now); nr_shortages++; @@ -2248,11 +2258,13 @@ static u64 adjust_inuse_and_calc_cost(struct ioc_gq *iocg, u64 vtime, struct ioc *ioc = iocg->ioc; struct ioc_margins *margins = &ioc->margins; u32 adj_step = DIV_ROUND_UP(iocg->active * INUSE_ADJ_STEP_PCT, 100); + u32 __maybe_unused old_inuse = iocg->inuse, __maybe_unused old_hwi; u32 hwi; s64 margin; u64 cost, new_inuse; current_hweight(iocg, NULL, &hwi); + old_hwi = hwi; cost = abs_cost_to_cost(abs_cost, hwi); margin = now->vnow - vtime - cost; @@ -2287,6 +2299,10 @@ static u64 adjust_inuse_and_calc_cost(struct ioc_gq *iocg, u64 vtime, iocg->inuse != iocg->active); spin_unlock_irq(&ioc->lock); + + TRACE_IOCG_PATH(inuse_adjust, iocg, now, + old_inuse, iocg->inuse, old_hwi, hwi); + return cost; } -- cgit v1.2.3 From f0bf84a5dffa0800c4f8ae52a75c579978086901 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 1 Sep 2020 14:52:56 -0400 Subject: blk-iocost: add three debug stat - cost.wait, indebt and indelay These are really cheap to collect and can be useful in debugging iocost behavior. Add them as debug stats for now. Signed-off-by: Tejun Heo Signed-off-by: Jens Axboe --- block/blk-iocost.c | 77 ++++++++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 72 insertions(+), 5 deletions(-) (limited to 'block') diff --git a/block/blk-iocost.c b/block/blk-iocost.c index 9366527d8c12..fc897bb142bc 100644 --- a/block/blk-iocost.c +++ b/block/blk-iocost.c @@ -452,6 +452,9 @@ struct iocg_pcpu_stat { struct iocg_stat { u64 usage_us; + u64 wait_us; + u64 indebt_us; + u64 indelay_us; }; /* per device-cgroup pair */ @@ -538,6 +541,9 @@ struct ioc_gq { struct iocg_stat last_stat; u64 last_stat_abs_vusage; u64 usage_delta_us; + u64 wait_since; + u64 indebt_since; + u64 indelay_since; /* this iocg's depth in the hierarchy and ancestors including self */ int level; @@ -1303,9 +1309,15 @@ static bool iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now) } if (delay >= MIN_DELAY) { + if (!iocg->indelay_since) + iocg->indelay_since = now->now; blkcg_set_delay(blkg, delay * NSEC_PER_USEC); return true; } else { + if (iocg->indelay_since) { + iocg->local_stat.indelay_us += now->now - iocg->indelay_since; + iocg->indelay_since = 0; + } iocg->delay = 0; blkcg_clear_delay(blkg); return false; @@ -1325,8 +1337,10 @@ static void iocg_incur_debt(struct ioc_gq *iocg, u64 abs_cost, * Once in debt, debt handling owns inuse. @iocg stays at the minimum * inuse donating all of it share to others until its debt is paid off. 
*/ - if (!iocg->abs_vdebt && abs_cost) + if (!iocg->abs_vdebt && abs_cost) { + iocg->indebt_since = now->now; propagate_weights(iocg, iocg->active, 0, false, now); + } iocg->abs_vdebt += abs_cost; @@ -1348,9 +1362,13 @@ static void iocg_pay_debt(struct ioc_gq *iocg, u64 abs_vpay, iocg->abs_vdebt -= min(abs_vpay, iocg->abs_vdebt); /* if debt is paid in full, restore inuse */ - if (!iocg->abs_vdebt) + if (!iocg->abs_vdebt) { + iocg->local_stat.indebt_us += now->now - iocg->indebt_since; + iocg->indebt_since = 0; + propagate_weights(iocg, iocg->active, iocg->last_inuse, false, now); + } } static int iocg_wake_fn(struct wait_queue_entry *wq_entry, unsigned mode, @@ -1436,8 +1454,17 @@ static void iocg_kick_waitq(struct ioc_gq *iocg, bool pay_debt, __wake_up_locked_key(&iocg->waitq, TASK_NORMAL, &ctx); - if (!waitqueue_active(&iocg->waitq)) + if (!waitqueue_active(&iocg->waitq)) { + if (iocg->wait_since) { + iocg->local_stat.wait_us += now->now - iocg->wait_since; + iocg->wait_since = 0; + } return; + } + + if (!iocg->wait_since) + iocg->wait_since = now->now; + if (WARN_ON_ONCE(ctx.vbudget >= 0)) return; @@ -1579,8 +1606,15 @@ static void iocg_flush_stat_one(struct ioc_gq *iocg, struct ioc_now *now) iocg->usage_delta_us = div64_u64(vusage_delta, ioc->vtime_base_rate); iocg->local_stat.usage_us += iocg->usage_delta_us; + /* propagate upwards */ new_stat.usage_us = iocg->local_stat.usage_us + iocg->desc_stat.usage_us; + new_stat.wait_us = + iocg->local_stat.wait_us + iocg->desc_stat.wait_us; + new_stat.indebt_us = + iocg->local_stat.indebt_us + iocg->desc_stat.indebt_us; + new_stat.indelay_us = + iocg->local_stat.indelay_us + iocg->desc_stat.indelay_us; /* propagate the deltas to the parent */ if (iocg->level > 0) { @@ -1589,6 +1623,12 @@ static void iocg_flush_stat_one(struct ioc_gq *iocg, struct ioc_now *now) parent_stat->usage_us += new_stat.usage_us - iocg->last_stat.usage_us; + parent_stat->wait_us += + new_stat.wait_us - iocg->last_stat.wait_us; + parent_stat->indebt_us += + new_stat.indebt_us - iocg->last_stat.indebt_us; + parent_stat->indelay_us += + new_stat.indelay_us - iocg->last_stat.indelay_us; } iocg->last_stat = new_stat; @@ -1961,8 +2001,6 @@ static void ioc_timer_fn(struct timer_list *timer) return; } - iocg_flush_stat(&ioc->active_iocgs, &now); - /* * Waiters determine the sleep durations based on the vrate they * saw at the time of sleep. If vrate has increased, some waiters @@ -1976,6 +2014,22 @@ static void ioc_timer_fn(struct timer_list *timer) spin_lock(&iocg->waitq.lock); + /* flush wait and indebt stat deltas */ + if (iocg->wait_since) { + iocg->local_stat.wait_us += now.now - iocg->wait_since; + iocg->wait_since = now.now; + } + if (iocg->indebt_since) { + iocg->local_stat.indebt_us += + now.now - iocg->indebt_since; + iocg->indebt_since = now.now; + } + if (iocg->indelay_since) { + iocg->local_stat.indelay_us += + now.now - iocg->indelay_since; + iocg->indelay_since = now.now; + } + if (waitqueue_active(&iocg->waitq) || iocg->abs_vdebt || iocg->delay) { /* might be oversleeping vtime / hweight changes, kick */ @@ -2010,6 +2064,12 @@ static void ioc_timer_fn(struct timer_list *timer) } commit_weights(ioc); + /* + * Wait and indebt stat are flushed above and the donation calculation + * below needs updated usage stat. Let's bring stat up-to-date. 
+ */ + iocg_flush_stat(&ioc->active_iocgs, &now); + /* calc usage and see whether some weights need to be moved around */ list_for_each_entry(iocg, &ioc->active_iocgs, active_list) { u64 vdone, vtime, usage_us, usage_dur; @@ -2835,6 +2895,13 @@ static size_t ioc_pd_stat(struct blkg_policy_data *pd, char *buf, size_t size) pos += scnprintf(buf + pos, size - pos, " cost.usage=%llu", iocg->last_stat.usage_us); + if (blkcg_debug_stats) + pos += scnprintf(buf + pos, size - pos, + " cost.wait=%llu cost.indebt=%llu cost.indelay=%llu", + iocg->last_stat.wait_us, + iocg->last_stat.indebt_us, + iocg->last_stat.indelay_us); + return pos; } -- cgit v1.2.3 From 9d3a39a5f1e45827b008fff1ee9cf3cac3409665 Mon Sep 17 00:00:00 2001 From: Khazhismel Kumykov Date: Mon, 24 Aug 2020 15:10:34 -0700 Subject: block: grant IOPRIO_CLASS_RT to CAP_SYS_NICE CAP_SYS_ADMIN is too broad, and ionice fits into CAP_SYS_NICE's grouping. Retain CAP_SYS_ADMIN permission for backwards compatibility. Signed-off-by: Khazhismel Kumykov Reviewed-by: Bart Van Assche Acked-by: Serge Hallyn Signed-off-by: Jens Axboe --- block/ioprio.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'block') diff --git a/block/ioprio.c b/block/ioprio.c index 04ebd37966f1..364d2294ba90 100644 --- a/block/ioprio.c +++ b/block/ioprio.c @@ -69,7 +69,7 @@ int ioprio_check_cap(int ioprio) switch (class) { case IOPRIO_CLASS_RT: - if (!capable(CAP_SYS_ADMIN)) + if (!capable(CAP_SYS_NICE) && !capable(CAP_SYS_ADMIN)) return -EPERM; fallthrough; /* rt has prio field too */ -- cgit v1.2.3 From 143d2600faf1563fd4125686ab84afaab5fb513b Mon Sep 17 00:00:00 2001 From: Ritika Srivastava Date: Tue, 1 Sep 2020 13:17:30 -0700 Subject: block: Return blk_status_t instead of errno codes Replace returning legacy errno codes with blk_status_t in blk_cloned_rq_check_limits(). Signed-off-by: Ritika Srivastava Reviewed-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-core.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'block') diff --git a/block/blk-core.c b/block/blk-core.c index 062efdedc994..3fbb5b2d5385 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -1145,14 +1145,14 @@ EXPORT_SYMBOL(submit_bio); * limits when retrying requests on other queues. Those requests need * to be checked against the new queue limits again during dispatch. */ -static int blk_cloned_rq_check_limits(struct request_queue *q, +static blk_status_t blk_cloned_rq_check_limits(struct request_queue *q, struct request *rq) { if (blk_rq_sectors(rq) > blk_queue_get_max_sectors(q, req_op(rq))) { printk(KERN_ERR "%s: over max size limit. (%u > %u)\n", __func__, blk_rq_sectors(rq), blk_queue_get_max_sectors(q, req_op(rq))); - return -EIO; + return BLK_STS_IOERR; } /* @@ -1165,10 +1165,10 @@ static int blk_cloned_rq_check_limits(struct request_queue *q, if (rq->nr_phys_segments > queue_max_segments(q)) { printk(KERN_ERR "%s: over max segments limit. (%hu > %hu)\n", __func__, rq->nr_phys_segments, queue_max_segments(q)); - return -EIO; + return BLK_STS_IOERR; } - return 0; + return BLK_STS_OK; } /** -- cgit v1.2.3 From 8327cce5ff9376fac9ff713a8d5c99c16ba3fa33 Mon Sep 17 00:00:00 2001 From: Ritika Srivastava Date: Tue, 1 Sep 2020 13:17:31 -0700 Subject: block: better deal with the delayed not supported case in blk_cloned_rq_check_limits If WRITE_ZERO/WRITE_SAME operation is not supported by the storage, blk_cloned_rq_check_limits() will return IO error which will cause device-mapper to fail the paths. 
Instead, if the queue limit is set to 0, return BLK_STS_NOTSUPP. BLK_STS_NOTSUPP will be ignored by device-mapper and will not fail the paths. Suggested-by: Martin K. Petersen Signed-off-by: Ritika Srivastava Reviewed-by: Martin K. Petersen Reviewed-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-core.c | 27 ++++++++++++++++++++++----- 1 file changed, 22 insertions(+), 5 deletions(-) (limited to 'block') diff --git a/block/blk-core.c b/block/blk-core.c index 3fbb5b2d5385..9f2a99abeeb9 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -1148,10 +1148,24 @@ EXPORT_SYMBOL(submit_bio); static blk_status_t blk_cloned_rq_check_limits(struct request_queue *q, struct request *rq) { - if (blk_rq_sectors(rq) > blk_queue_get_max_sectors(q, req_op(rq))) { + unsigned int max_sectors = blk_queue_get_max_sectors(q, req_op(rq)); + + if (blk_rq_sectors(rq) > max_sectors) { + /* + * SCSI device does not have a good way to return if + * Write Same/Zero is actually supported. If a device rejects + * a non-read/write command (discard, write same,etc.) the + * low-level device driver will set the relevant queue limit to + * 0 to prevent blk-lib from issuing more of the offending + * operations. Commands queued prior to the queue limit being + * reset need to be completed with BLK_STS_NOTSUPP to avoid I/O + * errors being propagated to upper layers. + */ + if (max_sectors == 0) + return BLK_STS_NOTSUPP; + printk(KERN_ERR "%s: over max size limit. (%u > %u)\n", - __func__, blk_rq_sectors(rq), - blk_queue_get_max_sectors(q, req_op(rq))); + __func__, blk_rq_sectors(rq), max_sectors); return BLK_STS_IOERR; } @@ -1178,8 +1192,11 @@ static blk_status_t blk_cloned_rq_check_limits(struct request_queue *q, */ blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *rq) { - if (blk_cloned_rq_check_limits(q, rq)) - return BLK_STS_IOERR; + blk_status_t ret; + + ret = blk_cloned_rq_check_limits(q, rq); + if (ret != BLK_STS_OK) + return ret; if (rq->rq_disk && should_fail_request(&rq->rq_disk->part0, blk_rq_bytes(rq))) -- cgit v1.2.3 From 265600b7b6e88f3cc4cef2774afe0eb4abe72d72 Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Wed, 2 Sep 2020 09:45:25 +0800 Subject: block: Remove a duplicative condition Remove a duplicative condition to remove below cppcheck warnings: "warning: Redundant condition: sched_allow_merge. 
'!A || (A && B)' is equivalent to '!A || B' [redundantCondition]" Reported-by: kernel test robot Signed-off-by: Baolin Wang Signed-off-by: Jens Axboe --- block/blk-merge.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'block') diff --git a/block/blk-merge.c b/block/blk-merge.c index 80c974484a3f..6ed715835d45 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c @@ -996,13 +996,11 @@ static enum bio_merge_status blk_attempt_bio_merge(struct request_queue *q, switch (blk_try_merge(rq, bio)) { case ELEVATOR_BACK_MERGE: - if (!sched_allow_merge || - (sched_allow_merge && blk_mq_sched_allow_merge(q, rq, bio))) + if (!sched_allow_merge || blk_mq_sched_allow_merge(q, rq, bio)) return bio_attempt_back_merge(rq, bio, nr_segs); break; case ELEVATOR_FRONT_MERGE: - if (!sched_allow_merge || - (sched_allow_merge && blk_mq_sched_allow_merge(q, rq, bio))) + if (!sched_allow_merge || blk_mq_sched_allow_merge(q, rq, bio)) return bio_attempt_front_merge(rq, bio, nr_segs); break; case ELEVATOR_DISCARD_MERGE: -- cgit v1.2.3 From f4ad06f2bb8476548b08f89919ee65abc4e40212 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 1 Sep 2020 17:57:42 +0200 Subject: block: rename bd_invalidated Replace bd_invalidate with a new BDEV_NEED_PART_SCAN flag in a bd_flags variable to better describe the condition. Signed-off-by: Christoph Hellwig Reviewed-by: Josef Bacik Reviewed-by: Johannes Thumshirn Signed-off-by: Jens Axboe --- block/genhd.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'block') diff --git a/block/genhd.c b/block/genhd.c index 5fc6d82e6c68..a36be970b767 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -732,7 +732,7 @@ static void register_disk(struct device *parent, struct gendisk *disk, if (!bdev) goto exit; - bdev->bd_invalidated = 1; + set_bit(BDEV_NEED_PART_SCAN, &bdev->bd_flags); err = blkdev_get(bdev, FMODE_READ, NULL); if (err < 0) goto exit; -- cgit v1.2.3 From b8086d3f5a0e88b1912d55a158b8a6a43ad6604b Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 1 Sep 2020 17:57:44 +0200 Subject: block: use revalidate_disk_size in set_capacity_revalidate_and_notify Only virtio_blk and xen-blkfront set the revalidate argument to true, and both do not implement the ->revalidate_disk method. So switch to the helper that just updates the size instead. Signed-off-by: Christoph Hellwig Reviewed-by: Josef Bacik Reviewed-by: Johannes Thumshirn Signed-off-by: Jens Axboe --- block/genhd.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'block') diff --git a/block/genhd.c b/block/genhd.c index a36be970b767..081f1039d936 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -50,14 +50,13 @@ static void disk_release_events(struct gendisk *disk); * zero and will not be set to zero */ void set_capacity_revalidate_and_notify(struct gendisk *disk, sector_t size, - bool revalidate) + bool update_bdev) { sector_t capacity = get_capacity(disk); set_capacity(disk, size); - - if (revalidate) - revalidate_disk(disk); + if (update_bdev) + revalidate_disk_size(disk, true); if (capacity != size && capacity != 0 && size != 0) { char *envp[] = { "RESIZE=1", NULL }; -- cgit v1.2.3 From 51db1c37ee166159c5753ce8d64d6bacf113e0f0 Mon Sep 17 00:00:00 2001 From: Ming Lei Date: Wed, 19 Aug 2020 23:20:19 +0800 Subject: blk-mq: Rename BLK_MQ_F_TAG_SHARED as BLK_MQ_F_TAG_QUEUE_SHARED BLK_MQ_F_TAG_SHARED actually means that tags is shared among request queues, all of which should belong to LUNs attached to same HBA. So rename it to make the point explicitly. 
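For reference, drivers never set this flag directly: blk-mq turns it on for the whole set, and for every hctx, as soon as a second request queue is registered against the same tag set, which is exactly the blk_mq_add_queue_tag_set() hunk further below. A minimal sketch of how that situation arises (the hba structure holding the tag_set is hypothetical; blk_mq_init_queue() is the existing API):

	/*
	 * Two request queues built on one tag set, e.g. two LUNs behind one
	 * HBA. Registering the second queue makes blk-mq mark the set and
	 * every hctx with BLK_MQ_F_TAG_QUEUE_SHARED, after which
	 * hctx_may_queue() divides the tag space fairly between the queues.
	 */
	struct request_queue *q0 = blk_mq_init_queue(&hba->tag_set);
	struct request_queue *q1 = blk_mq_init_queue(&hba->tag_set);
	/* IS_ERR() checks elided in this sketch */
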
[jpg: rebase a few times, add rnbd-clt.c change] Suggested-by: Bart Van Assche Signed-off-by: Ming Lei Signed-off-by: John Garry Tested-by: Douglas Gilbert Reviewed-by: Hannes Reinecke Signed-off-by: Jens Axboe --- block/blk-mq-debugfs.c | 2 +- block/blk-mq-tag.h | 6 +++--- block/blk-mq.c | 20 ++++++++++---------- 3 files changed, 14 insertions(+), 14 deletions(-) (limited to 'block') diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c index 3f09bcb8a6fd..a4597febff69 100644 --- a/block/blk-mq-debugfs.c +++ b/block/blk-mq-debugfs.c @@ -240,7 +240,7 @@ static const char *const alloc_policy_name[] = { #define HCTX_FLAG_NAME(name) [ilog2(BLK_MQ_F_##name)] = #name static const char *const hctx_flag_name[] = { HCTX_FLAG_NAME(SHOULD_MERGE), - HCTX_FLAG_NAME(TAG_SHARED), + HCTX_FLAG_NAME(TAG_QUEUE_SHARED), HCTX_FLAG_NAME(BLOCKING), HCTX_FLAG_NAME(NO_SCHED), HCTX_FLAG_NAME(STACKING), diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h index b1acac518c4e..918e2cee4f43 100644 --- a/block/blk-mq-tag.h +++ b/block/blk-mq-tag.h @@ -56,7 +56,7 @@ extern void __blk_mq_tag_idle(struct blk_mq_hw_ctx *); static inline bool blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx) { - if (!(hctx->flags & BLK_MQ_F_TAG_SHARED)) + if (!(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) return false; return __blk_mq_tag_busy(hctx); @@ -64,7 +64,7 @@ static inline bool blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx) static inline void blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx) { - if (!(hctx->flags & BLK_MQ_F_TAG_SHARED)) + if (!(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) return; __blk_mq_tag_idle(hctx); @@ -79,7 +79,7 @@ static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx, { unsigned int depth, users; - if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_SHARED)) + if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) return true; if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state)) return true; diff --git a/block/blk-mq.c b/block/blk-mq.c index b3d2785eefe9..75031fa1709c 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -1124,7 +1124,7 @@ static bool blk_mq_get_driver_tag(struct request *rq) if (rq->tag == BLK_MQ_NO_TAG && !__blk_mq_get_driver_tag(rq)) return false; - if ((hctx->flags & BLK_MQ_F_TAG_SHARED) && + if ((hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) && !(rq->rq_flags & RQF_MQ_INFLIGHT)) { rq->rq_flags |= RQF_MQ_INFLIGHT; atomic_inc(&hctx->nr_active); @@ -1168,7 +1168,7 @@ static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx, wait_queue_entry_t *wait; bool ret; - if (!(hctx->flags & BLK_MQ_F_TAG_SHARED)) { + if (!(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) { blk_mq_sched_mark_restart_hctx(hctx); /* @@ -1420,7 +1420,7 @@ out: bool needs_restart; /* For non-shared tags, the RESTART check will suffice */ bool no_tag = prep == PREP_DISPATCH_NO_TAG && - (hctx->flags & BLK_MQ_F_TAG_SHARED); + (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED); bool no_budget_avail = prep == PREP_DISPATCH_NO_BUDGET; blk_mq_release_budgets(q, nr_budgets); @@ -2668,7 +2668,7 @@ blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set, spin_lock_init(&hctx->lock); INIT_LIST_HEAD(&hctx->dispatch); hctx->queue = q; - hctx->flags = set->flags & ~BLK_MQ_F_TAG_SHARED; + hctx->flags = set->flags & ~BLK_MQ_F_TAG_QUEUE_SHARED; INIT_LIST_HEAD(&hctx->hctx_list); @@ -2885,9 +2885,9 @@ static void queue_set_hctx_shared(struct request_queue *q, bool shared) queue_for_each_hw_ctx(q, hctx, i) { if (shared) - hctx->flags |= BLK_MQ_F_TAG_SHARED; + hctx->flags |= BLK_MQ_F_TAG_QUEUE_SHARED; else - hctx->flags &= ~BLK_MQ_F_TAG_SHARED; + hctx->flags &= 
~BLK_MQ_F_TAG_QUEUE_SHARED; } } @@ -2913,7 +2913,7 @@ static void blk_mq_del_queue_tag_set(struct request_queue *q) list_del(&q->tag_set_list); if (list_is_singular(&set->tag_list)) { /* just transitioned to unshared */ - set->flags &= ~BLK_MQ_F_TAG_SHARED; + set->flags &= ~BLK_MQ_F_TAG_QUEUE_SHARED; /* update existing queue */ blk_mq_update_tag_set_depth(set, false); } @@ -2930,12 +2930,12 @@ static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set, * Check to see if we're transitioning to shared (from 1 to 2 queues). */ if (!list_empty(&set->tag_list) && - !(set->flags & BLK_MQ_F_TAG_SHARED)) { - set->flags |= BLK_MQ_F_TAG_SHARED; + !(set->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) { + set->flags |= BLK_MQ_F_TAG_QUEUE_SHARED; /* update existing queue */ blk_mq_update_tag_set_depth(set, true); } - if (set->flags & BLK_MQ_F_TAG_SHARED) + if (set->flags & BLK_MQ_F_TAG_QUEUE_SHARED) queue_set_hctx_shared(q, true); list_add_tail(&q->tag_set_list, &set->tag_list); -- cgit v1.2.3 From 655ac30094da0d724ce66161f15675d30da89cd8 Mon Sep 17 00:00:00 2001 From: Hannes Reinecke Date: Wed, 19 Aug 2020 23:20:20 +0800 Subject: blk-mq: Rename blk_mq_update_tag_set_depth() The function does not set the depth, but rather transitions from shared to non-shared queues and vice versa. So rename it to blk_mq_update_tag_set_shared() to better reflect its purpose. [jpg: take out some unrelated changes in blk_mq_init_bitmap_tags()] Signed-off-by: Hannes Reinecke Signed-off-by: John Garry Tested-by: Douglas Gilbert Signed-off-by: Jens Axboe --- block/blk-mq.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'block') diff --git a/block/blk-mq.c b/block/blk-mq.c index 75031fa1709c..cae46701579c 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -2891,8 +2891,8 @@ static void queue_set_hctx_shared(struct request_queue *q, bool shared) } } -static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set, - bool shared) +static void blk_mq_update_tag_set_shared(struct blk_mq_tag_set *set, + bool shared) { struct request_queue *q; @@ -2915,7 +2915,7 @@ static void blk_mq_del_queue_tag_set(struct request_queue *q) /* just transitioned to unshared */ set->flags &= ~BLK_MQ_F_TAG_QUEUE_SHARED; /* update existing queue */ - blk_mq_update_tag_set_depth(set, false); + blk_mq_update_tag_set_shared(set, false); } mutex_unlock(&set->tag_list_lock); INIT_LIST_HEAD(&q->tag_set_list); @@ -2933,7 +2933,7 @@ static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set, !(set->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) { set->flags |= BLK_MQ_F_TAG_QUEUE_SHARED; /* update existing queue */ - blk_mq_update_tag_set_depth(set, true); + blk_mq_update_tag_set_shared(set, true); } if (set->flags & BLK_MQ_F_TAG_QUEUE_SHARED) queue_set_hctx_shared(q, true); -- cgit v1.2.3 From 4d063237b92a4e8d99350ad05492a45009a3c78c Mon Sep 17 00:00:00 2001 From: Hannes Reinecke Date: Wed, 19 Aug 2020 23:20:21 +0800 Subject: blk-mq: Free tags in blk_mq_init_tags() upon error Since the tags are allocated in blk_mq_init_tags(), it's better practice to free in that same function upon error, rather than a callee which is to init the bitmap tags (blk_mq_init_tags()). 
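The change follows the usual kernel ownership rule: the function that allocates an object is the one that frees it on failure, while a callee unwinds only the state it created itself. A rough sketch of that rule with made-up names (foo_create() and bar_init() are illustrative only and do not exist in the tree):

	static struct foo *foo_create(int node)
	{
		struct foo *f = kzalloc_node(sizeof(*f), GFP_KERNEL, node);

		if (!f)
			return NULL;
		if (bar_init(f) < 0) {	/* bar_init() undoes only its own setup */
			kfree(f);	/* the allocator frees its own allocation */
			return NULL;
		}
		return f;
	}

After this patch, blk_mq_init_tags() plays the foo_create() role and blk_mq_init_bitmap_tags() the bar_init() role.
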
[jpg: Split from an earlier patch with a new commit message] Signed-off-by: Hannes Reinecke Signed-off-by: John Garry Tested-by: Douglas Gilbert Signed-off-by: Jens Axboe --- block/blk-mq-tag.c | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) (limited to 'block') diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c index 32d82e23b095..c2c9f369d777 100644 --- a/block/blk-mq-tag.c +++ b/block/blk-mq-tag.c @@ -429,24 +429,22 @@ static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth, node); } -static struct blk_mq_tags *blk_mq_init_bitmap_tags(struct blk_mq_tags *tags, - int node, int alloc_policy) +static int blk_mq_init_bitmap_tags(struct blk_mq_tags *tags, + int node, int alloc_policy) { unsigned int depth = tags->nr_tags - tags->nr_reserved_tags; bool round_robin = alloc_policy == BLK_TAG_ALLOC_RR; if (bt_alloc(&tags->bitmap_tags, depth, round_robin, node)) - goto free_tags; + return -ENOMEM; if (bt_alloc(&tags->breserved_tags, tags->nr_reserved_tags, round_robin, node)) goto free_bitmap_tags; - return tags; + return 0; free_bitmap_tags: sbitmap_queue_free(&tags->bitmap_tags); -free_tags: - kfree(tags); - return NULL; + return -ENOMEM; } struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags, @@ -467,7 +465,11 @@ struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags, tags->nr_tags = total_tags; tags->nr_reserved_tags = reserved_tags; - return blk_mq_init_bitmap_tags(tags, node, alloc_policy); + if (blk_mq_init_bitmap_tags(tags, node, alloc_policy) < 0) { + kfree(tags); + return NULL; + } + return tags; } void blk_mq_free_tags(struct blk_mq_tags *tags) -- cgit v1.2.3 From 1c0706a70a64eda48477caa880657e07f5196512 Mon Sep 17 00:00:00 2001 From: John Garry Date: Wed, 19 Aug 2020 23:20:22 +0800 Subject: blk-mq: Pass flags for tag init/free Pass hctx/tagset flags argument down to blk_mq_init_tags() and blk_mq_free_tags() for selective init/free. For now, make it include the alloc policy flag, which can be evaluated when needed (in blk_mq_init_tags()). 
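The only flag consumed so far is the tag allocation policy, which a driver encodes into its tag_set flags and which now travels unmodified down to blk_mq_init_tags(). A small sketch of both ends (the host structure is hypothetical; the flag macros and BLK_TAG_ALLOC_RR are the existing blk-mq/blkdev definitions):

	/* driver side: request round-robin tag allocation for this tag set */
	host->tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
			      BLK_ALLOC_POLICY_TO_MQ_FLAG(BLK_TAG_ALLOC_RR);

	/* blk_mq_init_tags() side, per this patch: recover the policy on demand */
	int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(flags);
	bool round_robin = alloc_policy == BLK_TAG_ALLOC_RR;
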
Signed-off-by: John Garry Tested-by: Douglas Gilbert Signed-off-by: Jens Axboe --- block/blk-mq-sched.c | 11 ++++++++--- block/blk-mq-tag.c | 12 +++++++----- block/blk-mq-tag.h | 7 ++++--- block/blk-mq.c | 23 +++++++++++++---------- block/blk-mq.h | 5 +++-- 5 files changed, 35 insertions(+), 23 deletions(-) (limited to 'block') diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c index 501a85ceaccb..edfc8ea805bc 100644 --- a/block/blk-mq-sched.c +++ b/block/blk-mq-sched.c @@ -560,9 +560,11 @@ static void blk_mq_sched_free_tags(struct blk_mq_tag_set *set, struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx) { + unsigned int flags = set->flags; + if (hctx->sched_tags) { blk_mq_free_rqs(set, hctx->sched_tags, hctx_idx); - blk_mq_free_rq_map(hctx->sched_tags); + blk_mq_free_rq_map(hctx->sched_tags, flags); hctx->sched_tags = NULL; } } @@ -572,10 +574,11 @@ static int blk_mq_sched_alloc_tags(struct request_queue *q, unsigned int hctx_idx) { struct blk_mq_tag_set *set = q->tag_set; + unsigned int flags = set->flags; int ret; hctx->sched_tags = blk_mq_alloc_rq_map(set, hctx_idx, q->nr_requests, - set->reserved_tags); + set->reserved_tags, flags); if (!hctx->sched_tags) return -ENOMEM; @@ -593,8 +596,10 @@ static void blk_mq_sched_tags_teardown(struct request_queue *q) int i; queue_for_each_hw_ctx(q, hctx, i) { + unsigned int flags = hctx->flags; + if (hctx->sched_tags) { - blk_mq_free_rq_map(hctx->sched_tags); + blk_mq_free_rq_map(hctx->sched_tags, flags); hctx->sched_tags = NULL; } } diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c index c2c9f369d777..0426f01e06a6 100644 --- a/block/blk-mq-tag.c +++ b/block/blk-mq-tag.c @@ -449,8 +449,9 @@ free_bitmap_tags: struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags, unsigned int reserved_tags, - int node, int alloc_policy) + int node, unsigned int flags) { + int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(flags); struct blk_mq_tags *tags; if (total_tags > BLK_MQ_TAG_MAX) { @@ -472,7 +473,7 @@ struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags, return tags; } -void blk_mq_free_tags(struct blk_mq_tags *tags) +void blk_mq_free_tags(struct blk_mq_tags *tags, unsigned int flags) { sbitmap_queue_free(&tags->bitmap_tags); sbitmap_queue_free(&tags->breserved_tags); @@ -494,6 +495,7 @@ int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx, */ if (tdepth > tags->nr_tags) { struct blk_mq_tag_set *set = hctx->queue->tag_set; + unsigned int flags = set->flags; struct blk_mq_tags *new; bool ret; @@ -508,17 +510,17 @@ int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx, return -EINVAL; new = blk_mq_alloc_rq_map(set, hctx->queue_num, tdepth, - tags->nr_reserved_tags); + tags->nr_reserved_tags, flags); if (!new) return -ENOMEM; ret = blk_mq_alloc_rqs(set, new, hctx->queue_num, tdepth); if (ret) { - blk_mq_free_rq_map(new); + blk_mq_free_rq_map(new, flags); return -ENOMEM; } blk_mq_free_rqs(set, *tagsptr, hctx->queue_num); - blk_mq_free_rq_map(*tagsptr); + blk_mq_free_rq_map(*tagsptr, flags); *tagsptr = new; } else { /* diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h index 918e2cee4f43..391f68d32fa1 100644 --- a/block/blk-mq-tag.h +++ b/block/blk-mq-tag.h @@ -21,9 +21,10 @@ struct blk_mq_tags { struct list_head page_list; }; - -extern struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags, unsigned int reserved_tags, int node, int alloc_policy); -extern void blk_mq_free_tags(struct blk_mq_tags *tags); +extern struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags, + unsigned int reserved_tags, + int node, unsigned int flags); 
+extern void blk_mq_free_tags(struct blk_mq_tags *tags, unsigned int flags); extern unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data); extern void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx, diff --git a/block/blk-mq.c b/block/blk-mq.c index cae46701579c..a39693dbb3c8 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -2296,20 +2296,21 @@ void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags, } } -void blk_mq_free_rq_map(struct blk_mq_tags *tags) +void blk_mq_free_rq_map(struct blk_mq_tags *tags, unsigned int flags) { kfree(tags->rqs); tags->rqs = NULL; kfree(tags->static_rqs); tags->static_rqs = NULL; - blk_mq_free_tags(tags); + blk_mq_free_tags(tags, flags); } struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set, unsigned int hctx_idx, unsigned int nr_tags, - unsigned int reserved_tags) + unsigned int reserved_tags, + unsigned int flags) { struct blk_mq_tags *tags; int node; @@ -2318,8 +2319,7 @@ struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set, if (node == NUMA_NO_NODE) node = set->numa_node; - tags = blk_mq_init_tags(nr_tags, reserved_tags, node, - BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags)); + tags = blk_mq_init_tags(nr_tags, reserved_tags, node, flags); if (!tags) return NULL; @@ -2327,7 +2327,7 @@ struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set, GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY, node); if (!tags->rqs) { - blk_mq_free_tags(tags); + blk_mq_free_tags(tags, flags); return NULL; } @@ -2336,7 +2336,7 @@ struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set, node); if (!tags->static_rqs) { kfree(tags->rqs); - blk_mq_free_tags(tags); + blk_mq_free_tags(tags, flags); return NULL; } @@ -2745,10 +2745,11 @@ static void blk_mq_init_cpu_queues(struct request_queue *q, static bool __blk_mq_alloc_map_and_request(struct blk_mq_tag_set *set, int hctx_idx) { + unsigned int flags = set->flags; int ret = 0; set->tags[hctx_idx] = blk_mq_alloc_rq_map(set, hctx_idx, - set->queue_depth, set->reserved_tags); + set->queue_depth, set->reserved_tags, flags); if (!set->tags[hctx_idx]) return false; @@ -2757,7 +2758,7 @@ static bool __blk_mq_alloc_map_and_request(struct blk_mq_tag_set *set, if (!ret) return true; - blk_mq_free_rq_map(set->tags[hctx_idx]); + blk_mq_free_rq_map(set->tags[hctx_idx], flags); set->tags[hctx_idx] = NULL; return false; } @@ -2765,9 +2766,11 @@ static bool __blk_mq_alloc_map_and_request(struct blk_mq_tag_set *set, static void blk_mq_free_map_and_requests(struct blk_mq_tag_set *set, unsigned int hctx_idx) { + unsigned int flags = set->flags; + if (set->tags && set->tags[hctx_idx]) { blk_mq_free_rqs(set, set->tags[hctx_idx], hctx_idx); - blk_mq_free_rq_map(set->tags[hctx_idx]); + blk_mq_free_rq_map(set->tags[hctx_idx], flags); set->tags[hctx_idx] = NULL; } } diff --git a/block/blk-mq.h b/block/blk-mq.h index 863a2f3346d4..b86e7fe47789 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -53,11 +53,12 @@ struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx, */ void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags, unsigned int hctx_idx); -void blk_mq_free_rq_map(struct blk_mq_tags *tags); +void blk_mq_free_rq_map(struct blk_mq_tags *tags, unsigned int flags); struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set, unsigned int hctx_idx, unsigned int nr_tags, - unsigned int reserved_tags); + unsigned int reserved_tags, + unsigned int flags); int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags, unsigned int 
hctx_idx, unsigned int depth); -- cgit v1.2.3 From 222a5ae03cdd12e7643f2f64689a39ed547650cb Mon Sep 17 00:00:00 2001 From: John Garry Date: Wed, 19 Aug 2020 23:20:23 +0800 Subject: blk-mq: Use pointers for blk_mq_tags bitmap tags Introduce pointers for the blk_mq_tags regular and reserved bitmap tags, with the goal of later being able to use a common shared tag bitmap across all HW contexts in a set. Signed-off-by: John Garry Tested-by: Don Brace #SCSI resv cmds patches used Tested-by: Douglas Gilbert Reviewed-by: Hannes Reinecke Signed-off-by: Jens Axboe --- block/bfq-iosched.c | 4 ++-- block/blk-mq-debugfs.c | 8 ++++---- block/blk-mq-tag.c | 41 ++++++++++++++++++++++------------------- block/blk-mq-tag.h | 7 +++++-- block/blk-mq.c | 8 ++++---- block/kyber-iosched.c | 4 ++-- 6 files changed, 39 insertions(+), 33 deletions(-) (limited to 'block') diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c index c34b090178a9..418ffd4b5552 100644 --- a/block/bfq-iosched.c +++ b/block/bfq-iosched.c @@ -6372,8 +6372,8 @@ static void bfq_depth_updated(struct blk_mq_hw_ctx *hctx) struct blk_mq_tags *tags = hctx->sched_tags; unsigned int min_shallow; - min_shallow = bfq_update_depths(bfqd, &tags->bitmap_tags); - sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, min_shallow); + min_shallow = bfq_update_depths(bfqd, tags->bitmap_tags); + sbitmap_queue_min_shallow_depth(tags->bitmap_tags, min_shallow); } static int bfq_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int index) diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c index a4597febff69..645b7f800cb8 100644 --- a/block/blk-mq-debugfs.c +++ b/block/blk-mq-debugfs.c @@ -452,11 +452,11 @@ static void blk_mq_debugfs_tags_show(struct seq_file *m, atomic_read(&tags->active_queues)); seq_puts(m, "\nbitmap_tags:\n"); - sbitmap_queue_show(&tags->bitmap_tags, m); + sbitmap_queue_show(tags->bitmap_tags, m); if (tags->nr_reserved_tags) { seq_puts(m, "\nbreserved_tags:\n"); - sbitmap_queue_show(&tags->breserved_tags, m); + sbitmap_queue_show(tags->breserved_tags, m); } } @@ -487,7 +487,7 @@ static int hctx_tags_bitmap_show(void *data, struct seq_file *m) if (res) goto out; if (hctx->tags) - sbitmap_bitmap_show(&hctx->tags->bitmap_tags.sb, m); + sbitmap_bitmap_show(&hctx->tags->bitmap_tags->sb, m); mutex_unlock(&q->sysfs_lock); out: @@ -521,7 +521,7 @@ static int hctx_sched_tags_bitmap_show(void *data, struct seq_file *m) if (res) goto out; if (hctx->sched_tags) - sbitmap_bitmap_show(&hctx->sched_tags->bitmap_tags.sb, m); + sbitmap_bitmap_show(&hctx->sched_tags->bitmap_tags->sb, m); mutex_unlock(&q->sysfs_lock); out: diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c index 0426f01e06a6..77fbeae05745 100644 --- a/block/blk-mq-tag.c +++ b/block/blk-mq-tag.c @@ -35,9 +35,9 @@ bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx) */ void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve) { - sbitmap_queue_wake_all(&tags->bitmap_tags); + sbitmap_queue_wake_all(tags->bitmap_tags); if (include_reserve) - sbitmap_queue_wake_all(&tags->breserved_tags); + sbitmap_queue_wake_all(tags->breserved_tags); } /* @@ -82,10 +82,10 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data) WARN_ON_ONCE(1); return BLK_MQ_NO_TAG; } - bt = &tags->breserved_tags; + bt = tags->breserved_tags; tag_offset = 0; } else { - bt = &tags->bitmap_tags; + bt = tags->bitmap_tags; tag_offset = tags->nr_reserved_tags; } @@ -131,9 +131,9 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data) data->ctx); tags = blk_mq_tags_from_data(data); if (data->flags & 
BLK_MQ_REQ_RESERVED) - bt = &tags->breserved_tags; + bt = tags->breserved_tags; else - bt = &tags->bitmap_tags; + bt = tags->bitmap_tags; /* * If destination hw queue is changed, fake wake up on @@ -167,10 +167,10 @@ void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx, const int real_tag = tag - tags->nr_reserved_tags; BUG_ON(real_tag >= tags->nr_tags); - sbitmap_queue_clear(&tags->bitmap_tags, real_tag, ctx->cpu); + sbitmap_queue_clear(tags->bitmap_tags, real_tag, ctx->cpu); } else { BUG_ON(tag >= tags->nr_reserved_tags); - sbitmap_queue_clear(&tags->breserved_tags, tag, ctx->cpu); + sbitmap_queue_clear(tags->breserved_tags, tag, ctx->cpu); } } @@ -298,9 +298,9 @@ static void __blk_mq_all_tag_iter(struct blk_mq_tags *tags, WARN_ON_ONCE(flags & BT_TAG_ITER_RESERVED); if (tags->nr_reserved_tags) - bt_tags_for_each(tags, &tags->breserved_tags, fn, priv, + bt_tags_for_each(tags, tags->breserved_tags, fn, priv, flags | BT_TAG_ITER_RESERVED); - bt_tags_for_each(tags, &tags->bitmap_tags, fn, priv, flags); + bt_tags_for_each(tags, tags->bitmap_tags, fn, priv, flags); } /** @@ -416,8 +416,8 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn, continue; if (tags->nr_reserved_tags) - bt_for_each(hctx, &tags->breserved_tags, fn, priv, true); - bt_for_each(hctx, &tags->bitmap_tags, fn, priv, false); + bt_for_each(hctx, tags->breserved_tags, fn, priv, true); + bt_for_each(hctx, tags->bitmap_tags, fn, priv, false); } blk_queue_exit(q); } @@ -435,15 +435,18 @@ static int blk_mq_init_bitmap_tags(struct blk_mq_tags *tags, unsigned int depth = tags->nr_tags - tags->nr_reserved_tags; bool round_robin = alloc_policy == BLK_TAG_ALLOC_RR; - if (bt_alloc(&tags->bitmap_tags, depth, round_robin, node)) + if (bt_alloc(&tags->__bitmap_tags, depth, round_robin, node)) return -ENOMEM; - if (bt_alloc(&tags->breserved_tags, tags->nr_reserved_tags, round_robin, - node)) + if (bt_alloc(&tags->__breserved_tags, tags->nr_reserved_tags, + round_robin, node)) goto free_bitmap_tags; + tags->bitmap_tags = &tags->__bitmap_tags; + tags->breserved_tags = &tags->__breserved_tags; + return 0; free_bitmap_tags: - sbitmap_queue_free(&tags->bitmap_tags); + sbitmap_queue_free(&tags->__bitmap_tags); return -ENOMEM; } @@ -475,8 +478,8 @@ struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags, void blk_mq_free_tags(struct blk_mq_tags *tags, unsigned int flags) { - sbitmap_queue_free(&tags->bitmap_tags); - sbitmap_queue_free(&tags->breserved_tags); + sbitmap_queue_free(&tags->__bitmap_tags); + sbitmap_queue_free(&tags->__breserved_tags); kfree(tags); } @@ -527,7 +530,7 @@ int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx, * Don't need (or can't) update reserved tags here, they * remain static and should never need resizing. 
*/ - sbitmap_queue_resize(&tags->bitmap_tags, + sbitmap_queue_resize(tags->bitmap_tags, tdepth - tags->nr_reserved_tags); } diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h index 391f68d32fa1..5b6f25bd1966 100644 --- a/block/blk-mq-tag.h +++ b/block/blk-mq-tag.h @@ -13,8 +13,11 @@ struct blk_mq_tags { atomic_t active_queues; - struct sbitmap_queue bitmap_tags; - struct sbitmap_queue breserved_tags; + struct sbitmap_queue *bitmap_tags; + struct sbitmap_queue *breserved_tags; + + struct sbitmap_queue __bitmap_tags; + struct sbitmap_queue __breserved_tags; struct request **rqs; struct request **static_rqs; diff --git a/block/blk-mq.c b/block/blk-mq.c index a39693dbb3c8..4982bb1d75db 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -1096,14 +1096,14 @@ static inline unsigned int queued_to_index(unsigned int queued) static bool __blk_mq_get_driver_tag(struct request *rq) { - struct sbitmap_queue *bt = &rq->mq_hctx->tags->bitmap_tags; + struct sbitmap_queue *bt = rq->mq_hctx->tags->bitmap_tags; unsigned int tag_offset = rq->mq_hctx->tags->nr_reserved_tags; int tag; blk_mq_tag_busy(rq->mq_hctx); if (blk_mq_tag_is_reserved(rq->mq_hctx->sched_tags, rq->internal_tag)) { - bt = &rq->mq_hctx->tags->breserved_tags; + bt = rq->mq_hctx->tags->breserved_tags; tag_offset = 0; } @@ -1145,7 +1145,7 @@ static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode, struct sbitmap_queue *sbq; list_del_init(&wait->entry); - sbq = &hctx->tags->bitmap_tags; + sbq = hctx->tags->bitmap_tags; atomic_dec(&sbq->ws_active); } spin_unlock(&hctx->dispatch_wait_lock); @@ -1163,7 +1163,7 @@ static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode, static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx, struct request *rq) { - struct sbitmap_queue *sbq = &hctx->tags->bitmap_tags; + struct sbitmap_queue *sbq = hctx->tags->bitmap_tags; struct wait_queue_head *wq; wait_queue_entry_t *wait; bool ret; diff --git a/block/kyber-iosched.c b/block/kyber-iosched.c index 6d4ba0e9688e..dc89199bc8c6 100644 --- a/block/kyber-iosched.c +++ b/block/kyber-iosched.c @@ -359,7 +359,7 @@ static unsigned int kyber_sched_tags_shift(struct request_queue *q) * All of the hardware queues have the same depth, so we can just grab * the shift of the first one. */ - return q->queue_hw_ctx[0]->sched_tags->bitmap_tags.sb.shift; + return q->queue_hw_ctx[0]->sched_tags->bitmap_tags->sb.shift; } static struct kyber_queue_data *kyber_queue_data_alloc(struct request_queue *q) @@ -502,7 +502,7 @@ static int kyber_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx) khd->batching = 0; hctx->sched_data = khd; - sbitmap_queue_min_shallow_depth(&hctx->sched_tags->bitmap_tags, + sbitmap_queue_min_shallow_depth(hctx->sched_tags->bitmap_tags, kqd->async_depth); return 0; -- cgit v1.2.3 From 32bc15afed04bd73e29d713d8db47818d6aa89af Mon Sep 17 00:00:00 2001 From: John Garry Date: Wed, 19 Aug 2020 23:20:24 +0800 Subject: blk-mq: Facilitate a shared sbitmap per tagset Some SCSI HBAs (such as HPSA, megaraid, mpt3sas, hisi_sas_v3 ..) support multiple reply queues with single hostwide tags. In addition, these drivers want to use interrupt assignment in pci_alloc_irq_vectors(PCI_IRQ_AFFINITY). However, as discussed in [0], CPU hotplug may cause in-flight IO completion to not be serviced when an interrupt is shutdown. That problem is solved in commit bf0beec0607d ("blk-mq: drain I/O when all CPUs in a hctx are offline"). 
However, to take advantage of that blk-mq feature, the HBA HW queuess are required to be mapped to that of the blk-mq hctx's; to do that, the HBA HW queues need to be exposed to the upper layer. In making that transition, the per-SCSI command request tags are no longer unique per Scsi host - they are just unique per hctx. As such, the HBA LLDD would have to generate this tag internally, which has a certain performance overhead. However another problem is that blk-mq assumes the host may accept (Scsi_host.can_queue * #hw queue) commands. In commit 6eb045e092ef ("scsi: core: avoid host-wide host_busy counter for scsi_mq"), the Scsi host busy counter was removed, which would stop the LLDD being sent more than .can_queue commands; however, it should still be ensured that the block layer does not issue more than .can_queue commands to the Scsi host. To solve this problem, introduce a shared sbitmap per blk_mq_tag_set, which may be requested at init time. New flag BLK_MQ_F_TAG_HCTX_SHARED should be set when requesting the tagset to indicate whether the shared sbitmap should be used. Even when BLK_MQ_F_TAG_HCTX_SHARED is set, a full set of tags and requests are still allocated per hctx; the reason for this is that if tags and requests were only allocated for a single hctx - like hctx0 - it may break block drivers which expect a request be associated with a specific hctx, i.e. not always hctx0. This will introduce extra memory usage. This change is based on work originally from Ming Lei in [1] and from Bart's suggestion in [2]. [0] https://lore.kernel.org/linux-block/alpine.DEB.2.21.1904051331270.1802@nanos.tec.linutronix.de/ [1] https://lore.kernel.org/linux-block/20190531022801.10003-1-ming.lei@redhat.com/ [2] https://lore.kernel.org/linux-block/ff77beff-5fd9-9f05-12b6-826922bace1f@huawei.com/T/#m3db0a602f095cbcbff27e9c884d6b4ae826144be Signed-off-by: John Garry Tested-by: Don Brace #SCSI resv cmds patches used Tested-by: Douglas Gilbert Signed-off-by: Jens Axboe --- block/blk-mq-sched.c | 8 +++++--- block/blk-mq-tag.c | 51 +++++++++++++++++++++++++++++++++++++++++++++++---- block/blk-mq-tag.h | 7 +++++++ block/blk-mq.c | 15 +++++++++++++++ block/blk-mq.h | 5 +++++ 5 files changed, 79 insertions(+), 7 deletions(-) (limited to 'block') diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c index edfc8ea805bc..86d5545342ff 100644 --- a/block/blk-mq-sched.c +++ b/block/blk-mq-sched.c @@ -560,7 +560,7 @@ static void blk_mq_sched_free_tags(struct blk_mq_tag_set *set, struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx) { - unsigned int flags = set->flags; + unsigned int flags = set->flags & ~BLK_MQ_F_TAG_HCTX_SHARED; if (hctx->sched_tags) { blk_mq_free_rqs(set, hctx->sched_tags, hctx_idx); @@ -574,7 +574,8 @@ static int blk_mq_sched_alloc_tags(struct request_queue *q, unsigned int hctx_idx) { struct blk_mq_tag_set *set = q->tag_set; - unsigned int flags = set->flags; + /* Clear HCTX_SHARED so tags are init'ed */ + unsigned int flags = set->flags & ~BLK_MQ_F_TAG_HCTX_SHARED; int ret; hctx->sched_tags = blk_mq_alloc_rq_map(set, hctx_idx, q->nr_requests, @@ -596,7 +597,8 @@ static void blk_mq_sched_tags_teardown(struct request_queue *q) int i; queue_for_each_hw_ctx(q, hctx, i) { - unsigned int flags = hctx->flags; + /* Clear HCTX_SHARED so tags are freed */ + unsigned int flags = hctx->flags & ~BLK_MQ_F_TAG_HCTX_SHARED; if (hctx->sched_tags) { blk_mq_free_rq_map(hctx->sched_tags, flags); diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c index 77fbeae05745..c6d7ebc62bdb 100644 --- a/block/blk-mq-tag.c 
+++ b/block/blk-mq-tag.c @@ -197,7 +197,7 @@ static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data) * We can hit rq == NULL here, because the tagging functions * test and set the bit before assigning ->rqs[]. */ - if (rq && rq->q == hctx->queue) + if (rq && rq->q == hctx->queue && rq->mq_hctx == hctx) return iter_data->fn(hctx, rq, iter_data->data, reserved); return true; } @@ -450,6 +450,38 @@ free_bitmap_tags: return -ENOMEM; } +int blk_mq_init_shared_sbitmap(struct blk_mq_tag_set *set, unsigned int flags) +{ + unsigned int depth = set->queue_depth - set->reserved_tags; + int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags); + bool round_robin = alloc_policy == BLK_TAG_ALLOC_RR; + int i, node = set->numa_node; + + if (bt_alloc(&set->__bitmap_tags, depth, round_robin, node)) + return -ENOMEM; + if (bt_alloc(&set->__breserved_tags, set->reserved_tags, + round_robin, node)) + goto free_bitmap_tags; + + for (i = 0; i < set->nr_hw_queues; i++) { + struct blk_mq_tags *tags = set->tags[i]; + + tags->bitmap_tags = &set->__bitmap_tags; + tags->breserved_tags = &set->__breserved_tags; + } + + return 0; +free_bitmap_tags: + sbitmap_queue_free(&set->__bitmap_tags); + return -ENOMEM; +} + +void blk_mq_exit_shared_sbitmap(struct blk_mq_tag_set *set) +{ + sbitmap_queue_free(&set->__bitmap_tags); + sbitmap_queue_free(&set->__breserved_tags); +} + struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags, unsigned int reserved_tags, int node, unsigned int flags) @@ -469,6 +501,9 @@ struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags, tags->nr_tags = total_tags; tags->nr_reserved_tags = reserved_tags; + if (flags & BLK_MQ_F_TAG_HCTX_SHARED) + return tags; + if (blk_mq_init_bitmap_tags(tags, node, alloc_policy) < 0) { kfree(tags); return NULL; @@ -478,8 +513,10 @@ struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags, void blk_mq_free_tags(struct blk_mq_tags *tags, unsigned int flags) { - sbitmap_queue_free(&tags->__bitmap_tags); - sbitmap_queue_free(&tags->__breserved_tags); + if (!(flags & BLK_MQ_F_TAG_HCTX_SHARED)) { + sbitmap_queue_free(tags->bitmap_tags); + sbitmap_queue_free(tags->breserved_tags); + } kfree(tags); } @@ -498,7 +535,8 @@ int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx, */ if (tdepth > tags->nr_tags) { struct blk_mq_tag_set *set = hctx->queue->tag_set; - unsigned int flags = set->flags; + /* Only sched tags can grow, so clear HCTX_SHARED flag */ + unsigned int flags = set->flags & ~BLK_MQ_F_TAG_HCTX_SHARED; struct blk_mq_tags *new; bool ret; @@ -537,6 +575,11 @@ int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx, return 0; } +void blk_mq_tag_resize_shared_sbitmap(struct blk_mq_tag_set *set, unsigned int size) +{ + sbitmap_queue_resize(&set->__bitmap_tags, size - set->reserved_tags); +} + /** * blk_mq_unique_tag() - return a tag that is unique queue-wide * @rq: request for which to compute a unique tag diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h index 5b6f25bd1966..8f4cce52ba2d 100644 --- a/block/blk-mq-tag.h +++ b/block/blk-mq-tag.h @@ -29,12 +29,19 @@ extern struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags, int node, unsigned int flags); extern void blk_mq_free_tags(struct blk_mq_tags *tags, unsigned int flags); +extern int blk_mq_init_shared_sbitmap(struct blk_mq_tag_set *set, + unsigned int flags); +extern void blk_mq_exit_shared_sbitmap(struct blk_mq_tag_set *set); + extern unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data); extern void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx, 
unsigned int tag); extern int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx, struct blk_mq_tags **tags, unsigned int depth, bool can_grow); +extern void blk_mq_tag_resize_shared_sbitmap(struct blk_mq_tag_set *set, + unsigned int size); + extern void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool); void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn, void *priv); diff --git a/block/blk-mq.c b/block/blk-mq.c index 4982bb1d75db..5e8fe795edf1 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -3441,11 +3441,21 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set) if (ret) goto out_free_mq_map; + if (blk_mq_is_sbitmap_shared(set->flags)) { + if (blk_mq_init_shared_sbitmap(set, set->flags)) { + ret = -ENOMEM; + goto out_free_mq_rq_maps; + } + } + mutex_init(&set->tag_list_lock); INIT_LIST_HEAD(&set->tag_list); return 0; +out_free_mq_rq_maps: + for (i = 0; i < set->nr_hw_queues; i++) + blk_mq_free_map_and_requests(set, i); out_free_mq_map: for (i = 0; i < set->nr_maps; i++) { kfree(set->map[i].mq_map); @@ -3464,6 +3474,9 @@ void blk_mq_free_tag_set(struct blk_mq_tag_set *set) for (i = 0; i < set->nr_hw_queues; i++) blk_mq_free_map_and_requests(set, i); + if (blk_mq_is_sbitmap_shared(set->flags)) + blk_mq_exit_shared_sbitmap(set); + for (j = 0; j < set->nr_maps; j++) { kfree(set->map[j].mq_map); set->map[j].mq_map = NULL; @@ -3500,6 +3513,8 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr) if (!hctx->sched_tags) { ret = blk_mq_tag_update_depth(hctx, &hctx->tags, nr, false); + if (!ret && blk_mq_is_sbitmap_shared(set->flags)) + blk_mq_tag_resize_shared_sbitmap(set, nr); } else { ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags, nr, true); diff --git a/block/blk-mq.h b/block/blk-mq.h index b86e7fe47789..3acdb56065e1 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -159,6 +159,11 @@ struct blk_mq_alloc_data { struct blk_mq_hw_ctx *hctx; }; +static inline bool blk_mq_is_sbitmap_shared(unsigned int flags) +{ + return flags & BLK_MQ_F_TAG_HCTX_SHARED; +} + static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data) { if (data->q->elevator) -- cgit v1.2.3 From a0235d230f3245aa4bd70b514d5effb24be61acd Mon Sep 17 00:00:00 2001 From: John Garry Date: Wed, 19 Aug 2020 23:20:25 +0800 Subject: blk-mq: Relocate hctx_may_queue() blk-mq.h and blk-mq-tag.h include on each other, which is less than ideal. Locate hctx_may_queue() to blk-mq.h, as it is not really tag specific code. In this way, we can drop the blk-mq-tag.h include of blk-mq.h Signed-off-by: John Garry Tested-by: Douglas Gilbert Signed-off-by: Jens Axboe --- block/blk-mq-tag.h | 33 --------------------------------- block/blk-mq.h | 32 ++++++++++++++++++++++++++++++++ 2 files changed, 32 insertions(+), 33 deletions(-) (limited to 'block') diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h index 8f4cce52ba2d..7d3e6b333a4a 100644 --- a/block/blk-mq-tag.h +++ b/block/blk-mq-tag.h @@ -2,8 +2,6 @@ #ifndef INT_BLK_MQ_TAG_H #define INT_BLK_MQ_TAG_H -#include "blk-mq.h" - /* * Tag address space map. */ @@ -81,37 +79,6 @@ static inline void blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx) __blk_mq_tag_idle(hctx); } -/* - * For shared tag users, we track the number of currently active users - * and attempt to provide a fair share of the tag depth for each of them. 
- */ -static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx, - struct sbitmap_queue *bt) -{ - unsigned int depth, users; - - if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) - return true; - if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state)) - return true; - - /* - * Don't try dividing an ant - */ - if (bt->sb.depth == 1) - return true; - - users = atomic_read(&hctx->tags->active_queues); - if (!users) - return true; - - /* - * Allow at least some tags - */ - depth = max((bt->sb.depth + users - 1) / users, 4U); - return atomic_read(&hctx->nr_active) < depth; -} - static inline bool blk_mq_tag_is_reserved(struct blk_mq_tags *tags, unsigned int tag) { diff --git a/block/blk-mq.h b/block/blk-mq.h index 3acdb56065e1..56dc37c21908 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -259,4 +259,36 @@ static inline struct blk_plug *blk_mq_plug(struct request_queue *q, return NULL; } +/* + * For shared tag users, we track the number of currently active users + * and attempt to provide a fair share of the tag depth for each of them. + */ +static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx, + struct sbitmap_queue *bt) +{ + unsigned int depth, users; + + if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) + return true; + if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state)) + return true; + + /* + * Don't try dividing an ant + */ + if (bt->sb.depth == 1) + return true; + + users = atomic_read(&hctx->tags->active_queues); + if (!users) + return true; + + /* + * Allow at least some tags + */ + depth = max((bt->sb.depth + users - 1) / users, 4U); + return atomic_read(&hctx->nr_active) < depth; +} + + #endif -- cgit v1.2.3 From bccf5e26d99c28980bd6ced474422a1b18402263 Mon Sep 17 00:00:00 2001 From: John Garry Date: Wed, 19 Aug 2020 23:20:26 +0800 Subject: blk-mq: Record nr_active_requests per queue for when using shared sbitmap The per-hctx nr_active value can no longer be used to fairly assign a share of tag depth per request queue for when using a shared sbitmap, as it does not consider that the tags are shared tags over all hctx's. For this case, record the nr_active_requests per request_queue, and make the judgement based on that value. 
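As an illustrative example (numbers are hypothetical, not taken from the patch): with a shared sbitmap of depth 256 and two active request queues, hctx_may_queue() grants each queue a fair share of max(DIV_ROUND_UP(256, 2), 4) = 128 tags. If the comparison were still made against the per-hctx nr_active counter, a queue spreading its I/O across, say, 8 hctx's could keep every individual hctx counter well below 128 while consuming far more than its 128-tag share of the shared bitmap; comparing against the per-request_queue nr_active_requests_shared_sbitmap count restores the intended fairness.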
Co-developed-with: Kashyap Desai Signed-off-by: John Garry Tested-by: Don Brace #SCSI resv cmds patches used Tested-by: Douglas Gilbert Signed-off-by: Jens Axboe --- block/blk-core.c | 2 ++ block/blk-mq.c | 4 ++-- block/blk-mq.h | 26 ++++++++++++++++++++++++-- 3 files changed, 28 insertions(+), 4 deletions(-) (limited to 'block') diff --git a/block/blk-core.c b/block/blk-core.c index 9f2a99abeeb9..093649bd252e 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -543,6 +543,8 @@ struct request_queue *blk_alloc_queue(int node_id) q->backing_dev_info->capabilities = BDI_CAP_CGROUP_WRITEBACK; q->node = node_id; + atomic_set(&q->nr_active_requests_shared_sbitmap, 0); + timer_setup(&q->backing_dev_info->laptop_mode_wb_timer, laptop_mode_timer_fn, 0); timer_setup(&q->timeout, blk_rq_timed_out_timer, 0); diff --git a/block/blk-mq.c b/block/blk-mq.c index 5e8fe795edf1..ffc5ad0c91b7 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -519,7 +519,7 @@ void blk_mq_free_request(struct request *rq) ctx->rq_completed[rq_is_sync(rq)]++; if (rq->rq_flags & RQF_MQ_INFLIGHT) - atomic_dec(&hctx->nr_active); + __blk_mq_dec_active_requests(hctx); if (unlikely(laptop_mode && !blk_rq_is_passthrough(rq))) laptop_io_completion(q->backing_dev_info); @@ -1127,7 +1127,7 @@ static bool blk_mq_get_driver_tag(struct request *rq) if ((hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) && !(rq->rq_flags & RQF_MQ_INFLIGHT)) { rq->rq_flags |= RQF_MQ_INFLIGHT; - atomic_inc(&hctx->nr_active); + __blk_mq_inc_active_requests(hctx); } hctx->tags->rqs[rq->tag] = rq; return true; diff --git a/block/blk-mq.h b/block/blk-mq.h index 56dc37c21908..25ec73078e95 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -199,6 +199,28 @@ static inline bool blk_mq_get_dispatch_budget(struct request_queue *q) return true; } +static inline void __blk_mq_inc_active_requests(struct blk_mq_hw_ctx *hctx) +{ + if (blk_mq_is_sbitmap_shared(hctx->flags)) + atomic_inc(&hctx->queue->nr_active_requests_shared_sbitmap); + else + atomic_inc(&hctx->nr_active); +} + +static inline void __blk_mq_dec_active_requests(struct blk_mq_hw_ctx *hctx) +{ + if (blk_mq_is_sbitmap_shared(hctx->flags)) + atomic_dec(&hctx->queue->nr_active_requests_shared_sbitmap); + else + atomic_dec(&hctx->nr_active); +} + +static inline int __blk_mq_active_requests(struct blk_mq_hw_ctx *hctx) +{ + if (blk_mq_is_sbitmap_shared(hctx->flags)) + return atomic_read(&hctx->queue->nr_active_requests_shared_sbitmap); + return atomic_read(&hctx->nr_active); +} static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx, struct request *rq) { @@ -207,7 +229,7 @@ static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx, if (rq->rq_flags & RQF_MQ_INFLIGHT) { rq->rq_flags &= ~RQF_MQ_INFLIGHT; - atomic_dec(&hctx->nr_active); + __blk_mq_dec_active_requests(hctx); } } @@ -287,7 +309,7 @@ static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx, * Allow at least some tags */ depth = max((bt->sb.depth + users - 1) / users, 4U); - return atomic_read(&hctx->nr_active) < depth; + return __blk_mq_active_requests(hctx) < depth; } -- cgit v1.2.3 From f1b49fdc1c64db110aa1315831e5fe0f8599fa56 Mon Sep 17 00:00:00 2001 From: John Garry Date: Wed, 19 Aug 2020 23:20:27 +0800 Subject: blk-mq: Record active_queues_shared_sbitmap per tag_set for when using shared sbitmap For when using a shared sbitmap, no longer should the number of active request queues per hctx be relied on for when judging how to share the tag bitmap. 
Instead maintain the number of active request queues per tag_set, and make the judgement based on that. Originally-from: Kashyap Desai Signed-off-by: John Garry Tested-by: Don Brace #SCSI resv cmds patches used Tested-by: Douglas Gilbert Signed-off-by: Jens Axboe --- block/blk-mq-tag.c | 33 +++++++++++++++++++++++++-------- block/blk-mq.c | 2 ++ block/blk-mq.h | 16 +++++++++++++--- 3 files changed, 40 insertions(+), 11 deletions(-) (limited to 'block') diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c index c6d7ebc62bdb..c31c4a0478a5 100644 --- a/block/blk-mq-tag.c +++ b/block/blk-mq-tag.c @@ -23,9 +23,18 @@ */ bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx) { - if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) && - !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state)) - atomic_inc(&hctx->tags->active_queues); + if (blk_mq_is_sbitmap_shared(hctx->flags)) { + struct request_queue *q = hctx->queue; + struct blk_mq_tag_set *set = q->tag_set; + + if (!test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags) && + !test_and_set_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags)) + atomic_inc(&set->active_queues_shared_sbitmap); + } else { + if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) && + !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state)) + atomic_inc(&hctx->tags->active_queues); + } return true; } @@ -47,11 +56,19 @@ void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve) void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx) { struct blk_mq_tags *tags = hctx->tags; - - if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state)) - return; - - atomic_dec(&tags->active_queues); + struct request_queue *q = hctx->queue; + struct blk_mq_tag_set *set = q->tag_set; + + if (blk_mq_is_sbitmap_shared(hctx->flags)) { + if (!test_and_clear_bit(QUEUE_FLAG_HCTX_ACTIVE, + &q->queue_flags)) + return; + atomic_dec(&set->active_queues_shared_sbitmap); + } else { + if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state)) + return; + atomic_dec(&tags->active_queues); + } blk_mq_tag_wakeup_all(tags, false); } diff --git a/block/blk-mq.c b/block/blk-mq.c index ffc5ad0c91b7..eff9d987f85b 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -3442,6 +3442,8 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set) goto out_free_mq_map; if (blk_mq_is_sbitmap_shared(set->flags)) { + atomic_set(&set->active_queues_shared_sbitmap, 0); + if (blk_mq_init_shared_sbitmap(set, set->flags)) { ret = -ENOMEM; goto out_free_mq_rq_maps; diff --git a/block/blk-mq.h b/block/blk-mq.h index 25ec73078e95..a52703c98b77 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -292,8 +292,6 @@ static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx, if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) return true; - if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state)) - return true; /* * Don't try dividing an ant @@ -301,7 +299,19 @@ static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx, if (bt->sb.depth == 1) return true; - users = atomic_read(&hctx->tags->active_queues); + if (blk_mq_is_sbitmap_shared(hctx->flags)) { + struct request_queue *q = hctx->queue; + struct blk_mq_tag_set *set = q->tag_set; + + if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &q->queue_flags)) + return true; + users = atomic_read(&set->active_queues_shared_sbitmap); + } else { + if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state)) + return true; + users = atomic_read(&hctx->tags->active_queues); + } + if (!users) return true; -- cgit v1.2.3 From b445547ec1bbd3e7bf4b1c142550942f70527d95 Mon Sep 17 00:00:00 2001 From: Kashyap Desai Date: Wed, 19 Aug 2020 
23:20:28 +0800 Subject: blk-mq, elevator: Count requests per hctx to improve performance High CPU utilization on "native_queued_spin_lock_slowpath" due to lock contention is possible for mq-deadline and bfq IO schedulers when nr_hw_queues is more than one. It is because kblockd work queue can submit IO from all online CPUs (through blk_mq_run_hw_queues()) even though only one hctx has pending commands. The elevator callback .has_work for mq-deadline and bfq scheduler considers pending work if there are any IOs on request queue but it does not account hctx context. Add a per-hctx 'elevator_queued' count to the hctx to avoid triggering the elevator even though there are no requests queued. [jpg: Relocated atomic_dec() in dd_dispatch_request(), update commit message per Kashyap] Signed-off-by: Kashyap Desai Signed-off-by: Hannes Reinecke Signed-off-by: John Garry Tested-by: Douglas Gilbert Signed-off-by: Jens Axboe --- block/bfq-iosched.c | 5 +++++ block/blk-mq.c | 1 + block/mq-deadline.c | 6 ++++++ 3 files changed, 12 insertions(+) (limited to 'block') diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c index 418ffd4b5552..a251b80d8ee5 100644 --- a/block/bfq-iosched.c +++ b/block/bfq-iosched.c @@ -4640,6 +4640,9 @@ static bool bfq_has_work(struct blk_mq_hw_ctx *hctx) { struct bfq_data *bfqd = hctx->queue->elevator->elevator_data; + if (!atomic_read(&hctx->elevator_queued)) + return false; + /* * Avoiding lock: a race on bfqd->busy_queues should cause at * most a call to dispatch for nothing @@ -5554,6 +5557,7 @@ static void bfq_insert_requests(struct blk_mq_hw_ctx *hctx, rq = list_first_entry(list, struct request, queuelist); list_del_init(&rq->queuelist); bfq_insert_request(hctx, rq, at_head); + atomic_inc(&hctx->elevator_queued); } } @@ -5933,6 +5937,7 @@ static void bfq_finish_requeue_request(struct request *rq) bfq_completed_request(bfqq, bfqd); bfq_finish_requeue_request_body(bfqq); + atomic_dec(&rq->mq_hctx->elevator_queued); spin_unlock_irqrestore(&bfqd->lock, flags); } else { diff --git a/block/blk-mq.c b/block/blk-mq.c index eff9d987f85b..4abb71459f94 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -2660,6 +2660,7 @@ blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set, goto free_hctx; atomic_set(&hctx->nr_active, 0); + atomic_set(&hctx->elevator_queued, 0); if (node == NUMA_NO_NODE) node = set->numa_node; hctx->numa_node = node; diff --git a/block/mq-deadline.c b/block/mq-deadline.c index b57470e154c8..800ac902809b 100644 --- a/block/mq-deadline.c +++ b/block/mq-deadline.c @@ -386,6 +386,8 @@ static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx) spin_lock(&dd->lock); rq = __dd_dispatch_request(dd); spin_unlock(&dd->lock); + if (rq) + atomic_dec(&rq->mq_hctx->elevator_queued); return rq; } @@ -533,6 +535,7 @@ static void dd_insert_requests(struct blk_mq_hw_ctx *hctx, rq = list_first_entry(list, struct request, queuelist); list_del_init(&rq->queuelist); dd_insert_request(hctx, rq, at_head); + atomic_inc(&hctx->elevator_queued); } spin_unlock(&dd->lock); } @@ -579,6 +582,9 @@ static bool dd_has_work(struct blk_mq_hw_ctx *hctx) { struct deadline_data *dd = hctx->queue->elevator->elevator_data; + if (!atomic_read(&hctx->elevator_queued)) + return false; + return !list_empty_careful(&dd->dispatch) || !list_empty_careful(&dd->fifo_list[0]) || !list_empty_careful(&dd->fifo_list[1]); -- cgit v1.2.3 From 384d87ef2c954fc58e6c5fd8253e4a1984f5fe02 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Fri, 4 Sep 2020 10:58:52 +0200 Subject: block: Do not discard 
buffers under a mounted filesystem Discarding blocks and buffers under a mounted filesystem is hardly anything admin wants to do. Usually it will confuse the filesystem and sometimes the loss of buffer_head state (including b_private field) can even cause crashes like: BUG: unable to handle kernel NULL pointer dereference at 0000000000000008 PGD 0 P4D 0 Oops: 0002 [#1] SMP PTI CPU: 4 PID: 203778 Comm: jbd2/dm-3-8 Kdump: loaded Tainted: G O --------- - - 4.18.0-147.5.0.5.h126.eulerosv2r9.x86_64 #1 Hardware name: Huawei RH2288H V3/BC11HGSA0, BIOS 1.57 08/11/2015 RIP: 0010:jbd2_journal_grab_journal_head+0x1b/0x40 [jbd2] ... Call Trace: __jbd2_journal_insert_checkpoint+0x23/0x70 [jbd2] jbd2_journal_commit_transaction+0x155f/0x1b60 [jbd2] kjournald2+0xbd/0x270 [jbd2] So if we don't have block device open with O_EXCL already, claim the block device while we truncate buffer cache. This makes sure any exclusive block device user (such as filesystem) cannot operate on the device while we are discarding buffer cache. Reported-by: Ye Bin Signed-off-by: Jan Kara Reviewed-by: Christoph Hellwig [axboe: fix !CONFIG_BLOCK error in truncate_bdev_range()] Signed-off-by: Jens Axboe --- block/ioctl.c | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) (limited to 'block') diff --git a/block/ioctl.c b/block/ioctl.c index bdb3bbb253d9..ae74d0409afa 100644 --- a/block/ioctl.c +++ b/block/ioctl.c @@ -112,8 +112,7 @@ static int blk_ioctl_discard(struct block_device *bdev, fmode_t mode, uint64_t range[2]; uint64_t start, len; struct request_queue *q = bdev_get_queue(bdev); - struct address_space *mapping = bdev->bd_inode->i_mapping; - + int err; if (!(mode & FMODE_WRITE)) return -EBADF; @@ -134,7 +133,11 @@ static int blk_ioctl_discard(struct block_device *bdev, fmode_t mode, if (start + len > i_size_read(bdev->bd_inode)) return -EINVAL; - truncate_inode_pages_range(mapping, start, start + len - 1); + + err = truncate_bdev_range(bdev, mode, start, start + len - 1); + if (err) + return err; + return blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_KERNEL, flags); } @@ -143,8 +146,8 @@ static int blk_ioctl_zeroout(struct block_device *bdev, fmode_t mode, unsigned long arg) { uint64_t range[2]; - struct address_space *mapping; uint64_t start, end, len; + int err; if (!(mode & FMODE_WRITE)) return -EBADF; @@ -166,8 +169,9 @@ static int blk_ioctl_zeroout(struct block_device *bdev, fmode_t mode, return -EINVAL; /* Invalidate the page cache, including dirty pages */ - mapping = bdev->bd_inode->i_mapping; - truncate_inode_pages_range(mapping, start, end); + err = truncate_bdev_range(bdev, mode, start, end); + if (err) + return err; return blkdev_issue_zeroout(bdev, start >> 9, len >> 9, GFP_KERNEL, BLKDEV_ZERO_NOUNMAP); -- cgit v1.2.3 From ddfb8b0beda1c371a44897c0359adccf030f7c58 Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Mon, 7 Sep 2020 16:36:17 +0800 Subject: block: Remove unused blk_mq_sched_free_hctx_data() Now we usually free the hctx->sched_data by e->type->ops.exit_hctx(), and no users will use blk_mq_sched_free_hctx_data() function. Remove it. 
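For reference, the pattern that makes the helper redundant is each elevator releasing its own per-hctx data from its ->exit_hctx() callback. A minimal sketch, assuming a hypothetical "foo" scheduler (not code from this patch):

static void foo_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
	/* release whatever foo's init_hctx() allocated for this hctx */
	kfree(hctx->sched_data);
	hctx->sched_data = NULL;
}

static struct elevator_type foo_sched = {
	.ops = {
		.exit_hctx	= foo_exit_hctx,
		/* ... remaining elevator_mq_ops ... */
	},
	.elevator_name	= "foo",
};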
Signed-off-by: Baolin Wang Signed-off-by: Jens Axboe --- block/blk-mq-sched.c | 15 --------------- block/blk-mq-sched.h | 3 --- 2 files changed, 18 deletions(-) (limited to 'block') diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c index 86d5545342ff..3e9596738852 100644 --- a/block/blk-mq-sched.c +++ b/block/blk-mq-sched.c @@ -18,21 +18,6 @@ #include "blk-mq-tag.h" #include "blk-wbt.h" -void blk_mq_sched_free_hctx_data(struct request_queue *q, - void (*exit)(struct blk_mq_hw_ctx *)) -{ - struct blk_mq_hw_ctx *hctx; - int i; - - queue_for_each_hw_ctx(q, hctx, i) { - if (exit && hctx->sched_data) - exit(hctx); - kfree(hctx->sched_data); - hctx->sched_data = NULL; - } -} -EXPORT_SYMBOL_GPL(blk_mq_sched_free_hctx_data); - void blk_mq_sched_assign_ioc(struct request *rq) { struct request_queue *q = rq->q; diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h index 126021fc3a11..fe62e7ce0f9e 100644 --- a/block/blk-mq-sched.h +++ b/block/blk-mq-sched.h @@ -5,9 +5,6 @@ #include "blk-mq.h" #include "blk-mq-tag.h" -void blk_mq_sched_free_hctx_data(struct request_queue *q, - void (*exit)(struct blk_mq_hw_ctx *)); - void blk_mq_sched_assign_ioc(struct request *rq); void blk_mq_sched_request_inserted(struct request *rq); -- cgit v1.2.3 From 3562614705c6f2324f79a959a2cf1ae24d142578 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 3 Sep 2020 08:07:00 +0200 Subject: block: add helper macros for queue sysfs entries Add two helpers macros to avoid boilerplate code for the queue sysfs entries. Signed-off-by: Christoph Hellwig Reviewed-by: Johannes Thumshirn Signed-off-by: Jens Axboe --- block/blk-sysfs.c | 248 +++++++++++++----------------------------------------- 1 file changed, 58 insertions(+), 190 deletions(-) (limited to 'block') diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 7dda709f3ccb..9df877d3dae2 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -547,159 +547,75 @@ static ssize_t queue_dax_show(struct request_queue *q, char *page) return queue_var_show(blk_queue_dax(q), page); } -static struct queue_sysfs_entry queue_requests_entry = { - .attr = {.name = "nr_requests", .mode = 0644 }, - .show = queue_requests_show, - .store = queue_requests_store, -}; - -static struct queue_sysfs_entry queue_ra_entry = { - .attr = {.name = "read_ahead_kb", .mode = 0644 }, - .show = queue_ra_show, - .store = queue_ra_store, -}; - -static struct queue_sysfs_entry queue_max_sectors_entry = { - .attr = {.name = "max_sectors_kb", .mode = 0644 }, - .show = queue_max_sectors_show, - .store = queue_max_sectors_store, -}; +#define QUEUE_RO_ENTRY(_prefix, _name) \ +static struct queue_sysfs_entry _prefix##_entry = { \ + .attr = { .name = _name, .mode = 0444 }, \ + .show = _prefix##_show, \ +}; + +#define QUEUE_RW_ENTRY(_prefix, _name) \ +static struct queue_sysfs_entry _prefix##_entry = { \ + .attr = { .name = _name, .mode = 0644 }, \ + .show = _prefix##_show, \ + .store = _prefix##_store, \ +}; + +QUEUE_RW_ENTRY(queue_requests, "nr_requests"); +QUEUE_RW_ENTRY(queue_ra, "read_ahead_kb"); +QUEUE_RW_ENTRY(queue_max_sectors, "max_sectors_kb"); +QUEUE_RO_ENTRY(queue_max_hw_sectors, "max_hw_sectors_kb"); +QUEUE_RO_ENTRY(queue_max_segments, "max_segments"); +QUEUE_RO_ENTRY(queue_max_integrity_segments, "max_integrity_segments"); +QUEUE_RO_ENTRY(queue_max_segment_size, "max_segment_size"); +QUEUE_RW_ENTRY(elv_iosched, "scheduler"); + +QUEUE_RO_ENTRY(queue_logical_block_size, "logical_block_size"); +QUEUE_RO_ENTRY(queue_physical_block_size, "physical_block_size"); 
+QUEUE_RO_ENTRY(queue_chunk_sectors, "chunk_sectors"); +QUEUE_RO_ENTRY(queue_io_min, "minimum_io_size"); +QUEUE_RO_ENTRY(queue_io_opt, "optimal_io_size"); + +QUEUE_RO_ENTRY(queue_max_discard_segments, "max_discard_segments"); +QUEUE_RO_ENTRY(queue_discard_granularity, "discard_granularity"); +QUEUE_RO_ENTRY(queue_discard_max_hw, "discard_max_hw_bytes"); +QUEUE_RW_ENTRY(queue_discard_max, "discard_max_bytes"); +QUEUE_RO_ENTRY(queue_discard_zeroes_data, "discard_zeroes_data"); + +QUEUE_RO_ENTRY(queue_write_same_max, "write_same_max_bytes"); +QUEUE_RO_ENTRY(queue_write_zeroes_max, "write_zeroes_max_bytes"); +QUEUE_RO_ENTRY(queue_zone_append_max, "zone_append_max_bytes"); + +QUEUE_RO_ENTRY(queue_zoned, "zoned"); +QUEUE_RO_ENTRY(queue_nr_zones, "nr_zones"); +QUEUE_RO_ENTRY(queue_max_open_zones, "max_open_zones"); +QUEUE_RO_ENTRY(queue_max_active_zones, "max_active_zones"); + +QUEUE_RW_ENTRY(queue_nomerges, "nomerges"); +QUEUE_RW_ENTRY(queue_rq_affinity, "rq_affinity"); +QUEUE_RW_ENTRY(queue_poll, "io_poll"); +QUEUE_RW_ENTRY(queue_poll_delay, "io_poll_delay"); +QUEUE_RW_ENTRY(queue_wc, "write_cache"); +QUEUE_RO_ENTRY(queue_fua, "fua"); +QUEUE_RO_ENTRY(queue_dax, "dax"); +QUEUE_RW_ENTRY(queue_io_timeout, "io_timeout"); +QUEUE_RW_ENTRY(queue_wb_lat, "wbt_lat_usec"); -static struct queue_sysfs_entry queue_max_hw_sectors_entry = { - .attr = {.name = "max_hw_sectors_kb", .mode = 0444 }, - .show = queue_max_hw_sectors_show, -}; - -static struct queue_sysfs_entry queue_max_segments_entry = { - .attr = {.name = "max_segments", .mode = 0444 }, - .show = queue_max_segments_show, -}; - -static struct queue_sysfs_entry queue_max_discard_segments_entry = { - .attr = {.name = "max_discard_segments", .mode = 0444 }, - .show = queue_max_discard_segments_show, -}; - -static struct queue_sysfs_entry queue_max_integrity_segments_entry = { - .attr = {.name = "max_integrity_segments", .mode = 0444 }, - .show = queue_max_integrity_segments_show, -}; - -static struct queue_sysfs_entry queue_max_segment_size_entry = { - .attr = {.name = "max_segment_size", .mode = 0444 }, - .show = queue_max_segment_size_show, -}; - -static struct queue_sysfs_entry queue_iosched_entry = { - .attr = {.name = "scheduler", .mode = 0644 }, - .show = elv_iosched_show, - .store = elv_iosched_store, -}; +#ifdef CONFIG_BLK_DEV_THROTTLING_LOW +QUEUE_RW_ENTRY(blk_throtl_sample_time, "throttle_sample_time"); +#endif +/* legacy alias for logical_block_size: */ static struct queue_sysfs_entry queue_hw_sector_size_entry = { .attr = {.name = "hw_sector_size", .mode = 0444 }, .show = queue_logical_block_size_show, }; -static struct queue_sysfs_entry queue_logical_block_size_entry = { - .attr = {.name = "logical_block_size", .mode = 0444 }, - .show = queue_logical_block_size_show, -}; - -static struct queue_sysfs_entry queue_physical_block_size_entry = { - .attr = {.name = "physical_block_size", .mode = 0444 }, - .show = queue_physical_block_size_show, -}; - -static struct queue_sysfs_entry queue_chunk_sectors_entry = { - .attr = {.name = "chunk_sectors", .mode = 0444 }, - .show = queue_chunk_sectors_show, -}; - -static struct queue_sysfs_entry queue_io_min_entry = { - .attr = {.name = "minimum_io_size", .mode = 0444 }, - .show = queue_io_min_show, -}; - -static struct queue_sysfs_entry queue_io_opt_entry = { - .attr = {.name = "optimal_io_size", .mode = 0444 }, - .show = queue_io_opt_show, -}; - -static struct queue_sysfs_entry queue_discard_granularity_entry = { - .attr = {.name = "discard_granularity", .mode = 0444 }, - .show = 
queue_discard_granularity_show, -}; - -static struct queue_sysfs_entry queue_discard_max_hw_entry = { - .attr = {.name = "discard_max_hw_bytes", .mode = 0444 }, - .show = queue_discard_max_hw_show, -}; - -static struct queue_sysfs_entry queue_discard_max_entry = { - .attr = {.name = "discard_max_bytes", .mode = 0644 }, - .show = queue_discard_max_show, - .store = queue_discard_max_store, -}; - -static struct queue_sysfs_entry queue_discard_zeroes_data_entry = { - .attr = {.name = "discard_zeroes_data", .mode = 0444 }, - .show = queue_discard_zeroes_data_show, -}; - -static struct queue_sysfs_entry queue_write_same_max_entry = { - .attr = {.name = "write_same_max_bytes", .mode = 0444 }, - .show = queue_write_same_max_show, -}; - -static struct queue_sysfs_entry queue_write_zeroes_max_entry = { - .attr = {.name = "write_zeroes_max_bytes", .mode = 0444 }, - .show = queue_write_zeroes_max_show, -}; - -static struct queue_sysfs_entry queue_zone_append_max_entry = { - .attr = {.name = "zone_append_max_bytes", .mode = 0444 }, - .show = queue_zone_append_max_show, -}; - static struct queue_sysfs_entry queue_nonrot_entry = { .attr = {.name = "rotational", .mode = 0644 }, .show = queue_show_nonrot, .store = queue_store_nonrot, }; -static struct queue_sysfs_entry queue_zoned_entry = { - .attr = {.name = "zoned", .mode = 0444 }, - .show = queue_zoned_show, -}; - -static struct queue_sysfs_entry queue_nr_zones_entry = { - .attr = {.name = "nr_zones", .mode = 0444 }, - .show = queue_nr_zones_show, -}; - -static struct queue_sysfs_entry queue_max_open_zones_entry = { - .attr = {.name = "max_open_zones", .mode = 0444 }, - .show = queue_max_open_zones_show, -}; - -static struct queue_sysfs_entry queue_max_active_zones_entry = { - .attr = {.name = "max_active_zones", .mode = 0444 }, - .show = queue_max_active_zones_show, -}; - -static struct queue_sysfs_entry queue_nomerges_entry = { - .attr = {.name = "nomerges", .mode = 0644 }, - .show = queue_nomerges_show, - .store = queue_nomerges_store, -}; - -static struct queue_sysfs_entry queue_rq_affinity_entry = { - .attr = {.name = "rq_affinity", .mode = 0644 }, - .show = queue_rq_affinity_show, - .store = queue_rq_affinity_store, -}; - static struct queue_sysfs_entry queue_iostats_entry = { .attr = {.name = "iostats", .mode = 0644 }, .show = queue_show_iostats, @@ -712,54 +628,6 @@ static struct queue_sysfs_entry queue_random_entry = { .store = queue_store_random, }; -static struct queue_sysfs_entry queue_poll_entry = { - .attr = {.name = "io_poll", .mode = 0644 }, - .show = queue_poll_show, - .store = queue_poll_store, -}; - -static struct queue_sysfs_entry queue_poll_delay_entry = { - .attr = {.name = "io_poll_delay", .mode = 0644 }, - .show = queue_poll_delay_show, - .store = queue_poll_delay_store, -}; - -static struct queue_sysfs_entry queue_wc_entry = { - .attr = {.name = "write_cache", .mode = 0644 }, - .show = queue_wc_show, - .store = queue_wc_store, -}; - -static struct queue_sysfs_entry queue_fua_entry = { - .attr = {.name = "fua", .mode = 0444 }, - .show = queue_fua_show, -}; - -static struct queue_sysfs_entry queue_dax_entry = { - .attr = {.name = "dax", .mode = 0444 }, - .show = queue_dax_show, -}; - -static struct queue_sysfs_entry queue_io_timeout_entry = { - .attr = {.name = "io_timeout", .mode = 0644 }, - .show = queue_io_timeout_show, - .store = queue_io_timeout_store, -}; - -static struct queue_sysfs_entry queue_wb_lat_entry = { - .attr = {.name = "wbt_lat_usec", .mode = 0644 }, - .show = queue_wb_lat_show, - .store = queue_wb_lat_store, -}; 
- -#ifdef CONFIG_BLK_DEV_THROTTLING_LOW -static struct queue_sysfs_entry throtl_sample_time_entry = { - .attr = {.name = "throttle_sample_time", .mode = 0644 }, - .show = blk_throtl_sample_time_show, - .store = blk_throtl_sample_time_store, -}; -#endif - static struct attribute *queue_attrs[] = { &queue_requests_entry.attr, &queue_ra_entry.attr, @@ -769,7 +637,7 @@ static struct attribute *queue_attrs[] = { &queue_max_discard_segments_entry.attr, &queue_max_integrity_segments_entry.attr, &queue_max_segment_size_entry.attr, - &queue_iosched_entry.attr, + &elv_iosched_entry.attr, &queue_hw_sector_size_entry.attr, &queue_logical_block_size_entry.attr, &queue_physical_block_size_entry.attr, @@ -800,7 +668,7 @@ static struct attribute *queue_attrs[] = { &queue_poll_delay_entry.attr, &queue_io_timeout_entry.attr, #ifdef CONFIG_BLK_DEV_THROTTLING_LOW - &throtl_sample_time_entry.attr, + &blk_throtl_sample_time_entry.attr, #endif NULL, }; -- cgit v1.2.3 From fc93fe145316dad6186d8eb4b4f49f68b1cba0c3 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 3 Sep 2020 08:07:01 +0200 Subject: block: make QUEUE_SYSFS_BIT_FNS more useful Switch to the naming used by the other entries so that we can use the QUEUE_RW_ENTRY helper. Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-sysfs.c | 24 +++++------------------- 1 file changed, 5 insertions(+), 19 deletions(-) (limited to 'block') diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 9df877d3dae2..81722cdcf0cb 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -260,14 +260,14 @@ static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page) #define QUEUE_SYSFS_BIT_FNS(name, flag, neg) \ static ssize_t \ -queue_show_##name(struct request_queue *q, char *page) \ +queue_##name##_show(struct request_queue *q, char *page) \ { \ int bit; \ bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags); \ return queue_var_show(neg ? !bit : bit, page); \ } \ static ssize_t \ -queue_store_##name(struct request_queue *q, const char *page, size_t count) \ +queue_##name##_store(struct request_queue *q, const char *page, size_t count) \ { \ unsigned long val; \ ssize_t ret; \ @@ -610,23 +610,9 @@ static struct queue_sysfs_entry queue_hw_sector_size_entry = { .show = queue_logical_block_size_show, }; -static struct queue_sysfs_entry queue_nonrot_entry = { - .attr = {.name = "rotational", .mode = 0644 }, - .show = queue_show_nonrot, - .store = queue_store_nonrot, -}; - -static struct queue_sysfs_entry queue_iostats_entry = { - .attr = {.name = "iostats", .mode = 0644 }, - .show = queue_show_iostats, - .store = queue_store_iostats, -}; - -static struct queue_sysfs_entry queue_random_entry = { - .attr = {.name = "add_random", .mode = 0644 }, - .show = queue_show_random, - .store = queue_store_random, -}; +QUEUE_RW_ENTRY(queue_nonrot, "rotational"); +QUEUE_RW_ENTRY(queue_iostats, "iostats"); +QUEUE_RW_ENTRY(queue_random, "add_random"); static struct attribute *queue_attrs[] = { &queue_requests_entry.attr, -- cgit v1.2.3 From 95f6f3a46fc4ee1a2b216a6b46bdf2b450f1877f Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 8 Sep 2020 16:53:29 +0200 Subject: block: add a bdev_check_media_change helper Like check_disk_changed, except that it does not call ->revalidate_disk but leaves that to the caller. 
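A minimal usage sketch (hypothetical "foo" driver and foo_revalidate() helper, not part of this patch) of how a caller is expected to pair the new helper with its own revalidation:

static int foo_open(struct block_device *bdev, fmode_t mode)
{
	/* the caller, not the block core, decides how to revalidate */
	if (bdev_check_media_change(bdev))
		foo_revalidate(bdev->bd_disk);
	return 0;
}

static const struct block_device_operations foo_fops = {
	.owner	= THIS_MODULE,
	.open	= foo_open,
	/* ... */
};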
Signed-off-by: Christoph Hellwig Reviewed-by: Johannes Thumshirn Reviewed-by: Hannes Reinecke Signed-off-by: Jens Axboe --- block/genhd.c | 29 ++++++++++++++++++++++++++++- 1 file changed, 28 insertions(+), 1 deletion(-) (limited to 'block') diff --git a/block/genhd.c b/block/genhd.c index 081f1039d936..9d060e79eb31 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -2052,7 +2052,7 @@ void disk_flush_events(struct gendisk *disk, unsigned int mask) * CONTEXT: * Might sleep. */ -unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask) +static unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask) { struct disk_events *ev = disk->ev; unsigned int pending; @@ -2090,6 +2090,33 @@ unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask) return pending; } +/** + * bdev_check_media_change - check if a removable media has been changed + * @bdev: block device to check + * + * Check whether a removable media has been changed, and attempt to free all + * dentries and inodes and invalidates all block device page cache entries in + * that case. + * + * Returns %true if the block device changed, or %false if not. + */ +bool bdev_check_media_change(struct block_device *bdev) +{ + unsigned int events; + + events = disk_clear_events(bdev->bd_disk, DISK_EVENT_MEDIA_CHANGE | + DISK_EVENT_EJECT_REQUEST); + if (!(events & DISK_EVENT_MEDIA_CHANGE)) + return false; + + if (__invalidate_device(bdev, true)) + pr_warn("VFS: busy inodes on changed media %s\n", + bdev->bd_disk->disk_name); + set_bit(BDEV_NEED_PART_SCAN, &bdev->bd_flags); + return true; +} +EXPORT_SYMBOL(bdev_check_media_change); + /* * Separate this part out so that a different pointer for clearing_ptr can be * passed in for disk_clear_events. -- cgit v1.2.3 From 192f1c6bc2d2dc59de6c39dc229156910fdf6fe8 Mon Sep 17 00:00:00 2001 From: Xianting Tian Date: Thu, 10 Sep 2020 14:15:06 +0800 Subject: blkcg: add plugging support for punt bio The test and the explanation of the patch are as below.
Before test we added more debug code in blkg_async_bio_workfn(): int count = 0 if (bios.head && bios.head->bi_next) { need_plug = true; blk_start_plug(&plug); } while ((bio = bio_list_pop(&bios))) { /*io_punt is a sysctl user interface to control the print*/ if(io_punt) { printk("[%s:%d] bio start,size:%llu,%d count=%d plug?%d\n", current->comm, current->pid, bio->bi_iter.bi_sector, (bio->bi_iter.bi_size)>>9, count++, need_plug); } submit_bio(bio); } if (need_plug) blk_finish_plug(&plug); Steps that need to be set to trigger *PUNT* io before testing: mount -t btrfs -o compress=lzo /dev/sda6 /btrfs mount -t cgroup2 nodev /cgroup2 mkdir /cgroup2/cg3 echo "+io" > /cgroup2/cgroup.subtree_control echo "8:0 wbps=1048576000" > /cgroup2/cg3/io.max #1000M/s echo $$ > /cgroup2/cg3/cgroup.procs Then use dd command to test btrfs PUNT io in current shell: dd if=/dev/zero of=/btrfs/file bs=64K count=100000 Test hardware environment as below: [root@localhost btrfs]# lscpu Architecture: x86_64 CPU op-mode(s): 32-bit, 64-bit Byte Order: Little Endian CPU(s): 32 On-line CPU(s) list: 0-31 Thread(s) per core: 2 Core(s) per socket: 8 Socket(s): 2 NUMA node(s): 2 Vendor ID: GenuineIntel With above debug code, test command and test environment, I did the tests under 3 different system loads, which are triggered by stress: 1, Run 64 threads by command "stress -c 64 &" [53615.975974] [kworker/u66:18:1490] bio start,size:45583056,8 count=0 plug?1 [53615.975980] [kworker/u66:18:1490] bio start,size:45583064,8 count=1 plug?1 [53615.975984] [kworker/u66:18:1490] bio start,size:45583072,8 count=2 plug?1 [53615.975987] [kworker/u66:18:1490] bio start,size:45583080,8 count=3 plug?1 [53615.975990] [kworker/u66:18:1490] bio start,size:45583088,8 count=4 plug?1 [53615.975993] [kworker/u66:18:1490] bio start,size:45583096,8 count=5 plug?1 ... ... [53615.977041] [kworker/u66:18:1490] bio start,size:45585480,8 count=303 plug?1 [53615.977044] [kworker/u66:18:1490] bio start,size:45585488,8 count=304 plug?1 [53615.977047] [kworker/u66:18:1490] bio start,size:45585496,8 count=305 plug?1 [53615.977050] [kworker/u66:18:1490] bio start,size:45585504,8 count=306 plug?1 [53615.977053] [kworker/u66:18:1490] bio start,size:45585512,8 count=307 plug?1 [53615.977056] [kworker/u66:18:1490] bio start,size:45585520,8 count=308 plug?1 [53615.977058] [kworker/u66:18:1490] bio start,size:45585528,8 count=309 plug?1 2, Run 32 threads by command "stress -c 32 &" [50586.290521] [kworker/u66:6:32351] bio start,size:45806496,8 count=0 plug?1 [50586.290526] [kworker/u66:6:32351] bio start,size:45806504,8 count=1 plug?1 [50586.290529] [kworker/u66:6:32351] bio start,size:45806512,8 count=2 plug?1 [50586.290531] [kworker/u66:6:32351] bio start,size:45806520,8 count=3 plug?1 [50586.290533] [kworker/u66:6:32351] bio start,size:45806528,8 count=4 plug?1 [50586.290535] [kworker/u66:6:32351] bio start,size:45806536,8 count=5 plug?1 ... ... 
[50586.299640] [kworker/u66:5:32350] bio start,size:45808576,8 count=252 plug?1 [50586.299643] [kworker/u66:5:32350] bio start,size:45808584,8 count=253 plug?1 [50586.299646] [kworker/u66:5:32350] bio start,size:45808592,8 count=254 plug?1 [50586.299649] [kworker/u66:5:32350] bio start,size:45808600,8 count=255 plug?1 [50586.299652] [kworker/u66:5:32350] bio start,size:45808608,8 count=256 plug?1 [50586.299663] [kworker/u66:5:32350] bio start,size:45808616,8 count=257 plug?1 [50586.299665] [kworker/u66:5:32350] bio start,size:45808624,8 count=258 plug?1 [50586.299668] [kworker/u66:5:32350] bio start,size:45808632,8 count=259 plug?1 3, Don't run thread by stress [50861.355246] [kworker/u66:19:32376] bio start,size:13544504,8 count=0 plug?0 [50861.355288] [kworker/u66:19:32376] bio start,size:13544512,8 count=0 plug?0 [50861.355322] [kworker/u66:19:32376] bio start,size:13544520,8 count=0 plug?0 [50861.355353] [kworker/u66:19:32376] bio start,size:13544528,8 count=0 plug?0 [50861.355392] [kworker/u66:19:32376] bio start,size:13544536,8 count=0 plug?0 [50861.355431] [kworker/u66:19:32376] bio start,size:13544544,8 count=0 plug?0 [50861.355468] [kworker/u66:19:32376] bio start,size:13544552,8 count=0 plug?0 [50861.355499] [kworker/u66:19:32376] bio start,size:13544560,8 count=0 plug?0 [50861.355532] [kworker/u66:19:32376] bio start,size:13544568,8 count=0 plug?0 [50861.355575] [kworker/u66:19:32376] bio start,size:13544576,8 count=0 plug?0 [50861.355618] [kworker/u66:19:32376] bio start,size:13544584,8 count=0 plug?0 [50861.355659] [kworker/u66:19:32376] bio start,size:13544592,8 count=0 plug?0 [50861.355740] [kworker/u66:0:32346] bio start,size:13544600,8 count=0 plug?1 [50861.355748] [kworker/u66:0:32346] bio start,size:13544608,8 count=1 plug?1 [50861.355962] [kworker/u66:2:32347] bio start,size:13544616,8 count=0 plug?0 [50861.356272] [kworker/u66:7:31962] bio start,size:13544624,8 count=0 plug?0 [50861.356446] [kworker/u66:7:31962] bio start,size:13544632,8 count=0 plug?0 [50861.356567] [kworker/u66:7:31962] bio start,size:13544640,8 count=0 plug?0 [50861.356707] [kworker/u66:19:32376] bio start,size:13544648,8 count=0 plug?0 [50861.356748] [kworker/u66:15:32355] bio start,size:13544656,8 count=0 plug?0 [50861.356825] [kworker/u66:17:31970] bio start,size:13544664,8 count=0 plug?0 Analysis of above 3 test results with different system load: >From above test, we can see more and more continuous bios can be plugged with system load increasing. When run "stress -c 64 &", 310 continuous bios are plugged; When run "stress -c 32 &", 260 continuous bios are plugged; When don't run stress, at most only 2 continuous bios are plugged, in most cases, bio_list only contains one single bio. How to explain above phenomenon: We know, in submit_bio(), if the bio is a REQ_CGROUP_PUNT io, it will queue a work to workqueue blkcg_punt_bio_wq. But when the workqueue is scheduled, it depends on the system load. When system load is low, the workqueue will be quickly scheduled, and the bio in bio_list will be quickly processed in blkg_async_bio_workfn(), so there is less chance that the same io submit thread can add multiple continuous bios to bio_list before workqueue is scheduled to run. The analysis aligned with above test "3". When system load is high, there is some delay before the workqueue can be scheduled to run, the higher the system load the greater the delay. So there is more chance that the same io submit thread can add multiple continuous bios to bio_list. 
Then when the workqueue is scheduled to run, there are more continuous bios in bio_list, which will be processed in blkg_async_bio_workfn(). The analysis aligned with above tests "1" and "2". According to the tests, IO performance improves with the patch, especially when system load is higher. Another optimization is to use the plug only when bio_list contains at least 2 bios. Signed-off-by: Xianting Tian Signed-off-by: Jens Axboe --- block/blk-cgroup.c | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'block') diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index d33dd6be1d9c..f9b55614d67d 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -119,6 +119,8 @@ static void blkg_async_bio_workfn(struct work_struct *work) async_bio_work); struct bio_list bios = BIO_EMPTY_LIST; struct bio *bio; + struct blk_plug plug; + bool need_plug = false; /* as long as there are pending bios, @blkg can't go away */ spin_lock_bh(&blkg->async_bio_lock); @@ -126,8 +128,15 @@ static void blkg_async_bio_workfn(struct work_struct *work) bio_list_init(&blkg->async_bios); spin_unlock_bh(&blkg->async_bio_lock); + /* start plug only when bio_list contains at least 2 bios */ + if (bios.head && bios.head->bi_next) { + need_plug = true; + blk_start_plug(&plug); + } while ((bio = bio_list_pop(&bios))) submit_bio(bio); + if (need_plug) + blk_finish_plug(&plug); } /** -- cgit v1.2.3 From 84ed2573c5427de47c71a69d8b0a18020d5f6dd5 Mon Sep 17 00:00:00 2001 From: Tian Tao Date: Fri, 11 Sep 2020 18:58:52 +0800 Subject: block: remove duplicate include statement in scsi_ioctl.c scsi/sg.h is included more than once; remove the one that isn't necessary. Signed-off-by: Tian Tao Signed-off-by: Jens Axboe --- block/scsi_ioctl.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'block') diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c index ef722f04f88a..4421e61c1af1 100644 --- a/block/scsi_ioctl.c +++ b/block/scsi_ioctl.c @@ -37,8 +37,6 @@ const unsigned char scsi_command_size_tbl[8] = }; EXPORT_SYMBOL(scsi_command_size_tbl); -#include <scsi/sg.h> - static int sg_get_version(int __user *p) { static const int sg_version_num = 30527; -- cgit v1.2.3 From 285008501c65a3fcee05d2c2c26cbf629ceff2f0 Mon Sep 17 00:00:00 2001 From: Ming Lei Date: Fri, 11 Sep 2020 18:41:14 +0800 Subject: blk-mq: always allow reserved allocation in hctx_may_queue NVMe shares a tagset between fabric queue and admin queue or between connect_q and NS queue, so hctx_may_queue() can be called to allocate requests for these queues. Tags can be reserved in these tagsets. Before error recovery, there are often lots of in-flight requests which can't be completed, and a new reserved request may be needed in the error recovery path. However, hctx_may_queue() can always return false because there are too many in-flight requests which can't be completed during error handling. Finally, nothing can proceed. Fix this issue by always allowing reserved tag allocation in hctx_may_queue(). This is reasonable because reserved tags are supposed to always be available. Reviewed-by: Christoph Hellwig Reviewed-by: Hannes Reinecke Cc: David Milburn Cc: Ewan D.
Milne Signed-off-by: Ming Lei Signed-off-by: Jens Axboe --- block/blk-mq-tag.c | 3 ++- block/blk-mq.c | 5 +++-- 2 files changed, 5 insertions(+), 3 deletions(-) (limited to 'block') diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c index c31c4a0478a5..aacf10decdbd 100644 --- a/block/blk-mq-tag.c +++ b/block/blk-mq-tag.c @@ -76,7 +76,8 @@ void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx) static int __blk_mq_get_tag(struct blk_mq_alloc_data *data, struct sbitmap_queue *bt) { - if (!data->q->elevator && !hctx_may_queue(data->hctx, bt)) + if (!data->q->elevator && !(data->flags & BLK_MQ_REQ_RESERVED) && + !hctx_may_queue(data->hctx, bt)) return BLK_MQ_NO_TAG; if (data->shallow_depth) diff --git a/block/blk-mq.c b/block/blk-mq.c index 4abb71459f94..e04b759add75 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -1105,10 +1105,11 @@ static bool __blk_mq_get_driver_tag(struct request *rq) if (blk_mq_tag_is_reserved(rq->mq_hctx->sched_tags, rq->internal_tag)) { bt = rq->mq_hctx->tags->breserved_tags; tag_offset = 0; + } else { + if (!hctx_may_queue(rq->mq_hctx, bt)) + return false; } - if (!hctx_may_queue(rq->mq_hctx, bt)) - return false; tag = __sbitmap_queue_get(bt); if (tag == BLK_MQ_NO_TAG) return false; -- cgit v1.2.3 From 7b26410b05f8c262688de8a689ba8e5d0c3cff01 Mon Sep 17 00:00:00 2001 From: Song Liu Date: Mon, 31 Aug 2020 15:27:23 -0700 Subject: block: introduce part_[begin|end]_io_acct These functions can be used to enable iostat for partitions on devices like md, bcache. Signed-off-by: Song Liu Reviewed-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-core.c | 39 +++++++++++++++++++++++++++++++++------ 1 file changed, 33 insertions(+), 6 deletions(-) (limited to 'block') diff --git a/block/blk-core.c b/block/blk-core.c index 093649bd252e..ca3f0f00c943 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -1324,10 +1324,9 @@ void blk_account_io_start(struct request *rq) part_stat_unlock(); } -unsigned long disk_start_io_acct(struct gendisk *disk, unsigned int sectors, - unsigned int op) +static unsigned long __part_start_io_acct(struct hd_struct *part, + unsigned int sectors, unsigned int op) { - struct hd_struct *part = &disk->part0; const int sgrp = op_stat_group(op); unsigned long now = READ_ONCE(jiffies); @@ -1340,12 +1339,26 @@ unsigned long disk_start_io_acct(struct gendisk *disk, unsigned int sectors, return now; } + +unsigned long part_start_io_acct(struct gendisk *disk, struct hd_struct **part, + struct bio *bio) +{ + *part = disk_map_sector_rcu(disk, bio->bi_iter.bi_sector); + + return __part_start_io_acct(*part, bio_sectors(bio), bio_op(bio)); +} +EXPORT_SYMBOL_GPL(part_start_io_acct); + +unsigned long disk_start_io_acct(struct gendisk *disk, unsigned int sectors, + unsigned int op) +{ + return __part_start_io_acct(&disk->part0, sectors, op); +} EXPORT_SYMBOL(disk_start_io_acct); -void disk_end_io_acct(struct gendisk *disk, unsigned int op, - unsigned long start_time) +static void __part_end_io_acct(struct hd_struct *part, unsigned int op, + unsigned long start_time) { - struct hd_struct *part = &disk->part0; const int sgrp = op_stat_group(op); unsigned long now = READ_ONCE(jiffies); unsigned long duration = now - start_time; @@ -1356,6 +1369,20 @@ void disk_end_io_acct(struct gendisk *disk, unsigned int op, part_stat_local_dec(part, in_flight[op_is_write(op)]); part_stat_unlock(); } + +void part_end_io_acct(struct hd_struct *part, struct bio *bio, + unsigned long start_time) +{ + __part_end_io_acct(part, bio_op(bio), start_time); + hd_struct_put(part); +} 
+EXPORT_SYMBOL_GPL(part_end_io_acct); + +void disk_end_io_acct(struct gendisk *disk, unsigned int op, + unsigned long start_time) +{ + __part_end_io_acct(&disk->part0, op, start_time); +} EXPORT_SYMBOL(disk_end_io_acct); /* -- cgit v1.2.3 From 769b628de0b9a7ab0b81e6eecca5ee7bf66e978d Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Fri, 11 Sep 2020 18:40:49 -0400 Subject: blk-iocost: fix divide-by-zero in transfer_surpluses() Conceptually, root_iocg->hweight_donating must be less than WEIGHT_ONE but all hweight calculations round up and thus it may end up >= WEIGHT_ONE triggering divide-by-zero and other issues. Bound the value to avoid surprises. Fixes: e08d02aa5fc9 ("blk-iocost: implement Andy's method for donation weight updates") Signed-off-by: Tejun Heo Signed-off-by: Jens Axboe --- block/blk-iocost.c | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) (limited to 'block') diff --git a/block/blk-iocost.c b/block/blk-iocost.c index fc897bb142bc..6e29b4dcf356 100644 --- a/block/blk-iocost.c +++ b/block/blk-iocost.c @@ -1881,15 +1881,21 @@ static void transfer_surpluses(struct list_head *surpluses, struct ioc_now *now) /* * Calculate the global donation rate (gamma) - the rate to adjust - * non-donating budgets by. No need to use 64bit multiplication here as - * the first operand is guaranteed to be smaller than WEIGHT_ONE - * (1<<16). + * non-donating budgets by. + * + * No need to use 64bit multiplication here as the first operand is + * guaranteed to be smaller than WEIGHT_ONE (1<<16). + * + * We know that there are beneficiary nodes and the sum of the donating + * hweights can't be whole; however, due to the round-ups during hweight + * calculations, root_iocg->hweight_donating might still end up equal to + * or greater than whole. Limit the range when calculating the divider. * * gamma = (1 - t_r') / (1 - t_r) */ gamma = DIV_ROUND_UP( (WEIGHT_ONE - root_iocg->hweight_after_donation) * WEIGHT_ONE, - WEIGHT_ONE - root_iocg->hweight_donating); + WEIGHT_ONE - min_t(u32, root_iocg->hweight_donating, WEIGHT_ONE - 1)); /* * Calculate adjusted hwi, child_adjusted_sum and inuse for the inner -- cgit v1.2.3 From aa67db24b6761277d731cc3434420283479927ca Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 14 Sep 2020 11:05:13 -0400 Subject: iocost: fix infinite loop bug in adjust_inuse_and_calc_cost() adjust_inuse_and_calc_cost() is responsible for reducing the amount of donated weights dynamically in period as the budget runs low. Because we don't want to do full donation calculation in period, we keep latching up inuse by INUSE_ADJ_STEP_PCT of the active weight of the cgroup until the resulting hweight_inuse is satisfactory. Unfortunately, the adj_step calculation was reading the active weight before acquiring ioc->lock. Because the current thread could have lost race to activate the iocg to another thread before entering this function, it may read the active weight as zero before acquiring ioc->lock. When this happens, the adj_step is calculated as zero and the incremental adjustment loop becomes an infinite one. Fix it by fetching the active weight after acquiring ioc->lock. 
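Concretely (an illustration, not new code): if the racing reader observes iocg->active == 0, then adj_step = DIV_ROUND_UP(0 * INUSE_ADJ_STEP_PCT, 100) evaluates to 0, so the "new_inuse = new_inuse + adj_step" loop below can never raise inuse enough to satisfy the budget check and spins forever; computing adj_step only after ioc->lock is held ensures the weight set by the activation path is observed.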
Fixes: b0853ab4a238 ("blk-iocost: revamp in-period donation snapbacks") Signed-off-by: Tejun Heo Signed-off-by: Jens Axboe --- block/blk-iocost.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) (limited to 'block') diff --git a/block/blk-iocost.c b/block/blk-iocost.c index 6e29b4dcf356..ef9476fca1d8 100644 --- a/block/blk-iocost.c +++ b/block/blk-iocost.c @@ -2323,9 +2323,8 @@ static u64 adjust_inuse_and_calc_cost(struct ioc_gq *iocg, u64 vtime, { struct ioc *ioc = iocg->ioc; struct ioc_margins *margins = &ioc->margins; - u32 adj_step = DIV_ROUND_UP(iocg->active * INUSE_ADJ_STEP_PCT, 100); u32 __maybe_unused old_inuse = iocg->inuse, __maybe_unused old_hwi; - u32 hwi; + u32 hwi, adj_step; s64 margin; u64 cost, new_inuse; @@ -2354,8 +2353,15 @@ static u64 adjust_inuse_and_calc_cost(struct ioc_gq *iocg, u64 vtime, return cost; } - /* bump up inuse till @abs_cost fits in the existing budget */ + /* + * Bump up inuse till @abs_cost fits in the existing budget. + * adj_step must be determined after acquiring ioc->lock - we might + * have raced and lost to another thread for activation and could + * be reading 0 iocg->active before ioc->lock which will lead to + * infinite loop. + */ new_inuse = iocg->inuse; + adj_step = DIV_ROUND_UP(iocg->active * INUSE_ADJ_STEP_PCT, 100); do { new_inuse = new_inuse + adj_step; propagate_weights(iocg, iocg->active, new_inuse, true, now); -- cgit v1.2.3 From b53b072c4bb579a57c14f8e1d4ce2f4942814000 Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Mon, 7 Sep 2020 16:10:13 +0800 Subject: blk-throttle: Fix some comments' typos Fix some comments' typos. Signed-off-by: Baolin Wang Signed-off-by: Jens Axboe --- block/blk-throttle.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) (limited to 'block') diff --git a/block/blk-throttle.c b/block/blk-throttle.c index fee3325edf27..2fc6c3e0d4a0 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c @@ -150,7 +150,7 @@ struct throtl_grp { /* user configured IOPS limits */ unsigned int iops_conf[2][LIMIT_CNT]; - /* Number of bytes disptached in current slice */ + /* Number of bytes dispatched in current slice */ uint64_t bytes_disp[2]; /* Number of bio's dispatched in current slice */ unsigned int io_disp[2]; @@ -852,7 +852,7 @@ static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw) /* * A bio has been dispatched. Also adjust slice_end. It might happen * that initially cgroup limit was very low resulting in high - * slice_end, but later limit was bumped up and bio was dispached + * slice_end, but later limit was bumped up and bio was dispatched * sooner, then we need to reduce slice_end. A high bogus slice_end * is bad because it does not allow new slice to start. */ @@ -1082,7 +1082,7 @@ static void throtl_add_bio_tg(struct bio *bio, struct throtl_qnode *qn, * If @tg doesn't currently have any bios queued in the same * direction, queueing @bio can change when @tg should be * dispatched. Mark that @tg was empty. This is automatically - * cleaered on the next tg_update_disptime(). + * cleared on the next tg_update_disptime(). 
*/ if (!sq->nr_queued[rw]) tg->flags |= THROTL_TG_WAS_EMPTY; @@ -1303,7 +1303,7 @@ again: } } } else { - /* reached the top-level, queue issueing */ + /* reached the top-level, queue issuing */ queue_work(kthrotld_workqueue, &td->dispatch_work); } out_unlock: @@ -1314,8 +1314,8 @@ out_unlock: * blk_throtl_dispatch_work_fn - work function for throtl_data->dispatch_work * @work: work item being executed * - * This function is queued for execution when bio's reach the bio_lists[] - * of throtl_data->service_queue. Those bio's are ready and issued by this + * This function is queued for execution when bios reach the bio_lists[] + * of throtl_data->service_queue. Those bios are ready and issued by this * function. */ static void blk_throtl_dispatch_work_fn(struct work_struct *work) @@ -2230,7 +2230,7 @@ again: /* * @bio passed through this layer without being throttled. - * Climb up the ladder. If we''re already at the top, it + * Climb up the ladder. If we're already at the top, it * can be executed directly. */ qn = &tg->qnode_on_parent[rw]; -- cgit v1.2.3 From ff8b22c0f258c936460b436f6d78ed49a3b2a4cf Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Mon, 7 Sep 2020 16:10:14 +0800 Subject: blk-throttle: Use readable READ/WRITE macros Use readable READ/WRITE macros instead of magic numbers. Signed-off-by: Baolin Wang Signed-off-by: Jens Axboe --- block/blk-throttle.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'block') diff --git a/block/blk-throttle.c b/block/blk-throttle.c index 2fc6c3e0d4a0..06e73ed3e774 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c @@ -1428,8 +1428,8 @@ static void tg_conf_updated(struct throtl_grp *tg, bool global) * that a group's limit are dropped suddenly and we don't want to * account recently dispatched IO with new low rate. */ - throtl_start_new_slice(tg, 0); - throtl_start_new_slice(tg, 1); + throtl_start_new_slice(tg, READ); + throtl_start_new_slice(tg, WRITE); if (tg->flags & THROTL_TG_PENDING) { tg_update_disptime(tg); -- cgit v1.2.3 From e675df2adc2f0b8cbc0c64111d55b69f7253cbed Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Mon, 7 Sep 2020 16:10:15 +0800 Subject: blk-throttle: Define readable macros instead of static variables The 'throtl_grp_quantum' and 'throtl_quantum' are both read-only variables, thus better to use readable macros instead of static variables, which can also save some spaces for .bss area. 
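The storage argument is simply that a file-scope static occupies writable data in the image and reads like something that might be changed at runtime, while a macro is a pure compile-time constant. In sketch form:

	/* before: looks like a tunable and needs storage */
	static int throtl_grp_quantum = 8;

	/* after: clearly a fixed constant, no storage at all */
	#define THROTL_GRP_QUANTUM 8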
Signed-off-by: Baolin Wang Signed-off-by: Jens Axboe --- block/blk-throttle.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'block') diff --git a/block/blk-throttle.c b/block/blk-throttle.c index 06e73ed3e774..ca9002d9d2c7 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c @@ -15,10 +15,10 @@ #include "blk-cgroup-rwstat.h" /* Max dispatch from a group in 1 round */ -static int throtl_grp_quantum = 8; +#define THROTL_GRP_QUANTUM 8 /* Total max dispatch from all groups in one round */ -static int throtl_quantum = 32; +#define THROTL_QUANTUM 32 /* Throttling is performed over a slice and after that slice is renewed */ #define DFL_THROTL_SLICE_HD (HZ / 10) @@ -1175,8 +1175,8 @@ static int throtl_dispatch_tg(struct throtl_grp *tg) { struct throtl_service_queue *sq = &tg->service_queue; unsigned int nr_reads = 0, nr_writes = 0; - unsigned int max_nr_reads = throtl_grp_quantum*3/4; - unsigned int max_nr_writes = throtl_grp_quantum - max_nr_reads; + unsigned int max_nr_reads = THROTL_GRP_QUANTUM * 3 / 4; + unsigned int max_nr_writes = THROTL_GRP_QUANTUM - max_nr_reads; struct bio *bio; /* Try to dispatch 75% READS and 25% WRITES */ @@ -1226,7 +1226,7 @@ static int throtl_select_dispatch(struct throtl_service_queue *parent_sq) if (sq->nr_queued[0] || sq->nr_queued[1]) tg_update_disptime(tg); - if (nr_disp >= throtl_quantum) + if (nr_disp >= THROTL_QUANTUM) break; } -- cgit v1.2.3 From 4599ea49d4b00a6fbaab2977e7f8359cfc7e8916 Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Mon, 7 Sep 2020 16:10:16 +0800 Subject: blk-throttle: Avoid calculating bps/iops limitation repeatedly The tg_may_dispatch() will call tg_with_in_bps_limit() and tg_with_in_iops_limit() to check if we can dispatch a bio or not, which will calculate bps/iops limitation multiple times. But tg_may_dispatch() is always called under queue lock, which means the bps/iops limitation will not change in tg_may_dispatch(). So we can calculate the bps/iops limitation only once, and pass them to tg_with_in_bps_limit() and tg_with_in_iops_limit() to avoid calculating bps/iops limitation repeatedly. Signed-off-by: Baolin Wang Signed-off-by: Jens Axboe --- block/blk-throttle.c | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) (limited to 'block') diff --git a/block/blk-throttle.c b/block/blk-throttle.c index ca9002d9d2c7..8719e37c3564 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c @@ -894,7 +894,7 @@ static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw) } static bool tg_with_in_iops_limit(struct throtl_grp *tg, struct bio *bio, - unsigned long *wait) + u32 iops_limit, unsigned long *wait) { bool rw = bio_data_dir(bio); unsigned int io_allowed; @@ -913,7 +913,7 @@ static bool tg_with_in_iops_limit(struct throtl_grp *tg, struct bio *bio, * have been trimmed. 
*/ - tmp = (u64)tg_iops_limit(tg, rw) * jiffy_elapsed_rnd; + tmp = (u64)iops_limit * jiffy_elapsed_rnd; do_div(tmp, HZ); if (tmp > UINT_MAX) @@ -936,7 +936,7 @@ static bool tg_with_in_iops_limit(struct throtl_grp *tg, struct bio *bio, } static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio, - unsigned long *wait) + u64 bps_limit, unsigned long *wait) { bool rw = bio_data_dir(bio); u64 bytes_allowed, extra_bytes, tmp; @@ -951,7 +951,7 @@ static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio, jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice); - tmp = tg_bps_limit(tg, rw) * jiffy_elapsed_rnd; + tmp = bps_limit * jiffy_elapsed_rnd; do_div(tmp, HZ); bytes_allowed = tmp; @@ -963,7 +963,7 @@ static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio, /* Calc approx time to dispatch */ extra_bytes = tg->bytes_disp[rw] + bio_size - bytes_allowed; - jiffy_wait = div64_u64(extra_bytes * HZ, tg_bps_limit(tg, rw)); + jiffy_wait = div64_u64(extra_bytes * HZ, bps_limit); if (!jiffy_wait) jiffy_wait = 1; @@ -987,6 +987,8 @@ static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio, { bool rw = bio_data_dir(bio); unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0; + u64 bps_limit = tg_bps_limit(tg, rw); + u32 iops_limit = tg_iops_limit(tg, rw); /* * Currently whole state machine of group depends on first bio @@ -998,8 +1000,7 @@ static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio, bio != throtl_peek_queued(&tg->service_queue.queued[rw])); /* If tg->bps = -1, then BW is unlimited */ - if (tg_bps_limit(tg, rw) == U64_MAX && - tg_iops_limit(tg, rw) == UINT_MAX) { + if (bps_limit == U64_MAX && iops_limit == UINT_MAX) { if (wait) *wait = 0; return true; @@ -1021,8 +1022,8 @@ static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio, jiffies + tg->td->throtl_slice); } - if (tg_with_in_bps_limit(tg, bio, &bps_wait) && - tg_with_in_iops_limit(tg, bio, &iops_wait)) { + if (tg_with_in_bps_limit(tg, bio, bps_limit, &bps_wait) && + tg_with_in_iops_limit(tg, bio, iops_limit, &iops_wait)) { if (wait) *wait = 0; return true; -- cgit v1.2.3 From 87fbeb8813bd8d48cb4b5c884793cc6b5126f6b8 Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Mon, 7 Sep 2020 16:10:17 +0800 Subject: blk-throttle: Avoid checking bps/iops limitation if bps or iops is unlimited Do not need check the bps or iops limitation if bps or iops is unlimited. Signed-off-by: Baolin Wang Signed-off-by: Jens Axboe --- block/blk-throttle.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'block') diff --git a/block/blk-throttle.c b/block/blk-throttle.c index 8719e37c3564..36ba61c5cdbd 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c @@ -901,6 +901,12 @@ static bool tg_with_in_iops_limit(struct throtl_grp *tg, struct bio *bio, unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd; u64 tmp; + if (iops_limit == UINT_MAX) { + if (wait) + *wait = 0; + return true; + } + jiffy_elapsed = jiffies - tg->slice_start[rw]; /* Round up to the next throttle slice, wait time must be nonzero */ @@ -943,6 +949,12 @@ static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio, unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd; unsigned int bio_size = throtl_bio_data_size(bio); + if (bps_limit == U64_MAX) { + if (wait) + *wait = 0; + return true; + } + jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw]; /* Slice has just started. 
Consider one slice interval */ -- cgit v1.2.3 From f952eefe74a4083142b4cdf0626ebbc07378aeb5 Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Sun, 20 Sep 2020 13:26:18 +0200 Subject: block: drop double zeroing sg_init_table zeroes its first argument, so the allocation of that argument doesn't have to. the semantic patch that makes this change is as follows: (http://coccinelle.lip6.fr/) // @@ expression x; @@ x = - kzalloc + kmalloc (...) ... sg_init_table(x,...) // Signed-off-by: Julia Lawall Signed-off-by: Jens Axboe --- block/bsg-lib.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'block') diff --git a/block/bsg-lib.c b/block/bsg-lib.c index d185396d88bb..330fede77271 100644 --- a/block/bsg-lib.c +++ b/block/bsg-lib.c @@ -207,7 +207,7 @@ static int bsg_map_buffer(struct bsg_buffer *buf, struct request *req) BUG_ON(!req->nr_phys_segments); - buf->sg_list = kzalloc(sz, GFP_KERNEL); + buf->sg_list = kmalloc(sz, GFP_KERNEL); if (!buf->sg_list) return -ENOMEM; sg_init_table(buf->sg_list, req->nr_phys_segments); -- cgit v1.2.3 From 0385971754f0aa0507ecd7f0a0f4a48781906eda Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 23 Sep 2020 17:07:13 +0200 Subject: block: fix bmd->is_null_mapped initialization bmd is allocated using kmalloc in bio_alloc_map_data, so make sure is_null_mapped is properly initialized to false for the !null_mapped case. Fixes: f3256075ba49 ("block: remove the BIO_NULL_MAPPED flag") Reported-by: Marc Hartmayer Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-map.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'block') diff --git a/block/blk-map.c b/block/blk-map.c index be118926ccf4..21630dccac62 100644 --- a/block/blk-map.c +++ b/block/blk-map.c @@ -148,6 +148,7 @@ static int bio_copy_user_iov(struct request *rq, struct rq_map_data *map_data, * shortlived one. */ bmd->is_our_pages = !map_data; + bmd->is_null_mapped = (map_data && map_data->null_mapped); nr_pages = DIV_ROUND_UP(offset + len, PAGE_SIZE); if (nr_pages > BIO_MAX_PAGES) @@ -218,8 +219,6 @@ static int bio_copy_user_iov(struct request *rq, struct rq_map_data *map_data, } bio->bi_private = bmd; - if (map_data && map_data->null_mapped) - bmd->is_null_mapped = true; bounce_bio = bio; ret = blk_rq_append_bio(rq, &bounce_bio); -- cgit v1.2.3 From 22ada802ede89829dd010a317d9b812b7df7111a Mon Sep 17 00:00:00 2001 From: Mike Snitzer Date: Mon, 21 Sep 2020 22:32:48 -0400 Subject: block: use lcm_not_zero() when stacking chunk_sectors Like 'io_opt', blk_stack_limits() should stack 'chunk_sectors' using lcm_not_zero() rather than min_not_zero() -- otherwise the final 'chunk_sectors' could result in sub-optimal alignment of IO to component devices in the IO stack. Also, if 'chunk_sectors' isn't a multiple of 'physical_block_size' then it is a bug in the driver and the device should be flagged as 'misaligned'. Reviewed-by: Ming Lei Reviewed-by: Martin K. 
Petersen Signed-off-by: Mike Snitzer Signed-off-by: Jens Axboe --- block/blk-settings.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) (limited to 'block') diff --git a/block/blk-settings.c b/block/blk-settings.c index 76a7e03bcd6c..b2e1a929a6db 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c @@ -534,6 +534,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, t->io_min = max(t->io_min, b->io_min); t->io_opt = lcm_not_zero(t->io_opt, b->io_opt); + t->chunk_sectors = lcm_not_zero(t->chunk_sectors, b->chunk_sectors); /* Physical block size a multiple of the logical block size? */ if (t->physical_block_size & (t->logical_block_size - 1)) { @@ -556,6 +557,13 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, ret = -1; } + /* chunk_sectors a multiple of the physical block size? */ + if ((t->chunk_sectors << 9) & (t->physical_block_size - 1)) { + t->chunk_sectors = 0; + t->misaligned = 1; + ret = -1; + } + t->raid_partial_stripes_expensive = max(t->raid_partial_stripes_expensive, b->raid_partial_stripes_expensive); @@ -594,10 +602,6 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, t->discard_granularity; } - if (b->chunk_sectors) - t->chunk_sectors = min_not_zero(t->chunk_sectors, - b->chunk_sectors); - t->zoned = max(t->zoned, b->zoned); return ret; } -- cgit v1.2.3 From 07d098e6bbad04030dab5b3e64149601fcb063ce Mon Sep 17 00:00:00 2001 From: Mike Snitzer Date: Mon, 21 Sep 2020 22:32:49 -0400 Subject: block: allow 'chunk_sectors' to be non-power-of-2 It is possible, albeit more unlikely, for a block device to have a non power-of-2 for chunk_sectors (e.g. 10+2 RAID6 with 128K chunk_sectors, which results in a full-stripe size of 1280K. This causes the RAID6's io_opt to be advertised as 1280K, and a stacked device _could_ then be made to use a blocksize, aka chunk_sectors, that matches non power-of-2 io_opt of underlying RAID6 -- resulting in stacked device's chunk_sectors being a non power-of-2). Update blk_queue_chunk_sectors() and blk_max_size_offset() to accommodate drivers that need a non power-of-2 chunk_sectors. Reviewed-by: Ming Lei Reviewed-by: Martin K. Petersen Signed-off-by: Mike Snitzer Signed-off-by: Jens Axboe --- block/blk-settings.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) (limited to 'block') diff --git a/block/blk-settings.c b/block/blk-settings.c index b2e1a929a6db..5ea3de48afba 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c @@ -172,15 +172,13 @@ EXPORT_SYMBOL(blk_queue_max_hw_sectors); * * Description: * If a driver doesn't want IOs to cross a given chunk size, it can set - * this limit and prevent merging across chunks. Note that the chunk size - * must currently be a power-of-2 in sectors. Also note that the block - * layer must accept a page worth of data at any offset. So if the - * crossing of chunks is a hard limitation in the driver, it must still be - * prepared to split single page bios. + * this limit and prevent merging across chunks. Note that the block layer + * must accept a page worth of data at any offset. So if the crossing of + * chunks is a hard limitation in the driver, it must still be prepared + * to split single page bios. 
**/ void blk_queue_chunk_sectors(struct request_queue *q, unsigned int chunk_sectors) { - BUG_ON(!is_power_of_2(chunk_sectors)); q->limits.chunk_sectors = chunk_sectors; } EXPORT_SYMBOL(blk_queue_chunk_sectors); -- cgit v1.2.3 From 38430f0876fa8b9549ec434f569dce03e057c076 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 21 Sep 2020 09:19:45 +0200 Subject: block: move the NEED_PART_SCAN flag to struct gendisk We can only scan for partitions on the whole disk, so move the flag from struct block_device to struct gendisk. Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/genhd.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'block') diff --git a/block/genhd.c b/block/genhd.c index 9d060e79eb31..7b56203c90a3 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -731,7 +731,7 @@ static void register_disk(struct device *parent, struct gendisk *disk, if (!bdev) goto exit; - set_bit(BDEV_NEED_PART_SCAN, &bdev->bd_flags); + set_bit(GD_NEED_PART_SCAN, &disk->state); err = blkdev_get(bdev, FMODE_READ, NULL); if (err < 0) goto exit; @@ -2112,7 +2112,7 @@ bool bdev_check_media_change(struct block_device *bdev) if (__invalidate_device(bdev, true)) pr_warn("VFS: busy inodes on changed media %s\n", bdev->bd_disk->disk_name); - set_bit(BDEV_NEED_PART_SCAN, &bdev->bd_flags); + set_bit(GD_NEED_PART_SCAN, &bdev->bd_disk->state); return true; } EXPORT_SYMBOL(bdev_check_media_change); -- cgit v1.2.3 From 9301fe734384990ef9a2463cb7aeb3b00bf5dad5 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 21 Sep 2020 09:19:46 +0200 Subject: block: cleanup partition scanning in register_disk Use blkdev_get_by_dev instead of open coding it using bdget_disk + blkdev_get, and split the code to read the partition table into a separate helper to make it a little more obvious. 
Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/genhd.c | 33 ++++++++++++++------------------- 1 file changed, 14 insertions(+), 19 deletions(-) (limited to 'block') diff --git a/block/genhd.c b/block/genhd.c index 7b56203c90a3..05fb27cbb667 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -673,11 +673,23 @@ static int exact_lock(dev_t devt, void *data) return 0; } +static void disk_scan_partitions(struct gendisk *disk) +{ + struct block_device *bdev; + + if (!get_capacity(disk) || !disk_part_scan_enabled(disk)) + return; + + set_bit(GD_NEED_PART_SCAN, &disk->state); + bdev = blkdev_get_by_dev(disk_devt(disk), FMODE_READ, NULL); + if (!IS_ERR(bdev)) + blkdev_put(bdev, FMODE_READ); +} + static void register_disk(struct device *parent, struct gendisk *disk, const struct attribute_group **groups) { struct device *ddev = disk_to_dev(disk); - struct block_device *bdev; struct disk_part_iter piter; struct hd_struct *part; int err; @@ -719,25 +731,8 @@ static void register_disk(struct device *parent, struct gendisk *disk, return; } - /* No minors to use for partitions */ - if (!disk_part_scan_enabled(disk)) - goto exit; - - /* No such device (e.g., media were just removed) */ - if (!get_capacity(disk)) - goto exit; - - bdev = bdget_disk(disk, 0); - if (!bdev) - goto exit; - - set_bit(GD_NEED_PART_SCAN, &disk->state); - err = blkdev_get(bdev, FMODE_READ, NULL); - if (err < 0) - goto exit; - blkdev_put(bdev, FMODE_READ); + disk_scan_partitions(disk); -exit: /* announce disk after possible partitions are created */ dev_set_uevent_suppress(ddev, 0); kobject_uevent(&ddev->kobj, KOBJ_ADD); -- cgit v1.2.3 From 478162821dfb4396c53f07ad82dc32cd0f525cea Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 21 Sep 2020 09:19:47 +0200 Subject: block: cleanup blkdev_bszset Use blkdev_get_by_dev instead of bdgrab + blkdev_get. Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/ioctl.c | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) (limited to 'block') diff --git a/block/ioctl.c b/block/ioctl.c index ae74d0409afa..06262c28f0c6 100644 --- a/block/ioctl.c +++ b/block/ioctl.c @@ -478,15 +478,14 @@ static int blkdev_bszset(struct block_device *bdev, fmode_t mode, if (get_user(n, argp)) return -EFAULT; - if (!(mode & FMODE_EXCL)) { - bdgrab(bdev); - if (blkdev_get(bdev, mode | FMODE_EXCL, &bdev) < 0) - return -EBUSY; - } + if (mode & FMODE_EXCL) + return set_blocksize(bdev, n); + if (IS_ERR(blkdev_get_by_dev(bdev->bd_dev, mode | FMODE_EXCL, &bdev))) + return -EBUSY; ret = set_blocksize(bdev, n); - if (!(mode & FMODE_EXCL)) - blkdev_put(bdev, mode | FMODE_EXCL); + blkdev_put(bdev, mode | FMODE_EXCL); + return ret; } -- cgit v1.2.3 From 55b2598e84e97efc5d952958cb5e34236c43276b Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 24 Sep 2020 08:51:32 +0200 Subject: bdi: initialize ->ra_pages and ->io_pages in bdi_init Set up a readahead size by default, as very few users have a good reason to change it. This means code, ecryptfs, and orangefs now set up the values while they were previously missing it, while ubifs, mtd and vboxsf manually set it to 0 to avoid readahead. 
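The bdi_init() side of this change lives in mm/ and is therefore not visible in this block/-limited series; the idea, roughly (a sketch, not the verbatim hunk), is that every freshly initialized bdi now starts with the usual readahead defaults instead of relying on each caller to set them:

	/* in bdi_init(), outside block/ (sketch) */
	bdi->ra_pages = VM_READAHEAD_PAGES;	/* default readahead window */
	bdi->io_pages = VM_READAHEAD_PAGES;	/* default upper bound for readahead IO */

which is what allows the explicit assignments in blk_alloc_queue() below to be dropped.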
Signed-off-by: Christoph Hellwig Reviewed-by: Jan Kara Acked-by: David Sterba [btrfs] Acked-by: Richard Weinberger [ubifs, mtd] Signed-off-by: Jens Axboe --- block/blk-core.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'block') diff --git a/block/blk-core.c b/block/blk-core.c index ca3f0f00c943..865d39e5be2b 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -538,8 +538,6 @@ struct request_queue *blk_alloc_queue(int node_id) if (!q->stats) goto fail_stats; - q->backing_dev_info->ra_pages = VM_READAHEAD_PAGES; - q->backing_dev_info->io_pages = VM_READAHEAD_PAGES; q->backing_dev_info->capabilities = BDI_CAP_CGROUP_WRITEBACK; q->node = node_id; -- cgit v1.2.3 From c2e4cd57cfa1f627b786c764d185fff85fd12be9 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 24 Sep 2020 08:51:34 +0200 Subject: block: lift setting the readahead size into the block layer Drivers shouldn't really mess with the readahead size, as that is a VM concept. Instead set it based on the optimal I/O size by lifting the algorithm from the md driver when registering the disk. Also set bdi->io_pages there as well by applying the same scheme based on max_sectors. To ensure the limits work well for stacking drivers a new helper is added to update the readahead limits from the block limits, which is also called from disk_stack_limits. Signed-off-by: Christoph Hellwig Reviewed-by: Johannes Thumshirn Reviewed-by: Jan Kara Reviewed-by: Mike Snitzer Reviewed-by: Martin K. Petersen Acked-by: Coly Li Signed-off-by: Jens Axboe --- block/blk-settings.c | 18 ++++++++++++++++-- block/blk-sysfs.c | 2 ++ 2 files changed, 18 insertions(+), 2 deletions(-) (limited to 'block') diff --git a/block/blk-settings.c b/block/blk-settings.c index 5ea3de48afba..4f6eb4bb1723 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c @@ -372,6 +372,19 @@ void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset) } EXPORT_SYMBOL(blk_queue_alignment_offset); +void blk_queue_update_readahead(struct request_queue *q) +{ + /* + * For read-ahead of large files to be effective, we need to read ahead + * at least twice the optimal I/O size. 
+ */ + q->backing_dev_info->ra_pages = + max(queue_io_opt(q) * 2 / PAGE_SIZE, VM_READAHEAD_PAGES); + q->backing_dev_info->io_pages = + queue_max_sectors(q) >> (PAGE_SHIFT - 9); +} +EXPORT_SYMBOL_GPL(blk_queue_update_readahead); + /** * blk_limits_io_min - set minimum request size for a device * @limits: the queue limits @@ -450,6 +463,8 @@ EXPORT_SYMBOL(blk_limits_io_opt); void blk_queue_io_opt(struct request_queue *q, unsigned int opt) { blk_limits_io_opt(&q->limits, opt); + q->backing_dev_info->ra_pages = + max(queue_io_opt(q) * 2 / PAGE_SIZE, VM_READAHEAD_PAGES); } EXPORT_SYMBOL(blk_queue_io_opt); @@ -631,8 +646,7 @@ void disk_stack_limits(struct gendisk *disk, struct block_device *bdev, top, bottom); } - t->backing_dev_info->io_pages = - t->limits.max_sectors >> (PAGE_SHIFT - 9); + blk_queue_update_readahead(disk->queue); } EXPORT_SYMBOL(disk_stack_limits); diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 81722cdcf0cb..869ed21a9edc 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -854,6 +854,8 @@ int blk_register_queue(struct gendisk *disk) percpu_ref_switch_to_percpu(&q->q_usage_counter); } + blk_queue_update_readahead(q); + ret = blk_trace_init_sysfs(dev); if (ret) return ret; -- cgit v1.2.3 From ed7b6b4f6e915cb0bc52d0000bcc63168867b6ac Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 24 Sep 2020 08:51:35 +0200 Subject: bdi: remove BDI_CAP_CGROUP_WRITEBACK Just checking SB_I_CGROUPWB for cgroup writeback support is enough. Either the file system allocates its own bdi (e.g. btrfs), in which case it is known to support cgroup writeback, or the bdi comes from the block layer, which always supports cgroup writeback. Signed-off-by: Christoph Hellwig Reviewed-by: Jan Kara Reviewed-by: Johannes Thumshirn Signed-off-by: Jens Axboe --- block/blk-core.c | 1 - 1 file changed, 1 deletion(-) (limited to 'block') diff --git a/block/blk-core.c b/block/blk-core.c index 865d39e5be2b..1cc4fa6bc7fe 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -538,7 +538,6 @@ struct request_queue *blk_alloc_queue(int node_id) if (!q->stats) goto fail_stats; - q->backing_dev_info->capabilities = BDI_CAP_CGROUP_WRITEBACK; q->node = node_id; atomic_set(&q->nr_active_requests_shared_sbitmap, 0); -- cgit v1.2.3 From 1cb039f3dc1619eb795c54aad0a98fdb379b4237 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 24 Sep 2020 08:51:38 +0200 Subject: bdi: replace BDI_CAP_STABLE_WRITES with a queue and a sb flag The BDI_CAP_STABLE_WRITES is one of the few bits of information in the backing_dev_info shared between the block drivers and the writeback code. To help untangling the dependency replace it with a queue flag and a superblock flag derived from it. This also helps with the case of e.g. a file system requiring stable writes due to its own checksumming, but not forcing it on other users of the block device like the swap code. One downside is that we an't support the stable_pages_required bdi attribute in sysfs anymore. It is replaced with a queue attribute which also is writable for easier testing. 
Signed-off-by: Christoph Hellwig Reviewed-by: Jan Kara Reviewed-by: Johannes Thumshirn Signed-off-by: Jens Axboe --- block/blk-integrity.c | 4 ++-- block/blk-mq-debugfs.c | 1 + block/blk-sysfs.c | 3 +++ 3 files changed, 6 insertions(+), 2 deletions(-) (limited to 'block') diff --git a/block/blk-integrity.c b/block/blk-integrity.c index c03705cbb9c9..2b36a8f9b813 100644 --- a/block/blk-integrity.c +++ b/block/blk-integrity.c @@ -408,7 +408,7 @@ void blk_integrity_register(struct gendisk *disk, struct blk_integrity *template bi->tuple_size = template->tuple_size; bi->tag_size = template->tag_size; - disk->queue->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES; + blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, disk->queue); #ifdef CONFIG_BLK_INLINE_ENCRYPTION if (disk->queue->ksm) { @@ -428,7 +428,7 @@ EXPORT_SYMBOL(blk_integrity_register); */ void blk_integrity_unregister(struct gendisk *disk) { - disk->queue->backing_dev_info->capabilities &= ~BDI_CAP_STABLE_WRITES; + blk_queue_flag_clear(QUEUE_FLAG_STABLE_WRITES, disk->queue); memset(&disk->queue->integrity, 0, sizeof(struct blk_integrity)); } EXPORT_SYMBOL(blk_integrity_unregister); diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c index 645b7f800cb8..3094542e12ae 100644 --- a/block/blk-mq-debugfs.c +++ b/block/blk-mq-debugfs.c @@ -116,6 +116,7 @@ static const char *const blk_queue_flag_name[] = { QUEUE_FLAG_NAME(SAME_FORCE), QUEUE_FLAG_NAME(DEAD), QUEUE_FLAG_NAME(INIT_DONE), + QUEUE_FLAG_NAME(STABLE_WRITES), QUEUE_FLAG_NAME(POLL), QUEUE_FLAG_NAME(WC), QUEUE_FLAG_NAME(FUA), diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 869ed21a9edc..76b54c7750b0 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -287,6 +287,7 @@ queue_##name##_store(struct request_queue *q, const char *page, size_t count) \ QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1); QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0); QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0); +QUEUE_SYSFS_BIT_FNS(stable_writes, STABLE_WRITES, 0); #undef QUEUE_SYSFS_BIT_FNS static ssize_t queue_zoned_show(struct request_queue *q, char *page) @@ -613,6 +614,7 @@ static struct queue_sysfs_entry queue_hw_sector_size_entry = { QUEUE_RW_ENTRY(queue_nonrot, "rotational"); QUEUE_RW_ENTRY(queue_iostats, "iostats"); QUEUE_RW_ENTRY(queue_random, "add_random"); +QUEUE_RW_ENTRY(queue_stable_writes, "stable_writes"); static struct attribute *queue_attrs[] = { &queue_requests_entry.attr, @@ -645,6 +647,7 @@ static struct attribute *queue_attrs[] = { &queue_nomerges_entry.attr, &queue_rq_affinity_entry.attr, &queue_iostats_entry.attr, + &queue_stable_writes_entry.attr, &queue_random_entry.attr, &queue_poll_entry.attr, &queue_wc_entry.attr, -- cgit v1.2.3 From fa01b1e9733fd59ecb8b5b6d85dfb481d2025fbf Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 3 Sep 2020 07:40:57 +0200 Subject: block: add a bdev_is_partition helper Add a littler helper to make the somewhat arcane bd_contains checks a little more obvious. 
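The helper itself is added in a header outside block/ and so does not show up in this diffstat; presumably it is no more than a readable test of the partition number, along the lines of:

	static inline bool bdev_is_partition(struct block_device *bdev)
	{
		return bdev->bd_partno;	/* non-zero only for partitions */
	}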
Signed-off-by: Christoph Hellwig Acked-by: Ulf Hansson Signed-off-by: Jens Axboe --- block/blk-lib.c | 2 +- block/ioctl.c | 4 ++-- block/scsi_ioctl.c | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) (limited to 'block') diff --git a/block/blk-lib.c b/block/blk-lib.c index 0d1811e57ac7..e90614fd8d6a 100644 --- a/block/blk-lib.c +++ b/block/blk-lib.c @@ -64,7 +64,7 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector, return -EINVAL; /* In case the discard request is in a partition */ - if (bdev->bd_partno) + if (bdev_is_partition(bdev)) part_offset = bdev->bd_part->start_sect; while (nr_sects) { diff --git a/block/ioctl.c b/block/ioctl.c index 06262c28f0c6..3fbc382eb926 100644 --- a/block/ioctl.c +++ b/block/ioctl.c @@ -23,7 +23,7 @@ static int blkpg_do_ioctl(struct block_device *bdev, return -EACCES; if (copy_from_user(&p, upart, sizeof(struct blkpg_partition))) return -EFAULT; - if (bdev != bdev->bd_contains) + if (bdev_is_partition(bdev)) return -EINVAL; if (p.pno <= 0) @@ -94,7 +94,7 @@ static int blkdev_reread_part(struct block_device *bdev) { int ret; - if (!disk_part_scan_enabled(bdev->bd_disk) || bdev != bdev->bd_contains) + if (!disk_part_scan_enabled(bdev->bd_disk) || bdev_is_partition(bdev)) return -EINVAL; if (!capable(CAP_SYS_ADMIN)) return -EACCES; diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c index 4421e61c1af1..d4abd1ed5a2d 100644 --- a/block/scsi_ioctl.c +++ b/block/scsi_ioctl.c @@ -852,7 +852,7 @@ EXPORT_SYMBOL(scsi_cmd_ioctl); int scsi_verify_blk_ioctl(struct block_device *bd, unsigned int cmd) { - if (bd && bd == bd->bd_contains) + if (bd && !bdev_is_partition(bd)) return 0; if (capable(CAP_SYS_RAWIO)) -- cgit v1.2.3 From 8a63a86e1f933dc7f114497408c50c58a633b8bc Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 3 Sep 2020 07:41:03 +0200 Subject: block: use bd_partno in bdevname No need to go through the hd_struct to find the partition number. Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/genhd.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'block') diff --git a/block/genhd.c b/block/genhd.c index 05fb27cbb667..e5f17f022ec7 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -85,7 +85,7 @@ char *disk_name(struct gendisk *hd, int partno, char *buf) const char *bdevname(struct block_device *bdev, char *buf) { - return disk_name(bdev->bd_disk, bdev->bd_part->partno, buf); + return disk_name(bdev->bd_disk, bdev->bd_partno, buf); } EXPORT_SYMBOL(bdevname); -- cgit v1.2.3 From 021a24460dc28e7412aecfae89f60e1847e685c0 Mon Sep 17 00:00:00 2001 From: Mike Snitzer Date: Wed, 23 Sep 2020 16:06:51 -0400 Subject: block: add QUEUE_FLAG_NOWAIT Add QUEUE_FLAG_NOWAIT to allow a block device to advertise support for REQ_NOWAIT. Bio-based devices may set QUEUE_FLAG_NOWAIT where applicable. Update QUEUE_FLAG_MQ_DEFAULT to include QUEUE_FLAG_NOWAIT. Also update submit_bio_checks() to verify it is set for REQ_NOWAIT bios. Reported-by: Konstantin Khlebnikov Suggested-by: Christoph Hellwig Signed-off-by: Mike Snitzer Reviewed-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-core.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'block') diff --git a/block/blk-core.c b/block/blk-core.c index 1cc4fa6bc7fe..4884f1e7451b 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -814,9 +814,9 @@ static noinline_for_stack bool submit_bio_checks(struct bio *bio) /* * For a REQ_NOWAIT based request, return -EOPNOTSUPP - * if queue is not a request based queue. 
+ * if queue does not support NOWAIT. */ - if ((bio->bi_opf & REQ_NOWAIT) && !queue_is_mq(q)) + if ((bio->bi_opf & REQ_NOWAIT) && !blk_queue_nowait(q)) goto not_supported; if (should_fail_bio(bio)) -- cgit v1.2.3 From ab8df828b5f6eab6377fa0c349c92a3678498256 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Thu, 17 Sep 2020 20:44:52 -0400 Subject: iocost: factor out ioc_forgive_debts() Debt reduction logic is going to be improved and expanded. Factor it out into ioc_forgive_debts() and generalize the comment a bit. No functional change. Signed-off-by: Tejun Heo Signed-off-by: Jens Axboe --- block/blk-iocost.c | 66 +++++++++++++++++++++++++++++------------------------- 1 file changed, 35 insertions(+), 31 deletions(-) (limited to 'block') diff --git a/block/blk-iocost.c b/block/blk-iocost.c index ef9476fca1d8..bbf30bb06c07 100644 --- a/block/blk-iocost.c +++ b/block/blk-iocost.c @@ -1979,6 +1979,40 @@ static void transfer_surpluses(struct list_head *surpluses, struct ioc_now *now) list_del_init(&iocg->walk_list); } +/* + * A low weight iocg can amass a large amount of debt, for example, when + * anonymous memory gets reclaimed aggressively. If the system has a lot of + * memory paired with a slow IO device, the debt can span multiple seconds or + * more. If there are no other subsequent IO issuers, the in-debt iocg may end + * up blocked paying its debt while the IO device is idle. + * + * The following protects against such cases. If the device has been + * sufficiently idle for a while, the debts are halved. + */ +static void ioc_forgive_debts(struct ioc *ioc, u64 usage_us_sum, int nr_debtors, + int nr_shortages, struct ioc_now *now) +{ + if (nr_shortages || + div64_u64(100 * usage_us_sum, now->now - ioc->period_at) >= + DEBT_BUSY_USAGE_PCT) + ioc->debt_busy_at = now->now; + + if (nr_debtors && + now->now - ioc->debt_busy_at >= DEBT_REDUCTION_IDLE_DUR) { + struct ioc_gq *iocg; + + list_for_each_entry(iocg, &ioc->active_iocgs, active_list) { + if (iocg->abs_vdebt) { + spin_lock(&iocg->waitq.lock); + iocg->abs_vdebt /= 2; + iocg_kick_waitq(iocg, true, now); + spin_unlock(&iocg->waitq.lock); + } + } + ioc->debt_busy_at = now->now; + } +} + static void ioc_timer_fn(struct timer_list *timer) { struct ioc *ioc = container_of(timer, struct ioc, timer); @@ -2171,37 +2205,7 @@ static void ioc_timer_fn(struct timer_list *timer) list_for_each_entry_safe(iocg, tiocg, &surpluses, surplus_list) list_del_init(&iocg->surplus_list); - /* - * A low weight iocg can amass a large amount of debt, for example, when - * anonymous memory gets reclaimed aggressively. If the system has a lot - * of memory paired with a slow IO device, the debt can span multiple - * seconds or more. If there are no other subsequent IO issuers, the - * in-debt iocg may end up blocked paying its debt while the IO device - * is idle. - * - * The following protects against such pathological cases. If the device - * has been sufficiently idle for a substantial amount of time, the - * debts are halved. The criteria are on the conservative side as we - * want to resolve the rare extreme cases without impacting regular - * operation by forgiving debts too readily. 
- */ - if (nr_shortages || - div64_u64(100 * usage_us_sum, now.now - ioc->period_at) >= - DEBT_BUSY_USAGE_PCT) - ioc->debt_busy_at = now.now; - - if (nr_debtors && - now.now - ioc->debt_busy_at >= DEBT_REDUCTION_IDLE_DUR) { - list_for_each_entry(iocg, &ioc->active_iocgs, active_list) { - if (iocg->abs_vdebt) { - spin_lock(&iocg->waitq.lock); - iocg->abs_vdebt /= 2; - iocg_kick_waitq(iocg, true, &now); - spin_unlock(&iocg->waitq.lock); - } - } - ioc->debt_busy_at = now.now; - } + ioc_forgive_debts(ioc, usage_us_sum, nr_debtors, nr_shortages, &now); /* * If q is getting clogged or we're missing too much, we're issuing -- cgit v1.2.3 From 33a1fe6d822b3552fcad9bbb059b2fb93566f4c9 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Thu, 17 Sep 2020 20:44:53 -0400 Subject: iocost: replace nr_shortages cond in ioc_forgive_debts() with busy_level one Debt reduction was blocked if any iocg was short on budget in the past period to avoid reducing debts while some iocgs are saturated. However, this ends up unnecessarily blocking debt reduction due to temporary local imbalances when the device is generally being underutilized, while also failing to block when the underlying device is overwhelmed and the usage becomes low from high latency. Given that debt accumulation mostly happens with swapout bursts which can significantly deteriorate the underlying device's latency response, the current logic is not great. Let's replace it with ioc->busy_level based condition so that we block debt reduction when the underlying device is being saturated. ioc_forgive_debts() call is moved after busy_level determination. Signed-off-by: Tejun Heo Signed-off-by: Jens Axboe --- block/blk-iocost.c | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) (limited to 'block') diff --git a/block/blk-iocost.c b/block/blk-iocost.c index bbf30bb06c07..c0499c294da9 100644 --- a/block/blk-iocost.c +++ b/block/blk-iocost.c @@ -287,10 +287,7 @@ enum { MIN_DELAY = 250, MAX_DELAY = 250 * USEC_PER_MSEC, - /* - * Halve debts if total usage keeps staying under 25% w/o any shortages - * for over 100ms. - */ + /* halve debts if total usage keeps staying under 25% for over 100ms */ DEBT_BUSY_USAGE_PCT = 25, DEBT_REDUCTION_IDLE_DUR = 100 * USEC_PER_MSEC, @@ -1990,9 +1987,9 @@ static void transfer_surpluses(struct list_head *surpluses, struct ioc_now *now) * sufficiently idle for a while, the debts are halved. */ static void ioc_forgive_debts(struct ioc *ioc, u64 usage_us_sum, int nr_debtors, - int nr_shortages, struct ioc_now *now) + struct ioc_now *now) { - if (nr_shortages || + if (ioc->busy_level < 0 || div64_u64(100 * usage_us_sum, now->now - ioc->period_at) >= DEBT_BUSY_USAGE_PCT) ioc->debt_busy_at = now->now; @@ -2205,8 +2202,6 @@ static void ioc_timer_fn(struct timer_list *timer) list_for_each_entry_safe(iocg, tiocg, &surpluses, surplus_list) list_del_init(&iocg->surplus_list); - ioc_forgive_debts(ioc, usage_us_sum, nr_debtors, nr_shortages, &now); - /* * If q is getting clogged or we're missing too much, we're issuing * too much IO and should lower vtime rate. If we're not missing @@ -2301,6 +2296,8 @@ static void ioc_timer_fn(struct timer_list *timer) ioc_refresh_params(ioc, false); + ioc_forgive_debts(ioc, usage_us_sum, nr_debtors, &now); + /* * This period is done. Move onto the next one. If nothing's * going on with the device, stop the timer. 
-- cgit v1.2.3 From d95178410b7706844491efb77fb0a37a69f7a5e6 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Thu, 17 Sep 2020 20:44:54 -0400 Subject: iocost: recalculate delay after debt reduction Debt sets the initial delay duration which is decayed over time. The current debt reduction halved the debt but didn't change the delay. It prevented future debts from increasing delay but didn't do anything to lower the existing delay, limiting the mechanism's ability to reduce unnecessary idling. Reset iocg->delay to 0 after debt reduction so that iocg_kick_waitq() recalculates new delay value based on the reduced debt amount. Signed-off-by: Tejun Heo Signed-off-by: Jens Axboe --- block/blk-iocost.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'block') diff --git a/block/blk-iocost.c b/block/blk-iocost.c index c0499c294da9..ffcb78126ab7 100644 --- a/block/blk-iocost.c +++ b/block/blk-iocost.c @@ -1984,7 +1984,8 @@ static void transfer_surpluses(struct list_head *surpluses, struct ioc_now *now) * up blocked paying its debt while the IO device is idle. * * The following protects against such cases. If the device has been - * sufficiently idle for a while, the debts are halved. + * sufficiently idle for a while, the debts are halved and delays are + * recalculated. */ static void ioc_forgive_debts(struct ioc *ioc, u64 usage_us_sum, int nr_debtors, struct ioc_now *now) @@ -2002,6 +2003,7 @@ static void ioc_forgive_debts(struct ioc *ioc, u64 usage_us_sum, int nr_debtors, if (iocg->abs_vdebt) { spin_lock(&iocg->waitq.lock); iocg->abs_vdebt /= 2; + iocg->delay = 0; /* kick_waitq will recalc */ iocg_kick_waitq(iocg, true, now); spin_unlock(&iocg->waitq.lock); } -- cgit v1.2.3 From c7af2a003a411853fc9c805efe96d10dcc8adb5f Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Thu, 17 Sep 2020 20:44:55 -0400 Subject: iocost: reimplement debt forgiveness using average usage Debt forgiveness logic was counting the number of consecutive !busy periods as the trigger condition. While this usually works, it can easily be thrown off by temporary fluctuations especially on configurations w/ short periods. This patch reimplements debt forgiveness so that: * Use the average usage over the forgiveness period instead of counting consecutive periods. * Debt is reduced at around the target rate (1/2 every 100ms) regardless of ioc period duration. * Usage threshold is raised to 50%. Combined with the preceding changes and the switch to average usage, this makes debt forgivness a lot more effective at reducing the amount of unnecessary idleness. * Constants are renamed with DFGV_ prefix. 
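The carried remainder is what ties the halving rate to wall-clock time rather than to the ioc period length. A worked trace with illustrative numbers, assuming usage stays below the 50% threshold throughout (DFGV_PERIOD = 100ms, ioc period = 150ms):

	/*
	 * run 1: dur = 150ms, carried rem =  0ms -> nr_cycles = 1, new rem = 50ms
	 * run 2: dur = 150ms, carried rem = 50ms -> nr_cycles = 2, new rem =  0ms
	 *
	 * i.e. three halvings every 300ms, matching the target of roughly one
	 * halving per DFGV_PERIOD regardless of how long the ioc period is.
	 */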
Signed-off-by: Tejun Heo Signed-off-by: Jens Axboe --- block/blk-iocost.c | 94 +++++++++++++++++++++++++++++++++++++++--------------- 1 file changed, 69 insertions(+), 25 deletions(-) (limited to 'block') diff --git a/block/blk-iocost.c b/block/blk-iocost.c index ffcb78126ab7..9b1f94499432 100644 --- a/block/blk-iocost.c +++ b/block/blk-iocost.c @@ -287,9 +287,9 @@ enum { MIN_DELAY = 250, MAX_DELAY = 250 * USEC_PER_MSEC, - /* halve debts if total usage keeps staying under 25% for over 100ms */ - DEBT_BUSY_USAGE_PCT = 25, - DEBT_REDUCTION_IDLE_DUR = 100 * USEC_PER_MSEC, + /* halve debts if avg usage over 100ms is under 50% */ + DFGV_USAGE_PCT = 50, + DFGV_PERIOD = 100 * USEC_PER_MSEC, /* don't let cmds which take a very long time pin lagging for too long */ MAX_LAGGING_PERIODS = 10, @@ -433,8 +433,10 @@ struct ioc { bool weights_updated; atomic_t hweight_gen; /* for lazy hweights */ - /* the last time debt cancel condition wasn't met */ - u64 debt_busy_at; + /* debt forgivness */ + u64 dfgv_period_at; + u64 dfgv_period_rem; + u64 dfgv_usage_us_sum; u64 autop_too_fast_at; u64 autop_too_slow_at; @@ -1251,7 +1253,8 @@ static bool iocg_activate(struct ioc_gq *iocg, struct ioc_now *now) if (ioc->running == IOC_IDLE) { ioc->running = IOC_RUNNING; - ioc->debt_busy_at = now->now; + ioc->dfgv_period_at = now->now; + ioc->dfgv_period_rem = 0; ioc_start_period(ioc, now); } @@ -1990,25 +1993,66 @@ static void transfer_surpluses(struct list_head *surpluses, struct ioc_now *now) static void ioc_forgive_debts(struct ioc *ioc, u64 usage_us_sum, int nr_debtors, struct ioc_now *now) { - if (ioc->busy_level < 0 || - div64_u64(100 * usage_us_sum, now->now - ioc->period_at) >= - DEBT_BUSY_USAGE_PCT) - ioc->debt_busy_at = now->now; - - if (nr_debtors && - now->now - ioc->debt_busy_at >= DEBT_REDUCTION_IDLE_DUR) { - struct ioc_gq *iocg; - - list_for_each_entry(iocg, &ioc->active_iocgs, active_list) { - if (iocg->abs_vdebt) { - spin_lock(&iocg->waitq.lock); - iocg->abs_vdebt /= 2; - iocg->delay = 0; /* kick_waitq will recalc */ - iocg_kick_waitq(iocg, true, now); - spin_unlock(&iocg->waitq.lock); - } - } - ioc->debt_busy_at = now->now; + struct ioc_gq *iocg; + u64 dur, usage_pct, nr_cycles; + + /* if no debtor, reset the cycle */ + if (!nr_debtors) { + ioc->dfgv_period_at = now->now; + ioc->dfgv_period_rem = 0; + ioc->dfgv_usage_us_sum = 0; + return; + } + + /* + * Debtors can pass through a lot of writes choking the device and we + * don't want to be forgiving debts while the device is struggling from + * write bursts. If we're missing latency targets, consider the device + * fully utilized. + */ + if (ioc->busy_level > 0) + usage_us_sum = max_t(u64, usage_us_sum, ioc->period_us); + + ioc->dfgv_usage_us_sum += usage_us_sum; + if (time_before64(now->now, ioc->dfgv_period_at + DFGV_PERIOD)) + return; + + /* + * At least DFGV_PERIOD has passed since the last period. Calculate the + * average usage and reset the period counters. + */ + dur = now->now - ioc->dfgv_period_at; + usage_pct = div64_u64(100 * ioc->dfgv_usage_us_sum, dur); + + ioc->dfgv_period_at = now->now; + ioc->dfgv_usage_us_sum = 0; + + /* if was too busy, reset everything */ + if (usage_pct > DFGV_USAGE_PCT) { + ioc->dfgv_period_rem = 0; + return; + } + + /* + * Usage is lower than threshold. Let's forgive some debts. Debt + * forgiveness runs off of the usual ioc timer but its period usually + * doesn't match ioc's. 
Compensate the difference by performing the + * reduction as many times as would fit in the duration since the last + * run and carrying over the left-over duration in @ioc->dfgv_period_rem + * - if ioc period is 75% of DFGV_PERIOD, one out of three consecutive + * reductions is doubled. + */ + nr_cycles = dur + ioc->dfgv_period_rem; + ioc->dfgv_period_rem = do_div(nr_cycles, DFGV_PERIOD); + + list_for_each_entry(iocg, &ioc->active_iocgs, active_list) { + if (!iocg->abs_vdebt) + continue; + spin_lock(&iocg->waitq.lock); + iocg->abs_vdebt >>= nr_cycles; + iocg->delay = 0; /* kick_waitq will recalc */ + iocg_kick_waitq(iocg, true, now); + spin_unlock(&iocg->waitq.lock); } } -- cgit v1.2.3 From c5a6561b8d99ea7d8df21308249ce05bce3dd466 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Thu, 17 Sep 2020 20:44:56 -0400 Subject: iocost: add iocg_forgive_debt tracepoint Signed-off-by: Tejun Heo Signed-off-by: Jens Axboe --- block/blk-iocost.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'block') diff --git a/block/blk-iocost.c b/block/blk-iocost.c index 9b1f94499432..328ae805e85f 100644 --- a/block/blk-iocost.c +++ b/block/blk-iocost.c @@ -2046,12 +2046,24 @@ static void ioc_forgive_debts(struct ioc *ioc, u64 usage_us_sum, int nr_debtors, ioc->dfgv_period_rem = do_div(nr_cycles, DFGV_PERIOD); list_for_each_entry(iocg, &ioc->active_iocgs, active_list) { + u64 __maybe_unused old_debt, __maybe_unused old_delay; + if (!iocg->abs_vdebt) continue; + spin_lock(&iocg->waitq.lock); + + old_debt = iocg->abs_vdebt; + old_delay = iocg->delay; + iocg->abs_vdebt >>= nr_cycles; iocg->delay = 0; /* kick_waitq will recalc */ iocg_kick_waitq(iocg, true, now); + + TRACE_IOCG_PATH(iocg_forgive_debt, iocg, now, usage_pct, + old_debt, iocg->abs_vdebt, + old_delay, iocg->delay); + spin_unlock(&iocg->waitq.lock); } } -- cgit v1.2.3 From bec02dbbafad534674309f8b948094900f456797 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Fri, 18 Sep 2020 14:41:27 -0400 Subject: iocost: consider iocgs with active delays for debt forgiveness An iocg may have 0 debt but non-zero delay. The current debt forgiveness logic doesn't act on such iocgs. This can lead to unexpected behaviors - an iocg with a little bit of debt will have its delay canceled through debt forgiveness but one w/o any debt but active delay will have to wait out until its delay decays out. This patch updates the debt handling logic so that it treats delays the same as debts. If either debt or delay is active, debt forgiveness logic kicks in and acts on both the same way. Also, avoid turning the debt and delay directly to zero as that can confuse state transitions. 
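In the hunk below, "x >> nr_cycles ?: 1" uses the GNU "a ?: b" shorthand for "a ? a : b": the shifted value is kept if it is non-zero, otherwise it is clamped to 1. That is the mechanism behind "avoid turning the debt and delay directly to zero":

	if (iocg->abs_vdebt)
		iocg->abs_vdebt = iocg->abs_vdebt >> nr_cycles ?: 1;

	/* equivalent, spelled out: */
	if (iocg->abs_vdebt) {
		u64 v = iocg->abs_vdebt >> nr_cycles;

		iocg->abs_vdebt = v ? v : 1;	/* never forgive straight to 0 */
	}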
Signed-off-by: Tejun Heo Signed-off-by: Jens Axboe --- block/blk-iocost.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) (limited to 'block') diff --git a/block/blk-iocost.c b/block/blk-iocost.c index 328ae805e85f..b82649c1440b 100644 --- a/block/blk-iocost.c +++ b/block/blk-iocost.c @@ -2048,7 +2048,7 @@ static void ioc_forgive_debts(struct ioc *ioc, u64 usage_us_sum, int nr_debtors, list_for_each_entry(iocg, &ioc->active_iocgs, active_list) { u64 __maybe_unused old_debt, __maybe_unused old_delay; - if (!iocg->abs_vdebt) + if (!iocg->abs_vdebt && !iocg->delay) continue; spin_lock(&iocg->waitq.lock); @@ -2056,8 +2056,11 @@ static void ioc_forgive_debts(struct ioc *ioc, u64 usage_us_sum, int nr_debtors, old_debt = iocg->abs_vdebt; old_delay = iocg->delay; - iocg->abs_vdebt >>= nr_cycles; - iocg->delay = 0; /* kick_waitq will recalc */ + if (iocg->abs_vdebt) + iocg->abs_vdebt = iocg->abs_vdebt >> nr_cycles ?: 1; + if (iocg->delay) + iocg->delay = iocg->delay >> nr_cycles ?: 1; + iocg_kick_waitq(iocg, true, now); TRACE_IOCG_PATH(iocg_forgive_debt, iocg, now, usage_pct, @@ -2129,7 +2132,7 @@ static void ioc_timer_fn(struct timer_list *timer) iocg->delay) { /* might be oversleeping vtime / hweight changes, kick */ iocg_kick_waitq(iocg, true, &now); - if (iocg->abs_vdebt) + if (iocg->abs_vdebt || iocg->delay) nr_debtors++; } else if (iocg_is_idle(iocg)) { /* no waiter and idle, deactivate */ -- cgit v1.2.3 From 8229cca8c3ea70bcb3b73aa105ae1d00aafbd3c8 Mon Sep 17 00:00:00 2001 From: Xianting Tian Date: Sat, 26 Sep 2020 10:39:47 +0800 Subject: blk-mq: add cond_resched() in __blk_mq_alloc_rq_maps() We found blk_mq_alloc_rq_maps() takes more time in kernel space when testing nvme device hot-plugging. The test and anlysis as below. Debug code, 1, blk_mq_alloc_rq_maps(): u64 start, end; depth = set->queue_depth; start = ktime_get_ns(); pr_err("[%d:%s switch:%ld,%ld] queue depth %d, nr_hw_queues %d\n", current->pid, current->comm, current->nvcsw, current->nivcsw, set->queue_depth, set->nr_hw_queues); do { err = __blk_mq_alloc_rq_maps(set); if (!err) break; set->queue_depth >>= 1; if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) { err = -ENOMEM; break; } } while (set->queue_depth); end = ktime_get_ns(); pr_err("[%d:%s switch:%ld,%ld] all hw queues init cost time %lld ns\n", current->pid, current->comm, current->nvcsw, current->nivcsw, end - start); 2, __blk_mq_alloc_rq_maps(): u64 start, end; for (i = 0; i < set->nr_hw_queues; i++) { start = ktime_get_ns(); if (!__blk_mq_alloc_rq_map(set, i)) goto out_unwind; end = ktime_get_ns(); pr_err("hw queue %d init cost time %lld ns\n", i, end - start); } Test nvme hot-plugging with above debug code, we found it totally cost more than 3ms in kernel space without being scheduled out when alloc rqs for all 16 hw queues with depth 1023, each hw queue cost about 140-250us. The cost time will be increased with hw queue number and queue depth increasing. And in an extreme case, if __blk_mq_alloc_rq_maps() returns -ENOMEM, it will try "queue_depth >>= 1", more time will be consumed. 
[ 428.428771] nvme nvme0: pci function 10000:01:00.0 [ 428.428798] nvme 10000:01:00.0: enabling device (0000 -> 0002) [ 428.428806] pcieport 10000:00:00.0: can't derive routing for PCI INT A [ 428.428809] nvme 10000:01:00.0: PCI INT A: no GSI [ 432.593374] [4688:kworker/u33:8 switch:663,2] queue depth 30, nr_hw_queues 1 [ 432.593404] hw queue 0 init cost time 22883 ns [ 432.593408] [4688:kworker/u33:8 switch:663,2] all hw queues init cost time 35960 ns [ 432.595953] nvme nvme0: 16/0/0 default/read/poll queues [ 432.595958] [4688:kworker/u33:8 switch:700,2] queue depth 1023, nr_hw_queues 16 [ 432.596203] hw queue 0 init cost time 242630 ns [ 432.596441] hw queue 1 init cost time 235913 ns [ 432.596659] hw queue 2 init cost time 216461 ns [ 432.596877] hw queue 3 init cost time 215851 ns [ 432.597107] hw queue 4 init cost time 228406 ns [ 432.597336] hw queue 5 init cost time 227298 ns [ 432.597564] hw queue 6 init cost time 224633 ns [ 432.597785] hw queue 7 init cost time 219954 ns [ 432.597937] hw queue 8 init cost time 150930 ns [ 432.598082] hw queue 9 init cost time 143496 ns [ 432.598231] hw queue 10 init cost time 147261 ns [ 432.598397] hw queue 11 init cost time 164522 ns [ 432.598542] hw queue 12 init cost time 143401 ns [ 432.598692] hw queue 13 init cost time 148934 ns [ 432.598841] hw queue 14 init cost time 147194 ns [ 432.598991] hw queue 15 init cost time 148942 ns [ 432.598993] [4688:kworker/u33:8 switch:700,2] all hw queues init cost time 3035099 ns [ 432.602611] nvme0n1: p1 So use this patch to trigger schedule between each hw queue init, to avoid other threads getting stuck. It is not in atomic context when executing __blk_mq_alloc_rq_maps(), so it is safe to call cond_resched(). Signed-off-by: Xianting Tian Reviewed-by: Bart Van Assche Signed-off-by: Jens Axboe --- block/blk-mq.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'block') diff --git a/block/blk-mq.c b/block/blk-mq.c index e04b759add75..176698b2285b 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -3261,9 +3261,11 @@ static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set) { int i; - for (i = 0; i < set->nr_hw_queues; i++) + for (i = 0; i < set->nr_hw_queues; i++) { if (!__blk_mq_alloc_map_and_request(set, i)) goto out_unwind; + cond_resched(); + } return 0; -- cgit v1.2.3 From 76cffccd606acffde1b91e8b029b39b5fd1a3117 Mon Sep 17 00:00:00 2001 From: yangerkun Date: Sat, 19 Sep 2020 11:54:25 +0800 Subject: block-mq: fix comments in blk_mq_queue_tag_busy_iter 'f5bbbbe4d635 ("blk-mq: sync the update nr_hw_queues with blk_mq_queue_tag_busy_iter")' introduce a bug what we may sleep between rcu lock. Then '530ca2c9bd69 ("blk-mq: Allow blocking queue tag iter callbacks")' fix it by get request_queue's ref. And 'a9a808084d6a ("block: Remove the synchronize_rcu() call from __blk_mq_update_nr_hw_queues()")' remove the synchronize_rcu in __blk_mq_update_nr_hw_queues. We need update the confused comments in blk_mq_queue_tag_busy_iter. Signed-off-by: yangerkun Signed-off-by: Jens Axboe --- block/blk-mq-tag.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'block') diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c index aacf10decdbd..9c92053e704d 100644 --- a/block/blk-mq-tag.c +++ b/block/blk-mq-tag.c @@ -416,9 +416,7 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn, /* * __blk_mq_update_nr_hw_queues() updates nr_hw_queues and queue_hw_ctx * while the queue is frozen. So we can use q_usage_counter to avoid - * racing with it. 
__blk_mq_update_nr_hw_queues() uses - * synchronize_rcu() to ensure this function left the critical section - * below. + * racing with it. */ if (!percpu_ref_tryget(&q->q_usage_counter)) return; -- cgit v1.2.3
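For context, the q_usage_counter reference that this comment now describes brackets the whole iteration; a condensed sketch of the surrounding function (simplified, details omitted):

	void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
					void *priv)
	{
		/* hold off __blk_mq_update_nr_hw_queues() while we walk the tags */
		if (!percpu_ref_tryget(&q->q_usage_counter))
			return;

		/* ... iterate each hctx's tag bitmaps, calling fn() on busy requests ... */

		blk_queue_exit(q);	/* drops q_usage_counter again */
	}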