path: root/block/blk-mq-tag.c
author    Jens Axboe <axboe@kernel.dk>  2021-10-09 13:10:39 -0600
committer Jens Axboe <axboe@kernel.dk>  2021-10-18 06:17:35 -0600
commit    349302da83529539040d2516de1deec1e09f491c (patch)
tree      3d8049f24e9c7f2c32ef7ae88322d6828d50af59 /block/blk-mq-tag.c
parent    9672b0d43782047b1825a96bafee1b6aefa35bc2 (diff)
block: improve batched tag allocation
Add a blk_mq_get_tags() helper, which uses the new sbitmap API for allocating a batch of tags all at once. This both simplifies the block code for batched allocation and is more efficient than doing repeated calls into __sbitmap_queue_get(). It reduces the sbitmap overhead in peak runs from ~3% to ~1%, and yields a performance increase from 6.6M IOPS to 6.8M IOPS on a single CPU core.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
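For illustration: the helper pairs a word-sized bitmask of allocated tags with a base offset, and each set bit decodes to the tag number offset + bit. Below is a minimal, self-contained userspace C sketch of that decode step using hypothetical values (the real in-kernel consumer is outside this file's diffstat); __builtin_ctzl() is a GCC/Clang builtin:

    #include <stdio.h>

    int main(void)
    {
        /* Hypothetical stand-ins for what blk_mq_get_tags() hands back:
         * a mask of allocated tags within one sbitmap word, and the
         * offset of bit 0 in the tag space (reserved tags added in). */
        unsigned long tag_mask = 0xf0UL;
        unsigned int tag_offset = 64;

        /* Each set bit is one allocated tag: tag = tag_offset + bit. */
        while (tag_mask) {
            unsigned int bit = __builtin_ctzl(tag_mask); /* lowest set bit */

            printf("allocated tag %u\n", tag_offset + bit);
            tag_mask &= tag_mask - 1; /* clear the bit just consumed */
        }
        return 0;
    }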
Diffstat (limited to 'block/blk-mq-tag.c')
-rw-r--r--  block/blk-mq-tag.c  15
1 file changed, 15 insertions(+), 0 deletions(-)
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 72a2724a4eee..c43b97201161 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -86,6 +86,21 @@ static int __blk_mq_get_tag(struct blk_mq_alloc_data *data,
 	return __sbitmap_queue_get(bt);
 }
+unsigned long blk_mq_get_tags(struct blk_mq_alloc_data *data, int nr_tags,
+			       unsigned int *offset)
+{
+	struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
+	struct sbitmap_queue *bt = &tags->bitmap_tags;
+	unsigned long ret;
+
+	if (data->shallow_depth || data->flags & BLK_MQ_REQ_RESERVED ||
+	    data->hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
+		return 0;
+	ret = __sbitmap_queue_get_batch(bt, nr_tags, offset);
+	*offset += tags->nr_reserved_tags;
+	return ret;
+}
+
 unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
 {
 	struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
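A note beyond the hunk shown above: blk_mq_get_tags() deliberately returns 0 whenever shallow depths, reserved tags, or shared tag queues are involved, so a caller is expected to fall back to the single-tag blk_mq_get_tag() path. A sketch of that try-batch-then-fall-back shape follows; every name in it is a hypothetical stand-in, not kernel API:

    #include <stdbool.h>
    #include <stdio.h>

    struct alloc_ctx {
        bool shallow_depth; /* mirrors data->shallow_depth */
        bool reserved;      /* mirrors BLK_MQ_REQ_RESERVED */
        bool shared_queue;  /* mirrors BLK_MQ_F_TAG_QUEUE_SHARED */
    };

    /* Pretend batch allocator: 0 means batching was refused. */
    static unsigned long get_tags_batch(struct alloc_ctx *ctx,
                                        unsigned int *offset)
    {
        if (ctx->shallow_depth || ctx->reserved || ctx->shared_queue)
            return 0; /* caller must fall back */
        *offset = 32;    /* hypothetical base tag */
        return 0x7UL;    /* hypothetical: three tags granted */
    }

    /* Pretend single-tag allocator standing in for blk_mq_get_tag(). */
    static unsigned int get_tag_single(void)
    {
        return 5; /* hypothetical tag */
    }

    int main(void)
    {
        struct alloc_ctx ctx = { .shared_queue = true };
        unsigned int offset;
        unsigned long mask = get_tags_batch(&ctx, &offset);

        if (!mask)
            printf("fallback: single tag %u\n", get_tag_single());
        return 0;
    }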