author     Shaohua Li <shli@fb.com>  2015-01-15 17:32:25 -0800
committer  Jens Axboe <axboe@fb.com>  2015-01-23 14:15:46 -0700
commit     ee1b6f7aff94019c09e73837054979063f722046 (patch)
tree       79c7b943d7c6d62fec1874afd2c50964de054aa0 /block
parent     bb5c3cdda37aad22996d6da2addd58cadc0436c0 (diff)
block: support different tag allocation policy
The libata tag allocation currently uses a round-robin policy. The next patch will make libata use the block layer's generic tag allocation, so add an allocation policy to the tag code. Two policies are supported for now: FIFO (the default) and round-robin.

Cc: Jens Axboe <axboe@fb.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Shaohua Li <shli@fb.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
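The practical difference between the two policies is where the free-bit search starts. The user-space sketch below only illustrates that behaviour and is not the kernel code; the bitmap, the cursor, and the alloc_tag_fifo()/alloc_tag_rr() helpers are made-up names. FIFO always hands out the lowest free tag, so a device that completes its only outstanding command keeps getting tag 0, while round-robin resumes after the most recently allocated tag and wraps around, which matches what libata's own allocator has been doing.

/*
 * Minimal user-space sketch of the two tag allocation policies.
 * Illustrative only; not the kernel implementation.
 */
#include <stdbool.h>
#include <stdio.h>

#define MAX_TAGS 8

static bool tag_map[MAX_TAGS];	/* true = tag in use */
static int next_tag;		/* round-robin cursor */

/* FIFO: always return the lowest free tag. */
static int alloc_tag_fifo(void)
{
	for (int i = 0; i < MAX_TAGS; i++) {
		if (!tag_map[i]) {
			tag_map[i] = true;
			return i;
		}
	}
	return -1;	/* no free tag */
}

/* Round-robin: resume the scan after the last tag handed out, wrapping. */
static int alloc_tag_rr(void)
{
	for (int n = 0; n < MAX_TAGS; n++) {
		int i = (next_tag + n) % MAX_TAGS;

		if (!tag_map[i]) {
			tag_map[i] = true;
			next_tag = (i + 1) % MAX_TAGS;
			return i;
		}
	}
	return -1;	/* no free tag */
}

int main(void)
{
	/* Allocate and immediately free a tag three times under each policy. */
	for (int n = 0; n < 3; n++) {
		int t = alloc_tag_fifo();

		printf("fifo: %d\n", t);	/* prints 0, 0, 0 */
		tag_map[t] = false;
	}
	for (int n = 0; n < 3; n++) {
		int t = alloc_tag_rr();

		printf("rr:   %d\n", t);	/* prints 0, 1, 2 */
		tag_map[t] = false;
	}
	return 0;
}

Built with a plain cc, the FIFO loop keeps printing tag 0 while the round-robin loop walks 0, 1, 2, which is why a driver that wants to avoid immediately reusing a just-freed tag asks for the round-robin policy.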
Diffstat (limited to 'block')
-rw-r--r--  block/blk-tag.c | 33
1 file changed, 25 insertions(+), 8 deletions(-)
diff --git a/block/blk-tag.c b/block/blk-tag.c
index a185b86741e5..f0344e6939d5 100644
--- a/block/blk-tag.c
+++ b/block/blk-tag.c
@@ -119,7 +119,7 @@ fail:
 }
 static struct blk_queue_tag *__blk_queue_init_tags(struct request_queue *q,
-						   int depth)
+						   int depth, int alloc_policy)
 {
 	struct blk_queue_tag *tags;
@@ -131,6 +131,8 @@ static struct blk_queue_tag *__blk_queue_init_tags(struct request_queue *q,
 		goto fail;
 	atomic_set(&tags->refcnt, 1);
+	tags->alloc_policy = alloc_policy;
+	tags->next_tag = 0;
 	return tags;
 fail:
 	kfree(tags);
@@ -140,10 +142,11 @@ fail:
 /**
  * blk_init_tags - initialize the tag info for an external tag map
  * @depth: the maximum queue depth supported
+ * @alloc_policy: tag allocation policy
  **/
-struct blk_queue_tag *blk_init_tags(int depth)
+struct blk_queue_tag *blk_init_tags(int depth, int alloc_policy)
 {
-	return __blk_queue_init_tags(NULL, depth);
+	return __blk_queue_init_tags(NULL, depth, alloc_policy);
 }
 EXPORT_SYMBOL(blk_init_tags);
@@ -152,19 +155,20 @@ EXPORT_SYMBOL(blk_init_tags);
  * @q: the request queue for the device
  * @depth: the maximum queue depth supported
  * @tags: the tag to use
+ * @alloc_policy: tag allocation policy
  *
  * Queue lock must be held here if the function is called to resize an
  * existing map.
  **/
 int blk_queue_init_tags(struct request_queue *q, int depth,
-			struct blk_queue_tag *tags)
+			struct blk_queue_tag *tags, int alloc_policy)
 {
 	int rc;
 	BUG_ON(tags && q->queue_tags && tags != q->queue_tags);
 	if (!tags && !q->queue_tags) {
-		tags = __blk_queue_init_tags(q, depth);
+		tags = __blk_queue_init_tags(q, depth, alloc_policy);
 		if (!tags)
 			return -ENOMEM;
@@ -344,9 +348,21 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
 	}
 	do {
-		tag = find_first_zero_bit(bqt->tag_map, max_depth);
-		if (tag >= max_depth)
-			return 1;
+		if (bqt->alloc_policy == BLK_TAG_ALLOC_FIFO) {
+			tag = find_first_zero_bit(bqt->tag_map, max_depth);
+			if (tag >= max_depth)
+				return 1;
+		} else {
+			int start = bqt->next_tag;
+			int size = min_t(int, bqt->max_depth, max_depth + start);
+			tag = find_next_zero_bit(bqt->tag_map, size, start);
+			if (tag >= size && start + size > bqt->max_depth) {
+				size = start + size - bqt->max_depth;
+				tag = find_first_zero_bit(bqt->tag_map, size);
+			}
+			if (tag >= size)
+				return 1;
+		}
 	} while (test_and_set_bit_lock(tag, bqt->tag_map));
 	/*
@@ -354,6 +370,7 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
 	 * See blk_queue_end_tag for details.
 	 */
+	bqt->next_tag = (tag + 1) % bqt->max_depth;
 	rq->cmd_flags |= REQ_QUEUED;
 	rq->tag = tag;
 	bqt->tag_index[tag] = rq;
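For callers, the visible change is the extra policy argument on blk_init_tags() and blk_queue_init_tags(). The snippet below is a hedged illustration of how a driver might pass it: the BLK_TAG_ALLOC_FIFO/BLK_TAG_ALLOC_RR constants come from the companion change to include/linux/blkdev.h, which the 'block' path filter above does not show, and the function names and usage here are invented for the example.

/*
 * Illustrative only: example_setup_tags() and example_external_map()
 * are not part of the patch.
 */
#include <linux/blkdev.h>

static int example_setup_tags(struct request_queue *q, int depth)
{
	/* Existing users keep the old behaviour: lowest free tag first. */
	return blk_queue_init_tags(q, depth, NULL, BLK_TAG_ALLOC_FIFO);
}

static struct blk_queue_tag *example_external_map(int depth)
{
	/*
	 * External tag map with round-robin allocation, as a libata-style
	 * user from the follow-up patch would want; returns NULL on failure.
	 */
	return blk_init_tags(depth, BLK_TAG_ALLOC_RR);
}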