| author | Jens Axboe <axboe@fb.com> | 2014-05-09 13:41:15 -0600 |
| --- | --- | --- |
| committer | Jens Axboe <axboe@fb.com> | 2014-05-09 13:41:15 -0600 |
| commit | 59d13bf5f57ded658c872fa22276f75ab8f12841 (patch) | |
| tree | ea3b3bf9395c01a5443c2e01874837a19d6e4c62 /block/blk-mq-tag.h | |
| parent | 4bb659b156996f2993dc16fad71fec9ee070153c (diff) | |
blk-mq: use sparser tag layout for lower queue depth
Spreading tags over multiple cachelines makes tagging more efficient
on multicore systems. But since we have 8 * sizeof(unsigned long) tags
per cacheline, we don't always get a nice spread.

Attempt to spread the tags over at least 4 cachelines, using fewer
bits per unsigned long if we have to. This improves tagging
performance in setups with 32-128 tags. For higher depths, the spread
is the same as before (BITS_PER_LONG tags per cacheline).
Signed-off-by: Jens Axboe <axboe@fb.com>
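The header below only adds the bits_per_word field and reworks the macros; the logic that picks the word width lives in block/blk-mq-tag.c, which this page does not show. As a rough illustration only, here is a minimal C sketch of the rule the commit message describes. The function name choose_bits_per_word, the bits_per_long parameter (standing in for BITS_PER_LONG), and the exact loop are assumptions, not the patch's code; only the "spread over at least 4 words" target comes from the message above.

```c
#include <stdio.h>

/*
 * Illustrative only: a guess at the word-width rule described in the
 * commit message. "bits" is the shift used by TAG_TO_INDEX/TAG_TO_BIT,
 * i.e. log2 of the number of tags stored in each bitmap word.
 */
static unsigned int choose_bits_per_word(unsigned int depth,
					 unsigned int bits_per_long)
{
	unsigned int bits = 0;

	/* start at the native word width: 6 on 64-bit, 5 on 32-bit */
	while ((1u << (bits + 1)) <= bits_per_long)
		bits++;

	/* shrink tags-per-word until the map spans at least 4 words */
	while (bits > 0 && (1u << bits) * 4 > depth)
		bits--;

	return bits;
}

int main(void)
{
	unsigned int depths[] = { 32, 64, 128, 256, 1024 };

	for (unsigned int i = 0; i < sizeof(depths) / sizeof(depths[0]); i++) {
		unsigned int bits = choose_bits_per_word(depths[i], 64);

		printf("depth %4u -> %u bits per word (%2u tags/word, %u words)\n",
		       depths[i], bits, 1u << bits, depths[i] >> bits);
	}
	return 0;
}
```

With these assumed rules, a depth of 64 ends up with 16 tags per word spread over 4 words (roughly 4 cachelines, assuming each bitmap word sits in its own cacheline as the message implies), while a depth of 1024 never shrinks below the native width, matching the note that higher depths keep BITS_PER_LONG tags per cacheline.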
Diffstat (limited to 'block/blk-mq-tag.h')
-rw-r--r-- | block/blk-mq-tag.h | 7 |
1 file changed, 4 insertions, 3 deletions
diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h
index 06d4a2f0f7a0..7aa9f0665489 100644
--- a/block/blk-mq-tag.h
+++ b/block/blk-mq-tag.h
@@ -11,8 +11,8 @@ struct bt_wait_state {
 	wait_queue_head_t wait;
 } ____cacheline_aligned_in_smp;
 
-#define TAG_TO_INDEX(tag)	((tag) / BITS_PER_LONG)
-#define TAG_TO_BIT(tag)		((tag) & (BITS_PER_LONG - 1))
+#define TAG_TO_INDEX(bt, tag)	((tag) >> (bt)->bits_per_word)
+#define TAG_TO_BIT(bt, tag)	((tag) & ((1 << (bt)->bits_per_word) - 1))
 
 struct blk_mq_bitmap {
 	unsigned long word;
@@ -22,9 +22,10 @@ struct blk_mq_bitmap {
 struct blk_mq_bitmap_tags {
 	unsigned int depth;
 	unsigned int wake_cnt;
+	unsigned int bits_per_word;
 
-	struct blk_mq_bitmap *map;
 	unsigned int map_nr;
+	struct blk_mq_bitmap *map;
 
 	unsigned int wake_index;
 	struct bt_wait_state *bs;
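To see what the reworked macros do, here is a small standalone mock, not part of the patch: the macro bodies mirror the new definitions above, but struct bt_mock and the chosen bits_per_word value of 4 are made up for illustration.

```c
#include <stdio.h>

/* Standalone mock of the per-tag lookup, mirroring the macros above. */
struct bt_mock {
	unsigned int bits_per_word;
};

#define TAG_TO_INDEX(bt, tag)	((tag) >> (bt)->bits_per_word)
#define TAG_TO_BIT(bt, tag)	((tag) & ((1 << (bt)->bits_per_word) - 1))

int main(void)
{
	/* e.g. a depth of 64 spread over 4 words: 16 tags per word -> 4 bits */
	struct bt_mock bt = { .bits_per_word = 4 };
	unsigned int tag = 37;

	/* 37 = 2 * 16 + 5, so tag 37 lives in word 2, bit 5 */
	printf("tag %u -> word %u, bit %u\n", tag,
	       (unsigned int)TAG_TO_INDEX(&bt, tag),
	       (unsigned int)TAG_TO_BIT(&bt, tag));

	return 0;
}
```

With the old macros the split was fixed at BITS_PER_LONG, so on a 64-bit kernel tag 37 would always land in word 0, bit 37; making the shift and mask depend on bt->bits_per_word is what lets lower queue depths use the sparser layout.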