author    Jens Axboe <axboe@fb.com>  2014-06-05 13:38:39 -0600
committer Jens Axboe <axboe@fb.com>  2014-06-05 13:38:39 -0600
commit    762380ad9322951cea4ce9d24864265f9c66a916 (patch)
tree      9ec3fe551583dcb9243d02a9728511e99216dcdc /block
parent    046f153343e33dcad1be7f6249ea6ff1c6fd9b58 (diff)
block: add notion of a chunk size for request merging
Some drivers have different limits on what size a request should
optimally be, depending on the offset of the request. This is similar
to dividing a device into chunks. Add a setting that allows the driver
to inform the block layer of such a chunk size. The block layer will
then prevent merging across the chunks.

This is needed to optimally support NVMe with a non-zero stripe size.

Signed-off-by: Jens Axboe <axboe@fb.com>
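The per-offset cap this relies on, blk_max_size_offset(), reduces to power-of-2
boundary arithmetic: a bio starting at a given sector may only grow up to the
next chunk boundary. A minimal userspace sketch of that math follows;
MAX_SECTORS and CHUNK_SECTORS are illustrative stand-ins for
queue_max_sectors(q) and q->limits.chunk_sectors, and max_size_at_offset() is a
hypothetical name, not the kernel's exact helper.

	#include <stdio.h>
	#include <stdint.h>

	#define MAX_SECTORS	1024u	/* stand-in for queue_max_sectors(q) */
	#define CHUNK_SECTORS	256u	/* stand-in for q->limits.chunk_sectors */

	/*
	 * Remaining room at 'offset': with a power-of-2 chunk size, the
	 * distance to the next chunk boundary is
	 * chunk_sectors - (offset & (chunk_sectors - 1)).
	 * A chunk size of 0 means "no chunking", so the queue max applies.
	 */
	static unsigned int max_size_at_offset(uint64_t offset)
	{
		if (!CHUNK_SECTORS)
			return MAX_SECTORS;
		return CHUNK_SECTORS -
			(unsigned int)(offset & (CHUNK_SECTORS - 1));
	}

	int main(void)
	{
		/* On a boundary: a full chunk (256 sectors) is available. */
		printf("offset 512 -> %u\n", max_size_at_offset(512));
		/* 16 sectors into a chunk: only 240 sectors remain. */
		printf("offset 528 -> %u\n", max_size_at_offset(528));
		return 0;
	}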
Diffstat (limited to 'block')
-rw-r--r--	block/bio.c           |  3 ++-
-rw-r--r--	block/blk-settings.c  | 18 ++++++++++++++++++
2 files changed, 20 insertions(+), 1 deletion(-)
diff --git a/block/bio.c b/block/bio.c
index 96d28eee8a1e..97e832cc9b9c 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -849,7 +849,8 @@ int bio_add_page(struct bio *bio, struct page *page, unsigned int len,
 		 unsigned int offset)
 {
 	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
-	return __bio_add_page(q, bio, page, len, offset, queue_max_sectors(q));
+
+	return __bio_add_page(q, bio, page, len, offset, blk_max_size_offset(q, bio->bi_iter.bi_sector));
 }
 EXPORT_SYMBOL(bio_add_page);
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 5d21239bc859..a2b9cb195e70 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -113,6 +113,7 @@ void blk_set_default_limits(struct queue_limits *lim)
 	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
 	lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
 	lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
+	lim->chunk_sectors = 0;
 	lim->max_write_same_sectors = 0;
 	lim->max_discard_sectors = 0;
 	lim->discard_granularity = 0;
@@ -277,6 +278,23 @@ void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_secto
 EXPORT_SYMBOL(blk_queue_max_hw_sectors);
 
 /**
+ * blk_queue_chunk_sectors - set size of the chunk for this queue
+ * @q:  the request queue for the device
+ * @chunk_sectors:  chunk sectors in the usual 512b unit
+ *
+ * Description:
+ *    If a driver doesn't want IOs to cross a given chunk size, it can set
+ *    this limit and prevent merging across chunks. Note that the chunk size
+ *    must currently be a power-of-2 in sectors.
+ **/
+void blk_queue_chunk_sectors(struct request_queue *q, unsigned int chunk_sectors)
+{
+	BUG_ON(!is_power_of_2(chunk_sectors));
+	q->limits.chunk_sectors = chunk_sectors;
+}
+EXPORT_SYMBOL(blk_queue_chunk_sectors);
+
+/**
  * blk_queue_max_discard_sectors - set max sectors for a single discard
  * @q:  the request queue for the device
  * @max_discard_sectors: maximum number of sectors to discard
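
From a driver's side, adopting the new limit is a single call during queue
setup. A hedged sketch follows; the function name and stripe_sectors value are
illustrative, not taken from an actual driver in this patch.

	/*
	 * Illustrative only: a driver whose device is striped in 128KB
	 * units (256 sectors of 512 bytes) tells the block layer not to
	 * merge requests across those boundaries.  The value must be a
	 * power of 2, per the BUG_ON() in blk_queue_chunk_sectors().
	 */
	static void example_setup_queue_limits(struct request_queue *q)
	{
		unsigned int stripe_sectors = 256;	/* hypothetical stripe size */

		blk_queue_chunk_sectors(q, stripe_sectors);
	}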