path: root/block
author     Linus Torvalds <torvalds@linux-foundation.org>  2024-01-11 13:58:04 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2024-01-11 13:58:04 -0800
commit     01d550f0fcc06c7292f79a6f1453aac122d1d2c8 (patch)
tree       58b58ac1cb833af0469b1942774a382633bc6cda /block
parent     d05e626603d57936314816433db8bf1d34b5a504 (diff)
parent     587371ed783b046f22ba7a5e1cc9a19ae35123b4 (diff)
download   linux-stable-01d550f0fcc06c7292f79a6f1453aac122d1d2c8.tar.gz
           linux-stable-01d550f0fcc06c7292f79a6f1453aac122d1d2c8.tar.bz2
           linux-stable-01d550f0fcc06c7292f79a6f1453aac122d1d2c8.zip
Merge tag 'for-6.8/block-2024-01-08' of git://git.kernel.dk/linux
Pull block updates from Jens Axboe:
 "Pretty quiet round this time around. This contains:

  - NVMe updates via Keith:
      - nvme fabrics spec updates (Guixin, Max)
      - nvme target updates (Guixin, Evan)
      - nvme attribute refactoring (Daniel)
      - nvme-fc numa fix (Keith)

  - MD updates via Song:
      - Fix/Cleanup RCU usage from conf->disks[i].rdev (Yu Kuai)
      - Fix raid5 hang issue (Junxiao Bi)
      - Add Yu Kuai as Reviewer of the md subsystem
      - Remove deprecated flavors (Song Liu)
      - raid1 read error check support (Li Nan)
      - Better handle events off-by-1 case (Alex Lyakas)

  - Efficiency improvements for passthrough (Kundan)

  - Support for mapping integrity data directly (Keith)

  - Zoned write fix (Damien)

  - rnbd fixes (Kees, Santosh, Supriti)

  - Default to a sane discard size granularity (Christoph)

  - Make the default max transfer size naming less confusing (Christoph)

  - Remove support for deprecated host aware zoned model (Christoph)

  - Misc fixes (me, Li, Matthew, Min, Ming, Randy, liyouhong, Daniel, Bart,
    Christoph)"

* tag 'for-6.8/block-2024-01-08' of git://git.kernel.dk/linux: (78 commits)
  block: Treat sequential write preferred zone type as invalid
  block: remove disk_clear_zoned
  sd: remove the !ZBC && blk_queue_is_zoned case in sd_read_block_characteristics
  drivers/block/xen-blkback/common.h: Fix spelling typo in comment
  blk-cgroup: fix rcu lockdep warning in blkg_lookup()
  blk-cgroup: don't use removal safe list iterators
  block: floor the discard granularity to the physical block size
  mtd_blkdevs: use the default discard granularity
  bcache: use the default discard granularity
  zram: use the default discard granularity
  null_blk: use the default discard granularity
  nbd: use the default discard granularity
  ubd: use the default discard granularity
  block: default the discard granularity to sector size
  bcache: discard_granularity should not be smaller than a sector
  block: remove two comments in bio_split_discard
  block: rename and document BLK_DEF_MAX_SECTORS
  loop: don't abuse BLK_DEF_MAX_SECTORS
  aoe: don't abuse BLK_DEF_MAX_SECTORS
  null_blk: don't cap max_hw_sectors to BLK_DEF_MAX_SECTORS
  ...
Diffstat (limited to 'block')
-rw-r--r--  block/bio-integrity.c     218
-rw-r--r--  block/bio.c                53
-rw-r--r--  block/blk-cgroup.c          7
-rw-r--r--  block/blk-cgroup.h          3
-rw-r--r--  block/blk-core.c           26
-rw-r--r--  block/blk-merge.c           6
-rw-r--r--  block/blk-mq.c              3
-rw-r--r--  block/blk-rq-qos.h          2
-rw-r--r--  block/blk-settings.c      107
-rw-r--r--  block/blk-sysfs.c          11
-rw-r--r--  block/blk-wbt.c            13
-rw-r--r--  block/blk-wbt.h             5
-rw-r--r--  block/blk-zoned.c          21
-rw-r--r--  block/blk.h                 2
-rw-r--r--  block/genhd.c               5
-rw-r--r--  block/ioctl.c              11
-rw-r--r--  block/partitions/core.c    12
17 files changed, 320 insertions, 185 deletions
diff --git a/block/bio-integrity.c b/block/bio-integrity.c
index ec8ac8cf6e1b..feef615e2c9c 100644
--- a/block/bio-integrity.c
+++ b/block/bio-integrity.c
@@ -69,15 +69,15 @@ struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,
memset(bip, 0, sizeof(*bip));
+ /* always report as many vecs as asked explicitly, not inline vecs */
+ bip->bip_max_vcnt = nr_vecs;
if (nr_vecs > inline_vecs) {
- bip->bip_max_vcnt = nr_vecs;
bip->bip_vec = bvec_alloc(&bs->bvec_integrity_pool,
&bip->bip_max_vcnt, gfp_mask);
if (!bip->bip_vec)
goto err;
} else {
bip->bip_vec = bip->bip_inline_vecs;
- bip->bip_max_vcnt = inline_vecs;
}
bip->bip_bio = bio;
@@ -91,6 +91,47 @@ err:
}
EXPORT_SYMBOL(bio_integrity_alloc);
+static void bio_integrity_unpin_bvec(struct bio_vec *bv, int nr_vecs,
+ bool dirty)
+{
+ int i;
+
+ for (i = 0; i < nr_vecs; i++) {
+ if (dirty && !PageCompound(bv[i].bv_page))
+ set_page_dirty_lock(bv[i].bv_page);
+ unpin_user_page(bv[i].bv_page);
+ }
+}
+
+static void bio_integrity_uncopy_user(struct bio_integrity_payload *bip)
+{
+ unsigned short nr_vecs = bip->bip_max_vcnt - 1;
+ struct bio_vec *copy = &bip->bip_vec[1];
+ size_t bytes = bip->bip_iter.bi_size;
+ struct iov_iter iter;
+ int ret;
+
+ iov_iter_bvec(&iter, ITER_DEST, copy, nr_vecs, bytes);
+ ret = copy_to_iter(bvec_virt(bip->bip_vec), bytes, &iter);
+ WARN_ON_ONCE(ret != bytes);
+
+ bio_integrity_unpin_bvec(copy, nr_vecs, true);
+}
+
+static void bio_integrity_unmap_user(struct bio_integrity_payload *bip)
+{
+ bool dirty = bio_data_dir(bip->bip_bio) == READ;
+
+ if (bip->bip_flags & BIP_COPY_USER) {
+ if (dirty)
+ bio_integrity_uncopy_user(bip);
+ kfree(bvec_virt(bip->bip_vec));
+ return;
+ }
+
+ bio_integrity_unpin_bvec(bip->bip_vec, bip->bip_max_vcnt, dirty);
+}
+
/**
* bio_integrity_free - Free bio integrity payload
* @bio: bio containing bip to be freed
@@ -105,6 +146,8 @@ void bio_integrity_free(struct bio *bio)
if (bip->bip_flags & BIP_BLOCK_INTEGRITY)
kfree(bvec_virt(bip->bip_vec));
+ else if (bip->bip_flags & BIP_INTEGRITY_USER)
+ bio_integrity_unmap_user(bip);
__bio_integrity_free(bs, bip);
bio->bi_integrity = NULL;
@@ -160,6 +203,177 @@ int bio_integrity_add_page(struct bio *bio, struct page *page,
}
EXPORT_SYMBOL(bio_integrity_add_page);
+static int bio_integrity_copy_user(struct bio *bio, struct bio_vec *bvec,
+ int nr_vecs, unsigned int len,
+ unsigned int direction, u32 seed)
+{
+ bool write = direction == ITER_SOURCE;
+ struct bio_integrity_payload *bip;
+ struct iov_iter iter;
+ void *buf;
+ int ret;
+
+ buf = kmalloc(len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ if (write) {
+ iov_iter_bvec(&iter, direction, bvec, nr_vecs, len);
+ if (!copy_from_iter_full(buf, len, &iter)) {
+ ret = -EFAULT;
+ goto free_buf;
+ }
+
+ bip = bio_integrity_alloc(bio, GFP_KERNEL, 1);
+ } else {
+ memset(buf, 0, len);
+
+ /*
+ * We need to preserve the original bvec and the number of vecs
+ * in it for completion handling
+ */
+ bip = bio_integrity_alloc(bio, GFP_KERNEL, nr_vecs + 1);
+ }
+
+ if (IS_ERR(bip)) {
+ ret = PTR_ERR(bip);
+ goto free_buf;
+ }
+
+ if (write)
+ bio_integrity_unpin_bvec(bvec, nr_vecs, false);
+ else
+ memcpy(&bip->bip_vec[1], bvec, nr_vecs * sizeof(*bvec));
+
+ ret = bio_integrity_add_page(bio, virt_to_page(buf), len,
+ offset_in_page(buf));
+ if (ret != len) {
+ ret = -ENOMEM;
+ goto free_bip;
+ }
+
+ bip->bip_flags |= BIP_INTEGRITY_USER | BIP_COPY_USER;
+ bip->bip_iter.bi_sector = seed;
+ return 0;
+free_bip:
+ bio_integrity_free(bio);
+free_buf:
+ kfree(buf);
+ return ret;
+}
+
+static int bio_integrity_init_user(struct bio *bio, struct bio_vec *bvec,
+ int nr_vecs, unsigned int len, u32 seed)
+{
+ struct bio_integrity_payload *bip;
+
+ bip = bio_integrity_alloc(bio, GFP_KERNEL, nr_vecs);
+ if (IS_ERR(bip))
+ return PTR_ERR(bip);
+
+ memcpy(bip->bip_vec, bvec, nr_vecs * sizeof(*bvec));
+ bip->bip_flags |= BIP_INTEGRITY_USER;
+ bip->bip_iter.bi_sector = seed;
+ bip->bip_iter.bi_size = len;
+ return 0;
+}
+
+static unsigned int bvec_from_pages(struct bio_vec *bvec, struct page **pages,
+ int nr_vecs, ssize_t bytes, ssize_t offset)
+{
+ unsigned int nr_bvecs = 0;
+ int i, j;
+
+ for (i = 0; i < nr_vecs; i = j) {
+ size_t size = min_t(size_t, bytes, PAGE_SIZE - offset);
+ struct folio *folio = page_folio(pages[i]);
+
+ bytes -= size;
+ for (j = i + 1; j < nr_vecs; j++) {
+ size_t next = min_t(size_t, PAGE_SIZE, bytes);
+
+ if (page_folio(pages[j]) != folio ||
+ pages[j] != pages[j - 1] + 1)
+ break;
+ unpin_user_page(pages[j]);
+ size += next;
+ bytes -= next;
+ }
+
+ bvec_set_page(&bvec[nr_bvecs], pages[i], size, offset);
+ offset = 0;
+ nr_bvecs++;
+ }
+
+ return nr_bvecs;
+}
+
+int bio_integrity_map_user(struct bio *bio, void __user *ubuf, ssize_t bytes,
+ u32 seed)
+{
+ struct request_queue *q = bdev_get_queue(bio->bi_bdev);
+ unsigned int align = q->dma_pad_mask | queue_dma_alignment(q);
+ struct page *stack_pages[UIO_FASTIOV], **pages = stack_pages;
+ struct bio_vec stack_vec[UIO_FASTIOV], *bvec = stack_vec;
+ unsigned int direction, nr_bvecs;
+ struct iov_iter iter;
+ int ret, nr_vecs;
+ size_t offset;
+ bool copy;
+
+ if (bio_integrity(bio))
+ return -EINVAL;
+ if (bytes >> SECTOR_SHIFT > queue_max_hw_sectors(q))
+ return -E2BIG;
+
+ if (bio_data_dir(bio) == READ)
+ direction = ITER_DEST;
+ else
+ direction = ITER_SOURCE;
+
+ iov_iter_ubuf(&iter, direction, ubuf, bytes);
+ nr_vecs = iov_iter_npages(&iter, BIO_MAX_VECS + 1);
+ if (nr_vecs > BIO_MAX_VECS)
+ return -E2BIG;
+ if (nr_vecs > UIO_FASTIOV) {
+ bvec = kcalloc(sizeof(*bvec), nr_vecs, GFP_KERNEL);
+ if (!bvec)
+ return -ENOMEM;
+ pages = NULL;
+ }
+
+ copy = !iov_iter_is_aligned(&iter, align, align);
+ ret = iov_iter_extract_pages(&iter, &pages, bytes, nr_vecs, 0, &offset);
+ if (unlikely(ret < 0))
+ goto free_bvec;
+
+ nr_bvecs = bvec_from_pages(bvec, pages, nr_vecs, bytes, offset);
+ if (pages != stack_pages)
+ kvfree(pages);
+ if (nr_bvecs > queue_max_integrity_segments(q))
+ copy = true;
+
+ if (copy)
+ ret = bio_integrity_copy_user(bio, bvec, nr_bvecs, bytes,
+ direction, seed);
+ else
+ ret = bio_integrity_init_user(bio, bvec, nr_bvecs, bytes, seed);
+ if (ret)
+ goto release_pages;
+ if (bvec != stack_vec)
+ kfree(bvec);
+
+ return 0;
+
+release_pages:
+ bio_integrity_unpin_bvec(bvec, nr_bvecs, false);
+free_bvec:
+ if (bvec != stack_vec)
+ kfree(bvec);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(bio_integrity_map_user);
+
/**
* bio_integrity_process - Process integrity metadata for a bio
* @bio: bio to generate/verify integrity metadata for
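
The new bio_integrity_map_user() export above pins (or, for unaligned or over-segmented buffers, bounce-copies) a user-space metadata buffer and attaches it to a bio as a BIP_INTEGRITY_USER payload. A minimal caller sketch follows; it is an illustration only, not code from this series, and attach_user_pi() is a made-up name:

	/*
	 * Hypothetical caller (illustration only): attach user-space
	 * integrity metadata to an already-mapped passthrough bio.
	 */
	static int attach_user_pi(struct bio *bio, void __user *meta_ubuf,
				  ssize_t meta_len, u32 seed)
	{
		int ret;

		/* Pin or bounce-copy the user buffer and hang it off the bio. */
		ret = bio_integrity_map_user(bio, meta_ubuf, meta_len, seed);
		if (ret)
			return ret;

		/*
		 * No explicit unmap is needed here: bio_integrity_free() now
		 * detects BIP_INTEGRITY_USER and unpins or copies back the
		 * pages when the bio completes.
		 */
		return 0;
	}
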
diff --git a/block/bio.c b/block/bio.c
index 816d412c06e9..b9642a41f286 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -944,7 +944,7 @@ bool bvec_try_merge_hw_page(struct request_queue *q, struct bio_vec *bv,
if ((addr1 | mask) != (addr2 | mask))
return false;
- if (bv->bv_len + len > queue_max_segment_size(q))
+ if (len > queue_max_segment_size(q) - bv->bv_len)
return false;
return bvec_try_merge_page(bv, page, len, offset, same_page);
}
@@ -966,10 +966,13 @@ int bio_add_hw_page(struct request_queue *q, struct bio *bio,
struct page *page, unsigned int len, unsigned int offset,
unsigned int max_sectors, bool *same_page)
{
+ unsigned int max_size = max_sectors << SECTOR_SHIFT;
+
if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
return 0;
- if (((bio->bi_iter.bi_size + len) >> SECTOR_SHIFT) > max_sectors)
+ len = min3(len, max_size, queue_max_segment_size(q));
+ if (len > max_size - bio->bi_iter.bi_size)
return 0;
if (bio->bi_vcnt > 0) {
@@ -1145,13 +1148,22 @@ EXPORT_SYMBOL(bio_add_folio);
void __bio_release_pages(struct bio *bio, bool mark_dirty)
{
- struct bvec_iter_all iter_all;
- struct bio_vec *bvec;
+ struct folio_iter fi;
- bio_for_each_segment_all(bvec, bio, iter_all) {
- if (mark_dirty && !PageCompound(bvec->bv_page))
- set_page_dirty_lock(bvec->bv_page);
- bio_release_page(bio, bvec->bv_page);
+ bio_for_each_folio_all(fi, bio) {
+ struct page *page;
+ size_t done = 0;
+
+ if (mark_dirty) {
+ folio_lock(fi.folio);
+ folio_mark_dirty(fi.folio);
+ folio_unlock(fi.folio);
+ }
+ page = folio_page(fi.folio, fi.offset / PAGE_SIZE);
+ do {
+ bio_release_page(bio, page++);
+ done += PAGE_SIZE;
+ } while (done < fi.length);
}
}
EXPORT_SYMBOL_GPL(__bio_release_pages);
@@ -1439,18 +1451,12 @@ EXPORT_SYMBOL(bio_free_pages);
* bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
* for performing direct-IO in BIOs.
*
- * The problem is that we cannot run set_page_dirty() from interrupt context
+ * The problem is that we cannot run folio_mark_dirty() from interrupt context
* because the required locks are not interrupt-safe. So what we can do is to
* mark the pages dirty _before_ performing IO. And in interrupt context,
* check that the pages are still dirty. If so, fine. If not, redirty them
* in process context.
*
- * We special-case compound pages here: normally this means reads into hugetlb
- * pages. The logic in here doesn't really work right for compound pages
- * because the VM does not uniformly chase down the head page in all cases.
- * But dirtiness of compound pages is pretty meaningless anyway: the VM doesn't
- * handle them at all. So we skip compound pages here at an early stage.
- *
* Note that this code is very hard to test under normal circumstances because
* direct-io pins the pages with get_user_pages(). This makes
* is_page_cache_freeable return false, and the VM will not clean the pages.
@@ -1466,12 +1472,12 @@ EXPORT_SYMBOL(bio_free_pages);
*/
void bio_set_pages_dirty(struct bio *bio)
{
- struct bio_vec *bvec;
- struct bvec_iter_all iter_all;
+ struct folio_iter fi;
- bio_for_each_segment_all(bvec, bio, iter_all) {
- if (!PageCompound(bvec->bv_page))
- set_page_dirty_lock(bvec->bv_page);
+ bio_for_each_folio_all(fi, bio) {
+ folio_lock(fi.folio);
+ folio_mark_dirty(fi.folio);
+ folio_unlock(fi.folio);
}
}
EXPORT_SYMBOL_GPL(bio_set_pages_dirty);
@@ -1515,12 +1521,11 @@ static void bio_dirty_fn(struct work_struct *work)
void bio_check_pages_dirty(struct bio *bio)
{
- struct bio_vec *bvec;
+ struct folio_iter fi;
unsigned long flags;
- struct bvec_iter_all iter_all;
- bio_for_each_segment_all(bvec, bio, iter_all) {
- if (!PageDirty(bvec->bv_page) && !PageCompound(bvec->bv_page))
+ bio_for_each_folio_all(fi, bio) {
+ if (!folio_test_dirty(fi.folio))
goto defer;
}
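
The bvec_try_merge_hw_page() change earlier in this file's diff rewrites "bv->bv_len + len > queue_max_segment_size(q)" as "len > queue_max_segment_size(q) - bv->bv_len": the existing bvec length has already been capped at the segment size limit, so the subtraction cannot underflow, whereas the original addition could wrap around an unsigned int and make an oversized request look small. A standalone sketch of the pattern (plain C, not kernel code):

	#include <limits.h>
	#include <stdbool.h>

	/* Overflow-prone form: (cur + add) may wrap and compare as "small". */
	static bool fits_naive(unsigned int cur, unsigned int add, unsigned int max)
	{
		return cur + add <= max;
	}

	/* Overflow-safe rewrite used in the diff; requires cur <= max. */
	static bool fits_safe(unsigned int cur, unsigned int add, unsigned int max)
	{
		return add <= max - cur;
	}

	/*
	 * Example: cur = 4096, add = UINT_MAX - 100, max = 65536.
	 * fits_naive() wraps to 3995 and wrongly returns true;
	 * fits_safe() correctly returns false.
	 */
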
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 4b48c2c44098..e303fd317313 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -575,13 +575,13 @@ static void blkg_destroy(struct blkcg_gq *blkg)
static void blkg_destroy_all(struct gendisk *disk)
{
struct request_queue *q = disk->queue;
- struct blkcg_gq *blkg, *n;
+ struct blkcg_gq *blkg;
int count = BLKG_DESTROY_BATCH_SIZE;
int i;
restart:
spin_lock_irq(&q->queue_lock);
- list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
+ list_for_each_entry(blkg, &q->blkg_list, q_node) {
struct blkcg *blkcg = blkg->blkcg;
if (hlist_unhashed(&blkg->blkcg_node))
@@ -2064,6 +2064,9 @@ void bio_associate_blkg(struct bio *bio)
{
struct cgroup_subsys_state *css;
+ if (blk_op_is_passthrough(bio->bi_opf))
+ return;
+
rcu_read_lock();
if (bio->bi_blkg)
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index fd482439afbc..b927a4a0ad03 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -252,7 +252,8 @@ static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
if (blkcg == &blkcg_root)
return q->root_blkg;
- blkg = rcu_dereference(blkcg->blkg_hint);
+ blkg = rcu_dereference_check(blkcg->blkg_hint,
+ lockdep_is_held(&q->queue_lock));
if (blkg && blkg->q == q)
return blkg;
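
The blkg_lookup() fix above reflects that the hint may be chased either under rcu_read_lock() or with the queue lock held; a plain rcu_dereference() trips lockdep in the latter case. A generic sketch of the rcu_dereference_check() pattern, with made-up structure and field names for illustration:

	/* Accept either an RCU read-side critical section or a specific lock. */
	struct cache {
		spinlock_t lock;
		struct item __rcu *hint;
	};

	static struct item *cache_hint(struct cache *c)
	{
		/*
		 * Legal under rcu_read_lock() *or* with c->lock held; any
		 * other context still produces a lockdep splat in RCU
		 * debug builds.
		 */
		return rcu_dereference_check(c->hint, lockdep_is_held(&c->lock));
	}
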
diff --git a/block/blk-core.c b/block/blk-core.c
index 2eca76ccf4ee..11342af420d0 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -772,6 +772,15 @@ void submit_bio_noacct(struct bio *bio)
bio_clear_polled(bio);
switch (bio_op(bio)) {
+ case REQ_OP_READ:
+ case REQ_OP_WRITE:
+ break;
+ case REQ_OP_FLUSH:
+ /*
+ * REQ_OP_FLUSH can't be submitted through bios, it is only
+ * synthesized in struct request by the flush state machine.
+ */
+ goto not_supported;
case REQ_OP_DISCARD:
if (!bdev_max_discard_sectors(bdev))
goto not_supported;
@@ -785,6 +794,10 @@ void submit_bio_noacct(struct bio *bio)
if (status != BLK_STS_OK)
goto end_io;
break;
+ case REQ_OP_WRITE_ZEROES:
+ if (!q->limits.max_write_zeroes_sectors)
+ goto not_supported;
+ break;
case REQ_OP_ZONE_RESET:
case REQ_OP_ZONE_OPEN:
case REQ_OP_ZONE_CLOSE:
@@ -796,12 +809,15 @@ void submit_bio_noacct(struct bio *bio)
if (!bdev_is_zoned(bio->bi_bdev) || !blk_queue_zone_resetall(q))
goto not_supported;
break;
- case REQ_OP_WRITE_ZEROES:
- if (!q->limits.max_write_zeroes_sectors)
- goto not_supported;
- break;
+ case REQ_OP_DRV_IN:
+ case REQ_OP_DRV_OUT:
+ /*
+ * Driver private operations are only used with passthrough
+ * requests.
+ */
+ fallthrough;
default:
- break;
+ goto not_supported;
}
if (blk_throtl_bio(bio))
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 65e75efa9bd3..2d470cf2173e 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -115,17 +115,13 @@ static struct bio *bio_split_discard(struct bio *bio,
*nsegs = 1;
- /* Zero-sector (unknown) and one-sector granularities are the same. */
granularity = max(lim->discard_granularity >> 9, 1U);
max_discard_sectors =
min(lim->max_discard_sectors, bio_allowed_max_sectors(lim));
max_discard_sectors -= max_discard_sectors % granularity;
-
- if (unlikely(!max_discard_sectors)) {
- /* XXX: warn */
+ if (unlikely(!max_discard_sectors))
return NULL;
- }
if (bio_sectors(bio) <= max_discard_sectors)
return NULL;
diff --git a/block/blk-mq.c b/block/blk-mq.c
index ac18f802c027..c11c97afa0bc 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1248,7 +1248,8 @@ void blk_mq_start_request(struct request *rq)
trace_block_rq_issue(rq);
- if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
+ if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags) &&
+ !blk_rq_is_passthrough(rq)) {
rq->io_start_time_ns = ktime_get_ns();
rq->stats_sectors = blk_rq_sectors(rq);
rq->rq_flags |= RQF_STATS;
diff --git a/block/blk-rq-qos.h b/block/blk-rq-qos.h
index f48ee150d667..37245c97ee61 100644
--- a/block/blk-rq-qos.h
+++ b/block/blk-rq-qos.h
@@ -118,7 +118,7 @@ static inline void rq_qos_cleanup(struct request_queue *q, struct bio *bio)
static inline void rq_qos_done(struct request_queue *q, struct request *rq)
{
- if (q->rq_qos)
+ if (q->rq_qos && !blk_rq_is_passthrough(rq))
__rq_qos_done(q->rq_qos, rq);
}
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 0046b447268f..06ea91e51b8b 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -48,7 +48,7 @@ void blk_set_default_limits(struct queue_limits *lim)
lim->max_discard_sectors = 0;
lim->max_hw_discard_sectors = 0;
lim->max_secure_erase_sectors = 0;
- lim->discard_granularity = 0;
+ lim->discard_granularity = 512;
lim->discard_alignment = 0;
lim->discard_misaligned = 0;
lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
@@ -56,7 +56,7 @@ void blk_set_default_limits(struct queue_limits *lim)
lim->alignment_offset = 0;
lim->io_opt = 0;
lim->misaligned = 0;
- lim->zoned = BLK_ZONED_NONE;
+ lim->zoned = false;
lim->zone_write_granularity = 0;
lim->dma_alignment = 511;
}
@@ -127,8 +127,7 @@ void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_secto
if ((max_hw_sectors << 9) < PAGE_SIZE) {
max_hw_sectors = 1 << (PAGE_SHIFT - 9);
- printk(KERN_INFO "%s: set to minimum %d\n",
- __func__, max_hw_sectors);
+ pr_info("%s: set to minimum %u\n", __func__, max_hw_sectors);
}
max_hw_sectors = round_down(max_hw_sectors,
@@ -140,7 +139,7 @@ void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_secto
if (limits->max_user_sectors)
max_sectors = min(max_sectors, limits->max_user_sectors);
else
- max_sectors = min(max_sectors, BLK_DEF_MAX_SECTORS);
+ max_sectors = min(max_sectors, BLK_DEF_MAX_SECTORS_CAP);
max_sectors = round_down(max_sectors,
limits->logical_block_size >> SECTOR_SHIFT);
@@ -248,8 +247,7 @@ void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments
{
if (!max_segments) {
max_segments = 1;
- printk(KERN_INFO "%s: set to minimum %d\n",
- __func__, max_segments);
+ pr_info("%s: set to minimum %u\n", __func__, max_segments);
}
q->limits.max_segments = max_segments;
@@ -285,8 +283,7 @@ void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
{
if (max_size < PAGE_SIZE) {
max_size = PAGE_SIZE;
- printk(KERN_INFO "%s: set to minimum %d\n",
- __func__, max_size);
+ pr_info("%s: set to minimum %u\n", __func__, max_size);
}
/* see blk_queue_virt_boundary() for the explanation */
@@ -312,6 +309,9 @@ void blk_queue_logical_block_size(struct request_queue *q, unsigned int size)
limits->logical_block_size = size;
+ if (limits->discard_granularity < limits->logical_block_size)
+ limits->discard_granularity = limits->logical_block_size;
+
if (limits->physical_block_size < size)
limits->physical_block_size = size;
@@ -342,6 +342,9 @@ void blk_queue_physical_block_size(struct request_queue *q, unsigned int size)
if (q->limits.physical_block_size < q->limits.logical_block_size)
q->limits.physical_block_size = q->limits.logical_block_size;
+ if (q->limits.discard_granularity < q->limits.physical_block_size)
+ q->limits.discard_granularity = q->limits.physical_block_size;
+
if (q->limits.io_min < q->limits.physical_block_size)
q->limits.io_min = q->limits.physical_block_size;
}
@@ -740,8 +743,7 @@ void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
{
if (mask < PAGE_SIZE - 1) {
mask = PAGE_SIZE - 1;
- printk(KERN_INFO "%s: set to minimum %lx\n",
- __func__, mask);
+ pr_info("%s: set to minimum %lx\n", __func__, mask);
}
q->limits.seg_boundary_mask = mask;
@@ -841,8 +843,6 @@ void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
blk_queue_flag_set(QUEUE_FLAG_FUA, q);
else
blk_queue_flag_clear(QUEUE_FLAG_FUA, q);
-
- wbt_set_write_cache(q, test_bit(QUEUE_FLAG_WC, &q->queue_flags));
}
EXPORT_SYMBOL_GPL(blk_queue_write_cache);
@@ -884,81 +884,22 @@ bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
}
EXPORT_SYMBOL_GPL(blk_queue_can_use_dma_map_merging);
-static bool disk_has_partitions(struct gendisk *disk)
-{
- unsigned long idx;
- struct block_device *part;
- bool ret = false;
-
- rcu_read_lock();
- xa_for_each(&disk->part_tbl, idx, part) {
- if (bdev_is_partition(part)) {
- ret = true;
- break;
- }
- }
- rcu_read_unlock();
-
- return ret;
-}
-
/**
- * disk_set_zoned - configure the zoned model for a disk
- * @disk: the gendisk of the queue to configure
- * @model: the zoned model to set
- *
- * Set the zoned model of @disk to @model.
- *
- * When @model is BLK_ZONED_HM (host managed), this should be called only
- * if zoned block device support is enabled (CONFIG_BLK_DEV_ZONED option).
- * If @model specifies BLK_ZONED_HA (host aware), the effective model used
- * depends on CONFIG_BLK_DEV_ZONED settings and on the existence of partitions
- * on the disk.
+ * disk_set_zoned - indicate a zoned device
+ * @disk: gendisk to configure
*/
-void disk_set_zoned(struct gendisk *disk, enum blk_zoned_model model)
+void disk_set_zoned(struct gendisk *disk)
{
struct request_queue *q = disk->queue;
- unsigned int old_model = q->limits.zoned;
-
- switch (model) {
- case BLK_ZONED_HM:
- /*
- * Host managed devices are supported only if
- * CONFIG_BLK_DEV_ZONED is enabled.
- */
- WARN_ON_ONCE(!IS_ENABLED(CONFIG_BLK_DEV_ZONED));
- break;
- case BLK_ZONED_HA:
- /*
- * Host aware devices can be treated either as regular block
- * devices (similar to drive managed devices) or as zoned block
- * devices to take advantage of the zone command set, similarly
- * to host managed devices. We try the latter if there are no
- * partitions and zoned block device support is enabled, else
- * we do nothing special as far as the block layer is concerned.
- */
- if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED) ||
- disk_has_partitions(disk))
- model = BLK_ZONED_NONE;
- break;
- case BLK_ZONED_NONE:
- default:
- if (WARN_ON_ONCE(model != BLK_ZONED_NONE))
- model = BLK_ZONED_NONE;
- break;
- }
- q->limits.zoned = model;
- if (model != BLK_ZONED_NONE) {
- /*
- * Set the zone write granularity to the device logical block
- * size by default. The driver can change this value if needed.
- */
- blk_queue_zone_write_granularity(q,
- queue_logical_block_size(q));
- } else if (old_model != BLK_ZONED_NONE) {
- disk_clear_zone_settings(disk);
- }
+ WARN_ON_ONCE(!IS_ENABLED(CONFIG_BLK_DEV_ZONED));
+
+ /*
+ * Set the zone write granularity to the device logical block
+ * size by default. The driver can change this value if needed.
+ */
+ q->limits.zoned = true;
+ blk_queue_zone_write_granularity(q, queue_logical_block_size(q));
}
EXPORT_SYMBOL_GPL(disk_set_zoned);
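
With the host-aware model removed, disk_set_zoned() above no longer takes a zoned-model argument and unconditionally marks the queue as (host-managed) zoned; drivers exposing a non-zoned device simply never call it. A hypothetical driver-side sketch of the new calling convention (illustration only; function and variable names are assumptions, and the surrounding probe ordering is driver-specific):

	/* Probe-time zoned setup after this series (sketch). */
	static int example_setup_zoned(struct gendisk *disk, bool is_zoned)
	{
		if (!is_zoned)
			return 0;	/* queue_limits now default to zoned = false */

		if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED))
			return -EOPNOTSUPP;

		disk_set_zoned(disk);	/* was: disk_set_zoned(disk, BLK_ZONED_HM) */

		/* Capacity and zone limits must already be set at this point. */
		return blk_revalidate_disk_zones(disk, NULL);
	}
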
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 0b2d04766324..6b2429cad81a 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -241,7 +241,7 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
if (max_sectors_kb == 0) {
q->limits.max_user_sectors = 0;
max_sectors_kb = min(max_hw_sectors_kb,
- BLK_DEF_MAX_SECTORS >> 1);
+ BLK_DEF_MAX_SECTORS_CAP >> 1);
} else {
if (max_sectors_kb > max_hw_sectors_kb ||
max_sectors_kb < page_kb)
@@ -309,14 +309,9 @@ QUEUE_SYSFS_BIT_FNS(stable_writes, STABLE_WRITES, 0);
static ssize_t queue_zoned_show(struct request_queue *q, char *page)
{
- switch (blk_queue_zoned_model(q)) {
- case BLK_ZONED_HA:
- return sprintf(page, "host-aware\n");
- case BLK_ZONED_HM:
+ if (blk_queue_is_zoned(q))
return sprintf(page, "host-managed\n");
- default:
- return sprintf(page, "none\n");
- }
+ return sprintf(page, "none\n");
}
static ssize_t queue_nr_zones_show(struct request_queue *q, char *page)
diff --git a/block/blk-wbt.c b/block/blk-wbt.c
index 0bb613139bec..5ba3cd574eac 100644
--- a/block/blk-wbt.c
+++ b/block/blk-wbt.c
@@ -84,8 +84,6 @@ struct rq_wb {
u64 sync_issue;
void *sync_cookie;
- unsigned int wc;
-
unsigned long last_issue; /* last non-throttled issue */
unsigned long last_comp; /* last non-throttled comp */
unsigned long min_lat_nsec;
@@ -207,7 +205,8 @@ static void wbt_rqw_done(struct rq_wb *rwb, struct rq_wait *rqw,
*/
if (wb_acct & WBT_DISCARD)
limit = rwb->wb_background;
- else if (rwb->wc && !wb_recent_wait(rwb))
+ else if (test_bit(QUEUE_FLAG_WC, &rwb->rqos.disk->queue->queue_flags) &&
+ !wb_recent_wait(rwb))
limit = 0;
else
limit = rwb->wb_normal;
@@ -699,13 +698,6 @@ static void wbt_requeue(struct rq_qos *rqos, struct request *rq)
}
}
-void wbt_set_write_cache(struct request_queue *q, bool write_cache_on)
-{
- struct rq_qos *rqos = wbt_rq_qos(q);
- if (rqos)
- RQWB(rqos)->wc = write_cache_on;
-}
-
/*
* Enable wbt if defaults are configured that way
*/
@@ -918,7 +910,6 @@ int wbt_init(struct gendisk *disk)
rwb->last_comp = rwb->last_issue = jiffies;
rwb->win_nsec = RWB_WINDOW_NSEC;
rwb->enable_state = WBT_STATE_ON_DEFAULT;
- rwb->wc = test_bit(QUEUE_FLAG_WC, &q->queue_flags);
rwb->rq_depth.default_depth = RWB_DEF_DEPTH;
rwb->min_lat_nsec = wbt_default_latency_nsec(q);
rwb->rq_depth.queue_depth = blk_queue_depth(q);
diff --git a/block/blk-wbt.h b/block/blk-wbt.h
index 8a029e138f7a..e5fc653b9b76 100644
--- a/block/blk-wbt.h
+++ b/block/blk-wbt.h
@@ -12,8 +12,6 @@ u64 wbt_get_min_lat(struct request_queue *q);
void wbt_set_min_lat(struct request_queue *q, u64 val);
bool wbt_disabled(struct request_queue *);
-void wbt_set_write_cache(struct request_queue *, bool);
-
u64 wbt_default_latency_nsec(struct request_queue *);
#else
@@ -24,9 +22,6 @@ static inline void wbt_disable_default(struct gendisk *disk)
static inline void wbt_enable_default(struct gendisk *disk)
{
}
-static inline void wbt_set_write_cache(struct request_queue *q, bool wc)
-{
-}
#endif /* CONFIG_BLK_WBT */
diff --git a/block/blk-zoned.c b/block/blk-zoned.c
index 619ee41a51cc..d343e5756a9c 100644
--- a/block/blk-zoned.c
+++ b/block/blk-zoned.c
@@ -498,7 +498,6 @@ static int blk_revalidate_zone_cb(struct blk_zone *zone, unsigned int idx,
set_bit(idx, args->conv_zones_bitmap);
break;
case BLK_ZONE_TYPE_SEQWRITE_REQ:
- case BLK_ZONE_TYPE_SEQWRITE_PREF:
if (!args->seq_zones_wlock) {
args->seq_zones_wlock =
blk_alloc_zone_bitmap(q->node, args->nr_zones);
@@ -506,6 +505,7 @@ static int blk_revalidate_zone_cb(struct blk_zone *zone, unsigned int idx,
return -ENOMEM;
}
break;
+ case BLK_ZONE_TYPE_SEQWRITE_PREF:
default:
pr_warn("%s: Invalid zone type 0x%x at sectors %llu\n",
disk->disk_name, (int)zone->type, zone->start);
@@ -615,22 +615,3 @@ int blk_revalidate_disk_zones(struct gendisk *disk,
return ret;
}
EXPORT_SYMBOL_GPL(blk_revalidate_disk_zones);
-
-void disk_clear_zone_settings(struct gendisk *disk)
-{
- struct request_queue *q = disk->queue;
-
- blk_mq_freeze_queue(q);
-
- disk_free_zone_bitmaps(disk);
- blk_queue_flag_clear(QUEUE_FLAG_ZONE_RESETALL, q);
- q->required_elevator_features &= ~ELEVATOR_F_ZBD_SEQ_WRITE;
- disk->nr_zones = 0;
- disk->max_open_zones = 0;
- disk->max_active_zones = 0;
- q->limits.chunk_sectors = 0;
- q->limits.zone_write_granularity = 0;
- q->limits.max_zone_append_sectors = 0;
-
- blk_mq_unfreeze_queue(q);
-}
diff --git a/block/blk.h b/block/blk.h
index 08a358bc0919..1ef920f72e0f 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -395,14 +395,12 @@ static inline struct bio *blk_queue_bounce(struct bio *bio,
#ifdef CONFIG_BLK_DEV_ZONED
void disk_free_zone_bitmaps(struct gendisk *disk);
-void disk_clear_zone_settings(struct gendisk *disk);
int blkdev_report_zones_ioctl(struct block_device *bdev, unsigned int cmd,
unsigned long arg);
int blkdev_zone_mgmt_ioctl(struct block_device *bdev, blk_mode_t mode,
unsigned int cmd, unsigned long arg);
#else /* CONFIG_BLK_DEV_ZONED */
static inline void disk_free_zone_bitmaps(struct gendisk *disk) {}
-static inline void disk_clear_zone_settings(struct gendisk *disk) {}
static inline int blkdev_report_zones_ioctl(struct block_device *bdev,
unsigned int cmd, unsigned long arg)
{
diff --git a/block/genhd.c b/block/genhd.c
index c9d06f72c587..d74fb5b4ae68 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -432,7 +432,9 @@ int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
DISK_MAX_PARTS);
disk->minors = DISK_MAX_PARTS;
}
- if (disk->first_minor + disk->minors > MINORMASK + 1)
+ if (disk->first_minor > MINORMASK ||
+ disk->minors > MINORMASK + 1 ||
+ disk->first_minor + disk->minors > MINORMASK + 1)
goto out_exit_elevator;
} else {
if (WARN_ON(disk->minors))
@@ -542,6 +544,7 @@ out_put_holder_dir:
kobject_put(disk->part0->bd_holder_dir);
out_del_block_link:
sysfs_remove_link(block_depr, dev_name(ddev));
+ pm_runtime_set_memalloc_noio(ddev, false);
out_device_del:
device_del(ddev);
out_free_ext_minor:
diff --git a/block/ioctl.c b/block/ioctl.c
index 4160f4e6bd5b..9c73a763ef88 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -18,7 +18,7 @@ static int blkpg_do_ioctl(struct block_device *bdev,
{
struct gendisk *disk = bdev->bd_disk;
struct blkpg_partition p;
- long long start, length;
+ sector_t start, length;
if (disk->flags & GENHD_FL_NO_PART)
return -EINVAL;
@@ -35,14 +35,17 @@ static int blkpg_do_ioctl(struct block_device *bdev,
if (op == BLKPG_DEL_PARTITION)
return bdev_del_partition(disk, p.pno);
+ if (p.start < 0 || p.length <= 0 || p.start + p.length < 0)
+ return -EINVAL;
+ /* Check that the partition is aligned to the block size */
+ if (!IS_ALIGNED(p.start | p.length, bdev_logical_block_size(bdev)))
+ return -EINVAL;
+
start = p.start >> SECTOR_SHIFT;
length = p.length >> SECTOR_SHIFT;
switch (op) {
case BLKPG_ADD_PARTITION:
- /* check if partition is aligned to blocksize */
- if (p.start & (bdev_logical_block_size(bdev) - 1))
- return -EINVAL;
return bdev_add_partition(disk, p.pno, start, length);
case BLKPG_RESIZE_PARTITION:
return bdev_resize_partition(disk, p.pno, start, length);
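
The new alignment check above validates both fields with a single test: IS_ALIGNED(p.start | p.length, block_size) works because the logical block size is a power of two, so a low-order bit below the block size survives the OR exactly when it is set in either value. A standalone worked sketch (plain C, not kernel code):

	#include <stdbool.h>
	#include <stdint.h>

	/* Kernel's IS_ALIGNED(x, a) for power-of-two a: ((x) & ((a) - 1)) == 0 */
	static bool is_aligned(uint64_t x, uint64_t a)
	{
		return (x & (a - 1)) == 0;
	}

	static bool blkpg_range_aligned(uint64_t start, uint64_t length,
					uint64_t block_size)
	{
		/* Both values are aligned iff their OR is aligned. */
		return is_aligned(start | length, block_size);
	}

	/*
	 * Example with a 4096-byte block size:
	 *   start = 8192, length = 4096 -> 8192 | 4096 = 12288, aligned
	 *   start = 8192, length = 4608 -> the stray 512 bit survives the OR,
	 *   so the range is rejected
	 */
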
diff --git a/block/partitions/core.c b/block/partitions/core.c
index f47ffcfdfcec..e6ac73617f3e 100644
--- a/block/partitions/core.c
+++ b/block/partitions/core.c
@@ -305,18 +305,10 @@ static struct block_device *add_partition(struct gendisk *disk, int partno,
* Partitions are not supported on zoned block devices that are used as
* such.
*/
- switch (disk->queue->limits.zoned) {
- case BLK_ZONED_HM:
+ if (bdev_is_zoned(disk->part0)) {
pr_warn("%s: partitions not supported on host managed zoned block device\n",
disk->disk_name);
return ERR_PTR(-ENXIO);
- case BLK_ZONED_HA:
- pr_info("%s: disabling host aware zoned block device support due to partitions\n",
- disk->disk_name);
- disk_set_zoned(disk, BLK_ZONED_NONE);
- break;
- case BLK_ZONED_NONE:
- break;
}
if (xa_load(&disk->part_tbl, partno))
@@ -613,7 +605,7 @@ static int blk_add_partitions(struct gendisk *disk)
/*
* Partitions are not supported on host managed zoned block devices.
*/
- if (disk->queue->limits.zoned == BLK_ZONED_HM) {
+ if (bdev_is_zoned(disk->part0)) {
pr_warn("%s: ignoring partition table on host managed zoned block device\n",
disk->disk_name);
ret = 0;