Diffstat (limited to 'block/bdev.c')
-rw-r--r--	block/bdev.c	67
 1 file changed, 51 insertions(+), 16 deletions(-)
diff --git a/block/bdev.c b/block/bdev.c
index 6a34179192c9..889ec6e002d7 100644
--- a/block/bdev.c
+++ b/block/bdev.c
@@ -152,27 +152,65 @@ static void set_init_blocksize(struct block_device *bdev)
 			get_order(bsize));
 }
 
-int set_blocksize(struct file *file, int size)
+/**
+ * bdev_validate_blocksize - check that this block size is acceptable
+ * @bdev: blockdevice to check
+ * @block_size: block size to check
+ *
+ * For block device users that do not use buffer heads or the block device
+ * page cache, make sure that this block size can be used with the device.
+ *
+ * Return: On success zero is returned, negative error code on failure.
+ */
+int bdev_validate_blocksize(struct block_device *bdev, int block_size)
 {
-	struct inode *inode = file->f_mapping->host;
-	struct block_device *bdev = I_BDEV(inode);
-
-	if (blk_validate_block_size(size))
+	if (blk_validate_block_size(block_size))
 		return -EINVAL;
 
 	/* Size cannot be smaller than the size supported by the device */
-	if (size < bdev_logical_block_size(bdev))
+	if (block_size < bdev_logical_block_size(bdev))
 		return -EINVAL;
 
+	return 0;
+}
+EXPORT_SYMBOL_GPL(bdev_validate_blocksize);
+
+int set_blocksize(struct file *file, int size)
+{
+	struct inode *inode = file->f_mapping->host;
+	struct block_device *bdev = I_BDEV(inode);
+	int ret;
+
+	ret = bdev_validate_blocksize(bdev, size);
+	if (ret)
+		return ret;
+
 	if (!file->private_data)
 		return -EINVAL;
 
 	/* Don't change the size if it is same as current */
 	if (inode->i_blkbits != blksize_bits(size)) {
+		/*
+		 * Flush and truncate the pagecache before we reconfigure the
+		 * mapping geometry because folio sizes are variable now. If a
+		 * reader has already allocated a folio whose size is smaller
+		 * than the new min_order but invokes readahead after the new
+		 * min_order becomes visible, readahead will think there are
+		 * "zero" blocks per folio and crash. Take the inode and
+		 * invalidation locks to avoid racing with
+		 * read/write/fallocate.
+		 */
+		inode_lock(inode);
+		filemap_invalidate_lock(inode->i_mapping);
+
 		sync_blockdev(bdev);
+		kill_bdev(bdev);
+
 		inode->i_blkbits = blksize_bits(size);
 		mapping_set_folio_min_order(inode->i_mapping, get_order(size));
 		kill_bdev(bdev);
+		filemap_invalidate_unlock(inode->i_mapping);
+		inode_unlock(inode);
 	}
 	return 0;
 }
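
The hunk above splits the size checks out of set_blocksize() into an exported helper and brackets the geometry change with the inode and invalidation locks. A minimal sketch of how the helper might be used, assuming a hypothetical driver that does its own caching (per the kernel-doc: no buffer heads, no block device page cache) and therefore never calls set_blocksize(); the function name and warning text are illustrative, not part of this patch:

	#include <linux/blkdev.h>

	/* Hypothetical caller: reject an unusable block size up front. */
	static int mydrv_validate_bs(struct block_device *bdev, int block_size)
	{
		int ret = bdev_validate_blocksize(bdev, block_size);

		if (ret)
			pr_warn("%pg: block size %d is not usable\n",
				bdev, block_size);
		return ret;
	}

Because the helper only checks the requested size against the device's limits, it can run before any page cache state exists, which is why it is split out and exported separately from set_blocksize().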
@@ -777,13 +815,13 @@ static void blkdev_put_part(struct block_device *part)
 	blkdev_put_whole(whole);
 }
 
-struct block_device *blkdev_get_no_open(dev_t dev)
+struct block_device *blkdev_get_no_open(dev_t dev, bool autoload)
 {
 	struct block_device *bdev;
 	struct inode *inode;
 
 	inode = ilookup(blockdev_superblock, dev);
-	if (!inode && IS_ENABLED(CONFIG_BLOCK_LEGACY_AUTOLOAD)) {
+	if (!inode && autoload && IS_ENABLED(CONFIG_BLOCK_LEGACY_AUTOLOAD)) {
 		blk_request_module(dev);
 		inode = ilookup(blockdev_superblock, dev);
 		if (inode)
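
The new autoload flag lets a caller opt out of the CONFIG_BLOCK_LEGACY_AUTOLOAD fallback that loads a driver module for an unbound dev_t. A sketch of the lookup/put pairing, assuming block-layer-internal code (blkdev_get_no_open() is not exported) and a made-up helper name; blkdev_put_no_open() is the existing release counterpart in this file:

	/* Sketch: inspect a bdev without loading a driver as a side effect. */
	static int my_peek_bdev(dev_t dev)
	{
		struct block_device *bdev = blkdev_get_no_open(dev, false);

		if (!bdev)
			return -ENXIO;	/* nothing bound; we chose not to autoload */

		/* ... read properties, e.g. bdev_logical_block_size(bdev) ... */
		blkdev_put_no_open(bdev);
		return 0;
	}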
@@ -1005,7 +1043,7 @@ struct file *bdev_file_open_by_dev(dev_t dev, blk_mode_t mode, void *holder,
 	if (ret)
 		return ERR_PTR(ret);
 
-	bdev = blkdev_get_no_open(dev);
+	bdev = blkdev_get_no_open(dev, true);
 	if (!bdev)
 		return ERR_PTR(-ENXIO);
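
bdev_file_open_by_dev() keeps passing true, so the regular open path still autoloads the driver module when CONFIG_BLOCK_LEGACY_AUTOLOAD is enabled. A hedged sketch of a caller, assuming a read-only, non-exclusive open (NULL holder and holder ops):

	#include <linux/blkdev.h>

	/* Sketch: a plain open by dev_t keeps the legacy autoload behaviour. */
	static struct file *my_open_bdev(dev_t dev)
	{
		/* Returns an ERR_PTR on failure, never NULL. */
		return bdev_file_open_by_dev(dev, BLK_OPEN_READ, NULL, NULL);
	}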
@@ -1274,18 +1312,15 @@ void sync_bdevs(bool wait)
  */
 void bdev_statx(const struct path *path, struct kstat *stat, u32 request_mask)
 {
-	struct inode *backing_inode;
 	struct block_device *bdev;
 
-	backing_inode = d_backing_inode(path->dentry);
-
 	/*
-	 * Note that backing_inode is the inode of a block device node file,
-	 * not the block device's internal inode. Therefore it is *not* valid
-	 * to use I_BDEV() here; the block device has to be looked up by i_rdev
+	 * Note that d_backing_inode() returns the block device node inode, not
+	 * the block device's internal inode. Therefore it is *not* valid to
+	 * use I_BDEV() here; the block device has to be looked up by i_rdev
 	 * instead.
 	 */
-	bdev = blkdev_get_no_open(backing_inode->i_rdev);
+	bdev = blkdev_get_no_open(d_backing_inode(path->dentry)->i_rdev, false);
 	if (!bdev)
 		return;
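
Besides dropping the backing_inode local, bdev_statx() now passes autoload=false, so a stat-family call on a device node can no longer load a driver module as a side effect. A userspace illustration of the path this serves, assuming a kernel with STATX_DIOALIGN support and glibc 2.28 or newer; /dev/sda is only an example node:

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/stat.h>

	int main(void)
	{
		struct statx stx;

		/* statx(2) on the device node itself ends up in bdev_statx(). */
		if (statx(AT_FDCWD, "/dev/sda", 0, STATX_DIOALIGN, &stx) != 0) {
			perror("statx");
			return 1;
		}
		printf("dio mem align %u, dio offset align %u\n",
		       stx.stx_dio_mem_align, stx.stx_dio_offset_align);
		return 0;
	}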