Diffstat (limited to 'include/linux/blkdev.h')
-rw-r--r--  include/linux/blkdev.h  101
1 file changed, 71 insertions(+), 30 deletions(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index d37751789bf5..9a1f0ee40b56 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -268,10 +268,16 @@ static inline dev_t disk_devt(struct gendisk *disk)
return MKDEV(disk->major, disk->first_minor);
}
+/*
+ * We should strive for 1 << (PAGE_SHIFT + MAX_PAGECACHE_ORDER)
+ * however we constrain this to what we can validate and test.
+ */
+#define BLK_MAX_BLOCK_SIZE SZ_64K
+
/* blk_validate_limits() validates bsize, so drivers don't usually need to */
static inline int blk_validate_block_size(unsigned long bsize)
{
- if (bsize < 512 || bsize > PAGE_SIZE || !is_power_of_2(bsize))
+ if (bsize < 512 || bsize > BLK_MAX_BLOCK_SIZE || !is_power_of_2(bsize))
return -EINVAL;
return 0;
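
As a usage illustration (not part of the patch): a driver that accepts a configurable logical block size could run it through blk_validate_block_size() before committing it to its queue limits. The helper name below is hypothetical; a minimal sketch:

#include <linux/blkdev.h>
#include <linux/errno.h>

/*
 * Hypothetical driver helper: accept only block sizes the block layer
 * supports (a power of two between 512 bytes and BLK_MAX_BLOCK_SIZE).
 */
static int example_set_block_size(struct queue_limits *lim,
				  unsigned long bsize)
{
	int ret = blk_validate_block_size(bsize);

	if (ret)
		return ret;	/* -EINVAL for e.g. 256, 4095 or 128K */

	lim->logical_block_size = bsize;
	lim->physical_block_size = bsize;
	return 0;
}
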
@@ -562,7 +568,22 @@ struct request_queue {
struct blk_flush_queue *fq;
struct list_head flush_list;
+ /*
+ * Protects against I/O scheduler switching, particularly when updating
+ * q->elevator. Since the elevator update code path may also modify q->
+ * nr_requests and wbt latency, this lock also protects the sysfs attrs
+ * nr_requests and wbt_lat_usec. Additionally the nr_hw_queues update
+ * may modify hctx tags, reserved-tags and cpumask, so this lock also
+ * helps protect the hctx sysfs/debugfs attrs. To ensure proper locking
+ * order during an elevator or nr_hw_queue update, first freeze the
+ * queue, then acquire ->elevator_lock.
+ */
+ struct mutex elevator_lock;
+
struct mutex sysfs_lock;
+ /*
+ * Protects queue limits and also sysfs attribute read_ahead_kb.
+ */
struct mutex limits_lock;
/*
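
To illustrate the documented lock ordering (this sketch is not part of the patch; the freeze helper is shown in its memflags-returning form, which varies between kernel versions):

#include <linux/blk-mq.h>
#include <linux/blkdev.h>

/*
 * Illustrative only: an elevator or nr_hw_queues update first freezes the
 * queue, then takes ->elevator_lock, and releases them in reverse order.
 */
static void example_switch_scheduler(struct request_queue *q)
{
	unsigned int memflags;

	memflags = blk_mq_freeze_queue(q);	/* step 1: freeze the queue */
	mutex_lock(&q->elevator_lock);		/* step 2: take elevator_lock */

	/* ... update q->elevator, nr_requests, wbt_lat_usec ... */

	mutex_unlock(&q->elevator_lock);
	blk_mq_unfreeze_queue(q, memflags);
}

limits_lock, by contrast, is normally taken indirectly through the queue_limits_start_update()/queue_limits_commit_update() pair rather than locked by callers directly.
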
@@ -691,23 +712,6 @@ static inline bool blk_queue_is_zoned(struct request_queue *q)
(q->limits.features & BLK_FEAT_ZONED);
}
-#ifdef CONFIG_BLK_DEV_ZONED
-static inline unsigned int disk_nr_zones(struct gendisk *disk)
-{
- return disk->nr_zones;
-}
-bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs);
-#else /* CONFIG_BLK_DEV_ZONED */
-static inline unsigned int disk_nr_zones(struct gendisk *disk)
-{
- return 0;
-}
-static inline bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs)
-{
- return false;
-}
-#endif /* CONFIG_BLK_DEV_ZONED */
-
static inline unsigned int disk_zone_no(struct gendisk *disk, sector_t sector)
{
if (!blk_queue_is_zoned(disk->queue))
@@ -715,11 +719,6 @@ static inline unsigned int disk_zone_no(struct gendisk *disk, sector_t sector)
return sector >> ilog2(disk->queue->limits.chunk_sectors);
}
-static inline unsigned int bdev_nr_zones(struct block_device *bdev)
-{
- return disk_nr_zones(bdev->bd_disk);
-}
-
static inline unsigned int bdev_max_open_zones(struct block_device *bdev)
{
return bdev->bd_disk->queue->limits.max_open_zones;
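
For orientation (not part of the patch): zoned devices have a power-of-two zone size, so disk_zone_no() above reduces to a shift. With an illustrative 256 MiB zone size, chunk_sectors = 524288 = 2^19, and for example:

	disk_zone_no(disk, 1310720) == 1310720 >> 19 == 2	/* third zone on the disk */
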
@@ -826,6 +825,51 @@ static inline u64 sb_bdev_nr_blocks(struct super_block *sb)
(sb->s_blocksize_bits - SECTOR_SHIFT);
}
+#ifdef CONFIG_BLK_DEV_ZONED
+static inline unsigned int disk_nr_zones(struct gendisk *disk)
+{
+ return disk->nr_zones;
+}
+bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs);
+
+/**
+ * disk_zone_capacity - returns the zone capacity of the zone containing @sector
+ * @disk: disk to work with
+ * @sector: sector number within the zone being queried
+ *
+ * Returns the zone capacity of the zone containing @sector. @sector can be any
+ * sector in the zone.
+ */
+static inline unsigned int disk_zone_capacity(struct gendisk *disk,
+ sector_t sector)
+{
+ sector_t zone_sectors = disk->queue->limits.chunk_sectors;
+
+ if (sector + zone_sectors >= get_capacity(disk))
+ return disk->last_zone_capacity;
+ return disk->zone_capacity;
+}
+static inline unsigned int bdev_zone_capacity(struct block_device *bdev,
+ sector_t pos)
+{
+ return disk_zone_capacity(bdev->bd_disk, pos);
+}
+#else /* CONFIG_BLK_DEV_ZONED */
+static inline unsigned int disk_nr_zones(struct gendisk *disk)
+{
+ return 0;
+}
+static inline bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs)
+{
+ return false;
+}
+#endif /* CONFIG_BLK_DEV_ZONED */
+
+static inline unsigned int bdev_nr_zones(struct block_device *bdev)
+{
+ return disk_nr_zones(bdev->bd_disk);
+}
+
int bdev_disk_changed(struct gendisk *disk, bool invalidate);
void put_disk(struct gendisk *disk);
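
As a usage sketch (not part of the patch, and assuming CONFIG_BLK_DEV_ZONED so that bdev_zone_capacity() is declared), a zoned-aware caller could use the new helper to find how many sectors of a zone are actually usable; only the last zone may have a capacity smaller than the zone size:

#include <linux/blkdev.h>

/*
 * Illustrative helper: usable sectors in the zone containing @sector.
 * Requires CONFIG_BLK_DEV_ZONED; returns 0 for non-zoned devices.
 */
static sector_t example_zone_usable_sectors(struct block_device *bdev,
					    sector_t sector)
{
	if (!bdev_is_zoned(bdev))
		return 0;
	return bdev_zone_capacity(bdev, sector);
}
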
@@ -1593,6 +1637,7 @@ static inline void bio_end_io_acct(struct bio *bio, unsigned long start_time)
return bio_end_io_acct_remapped(bio, start_time, bio->bi_bdev);
}
+int bdev_validate_blocksize(struct block_device *bdev, int block_size);
int set_blocksize(struct file *file, int size);
int lookup_bdev(const char *pathname, dev_t *dev);
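
A hedged sketch of how the new bdev_validate_blocksize() declaration pairs with set_blocksize() on a mount-style path (the function and variable names below are made up for illustration):

#include <linux/blkdev.h>

/*
 * Illustrative: check that @blocksize is acceptable for the device backing
 * @bdev_file, then switch the block device to that block size.
 */
static int example_check_and_set_bs(struct file *bdev_file, int blocksize)
{
	struct block_device *bdev = file_bdev(bdev_file);
	int ret;

	ret = bdev_validate_blocksize(bdev, blocksize);
	if (ret)
		return ret;	/* e.g. unsupported or too small for the device */

	return set_blocksize(bdev_file, blocksize);
}
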
@@ -1649,10 +1694,6 @@ int bd_prepare_to_claim(struct block_device *bdev, void *holder,
const struct blk_holder_ops *hops);
void bd_abort_claiming(struct block_device *bdev, void *holder);
-/* just for blk-cgroup, don't use elsewhere */
-struct block_device *blkdev_get_no_open(dev_t dev);
-void blkdev_put_no_open(struct block_device *bdev);
-
struct block_device *I_BDEV(struct inode *inode);
struct block_device *file_bdev(struct file *bdev_file);
bool disk_live(struct gendisk *disk);
@@ -1664,7 +1705,7 @@ int sync_blockdev(struct block_device *bdev);
int sync_blockdev_range(struct block_device *bdev, loff_t lstart, loff_t lend);
int sync_blockdev_nowait(struct block_device *bdev);
void sync_bdevs(bool wait);
-void bdev_statx(struct path *, struct kstat *, u32);
+void bdev_statx(const struct path *path, struct kstat *stat, u32 request_mask);
void printk_all_partitions(void);
int __init early_lookup_bdev(const char *pathname, dev_t *dev);
#else
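
The bdev_statx() change above only constifies the prototype; a minimal, purely illustrative caller showing that the path argument is read-only:

#include <linux/blkdev.h>
#include <linux/path.h>
#include <linux/stat.h>

/* Illustrative: fill @stat with block-device attributes for @path. */
static void example_bdev_stat(const struct path *path, struct kstat *stat)
{
	bdev_statx(path, stat, STATX_DIOALIGN | STATX_WRITE_ATOMIC);
}
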
@@ -1682,8 +1723,8 @@ static inline int sync_blockdev_nowait(struct block_device *bdev)
static inline void sync_bdevs(bool wait)
{
}
-static inline void bdev_statx(struct path *path, struct kstat *stat,
- u32 request_mask)
+static inline void bdev_statx(const struct path *path, struct kstat *stat,
+ u32 request_mask)
{
}
static inline void printk_all_partitions(void)