Diffstat (limited to 'include/linux/blkdev.h')
-rw-r--r--  include/linux/blkdev.h |  41
1 file changed, 26 insertions(+), 15 deletions(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 08a727b40816..76f0a4e7c2e5 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -200,8 +200,6 @@ struct gendisk {
spinlock_t zone_wplugs_lock;
struct mempool_s *zone_wplugs_pool;
struct hlist_head *zone_wplugs_hash;
- struct list_head zone_wplugs_err_list;
- struct work_struct zone_wplugs_work;
struct workqueue_struct *zone_wplugs_wq;
#endif /* CONFIG_BLK_DEV_ZONED */
@@ -333,8 +331,8 @@ typedef unsigned int __bitwise blk_features_t;
#define BLK_FEAT_RAID_PARTIAL_STRIPES_EXPENSIVE \
((__force blk_features_t)(1u << 15))
-/* stacked device can/does support atomic writes */
-#define BLK_FEAT_ATOMIC_WRITES_STACKED \
+/* atomic writes enabled */
+#define BLK_FEAT_ATOMIC_WRITES \
((__force blk_features_t)(1u << 16))
/*
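The rename from BLK_FEAT_ATOMIC_WRITES_STACKED drops the stacked-device qualifier, so a single flag now advertises atomic write support. A minimal sketch of how a driver might set it in its queue limits; the surrounding function is hypothetical and the limit values are illustrative:

	/* Hypothetical driver limits setup: advertise atomic writes. */
	static void my_drv_set_limits(struct queue_limits *lim)
	{
		lim->features |= BLK_FEAT_ATOMIC_WRITES;
		lim->atomic_write_hw_unit_min = 4096;	/* illustrative */
		lim->atomic_write_hw_unit_max = 65536;	/* illustrative */
		lim->atomic_write_hw_boundary = 65536;	/* illustrative */
	}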
@@ -583,6 +581,12 @@ struct request_queue {
#ifdef CONFIG_LOCKDEP
struct task_struct *mq_freeze_owner;
int mq_freeze_owner_depth;
+ /*
+ * Records disk & queue state in the current context, used when
+ * unfreezing the queue
+ */
+ bool mq_freeze_disk_dead;
+ bool mq_freeze_queue_dying;
#endif
wait_queue_head_t mq_freeze_wq;
/*
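The two new booleans snapshot whether the disk was dead and the queue dying when the freeze owner was recorded, so the matching unfreeze can make a consistent lockdep decision even if that state changes in between. The bracket they instrument is the existing freeze API; a hedged sketch of a typical caller:

	/* Sketch: the freeze/unfreeze pairing the new fields track.
	 * The work done inside the bracket is illustrative.
	 */
	blk_mq_freeze_queue(q);
	/* ... change state that must not race with in-flight I/O ... */
	blk_mq_unfreeze_queue(q);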
@@ -940,8 +944,7 @@ static inline unsigned int blk_boundary_sectors_left(sector_t offset,
* the caller can modify. The caller must call queue_limits_commit_update()
* to finish the update.
*
- * Context: process context. The caller must have frozen the queue or ensured
- * that there is outstanding I/O by other means.
+ * Context: process context.
*/
static inline struct queue_limits
queue_limits_start_update(struct request_queue *q)
@@ -949,6 +952,8 @@ queue_limits_start_update(struct request_queue *q)
mutex_lock(&q->limits_lock);
return q->limits;
}
+int queue_limits_commit_update_frozen(struct request_queue *q,
+ struct queue_limits *lim);
int queue_limits_commit_update(struct request_queue *q,
struct queue_limits *lim);
int queue_limits_set(struct request_queue *q, struct queue_limits *lim);
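queue_limits_commit_update_frozen() pairs with queue_limits_start_update() and, as the name suggests, handles the queue freeze around the commit itself, which is why the Context note above no longer requires the caller to freeze. A minimal sketch of the calling sequence; the limit being changed is illustrative:

	/* Sketch: start/commit a limits update with the new helper. */
	struct queue_limits lim = queue_limits_start_update(q);
	int ret;

	lim.max_sectors = new_max;	/* illustrative change */
	ret = queue_limits_commit_update_frozen(q, &lim);
	if (ret)
		pr_warn("limits update failed: %d\n", ret);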
@@ -1421,6 +1426,9 @@ static inline bool bdev_zone_is_seq(struct block_device *bdev, sector_t sector)
return is_seq;
}
+int blk_zone_issue_zeroout(struct block_device *bdev, sector_t sector,
+ sector_t nr_sects, gfp_t gfp_mask);
+
static inline unsigned int queue_dma_alignment(const struct request_queue *q)
{
return q->limits.dma_alignment;
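blk_zone_issue_zeroout() mirrors the blkdev_issue_zeroout() signature minus the flags argument, presumably routing the zeroing through the zone write plugging machinery declared earlier in this header. A hedged usage sketch; the caller's variables are illustrative:

	/* Sketch: zero a sector range on a zoned device. */
	int ret = blk_zone_issue_zeroout(bdev, zone_start_sector,
					 nr_zeroed_sectors, GFP_KERNEL);
	if (ret)
		pr_err("zone zeroout failed: %d\n", ret);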
@@ -1698,6 +1706,15 @@ struct io_comp_batch {
void (*complete)(struct io_comp_batch *);
};
+static inline bool blk_atomic_write_start_sect_aligned(sector_t sector,
+ struct queue_limits *limits)
+{
+ unsigned int alignment = max(limits->atomic_write_hw_unit_min,
+ limits->atomic_write_hw_boundary);
+
+ return IS_ALIGNED(sector, alignment >> SECTOR_SHIFT);
+}
+
static inline bool bdev_can_atomic_write(struct block_device *bdev)
{
struct request_queue *bd_queue = bdev->bd_queue;
@@ -1706,15 +1723,9 @@ static inline bool bdev_can_atomic_write(struct block_device *bdev)
if (!limits->atomic_write_unit_min)
return false;
- if (bdev_is_partition(bdev)) {
- sector_t bd_start_sect = bdev->bd_start_sect;
- unsigned int alignment =
- max(limits->atomic_write_unit_min,
- limits->atomic_write_hw_boundary);
-
- if (!IS_ALIGNED(bd_start_sect, alignment >> SECTOR_SHIFT))
- return false;
- }
+ if (bdev_is_partition(bdev))
+ return blk_atomic_write_start_sect_aligned(bdev->bd_start_sect,
+ limits);
return true;
}
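The refactor moves the partition-alignment arithmetic into a reusable helper and switches it from the derived atomic_write_unit_min to the hardware limit atomic_write_hw_unit_min. The check itself is simple: the start sector must be aligned to the larger of the minimum atomic write unit and the atomic write boundary, converted from bytes to sectors. For example, with atomic_write_hw_unit_min = 4096 and atomic_write_hw_boundary = 65536, the required alignment is 65536 >> SECTOR_SHIFT = 128 sectors, so a partition starting at sector 2048 qualifies while one starting at sector 2080 does not. A hedged caller-side sketch; the error handling is illustrative:

	/* Sketch: gate an atomic write on the refactored check. */
	if (!bdev_can_atomic_write(bdev))
		return -EOPNOTSUPP;
	/* Safe to issue a write within the advertised atomic unit limits. */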