Diffstat (limited to 'include/linux/blkdev.h'):
 include/linux/blkdev.h | 40 +++++++++++++++++++++++++++++++---------
 1 file changed, 31 insertions(+), 9 deletions(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 3177181c4326..c9cb12483e12 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -11,7 +11,6 @@
#include <linux/minmax.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
-#include <linux/backing-dev-defs.h>
#include <linux/wait.h>
#include <linux/mempool.h>
#include <linux/pfn.h>
@@ -57,7 +56,7 @@ struct blk_keyslot_manager;
* Maximum number of blkcg policies allowed to be registered concurrently.
* Defined here to simplify include dependency.
*/
-#define BLKCG_MAX_POLS 5
+#define BLKCG_MAX_POLS 6
typedef void (rq_end_io_fn)(struct request *, blk_status_t);
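The bump from 5 to 6 leaves room for one more blkcg policy slot; registration scans a fixed-size table and fails once every BLKCG_MAX_POLS slot is taken. A minimal sketch of that bound check (simplified, not the actual blk-cgroup.c code; the table and function names are only illustrative):

/* Illustrative sketch: policy registration bounded by BLKCG_MAX_POLS. */
static struct blkcg_policy *example_blkcg_policy[BLKCG_MAX_POLS];

static int example_policy_register(struct blkcg_policy *pol)
{
	int i;

	for (i = 0; i < BLKCG_MAX_POLS; i++)
		if (!example_blkcg_policy[i])
			break;
	if (i >= BLKCG_MAX_POLS)
		return -ENOSPC;		/* all slots already in use */

	example_blkcg_policy[i] = pol;
	return 0;
}
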
@@ -398,8 +397,6 @@ struct request_queue {
struct blk_mq_hw_ctx **queue_hw_ctx;
unsigned int nr_hw_queues;
- struct backing_dev_info *backing_dev_info;
-
/*
* The queue owner gets to use this for whatever they like.
* ll_rw_blk doesn't touch it.
@@ -424,6 +421,8 @@ struct request_queue {
spinlock_t queue_lock;
+ struct gendisk *disk;
+
/*
* queue kobject
*/
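Storing the gendisk pointer in the queue replaces the kobject walk that the queue_to_disk() macro (removed below) used to do; a minimal sketch of the direct access, assuming callers handle a queue that has no disk attached yet (helper name is invented):

/* Illustrative helper: reach the owning disk straight from the queue. */
static inline struct gendisk *example_queue_to_disk(struct request_queue *q)
{
	return q->disk;		/* may be NULL before a disk is added */
}
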
@@ -664,8 +663,6 @@ extern void blk_clear_pm_only(struct request_queue *q);
dma_map_page_attrs(dev, (bv)->bv_page, (bv)->bv_offset, (bv)->bv_len, \
(dir), (attrs))
-#define queue_to_disk(q) (dev_to_disk(kobj_to_dev((q)->kobj.parent)))
-
static inline bool queue_is_mq(struct request_queue *q)
{
return q->mq_ops;
@@ -941,6 +938,10 @@ static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
#define SECTOR_SIZE (1 << SECTOR_SHIFT)
#endif
+#define PAGE_SECTORS_SHIFT (PAGE_SHIFT - SECTOR_SHIFT)
+#define PAGE_SECTORS (1 << PAGE_SECTORS_SHIFT)
+#define SECTOR_MASK (PAGE_SECTORS - 1)
+
/*
* blk_rq_pos() : the current sector
* blk_rq_bytes() : bytes left in the entire request
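With 4 KiB pages and 512-byte sectors, PAGE_SECTORS_SHIFT is 3, PAGE_SECTORS is 8, and SECTOR_MASK is 7, so page/sector conversions reduce to shifts and masks. A small illustrative sketch built on the new macros (function names are invented):

/* Illustrative conversions: 4 KiB page <=> 512-byte sectors. */
static inline sector_t example_page_to_sector(pgoff_t page_idx)
{
	return (sector_t)page_idx << PAGE_SECTORS_SHIFT;
}

static inline pgoff_t example_sector_to_page(sector_t sector)
{
	return sector >> PAGE_SECTORS_SHIFT;
}

static inline unsigned int example_sector_offset_in_page(sector_t sector)
{
	return sector & SECTOR_MASK;	/* sector index within its page */
}
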
@@ -1139,7 +1140,7 @@ void blk_queue_zone_write_granularity(struct request_queue *q,
unsigned int size);
extern void blk_queue_alignment_offset(struct request_queue *q,
unsigned int alignment);
-void blk_queue_update_readahead(struct request_queue *q);
+void disk_update_readahead(struct gendisk *disk);
extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
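The readahead refresh is now keyed off the gendisk rather than the request_queue; a hedged example of how a driver might call it after adjusting its limits (driver name and update path are invented):

/* Illustrative driver path: adjust limits, then refresh readahead. */
static void exampledrv_set_io_opt(struct gendisk *disk, unsigned int opt_bytes)
{
	blk_queue_io_opt(disk->queue, opt_bytes);
	disk_update_readahead(disk);	/* formerly blk_queue_update_readahead(q) */
}
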
@@ -1521,6 +1522,22 @@ static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector
return offset << SECTOR_SHIFT;
}
+/*
+ * Two cases of handling DISCARD merge:
+ * If max_discard_segments > 1, the driver treats every bio as
+ * a range and sends them to the controller together. The ranges
+ * need not be contiguous.
+ * Otherwise, the bios/requests are handled the same as others,
+ * which must be contiguous.
+ */
+static inline bool blk_discard_mergable(struct request *req)
+{
+ if (req_op(req) == REQ_OP_DISCARD &&
+ queue_max_discard_segments(req->q) > 1)
+ return true;
+ return false;
+}
+
static inline int bdev_discard_alignment(struct block_device *bdev)
{
struct request_queue *q = bdev_get_queue(bdev);
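The helper gives merge paths one place to ask whether a discard may be treated as a multi-range merge; a simplified sketch of such a decision point (condensed from what blk-merge.c-style code does, not a verbatim copy):

/* Illustrative merge decision: multi-range discard vs. contiguous back merge. */
static enum elv_merge example_try_merge(struct request *rq, struct bio *bio)
{
	if (blk_discard_mergable(rq))
		return ELEVATOR_DISCARD_MERGE;	/* ranges need not be contiguous */
	if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
		return ELEVATOR_BACK_MERGE;	/* must stay contiguous */
	return ELEVATOR_NO_MERGE;
}
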
@@ -1855,6 +1872,13 @@ struct block_device_operations {
char *(*devnode)(struct gendisk *disk, umode_t *mode);
struct module *owner;
const struct pr_ops *pr_ops;
+
+ /*
+ * Special callback for probing the GPT entry at a given sector.
+ * Needed by Android devices; used by the GPT scanner and the MMC
+ * block driver.
+ */
+ int (*alternative_gpt_sector)(struct gendisk *disk, sector_t *sector);
};
#ifdef CONFIG_COMPAT
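A driver opting into the new hook fills in where its alternative GPT copy lives; a purely illustrative implementation, assuming a backup header kept at the last sector (the placement and the driver name are assumptions, not something the interface dictates):

/* Illustrative callback: point the GPT scanner at a vendor-specific sector. */
static int exampledrv_alternative_gpt_sector(struct gendisk *disk, sector_t *sector)
{
	sector_t capacity = get_capacity(disk);

	if (!capacity)
		return -ENODEV;

	*sector = capacity - 1;	/* assumed: backup GPT header at the last sector */
	return 0;
}

static const struct block_device_operations exampledrv_fops = {
	.owner			= THIS_MODULE,
	.alternative_gpt_sector	= exampledrv_alternative_gpt_sector,
};
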
@@ -1984,8 +2008,6 @@ void blkdev_put_no_open(struct block_device *bdev);
struct block_device *bdev_alloc(struct gendisk *disk, u8 partno);
void bdev_add(struct block_device *bdev, dev_t dev);
struct block_device *I_BDEV(struct inode *inode);
-struct block_device *bdgrab(struct block_device *bdev);
-void bdput(struct block_device *);
int truncate_bdev_range(struct block_device *bdev, fmode_t mode, loff_t lstart,
loff_t lend);