path: root/include/linux/blkdev.h
author    Hou Tao <houtao1@huawei.com>    2019-05-21 15:59:03 +0800
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>    2019-10-05 13:13:55 +0200
commit    d54de8cfa4a1343419b21b463fd6aa8492226f80 (patch)
tree      dfc899a7e5752e4634b49567d054110e3d4cb5ef /include/linux/blkdev.h
parent    6c7b70e220d5d9236b943c026ba4492d7a488c47 (diff)
block: make rq sector size accessible for block stats
[ Upstream commit 3d24430694077313c75c6b89f618db09943621e4 ]

Currently rq->data_len will be decreased by a partial completion or zeroed by a full completion, so when blk_stat_add() is invoked, data_len will be zero and there will never be samples in poll_cb, because blk_mq_poll_stats_bkt() returns -1 if data_len is zero.

We could move blk_stat_add() back to __blk_mq_complete_request(), but that would defeat the earlier effort to call ktime_get_ns() only once. Instead we can reuse the throtl_size field, use it for both block stats and block throttling, and adjust the logic in blk_mq_poll_stats_bkt() accordingly.

Fixes: 4bc6339a583c ("block: move blk_stat_add() to __blk_mq_end_request()")
Tested-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Hou Tao <houtao1@huawei.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Sasha Levin <sashal@kernel.org>
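To make the failure mode concrete, the standalone sketch below (plain userland C, not kernel code; the bucket formula, BLK_MQ_POLL_STATS_BKTS, and the struct are simplified stand-ins for their counterparts in block/blk-mq.c) contrasts bucketing on the zeroed data_len with bucketing on the preserved stats_sectors:

    /* Standalone sketch of the poll-stats bucketing described above.
     * The formula and constants only loosely mirror block/blk-mq.c;
     * struct rq_sketch is a minimal stand-in for struct request. */
    #include <stdio.h>

    #define BLK_MQ_POLL_STATS_BKTS 16

    struct rq_sketch {
        unsigned int data_len;        /* zeroed when the request completes */
        unsigned short stats_sectors; /* set at issue time, never zeroed   */
        int ddir;                     /* 0 = read, 1 = write               */
    };

    static int ilog2_u(unsigned int v)
    {
        int l = -1;
        while (v) { v >>= 1; l++; }
        return l; /* ilog2_u(0) == -1, standing in for an undefined log */
    }

    /* Old behaviour: bucket from data_len, which is already 0 at
     * completion time, so the bucket is negative and every sample
     * is dropped. */
    static int bkt_from_data_len(const struct rq_sketch *rq)
    {
        int bucket = rq->ddir + 2 * (ilog2_u(rq->data_len) - 9);
        if (bucket < 0)
            return -1;
        return bucket < BLK_MQ_POLL_STATS_BKTS ?
               bucket : BLK_MQ_POLL_STATS_BKTS - 1;
    }

    /* Fixed behaviour: bucket from stats_sectors, which survives
     * completion, so the sample lands in a valid bucket. */
    static int bkt_from_stats_sectors(const struct rq_sketch *rq)
    {
        int bucket = rq->ddir + 2 * ilog2_u(rq->stats_sectors);
        if (bucket < 0)
            return -1;
        return bucket < BLK_MQ_POLL_STATS_BKTS ?
               bucket : BLK_MQ_POLL_STATS_BKTS - 1;
    }

    int main(void)
    {
        /* A completed 4 KiB read: data_len already zeroed, 8 sectors kept. */
        struct rq_sketch rq = { .data_len = 0, .stats_sectors = 8, .ddir = 0 };

        printf("old bucket: %d (sample dropped)\n", bkt_from_data_len(&rq));
        printf("new bucket: %d (sample kept)\n", bkt_from_stats_sectors(&rq));
        return 0;
    }

Built with any C compiler, this prints -1 (sample dropped) for the old path and bucket 6 for the new one.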
Diffstat (limited to 'include/linux/blkdev.h')
-rw-r--r--  include/linux/blkdev.h | 15
1 file changed, 12 insertions(+), 3 deletions(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 93baef66b942..4c6754e53672 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -202,9 +202,12 @@ struct request {
#ifdef CONFIG_BLK_WBT
unsigned short wbt_flags;
#endif
-#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
- unsigned short throtl_size;
-#endif
+ /*
+ * rq sectors used for blk stats. It has the same value
+ * as blk_rq_sectors(rq), except that it is never zeroed
+ * by completion.
+ */
+ unsigned short stats_sectors;
/*
* Number of scatter-gather DMA addr+len pairs after
@@ -902,6 +905,7 @@ static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
* blk_rq_err_bytes() : bytes left till the next error boundary
* blk_rq_sectors() : sectors left in the entire request
* blk_rq_cur_sectors() : sectors left in the current segment
+ * blk_rq_stats_sectors() : sectors of the entire request used for stats
*/
static inline sector_t blk_rq_pos(const struct request *rq)
{
@@ -930,6 +934,11 @@ static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
return blk_rq_cur_bytes(rq) >> SECTOR_SHIFT;
}
+static inline unsigned int blk_rq_stats_sectors(const struct request *rq)
+{
+ return rq->stats_sectors;
+}
+
#ifdef CONFIG_BLK_DEV_ZONED
static inline unsigned int blk_rq_zone_no(struct request *rq)
{
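For completeness: this header diff only adds the field and its accessor. The commit message implies a counterpart in block/blk-mq.c that records the size when the request is issued; the sketch below is reconstructed from that description, so the surrounding hook (QUEUE_FLAG_STATS, RQF_STATS, io_start_time_ns) and the exact body are assumptions about mainline of that era, not quoted from the patch:

    /* Sketch only: issue-time counterpart in block/blk-mq.c,
     * reconstructed from the commit description above. */
    void blk_mq_start_request(struct request *rq)
    {
        struct request_queue *q = rq->q;

        /* ... */

        if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
            rq->io_start_time_ns = ktime_get_ns();
            /* Capture the size now: blk_rq_sectors(rq) shrinks on
             * partial completion and reads zero once the request
             * completes, but stats_sectors is left untouched, so
             * blk_stat_add() can still bucket the sample later. */
            rq->stats_sectors = blk_rq_sectors(rq);
            rq->rq_flags |= RQF_STATS;
        }

        /* ... */
    }

With the size frozen at issue time, blk_mq_poll_stats_bkt() can keep reading it from the request at completion, and the hybrid-polling statistics see nonzero samples again.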