author     Tejun Heo <tj@kernel.org>  2022-03-14 14:30:11 -1000
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2022-04-15 14:14:41 +0200
commit     6281beee5bb9c6d1756080052a9f989d387cf466 (patch)
tree       27662235b63e0e6366e5f90c72a6247a49593763 /block/blk-merge.c
parent     79e2f40c210a47f283bca352745068207798fbb9 (diff)
block: don't merge across cgroup boundaries if blkcg is enabled
commit 6b2b04590b51aa4cf395fcd185ce439cab5961dc upstream.

blk-iocost and iolatency are cgroup aware rq-qos policies but they didn't
disable merges across different cgroups. This obviously can lead to
accounting and control errors but, more importantly, to priority inversions -
e.g. an IO which belongs to a higher priority cgroup or IO class may end up
getting throttled incorrectly because it gets merged to an IO issued from a
low priority cgroup.

Fix it by adding blk_cgroup_mergeable() which is called from merge paths and
rejects cross-cgroup and cross-issue_as_root merges.

Signed-off-by: Tejun Heo <tj@kernel.org>
Fixes: d70675121546 ("block: introduce blk-iolatency io controller")
Cc: stable@vger.kernel.org # v4.19+
Cc: Josef Bacik <jbacik@fb.com>
Link: https://lore.kernel.org/r/Yi/eE/6zFNyWJ+qd@slm.duckdns.org
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
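The blk_cgroup_mergeable() helper itself is added to include/linux/blk-cgroup.h, so its definition does not appear in the blk-merge.c diff below. As a rough sketch of the check the message describes - the field and helper names (bi_blkg, bio_issue_as_root_blkg()) follow the upstream blk-cgroup header, and the exact backported form may differ - it amounts to:

#ifdef CONFIG_BLK_CGROUP
/*
 * Sketch only: merge is allowed only when both sides point at the same
 * blkcg_gq and agree on whether they are issued on behalf of the root
 * cgroup (issue_as_root).
 */
static inline bool blk_cgroup_mergeable(struct request *rq, struct bio *bio)
{
	return rq->bio->bi_blkg == bio->bi_blkg &&
	       bio_issue_as_root_blkg(rq->bio) == bio_issue_as_root_blkg(bio);
}
#else
/* Without blkcg there are no cgroup boundaries, so merging stays allowed. */
static inline bool blk_cgroup_mergeable(struct request *rq, struct bio *bio)
{
	return true;
}
#endif

With CONFIG_BLK_CGROUP disabled the helper always returns true, which is why the subject line qualifies the change with "if blkcg is enabled".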
Diffstat (limited to 'block/blk-merge.c')
-rw-r--r--  block/blk-merge.c  12
1 file changed, 12 insertions, 0 deletions
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 7efa8c3e2b72..8054410536fd 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -7,6 +7,8 @@
 #include <linux/bio.h>
 #include <linux/blkdev.h>
 #include <linux/scatterlist.h>
+#include <linux/blkdev.h>
+#include <linux/blk-cgroup.h>
 
 #include <trace/events/block.h>
 
@@ -486,6 +488,9 @@ static inline int ll_new_hw_segment(struct request_queue *q,
 	if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q))
 		goto no_merge;
 
+	if (!blk_cgroup_mergeable(req, bio))
+		goto no_merge;
+
 	if (blk_integrity_merge_bio(q, req, bio) == false)
 		goto no_merge;
 
@@ -609,6 +614,9 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
 	if (total_phys_segments > queue_max_segments(q))
 		return 0;
 
+	if (!blk_cgroup_mergeable(req, next->bio))
+		return 0;
+
 	if (blk_integrity_merge_rq(q, req, next) == false)
 		return 0;
 
@@ -843,6 +851,10 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
 	if (rq->rq_disk != bio->bi_disk || req_no_special_merge(rq))
 		return false;
 
+	/* don't merge across cgroup boundaries */
+	if (!blk_cgroup_mergeable(rq, bio))
+		return false;
+
 	/* only merge integrity protected bio into ditto rq */
 	if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
 		return false;
 