author    Shaohua Li <shli@fb.com>      2017-07-12 11:49:54 -0700
committer Jens Axboe <axboe@kernel.dk>  2017-07-29 09:00:03 -0600
commit    007cc56b7eeca8848021bc43aca2b8607fbe5589
tree      d178bdf4a51e3a0b6b4b72341dd10b9aa3d299c4
parent    ca1136c99b66b1566781ff12ecddc635d570f932
block: always attach cgroup info into bio
blkcg_bio_issue_check() already looks up the blkcg for a bio, and
bio_associate_blkcg() uses a percpu refcounter, so the association is
a very cheap operation. There is no reason not to attach the cgroup
info to the bio right in blkcg_bio_issue_check(). This also makes
blktrace output the correct cgroup info.

Acked-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Shaohua Li <shli@fb.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
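For context, the helper being relied on here, bio_associate_blkcg(),
looked roughly like the sketch below in block/bio.c at the time. This
is a reconstruction for illustration, not part of this patch, and the
exact body may differ slightly:

	int bio_associate_blkcg(struct bio *bio,
				struct cgroup_subsys_state *blkcg_css)
	{
		/* refuse to overwrite an existing association */
		if (unlikely(bio->bi_css))
			return -EBUSY;
		/*
		 * css_get() is a percpu-ref get, which is why the
		 * commit message calls the association "very cheap".
		 */
		css_get(blkcg_css);
		bio->bi_css = blkcg_css;
		return 0;
	}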
-rw-r--r--  block/blk-throttle.c       | 7 +------
-rw-r--r--  include/linux/blk-cgroup.h | 3 +++
2 files changed, 4 insertions(+), 6 deletions(-)
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index a7285bf2831c..a6ebd2bdb4df 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -2104,14 +2104,9 @@ static inline void throtl_update_latency_buckets(struct throtl_data *td)
 static void blk_throtl_assoc_bio(struct throtl_grp *tg, struct bio *bio)
 {
 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
-	int ret;
-
-	ret = bio_associate_current(bio);
-	if (ret == 0 || ret == -EBUSY)
+	if (bio->bi_css)
 		bio->bi_cg_private = tg;
 	blk_stat_set_issue(&bio->bi_issue_stat, bio_sectors(bio));
-#else
-	bio_associate_current(bio);
 #endif
 }
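For readability, blk_throtl_assoc_bio() as it reads once this hunk is
applied (assembled directly from the context and '+' lines above):
blkcg_bio_issue_check() has already attached a css by the time the
throttler runs, so the function only needs to test bi_css.

	static void blk_throtl_assoc_bio(struct throtl_grp *tg, struct bio *bio)
	{
	#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
		/* bi_css is now set earlier, in blkcg_bio_issue_check() */
		if (bio->bi_css)
			bio->bi_cg_private = tg;
		blk_stat_set_issue(&bio->bi_issue_stat, bio_sectors(bio));
	#endif
	}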
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
index 7104bea8dab1..9d92153dd856 100644
--- a/include/linux/blk-cgroup.h
+++ b/include/linux/blk-cgroup.h
@@ -691,6 +691,9 @@ static inline bool blkcg_bio_issue_check(struct request_queue *q,
 	rcu_read_lock();
 	blkcg = bio_blkcg(bio);

+	/* associate blkcg if bio hasn't attached one */
+	bio_associate_blkcg(bio, &blkcg->css);
+
 	blkg = blkg_lookup(blkcg, q);
 	if (unlikely(!blkg)) {
 		spin_lock_irq(q->queue_lock);
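Putting the hunk in context, the head of blkcg_bio_issue_check() now
reads roughly as below. The local declarations and the elided tail are
reconstructed from the surrounding function for illustration; they are
not shown in this hunk:

	static inline bool blkcg_bio_issue_check(struct request_queue *q,
						 struct bio *bio)
	{
		struct blkcg *blkcg;
		struct blkcg_gq *blkg;

		rcu_read_lock();
		blkcg = bio_blkcg(bio);

		/* associate blkcg if bio hasn't attached one */
		bio_associate_blkcg(bio, &blkcg->css);

		blkg = blkg_lookup(blkcg, q);
		/* ... blkg creation fallback, throttling and stats follow ... */

Because the association now happens here, under rcu_read_lock() and
before the bio proceeds down the issue path, every bio that passes the
check carries its blkcg css, which is what the commit message refers
to when it says blktrace now outputs the correct cgroup info.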