author    Dennis Zhou (Facebook) <dennisszhou@gmail.com>  2018-09-11 14:41:27 -0400
committer Jens Axboe <axboe@kernel.dk>                    2018-09-21 20:29:03 -0600
commit    49f4c2dc2b5066e9211101c59cc0828e81d41614
tree      217391585a20535d3c2606d98659786bf73682e2 /include
parent    27e6fa996c534c32702aa4d32db0ffa383acd050
blkcg: update blkg_lookup_create to do locking
To know when to create a blkg, the general pattern is to do a blkg_lookup, and if that fails, take the lock and do the lookup again; if that fails too, finally create. It doesn't make much sense for everyone who wants to do creation to write this themselves.

This changes blkg_lookup_create to do the locking and implement this pattern. The old blkg_lookup_create is renamed to __blkg_lookup_create. If a call site wants to do its own error handling or already owns the queue lock, it can use __blkg_lookup_create. This will be used in upcoming patches.

Signed-off-by: Dennis Zhou <dennisszhou@gmail.com>
Reviewed-by: Josef Bacik <josef@toxicpanda.com>
Acked-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Liu Bo <bo.liu@linux.alibaba.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
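The wrapper itself lives in block/blk-cgroup.c, which this include-only diffstat does not show. As a minimal sketch of the pattern the message describes, built only from the two declarations in the hunk below (illustrative, not the verbatim kernel code):

/*
 * Sketch of the relocking wrapper: lockless lookup first, then take
 * the queue lock and fall back to the slow path, which looks up
 * again and creates the blkg on a miss.
 */
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
				    struct request_queue *q)
{
	struct blkcg_gq *blkg = blkg_lookup(blkcg, q);	/* fast path, no lock */

	if (likely(blkg))
		return blkg;

	spin_lock_irq(q->queue_lock);
	blkg = __blkg_lookup_create(blkcg, q);	/* re-lookup + create under lock */
	spin_unlock_irq(q->queue_lock);

	return blkg;
}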
Diffstat (limited to 'include')
-rw-r--r--   include/linux/blk-cgroup.h   4
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
index 24067a1f8b36..cc0f238530f6 100644
--- a/include/linux/blk-cgroup.h
+++ b/include/linux/blk-cgroup.h
@@ -184,6 +184,8 @@ extern struct cgroup_subsys_state * const blkcg_root_css;
 struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
				      struct request_queue *q, bool update_hint);
+struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
+				      struct request_queue *q);
 struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
				    struct request_queue *q);
 int blkcg_init_queue(struct request_queue *q);
@@ -897,7 +899,7 @@ static inline bool blkcg_bio_issue_check(struct request_queue *q,
 	blkg = blkg_lookup(blkcg, q);
 	if (unlikely(!blkg)) {
 		spin_lock_irq(q->queue_lock);
-		blkg = blkg_lookup_create(blkcg, q);
+		blkg = __blkg_lookup_create(blkcg, q);
 		if (IS_ERR(blkg))
 			blkg = NULL;
 		spin_unlock_irq(q->queue_lock);
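Note that blkcg_bio_issue_check keeps its open-coded locking and simply switches to __blkg_lookup_create, since it already manages q->queue_lock itself. A call site that does not hold the queue lock would use the new wrapper instead; a hypothetical caller for illustration (get_blkg is an invented name, not from the patch):

/* Hypothetical caller: no queue lock held on entry. */
static struct blkcg_gq *get_blkg(struct blkcg *blkcg, struct request_queue *q)
{
	struct blkcg_gq *blkg;

	/* Takes q->queue_lock internally on the slow path. */
	blkg = blkg_lookup_create(blkcg, q);
	if (IS_ERR(blkg))
		return NULL;	/* error handling is up to the call site */

	return blkg;
}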