author     Tejun Heo <tj@kernel.org>       2012-03-05 13:15:02 -0800
committer  Jens Axboe <axboe@kernel.dk>    2012-03-06 21:27:22 +0100
commit     0a5a7d0e32be6643b881f0e7cd9d0d06fadde27a (patch)
tree       271f62b5f75c239831c7def1c445a6e990366730 /block/blk-throttle.c
parent     2a7f124414b35645049e9c1b125a6f0b470aa5ae (diff)
blkcg: update blkg get functions to take blkio_cgroup as parameter

In both blkg get functions - throtl_get_tg() and cfq_get_cfqg() - instead of obtaining the blkcg of %current explicitly, let the caller specify the blkcg to use as a parameter, and make both functions hold on to that blkcg. This is part of the block cgroup interface cleanup and will help make the blkcg API more modular.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
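For context, a minimal caller-side sketch of the pattern this patch establishes. The wrapper function below is illustrative only (it is not part of the patch), but the identifiers it uses - throtl_get_tg(), task_blkio_cgroup(), struct blkio_cgroup - match the 3.x-era API visible in the diff:

/* Illustrative caller, not part of the patch: resolve the blkcg once
 * under RCU and hand it to the getter, which no longer looks up
 * %current itself. Modeled on what blk_throtl_bio() does after this
 * change. */
static struct throtl_grp *tg_for_current(struct throtl_data *td,
					 struct request_queue *q)
{
	struct blkio_cgroup *blkcg;
	struct throtl_grp *tg;

	rcu_read_lock();
	blkcg = task_blkio_cgroup(current);	/* caller picks the blkcg */

	spin_lock_irq(q->queue_lock);
	tg = throtl_get_tg(td, blkcg);		/* may drop/retake both locks */
	spin_unlock_irq(q->queue_lock);

	rcu_read_unlock();
	return tg;
}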
Diffstat (limited to 'block/blk-throttle.c')
-rw-r--r--   block/blk-throttle.c   16
1 file changed, 7 insertions, 9 deletions
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 9beaac7fb397..c252df9169db 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -303,21 +303,23 @@ throtl_grp *throtl_find_tg(struct throtl_data *td, struct blkio_cgroup *blkcg)
return tg;
}
-static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
+static struct throtl_grp *throtl_get_tg(struct throtl_data *td,
+ struct blkio_cgroup *blkcg)
{
struct throtl_grp *tg = NULL, *__tg = NULL;
- struct blkio_cgroup *blkcg;
struct request_queue *q = td->queue;
/* no throttling for dead queue */
if (unlikely(blk_queue_bypass(q)))
return NULL;
- blkcg = task_blkio_cgroup(current);
tg = throtl_find_tg(td, blkcg);
if (tg)
return tg;
+ if (!css_tryget(&blkcg->css))
+ return NULL;
+
/*
* Need to allocate a group. Allocation of group also needs allocation
* of per cpu stats which in-turn takes a mutex() and can block. Hence
@@ -331,6 +333,7 @@ static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
/* Group allocated and queue is still alive. take the lock */
rcu_read_lock();
spin_lock_irq(q->queue_lock);
+ css_put(&blkcg->css);
/* Make sure @q is still alive */
if (unlikely(blk_queue_bypass(q))) {
@@ -339,11 +342,6 @@ static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
}
/*
- * Initialize the new group. After sleeping, read the blkcg again.
- */
- blkcg = task_blkio_cgroup(current);
-
- /*
* If some other thread already allocated the group while we were
* not holding queue lock, free up the group
*/
@@ -1163,7 +1161,7 @@ bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
* IO group
*/
spin_lock_irq(q->queue_lock);
- tg = throtl_get_tg(td);
+ tg = throtl_get_tg(td, blkcg);
if (unlikely(!tg))
goto out_unlock;
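The css_tryget()/css_put() pair added above is what lets the caller-supplied blkcg survive the allocation path: throtl_get_tg() drops both the RCU read lock and the queue lock around the blocking per-cpu allocation, so it pins the css first and releases the pin once the locks are reacquired. A condensed sketch of that sequence after this patch (comments ours; not verbatim kernel code, names per the surrounding source of that era):

	if (!css_tryget(&blkcg->css))		/* pin blkcg before dropping rcu */
		return NULL;

	rcu_read_unlock();
	spin_unlock_irq(q->queue_lock);

	tg = throtl_alloc_tg(td);		/* per-cpu stats alloc may block */

	rcu_read_lock();
	spin_lock_irq(q->queue_lock);
	css_put(&blkcg->css);			/* locks reheld; drop the pin */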