author	Tejun Heo <tj@kernel.org>	2013-05-14 13:52:34 -0700
committer	Tejun Heo <tj@kernel.org>	2013-05-14 13:52:34 -0700
commit	49a2f1e3f231f6b2ccfc8192f4c395de7fa910a1 (patch)
tree	aa1f0a5ff403d778b5b97997cdaec361eb79a0b9 /block
parent	0049af73bb4b74d1407db59caefc5fe057ee434a (diff)
blk-throttle: add throtl_grp->service_queue
Currently, there's a single service_queue per queue - throtl_data->service_queue. All active throtl_grp's are queued on it and dispatched according to their limits.

To support hierarchy, this will be expanded so that active throtl_grp's form a tree anchored at throtl_data->service_queue and chained through each intermediate throtl_grp's service_queue.

This patch adds throtl_grp->service_queue to prepare for hierarchy support. The initialization function - throtl_service_queue_init() - is added and replaces the macro initializer. The newly added tg->service_queue isn't used yet; the following patches will put it to use.

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Vivek Goyal <vgoyal@redhat.com>
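To make the shape of the change easy to see outside the kernel tree, here is a minimal userspace sketch of the pattern this patch introduces: one throtl_service_queue_init() helper replaces the compound-literal macro initializer and works for any embedded service_queue, whether it lives in throtl_data or in a throtl_grp. The rb_root/RB_ROOT stand-ins and the stripped-down structs below are simplifications for illustration, not the kernel definitions.

#include <stdio.h>
#include <string.h>

struct rb_node;                                     /* opaque stand-in */
struct rb_root { struct rb_node *rb_node; };        /* stand-in for <linux/rbtree.h> */
#define RB_ROOT (struct rb_root){ NULL }

struct throtl_service_queue {
	struct rb_root pending_tree;                /* RB tree of active groups */
};

struct throtl_grp {
	struct throtl_service_queue service_queue;  /* this group's service queue */
};

struct throtl_data {
	struct throtl_service_queue service_queue;  /* top-level service queue */
};

/* replaces THROTL_SERVICE_QUEUE_INITIALIZER; assumes the caller zeroed *sq */
static void throtl_service_queue_init(struct throtl_service_queue *sq)
{
	sq->pending_tree = RB_ROOT;
}

int main(void)
{
	struct throtl_data td;
	struct throtl_grp tg;

	memset(&td, 0, sizeof(td));
	memset(&tg, 0, sizeof(tg));

	/* the same helper initializes the per-device and the per-group queue */
	throtl_service_queue_init(&td.service_queue);
	throtl_service_queue_init(&tg.service_queue);

	printf("td sq at %p, tg sq at %p\n",
	       (void *)&td.service_queue, (void *)&tg.service_queue);
	return 0;
}

Switching from a macro initializer to a function matters once every throtl_grp embeds its own service_queue: the same helper can then be called from both blk_throtl_init() and throtl_pd_init(), which is exactly what the hunks below do.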
Diffstat (limited to 'block')
-rw-r--r--	block/blk-throttle.c	15
1 files changed, 11 insertions, 4 deletions
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index ebaaaa9f57d6..7340440ccfb5 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -33,9 +33,6 @@ struct throtl_service_queue {
 	unsigned long		first_pending_disptime;	/* disptime of the first tg */
 };
 
-#define THROTL_SERVICE_QUEUE_INITIALIZER				\
-	(struct throtl_service_queue){ .pending_tree = RB_ROOT }
-
 enum tg_state_flags {
 	THROTL_TG_PENDING	= 1 << 0,	/* on parent's pending tree */
 };
@@ -60,6 +57,9 @@ struct throtl_grp {
 	/* throtl_data this group belongs to */
 	struct throtl_data *td;
 
+	/* this group's service queue */
+	struct throtl_service_queue service_queue;
+
 	/*
 	 * Dispatch time in jiffies. This is the estimated time when group
 	 * will unthrottle and is ready to dispatch more bio. It is used as
@@ -190,11 +190,18 @@ alloc_stats:
 	goto alloc_stats;
 }
 
+/* init a service_queue, assumes the caller zeroed it */
+static void throtl_service_queue_init(struct throtl_service_queue *sq)
+{
+	sq->pending_tree = RB_ROOT;
+}
+
 static void throtl_pd_init(struct blkcg_gq *blkg)
 {
 	struct throtl_grp *tg = blkg_to_tg(blkg);
 	unsigned long flags;
 
+	throtl_service_queue_init(&tg->service_queue);
 	RB_CLEAR_NODE(&tg->rb_node);
 	tg->td = blkg->q->td;
 	bio_list_init(&tg->bio_lists[0]);
@@ -1168,8 +1175,8 @@ int blk_throtl_init(struct request_queue *q)
 	if (!td)
 		return -ENOMEM;
 
-	td->service_queue = THROTL_SERVICE_QUEUE_INITIALIZER;
 	INIT_DELAYED_WORK(&td->dispatch_work, blk_throtl_dispatch_work_fn);
+	throtl_service_queue_init(&td->service_queue);
 
 	q->td = td;
 	td->queue = q;
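As the commit message notes, the newly embedded tg->service_queue isn't used yet; later patches are expected to chain these per-group queues into a tree rooted at throtl_data->service_queue. The sketch below only illustrates that direction under assumed names - parent_sq, link_to_parent() and sq_root() are hypothetical and are not part of this patch.

#include <stddef.h>

/* Illustration only: hypothetical chaining of service queues into a tree. */
struct throtl_service_queue {
	struct throtl_service_queue *parent_sq;  /* hypothetical; NULL at the root */
	/* ... pending tree, dispatch state, etc. ... */
};

/* hang a child queue under its parent; the root keeps parent_sq == NULL */
static void link_to_parent(struct throtl_service_queue *sq,
			   struct throtl_service_queue *parent)
{
	sq->parent_sq = parent;
}

/* walk from any group's queue up to the top-level (throtl_data) queue */
static struct throtl_service_queue *sq_root(struct throtl_service_queue *sq)
{
	while (sq->parent_sq)
		sq = sq->parent_sq;
	return sq;
}

int main(void)
{
	struct throtl_service_queue td_sq = { NULL };   /* anchored at throtl_data */
	struct throtl_service_queue tg_sq = { NULL };   /* one group's queue */

	link_to_parent(&tg_sq, &td_sq);
	return sq_root(&tg_sq) == &td_sq ? 0 : 1;
}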