author     Tejun Heo <tj@kernel.org>    2013-05-14 13:52:31 -0700
committer  Tejun Heo <tj@kernel.org>    2013-05-14 13:52:31 -0700
commit     cb76199c36a7ccf0947ef4875b32e0940f50d1a8 (patch)
tree       d14184fb947d7cc622eed013d6a27ad4672fa9fc
parent     632b44935f4c99a61c56f8a6f805a1080ab5a432 (diff)
blk-throttle: collapse throtl_dispatch() into the work function
blk-throttle is about to go through major restructuring to support
hierarchy.  Do cosmetic updates in preparation.

* s/throtl_data->throtl_work/throtl_data->dispatch_work/

* s/blk_throtl_work()/blk_throtl_dispatch_work_fn()/

* Collapse throtl_dispatch() into blk_throtl_dispatch_work_fn()

This patch is purely cosmetic.

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Vivek Goyal <vgoyal@redhat.com>
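The collapse works because a workqueue callback can recover its owning
structure directly from the work item passed to it, so the intermediate
throtl_dispatch(q) wrapper adds nothing.  A minimal sketch of that idiom,
using hypothetical names (my_data, my_work_fn) rather than the throttle
code itself:

#include <linux/workqueue.h>

struct my_data {
        struct delayed_work dispatch_work;      /* embedded work item */
        /* ... other per-queue state ... */
};

/* work function: runs in process context off a workqueue */
static void my_work_fn(struct work_struct *work)
{
        /*
         * to_delayed_work() maps the work_struct back to its enclosing
         * delayed_work; container_of() then yields the my_data that
         * embeds it, so no separate back-pointer is needed.
         */
        struct my_data *d = container_of(to_delayed_work(work),
                                         struct my_data, dispatch_work);

        /* ... dispatch throttled items using d ... */
}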
-rw-r--r--   block/blk-throttle.c   26
1 file changed, 9 insertions(+), 17 deletions(-)
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 7dbd0e695df0..0a0bc00059df 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -108,7 +108,7 @@ struct throtl_data
unsigned int nr_undestroyed_grps;
/* Work for dispatching throttled bios */
- struct delayed_work throtl_work;
+ struct delayed_work dispatch_work;
};
/* list and work item to allocate percpu group stats */
@@ -820,10 +820,12 @@ static int throtl_select_dispatch(struct throtl_data *td, struct bio_list *bl)
return nr_disp;
}
-/* Dispatch throttled bios. Should be called without queue lock held. */
-static int throtl_dispatch(struct request_queue *q)
+/* work function to dispatch throttled bios */
+void blk_throtl_dispatch_work_fn(struct work_struct *work)
{
- struct throtl_data *td = q->td;
+ struct throtl_data *td = container_of(to_delayed_work(work),
+ struct throtl_data, dispatch_work);
+ struct request_queue *q = td->queue;
unsigned int nr_disp = 0;
struct bio_list bio_list_on_stack;
struct bio *bio;
@@ -859,16 +861,6 @@ out:
generic_make_request(bio);
blk_finish_plug(&plug);
}
- return nr_disp;
-}
-
-void blk_throtl_work(struct work_struct *work)
-{
- struct throtl_data *td = container_of(work, struct throtl_data,
- throtl_work.work);
- struct request_queue *q = td->queue;
-
- throtl_dispatch(q);
}
/* Call with queue lock held */
@@ -876,7 +868,7 @@ static void
throtl_schedule_delayed_work(struct throtl_data *td, unsigned long delay)
{
- struct delayed_work *dwork = &td->throtl_work;
+ struct delayed_work *dwork = &td->dispatch_work;
if (total_nr_queued(td)) {
mod_delayed_work(kthrotld_workqueue, dwork, delay);
@@ -1057,7 +1049,7 @@ static void throtl_shutdown_wq(struct request_queue *q)
{
struct throtl_data *td = q->td;
- cancel_delayed_work_sync(&td->throtl_work);
+ cancel_delayed_work_sync(&td->dispatch_work);
}
static struct blkcg_policy blkcg_policy_throtl = {
@@ -1206,7 +1198,7 @@ int blk_throtl_init(struct request_queue *q)
return -ENOMEM;
td->tg_service_tree = THROTL_RB_ROOT;
- INIT_DELAYED_WORK(&td->throtl_work, blk_throtl_work);
+ INIT_DELAYED_WORK(&td->dispatch_work, blk_throtl_dispatch_work_fn);
q->td = td;
td->queue = q;
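For context, the scheduling and teardown side of the same pattern follows
the shape of throtl_schedule_delayed_work(), throtl_shutdown_wq() and
blk_throtl_init() above.  A simplified sketch, again with hypothetical
names (my_wq, my_init, my_schedule, my_shutdown) and reusing struct
my_data and my_work_fn from the sketch after the commit message; it is
not the kthrotld_workqueue code itself:

#include <linux/workqueue.h>
#include <linux/errno.h>

static struct workqueue_struct *my_wq;

static int my_init(struct my_data *d)
{
        my_wq = alloc_workqueue("my_dispatch", 0, 0);
        if (!my_wq)
                return -ENOMEM;

        /* wire the embedded work item to its work function */
        INIT_DELAYED_WORK(&d->dispatch_work, my_work_fn);
        return 0;
}

/*
 * (Re)arm the dispatch work.  mod_delayed_work() queues the work if it
 * is idle, or adjusts the timer of an already-pending instance.
 */
static void my_schedule(struct my_data *d, unsigned long delay)
{
        mod_delayed_work(my_wq, &d->dispatch_work, delay);
}

/*
 * Teardown: cancel any pending instance and wait for a running one to
 * finish before the containing structure goes away.
 */
static void my_shutdown(struct my_data *d)
{
        cancel_delayed_work_sync(&d->dispatch_work);
}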