author | Christoph Hellwig <hch@lst.de> | 2018-11-15 12:17:28 -0700
committer | Jens Axboe <axboe@kernel.dk> | 2018-11-15 12:17:28 -0700
commit | 0d945c1f966b2bcb67bb12be749da0a7fb00201b (patch)
tree | 05a4bf8f0d43cf95878316a0d1e93f018c0bbec9 /block/blk-throttle.c
parent | 6d46964230d182c4b6097379738849a809d791dc (diff)
block: remove the queue_lock indirection
With the legacy request path gone there is no good reason to keep
queue_lock as a pointer; we can always use the embedded lock now.
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Fixed missing conversions in floppy and blk-cgroup, and half-done edits.
Signed-off-by: Jens Axboe <axboe@kernel.dk>
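
For background, the legacy (single-queue) request path let a driver supply its own lock, so `struct request_queue` carried a `spinlock_t *queue_lock` pointer alongside the internal lock it usually pointed at. With that path removed, the pointer can only ever target the queue's own lock, so the indirection is dead weight. A minimal sketch of the pattern this commit applies (struct layout simplified for illustration; not the full `struct request_queue` definition):

```c
#include <linux/spinlock.h>

/* Before: the lock was reached through a pointer that a legacy
 * driver could aim at its own lock (simplified sketch). */
struct request_queue_before {
	spinlock_t *queue_lock;		/* usually == &__queue_lock */
	spinlock_t __queue_lock;
};

/* After: one embedded lock, no indirection. */
struct request_queue_after {
	spinlock_t queue_lock;
};

static void critical_section(struct request_queue_after *q)
{
	/* Callers now pass the address of the embedded lock, hence
	 * the mechanical s/q->queue_lock/&q->queue_lock/ below. */
	spin_lock_irq(&q->queue_lock);
	/* ... work on the queue ... */
	spin_unlock_irq(&q->queue_lock);
}
```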
Diffstat (limited to 'block/blk-throttle.c')
-rw-r--r-- | block/blk-throttle.c | 22
1 file changed, 11 insertions, 11 deletions
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index a665b0950369..d0a23f0bb3ed 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -1243,7 +1243,7 @@ static void throtl_pending_timer_fn(struct timer_list *t)
 	bool dispatched;
 	int ret;
 
-	spin_lock_irq(q->queue_lock);
+	spin_lock_irq(&q->queue_lock);
 	if (throtl_can_upgrade(td, NULL))
 		throtl_upgrade_state(td);
 
@@ -1266,9 +1266,9 @@ again:
 			break;
 
 		/* this dispatch windows is still open, relax and repeat */
-		spin_unlock_irq(q->queue_lock);
+		spin_unlock_irq(&q->queue_lock);
 		cpu_relax();
-		spin_lock_irq(q->queue_lock);
+		spin_lock_irq(&q->queue_lock);
 	}
 
 	if (!dispatched)
@@ -1290,7 +1290,7 @@ again:
 		queue_work(kthrotld_workqueue, &td->dispatch_work);
 	}
 out_unlock:
-	spin_unlock_irq(q->queue_lock);
+	spin_unlock_irq(&q->queue_lock);
 }
 
 /**
@@ -1314,11 +1314,11 @@ static void blk_throtl_dispatch_work_fn(struct work_struct *work)
 
 	bio_list_init(&bio_list_on_stack);
 
-	spin_lock_irq(q->queue_lock);
+	spin_lock_irq(&q->queue_lock);
 	for (rw = READ; rw <= WRITE; rw++)
 		while ((bio = throtl_pop_queued(&td_sq->queued[rw], NULL)))
 			bio_list_add(&bio_list_on_stack, bio);
-	spin_unlock_irq(q->queue_lock);
+	spin_unlock_irq(&q->queue_lock);
 
 	if (!bio_list_empty(&bio_list_on_stack)) {
 		blk_start_plug(&plug);
@@ -2141,7 +2141,7 @@ bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
 	if (bio_flagged(bio, BIO_THROTTLED) || !tg->has_rules[rw])
 		goto out;
 
-	spin_lock_irq(q->queue_lock);
+	spin_lock_irq(&q->queue_lock);
 
 	throtl_update_latency_buckets(td);
 
@@ -2224,7 +2224,7 @@ again:
 	}
 
 out_unlock:
-	spin_unlock_irq(q->queue_lock);
+	spin_unlock_irq(&q->queue_lock);
 out:
 	bio_set_flag(bio, BIO_THROTTLED);
 
@@ -2345,7 +2345,7 @@ static void tg_drain_bios(struct throtl_service_queue *parent_sq)
  * Dispatch all currently throttled bios on @q through ->make_request_fn().
  */
 void blk_throtl_drain(struct request_queue *q)
-	__releases(q->queue_lock) __acquires(q->queue_lock)
+	__releases(&q->queue_lock) __acquires(&q->queue_lock)
 {
 	struct throtl_data *td = q->td;
 	struct blkcg_gq *blkg;
@@ -2368,7 +2368,7 @@ void blk_throtl_drain(struct request_queue *q)
 	tg_drain_bios(&td->service_queue);
 	rcu_read_unlock();
 
-	spin_unlock_irq(q->queue_lock);
+	spin_unlock_irq(&q->queue_lock);
 
 	/* all bios now should be in td->service_queue, issue them */
 	for (rw = READ; rw <= WRITE; rw++)
@@ -2376,7 +2376,7 @@ void blk_throtl_drain(struct request_queue *q)
 						NULL)))
 			generic_make_request(bio);
 
-	spin_lock_irq(q->queue_lock);
+	spin_lock_irq(&q->queue_lock);
 }
 
 int blk_throtl_init(struct request_queue *q)
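
The conversion is purely mechanical, but the payoff generalizes: an embedded lock saves a pointer dereference on every acquisition and removes any ambiguity about which lock guards the structure. A standalone userspace analogue of the same pattern, runnable outside the kernel tree (pthreads instead of kernel spinlocks; the `struct queue` and its fields are invented for illustration):

```c
/* Userspace analogue of embedding the lock in the structure it
 * protects. Build with: cc -pthread embed_lock.c */
#include <pthread.h>
#include <stdio.h>

struct queue {
	pthread_mutex_t lock;	/* embedded, like the new q->queue_lock */
	int nr_queued;
};

static void enqueue(struct queue *q)
{
	pthread_mutex_lock(&q->lock);	/* address of the embedded lock */
	q->nr_queued++;
	pthread_mutex_unlock(&q->lock);
}

int main(void)
{
	struct queue q = { .lock = PTHREAD_MUTEX_INITIALIZER, .nr_queued = 0 };

	enqueue(&q);
	printf("queued: %d\n", q.nr_queued);
	return 0;
}
```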