| author | Tejun Heo <tj@kernel.org> | 2015-08-18 14:55:02 -0700 |
|---|---|---|
| committer | Jens Axboe <axboe@fb.com> | 2015-08-18 15:49:16 -0700 |
| commit | 2da8de0bb799bf2bdfa893e5a1e294eb6bafba62 (patch) | |
| tree | 6f8898af2df9a9bd687e97b08a0d7cdbc52dcd17 /block/cfq-iosched.c | |
| parent | d93a11f1cd890d4ea72f7cef75fac56801b099b3 (diff) | |
cfq-iosched: remove @gfp_mask from cfq_find_alloc_queue()
Even when allocations fail, cfq_find_alloc_queue() always returns a
valid cfq_queue by falling back to the oom cfq_queue. As such, there
isn't much point in taking @gfp_mask and trying "harder" if __GFP_WAIT
is set. GFP_NOWAIT allocations don't fail often, and even when they do,
the degraded behavior is acceptable and temporary.
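In code terms, the fallback boils down to the following condensed sketch of the post-patch path (the identifiers are taken from the diff below, not the file verbatim):

```c
/* Condensed from the diff below: try a non-blocking, zeroed slab
 * allocation; if it fails, fall back to the preallocated oom queue,
 * which is always valid. */
cfqq = kmem_cache_alloc_node(cfq_pool, GFP_NOWAIT | __GFP_ZERO,
			     cfqd->queue->node);
if (!cfqq)
	cfqq = &cfqd->oom_cfqq;	/* degraded but functional, and temporary */
```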
After all, the only reason get_request(), which ultimately determines
the gfp_mask, cares about __GFP_WAIT is to guarantee request
allocation, assuming IO forward progress, for callers which are
willing to wait. There's no reason for cfq_find_alloc_queue() to
behave differently on __GFP_WAIT when it already has a fallback
mechanism.
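For contrast, the __GFP_WAIT path this patch removes looked roughly as follows (abridged from the removed hunk in the diff below):

```c
/* Abridged pre-patch behavior: honoring __GFP_WAIT meant dropping
 * the queue lock around a sleeping allocation, retaking it, and
 * retrying the lookup -- complexity the oom_cfqq fallback already
 * makes unnecessary. */
if (gfp_mask & __GFP_WAIT) {
	rcu_read_unlock();
	spin_unlock_irq(cfqd->queue->queue_lock);
	new_cfqq = kmem_cache_alloc_node(cfq_pool, gfp_mask | __GFP_ZERO,
					 cfqd->queue->node);
	spin_lock_irq(cfqd->queue->queue_lock);
	if (new_cfqq)
		goto retry;	/* redo the lookup under the lock */
	return &cfqd->oom_cfqq;
}
```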
Remove @gfp_mask from cfq_find_alloc_queue() and propagate the changes
to its callers. This simplifies the function quite a bit and will
help make async queues per-cfq_group.
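The resulting prototypes, as they appear in the diff below:

```c
/* Post-patch prototypes per the diff below: no gfp_t parameter. */
static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, bool is_sync,
				       struct cfq_io_cq *cic, struct bio *bio);
static struct cfq_queue *
cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
		     struct bio *bio);
```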
v2: Updated to reflect GFP_ATOMIC -> GFP_NOWAIT.
Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Jeff Moyer <jmoyer@redhat.com>
Cc: Vivek Goyal <vgoyal@redhat.com>
Cc: Arianna Avanzini <avanzini.arianna@gmail.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'block/cfq-iosched.c')
| -rw-r--r-- | block/cfq-iosched.c | 45 |

1 file changed, 10 insertions(+), 35 deletions(-)
```diff
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 5f119292a254..146b03d64b7e 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -883,8 +883,7 @@ static inline int cfqg_busy_async_queues(struct cfq_data *cfqd,
 
 static void cfq_dispatch_insert(struct request_queue *, struct request *);
 static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, bool is_sync,
-				       struct cfq_io_cq *cic, struct bio *bio,
-				       gfp_t gfp_mask);
+				       struct cfq_io_cq *cic, struct bio *bio);
 
 static inline struct cfq_io_cq *icq_to_cic(struct io_cq *icq)
 {
@@ -3575,7 +3574,7 @@ static void check_ioprio_changed(struct cfq_io_cq *cic, struct bio *bio)
 	cfqq = cic_to_cfqq(cic, false);
 	if (cfqq) {
 		cfq_put_queue(cfqq);
-		cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic, bio, GFP_NOWAIT);
+		cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic, bio);
 		cic_set_cfqq(cic, cfqq, false);
 	}
 
@@ -3643,13 +3642,12 @@ static inline void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio) {
 
 static struct cfq_queue *
 cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
-		     struct bio *bio, gfp_t gfp_mask)
+		     struct bio *bio)
 {
 	struct blkcg *blkcg;
-	struct cfq_queue *cfqq, *new_cfqq = NULL;
+	struct cfq_queue *cfqq;
 	struct cfq_group *cfqg;
 
-retry:
 	rcu_read_lock();
 
 	blkcg = bio_blkcg(bio);
@@ -3666,27 +3664,9 @@ retry:
 	 * originally, since it should just be a temporary situation.
 	 */
 	if (!cfqq || cfqq == &cfqd->oom_cfqq) {
-		cfqq = NULL;
-		if (new_cfqq) {
-			cfqq = new_cfqq;
-			new_cfqq = NULL;
-		} else if (gfp_mask & __GFP_WAIT) {
-			rcu_read_unlock();
-			spin_unlock_irq(cfqd->queue->queue_lock);
-			new_cfqq = kmem_cache_alloc_node(cfq_pool,
-							 gfp_mask | __GFP_ZERO,
-							 cfqd->queue->node);
-			spin_lock_irq(cfqd->queue->queue_lock);
-			if (new_cfqq)
-				goto retry;
-			else
-				return &cfqd->oom_cfqq;
-		} else {
-			cfqq = kmem_cache_alloc_node(cfq_pool,
-						     gfp_mask | __GFP_ZERO,
-						     cfqd->queue->node);
-		}
-
+		cfqq = kmem_cache_alloc_node(cfq_pool,
+					     GFP_NOWAIT | __GFP_ZERO,
+					     cfqd->queue->node);
 		if (cfqq) {
 			cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync);
 			cfq_init_prio_data(cfqq, cic);
@@ -3696,9 +3676,6 @@ retry:
 		cfqq = &cfqd->oom_cfqq;
 	}
 out:
-	if (new_cfqq)
-		kmem_cache_free(cfq_pool, new_cfqq);
-
 	rcu_read_unlock();
 	return cfqq;
 }
@@ -3723,7 +3700,7 @@ cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
 
 static struct cfq_queue *
 cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
-	      struct bio *bio, gfp_t gfp_mask)
+	      struct bio *bio)
 {
 	int ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
 	int ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
@@ -3742,7 +3719,7 @@ cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
 		goto out;
 	}
 
-	cfqq = cfq_find_alloc_queue(cfqd, is_sync, cic, bio, gfp_mask);
+	cfqq = cfq_find_alloc_queue(cfqd, is_sync, cic, bio);
 
 	/*
 	 * pin the queue now that it's allocated, scheduler exit will prune it
@@ -4286,8 +4263,6 @@ cfq_set_request(struct request_queue *q, struct request *rq, struct bio *bio,
 	const bool is_sync = rq_is_sync(rq);
 	struct cfq_queue *cfqq;
 
-	might_sleep_if(gfp_mask & __GFP_WAIT);
-
 	spin_lock_irq(q->queue_lock);
 
 	check_ioprio_changed(cic, bio);
@@ -4297,7 +4272,7 @@ new_queue:
 	if (!cfqq || cfqq == &cfqd->oom_cfqq) {
 		if (cfqq)
 			cfq_put_queue(cfqq);
-		cfqq = cfq_get_queue(cfqd, is_sync, cic, bio, gfp_mask);
+		cfqq = cfq_get_queue(cfqd, is_sync, cic, bio);
 		cic_set_cfqq(cic, cfqq, is_sync);
 	} else {
 		/*
```