commit     f9cd4bfe96955e7a1d3ec54b393dee87b815ba3b
tree       5d3e5dcd72b1ddbad1b592dfbbf1fa22f92fa6d2
parent     a1ce35fa49852db60fc6e268038530be533c5b15
author     Jens Axboe <axboe@kernel.dk>  2018-11-01 16:41:41 -0600
committer  Jens Axboe <axboe@kernel.dk>  2018-11-07 13:42:32 -0700
block: get rid of MQ scheduler ops union
This is a remnant of when we had ops for both SQ and MQ
schedulers. Now it's just MQ, so get rid of the union.
Reviewed-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
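
For readers skimming the diff below, the whole change hinges on one struct layout. The following standalone sketch (hypothetical names mq_ops, elevator_old, and elevator_new — simplified stand-ins, not the kernel's own definitions) shows the single-member union this commit removes and how the call chain shortens from ops.mq.X to ops.X:

#include <stdio.h>

/* Simplified stand-in for the kernel's elevator_mq_ops. */
struct mq_ops {
        void (*insert)(int rq);
};

/* Old layout: a union with a single member, left over from when both
 * SQ and MQ scheduler ops existed side by side. */
struct elevator_old {
        union {
                struct mq_ops mq;
        } ops;
};

/* New layout: only MQ remains, so the ops table is embedded directly. */
struct elevator_new {
        struct mq_ops ops;
};

static void insert_rq(int rq)
{
        printf("insert rq %d\n", rq);
}

int main(void)
{
        struct elevator_old o = { .ops.mq.insert = insert_rq };
        struct elevator_new n = { .ops.insert = insert_rq };

        o.ops.mq.insert(1);     /* before: e->type->ops.mq.insert_requests(...) */
        n.ops.insert(2);        /* after:  e->type->ops.insert_requests(...) */
        return 0;
}

Per the commit message, the union once carried the legacy (SQ) scheduler ops as well; with one member left it was pure indirection, so dropping it is purely mechanical for callers — which is why the rest of the diff is a one-for-one s/ops.mq./ops./ substitution.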
 block/bfq-iosched.c      |  2
 block/blk-ioc.c          |  8
 block/blk-mq-sched.c     | 33
 block/blk-mq-sched.h     | 20
 block/blk-mq.c           | 12
 block/elevator.c         | 26
 block/kyber-iosched.c    |  2
 block/mq-deadline.c      |  2
 include/linux/elevator.h |  4
 9 files changed, 53 insertions(+), 56 deletions(-)
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index 44c7e567aa25..c7636cbefc85 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -5724,7 +5724,7 @@ static struct elv_fs_entry bfq_attrs[] = {
 };
 
 static struct elevator_type iosched_bfq_mq = {
-        .ops.mq = {
+        .ops = {
                 .limit_depth = bfq_limit_depth,
                 .prepare_request = bfq_prepare_request,
                 .requeue_request = bfq_finish_requeue_request,
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index 391128456aec..007aac6e6a4b 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -48,8 +48,8 @@ static void ioc_exit_icq(struct io_cq *icq)
         if (icq->flags & ICQ_EXITED)
                 return;
 
-        if (et->ops.mq.exit_icq)
-                et->ops.mq.exit_icq(icq);
+        if (et->ops.exit_icq)
+                et->ops.exit_icq(icq);
 
         icq->flags |= ICQ_EXITED;
 }
@@ -396,8 +396,8 @@ struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
         if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
                 hlist_add_head(&icq->ioc_node, &ioc->icq_list);
                 list_add(&icq->q_node, &q->icq_list);
-                if (et->ops.mq.init_icq)
-                        et->ops.mq.init_icq(icq);
+                if (et->ops.init_icq)
+                        et->ops.init_icq(icq);
         } else {
                 kmem_cache_free(et->icq_cache, icq);
                 icq = ioc_lookup_icq(ioc, q);
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 29bfe8017a2d..0feefd6c6aaa 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -85,14 +85,13 @@ static void blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
         do {
                 struct request *rq;
 
-                if (e->type->ops.mq.has_work &&
-                    !e->type->ops.mq.has_work(hctx))
+                if (e->type->ops.has_work && !e->type->ops.has_work(hctx))
                         break;
 
                 if (!blk_mq_get_dispatch_budget(hctx))
                         break;
 
-                rq = e->type->ops.mq.dispatch_request(hctx);
+                rq = e->type->ops.dispatch_request(hctx);
                 if (!rq) {
                         blk_mq_put_dispatch_budget(hctx);
                         break;
@@ -163,7 +162,7 @@ void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
 {
         struct request_queue *q = hctx->queue;
         struct elevator_queue *e = q->elevator;
-        const bool has_sched_dispatch = e && e->type->ops.mq.dispatch_request;
+        const bool has_sched_dispatch = e && e->type->ops.dispatch_request;
         LIST_HEAD(rq_list);
 
         /* RCU or SRCU read lock is needed before checking quiesced flag */
@@ -314,9 +313,9 @@ bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio)
         struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
         bool ret = false;
 
-        if (e && e->type->ops.mq.bio_merge) {
+        if (e && e->type->ops.bio_merge) {
                 blk_mq_put_ctx(ctx);
-                return e->type->ops.mq.bio_merge(hctx, bio);
+                return e->type->ops.bio_merge(hctx, bio);
         }
 
         if ((hctx->flags & BLK_MQ_F_SHOULD_MERGE) &&
@@ -380,11 +379,11 @@ void blk_mq_sched_insert_request(struct request *rq, bool at_head,
         if (blk_mq_sched_bypass_insert(hctx, !!e, rq))
                 goto run;
 
-        if (e && e->type->ops.mq.insert_requests) {
+        if (e && e->type->ops.insert_requests) {
                 LIST_HEAD(list);
 
                 list_add(&rq->queuelist, &list);
-                e->type->ops.mq.insert_requests(hctx, &list, at_head);
+                e->type->ops.insert_requests(hctx, &list, at_head);
         } else {
                 spin_lock(&ctx->lock);
                 __blk_mq_insert_request(hctx, rq, at_head);
@@ -403,8 +402,8 @@ void blk_mq_sched_insert_requests(struct request_queue *q,
         struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
         struct elevator_queue *e = hctx->queue->elevator;
 
-        if (e && e->type->ops.mq.insert_requests)
-                e->type->ops.mq.insert_requests(hctx, list, false);
+        if (e && e->type->ops.insert_requests)
+                e->type->ops.insert_requests(hctx, list, false);
         else {
                 /*
                  * try to issue requests directly if the hw queue isn't
@@ -489,15 +488,15 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
                 goto err;
         }
 
-        ret = e->ops.mq.init_sched(q, e);
+        ret = e->ops.init_sched(q, e);
         if (ret)
                 goto err;
 
         blk_mq_debugfs_register_sched(q);
 
         queue_for_each_hw_ctx(q, hctx, i) {
-                if (e->ops.mq.init_hctx) {
-                        ret = e->ops.mq.init_hctx(hctx, i);
+                if (e->ops.init_hctx) {
+                        ret = e->ops.init_hctx(hctx, i);
                         if (ret) {
                                 eq = q->elevator;
                                 blk_mq_exit_sched(q, eq);
@@ -523,14 +522,14 @@ void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
 
         queue_for_each_hw_ctx(q, hctx, i) {
                 blk_mq_debugfs_unregister_sched_hctx(hctx);
-                if (e->type->ops.mq.exit_hctx && hctx->sched_data) {
-                        e->type->ops.mq.exit_hctx(hctx, i);
+                if (e->type->ops.exit_hctx && hctx->sched_data) {
+                        e->type->ops.exit_hctx(hctx, i);
                         hctx->sched_data = NULL;
                 }
         }
         blk_mq_debugfs_unregister_sched(q);
-        if (e->type->ops.mq.exit_sched)
-                e->type->ops.mq.exit_sched(e);
+        if (e->type->ops.exit_sched)
+                e->type->ops.exit_sched(e);
         blk_mq_sched_tags_teardown(q);
         q->elevator = NULL;
 }
diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
index 8a9544203173..947f236b273d 100644
--- a/block/blk-mq-sched.h
+++ b/block/blk-mq-sched.h
@@ -43,8 +43,8 @@ blk_mq_sched_allow_merge(struct request_queue *q, struct request *rq,
 {
         struct elevator_queue *e = q->elevator;
 
-        if (e && e->type->ops.mq.allow_merge)
-                return e->type->ops.mq.allow_merge(q, rq, bio);
+        if (e && e->type->ops.allow_merge)
+                return e->type->ops.allow_merge(q, rq, bio);
 
         return true;
 }
@@ -53,8 +53,8 @@ static inline void blk_mq_sched_completed_request(struct request *rq, u64 now)
 {
         struct elevator_queue *e = rq->q->elevator;
 
-        if (e && e->type->ops.mq.completed_request)
-                e->type->ops.mq.completed_request(rq, now);
+        if (e && e->type->ops.completed_request)
+                e->type->ops.completed_request(rq, now);
 }
 
 static inline void blk_mq_sched_started_request(struct request *rq)
@@ -62,8 +62,8 @@ static inline void blk_mq_sched_started_request(struct request *rq)
         struct request_queue *q = rq->q;
         struct elevator_queue *e = q->elevator;
 
-        if (e && e->type->ops.mq.started_request)
-                e->type->ops.mq.started_request(rq);
+        if (e && e->type->ops.started_request)
+                e->type->ops.started_request(rq);
 }
 
 static inline void blk_mq_sched_requeue_request(struct request *rq)
@@ -71,16 +71,16 @@ static inline void blk_mq_sched_requeue_request(struct request *rq)
         struct request_queue *q = rq->q;
         struct elevator_queue *e = q->elevator;
 
-        if (e && e->type->ops.mq.requeue_request)
-                e->type->ops.mq.requeue_request(rq);
+        if (e && e->type->ops.requeue_request)
+                e->type->ops.requeue_request(rq);
 }
 
 static inline bool blk_mq_sched_has_work(struct blk_mq_hw_ctx *hctx)
 {
         struct elevator_queue *e = hctx->queue->elevator;
 
-        if (e && e->type->ops.mq.has_work)
-                return e->type->ops.mq.has_work(hctx);
+        if (e && e->type->ops.has_work)
+                return e->type->ops.has_work(hctx);
 
         return false;
 }
diff --git a/block/blk-mq.c b/block/blk-mq.c
index a58d2d953876..d106d7a970cc 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -363,9 +363,9 @@ static struct request *blk_mq_get_request(struct request_queue *q,
                  * dispatch list. Don't include reserved tags in the
                  * limiting, as it isn't useful.
                  */
-                if (!op_is_flush(op) && e->type->ops.mq.limit_depth &&
+                if (!op_is_flush(op) && e->type->ops.limit_depth &&
                     !(data->flags & BLK_MQ_REQ_RESERVED))
-                        e->type->ops.mq.limit_depth(op, data);
+                        e->type->ops.limit_depth(op, data);
         } else {
                 blk_mq_tag_busy(data->hctx);
         }
@@ -383,11 +383,11 @@ static struct request *blk_mq_get_request(struct request_queue *q,
         rq = blk_mq_rq_ctx_init(data, tag, op);
         if (!op_is_flush(op)) {
                 rq->elv.icq = NULL;
-                if (e && e->type->ops.mq.prepare_request) {
+                if (e && e->type->ops.prepare_request) {
                         if (e->type->icq_cache && rq_ioc(bio))
                                 blk_mq_sched_assign_ioc(rq, bio);
 
-                        e->type->ops.mq.prepare_request(rq, bio);
+                        e->type->ops.prepare_request(rq, bio);
                         rq->rq_flags |= RQF_ELVPRIV;
                 }
         }
@@ -491,8 +491,8 @@ void blk_mq_free_request(struct request *rq)
         struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
 
         if (rq->rq_flags & RQF_ELVPRIV) {
-                if (e && e->type->ops.mq.finish_request)
-                        e->type->ops.mq.finish_request(rq);
+                if (e && e->type->ops.finish_request)
+                        e->type->ops.finish_request(rq);
                 if (rq->elv.icq) {
                         put_io_context(rq->elv.icq->ioc);
                         rq->elv.icq = NULL;
diff --git a/block/elevator.c b/block/elevator.c
index 334097c54b08..19351ffa56b1 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -61,8 +61,8 @@ static int elv_iosched_allow_bio_merge(struct request *rq, struct bio *bio)
         struct request_queue *q = rq->q;
         struct elevator_queue *e = q->elevator;
 
-        if (e->type->ops.mq.allow_merge)
-                return e->type->ops.mq.allow_merge(q, rq, bio);
+        if (e->type->ops.allow_merge)
+                return e->type->ops.allow_merge(q, rq, bio);
 
         return 1;
 }
@@ -180,7 +180,7 @@ static void elevator_release(struct kobject *kobj)
 void elevator_exit(struct request_queue *q, struct elevator_queue *e)
 {
         mutex_lock(&e->sysfs_lock);
-        if (e->type->ops.mq.exit_sched)
+        if (e->type->ops.exit_sched)
                 blk_mq_exit_sched(q, e);
         mutex_unlock(&e->sysfs_lock);
 
@@ -329,8 +329,8 @@ enum elv_merge elv_merge(struct request_queue *q, struct request **req,
                 return ELEVATOR_BACK_MERGE;
         }
 
-        if (e->type->ops.mq.request_merge)
-                return e->type->ops.mq.request_merge(q, req, bio);
+        if (e->type->ops.request_merge)
+                return e->type->ops.request_merge(q, req, bio);
 
         return ELEVATOR_NO_MERGE;
 }
@@ -381,8 +381,8 @@ void elv_merged_request(struct request_queue *q, struct request *rq,
 {
         struct elevator_queue *e = q->elevator;
 
-        if (e->type->ops.mq.request_merged)
-                e->type->ops.mq.request_merged(q, rq, type);
+        if (e->type->ops.request_merged)
+                e->type->ops.request_merged(q, rq, type);
 
         if (type == ELEVATOR_BACK_MERGE)
                 elv_rqhash_reposition(q, rq);
@@ -396,8 +396,8 @@ void elv_merge_requests(struct request_queue *q, struct request *rq,
         struct elevator_queue *e = q->elevator;
         bool next_sorted = false;
 
-        if (e->type->ops.mq.requests_merged)
-                e->type->ops.mq.requests_merged(q, rq, next);
+        if (e->type->ops.requests_merged)
+                e->type->ops.requests_merged(q, rq, next);
 
         elv_rqhash_reposition(q, rq);
 
@@ -413,8 +413,8 @@ struct request *elv_latter_request(struct request_queue *q, struct request *rq)
 {
         struct elevator_queue *e = q->elevator;
 
-        if (e->type->ops.mq.next_request)
-                return e->type->ops.mq.next_request(q, rq);
+        if (e->type->ops.next_request)
+                return e->type->ops.next_request(q, rq);
 
         return NULL;
 }
@@ -423,8 +423,8 @@ struct request *elv_former_request(struct request_queue *q, struct request *rq)
 {
         struct elevator_queue *e = q->elevator;
 
-        if (e->type->ops.mq.former_request)
-                return e->type->ops.mq.former_request(q, rq);
+        if (e->type->ops.former_request)
+                return e->type->ops.former_request(q, rq);
 
         return NULL;
 }
diff --git a/block/kyber-iosched.c b/block/kyber-iosched.c
index 728757a34fa0..1fd83a91e749 100644
--- a/block/kyber-iosched.c
+++ b/block/kyber-iosched.c
@@ -1017,7 +1017,7 @@ static const struct blk_mq_debugfs_attr kyber_hctx_debugfs_attrs[] = {
 #endif
 
 static struct elevator_type kyber_sched = {
-        .ops.mq = {
+        .ops = {
                 .init_sched = kyber_init_sched,
                 .exit_sched = kyber_exit_sched,
                 .init_hctx = kyber_init_hctx,
diff --git a/block/mq-deadline.c b/block/mq-deadline.c
index 513edefd10fd..1bd06cefce57 100644
--- a/block/mq-deadline.c
+++ b/block/mq-deadline.c
@@ -761,7 +761,7 @@ static const struct blk_mq_debugfs_attr deadline_queue_debugfs_attrs[] = {
 #endif
 
 static struct elevator_type mq_deadline = {
-        .ops.mq = {
+        .ops = {
                 .insert_requests = dd_insert_requests,
                 .dispatch_request = dd_dispatch_request,
                 .prepare_request = dd_prepare_request,
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index 158004f1754d..2e9e2763bf47 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -69,9 +69,7 @@ struct elevator_type
         struct kmem_cache *icq_cache;
 
         /* fields provided by elevator implementation */
-        union {
-                struct elevator_mq_ops mq;
-        } ops;
+        struct elevator_mq_ops ops;
 
         size_t icq_size;        /* see iocontext.h */
         size_t icq_align;       /* ditto */