path: root/block/blk-core.c
author    Jens Axboe <axboe@kernel.dk>    2018-11-15 12:22:51 -0700
committer Jens Axboe <axboe@kernel.dk>    2018-11-16 08:34:06 -0700
commit    344e9ffcbd1898e1dc04085564a6e05c30ea8199 (patch)
tree      ba71320bc66d1158790acf1cdeedd21d2da9dead /block/blk-core.c
parent    dabcefab45d36ecb5a22f16577bb0f298876a22d (diff)
block: add queue_is_mq() helper
Various spots check for q->mq_ops being non-NULL; provide a helper to do this instead. Where the ->mq_ops != NULL check is redundant, remove it. Since mq == rq-based now that the legacy path is gone, get rid of queue_is_rq_based() and just use queue_is_mq() everywhere.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
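For context: the helper itself is introduced outside this file (the diffstat below is limited to block/blk-core.c), so its definition is not shown here. A minimal sketch of what such a helper looks like, assuming it simply wraps the existing ->mq_ops pointer test described above:

    /* Sketch only; the actual definition lives in a header not shown in this diff. */
    static inline bool queue_is_mq(struct request_queue *q)
    {
            /* A queue is blk-mq (and, post-legacy, request-based) iff mq_ops is set. */
            return q->mq_ops;
    }

Callers then write "if (queue_is_mq(q))" instead of open-coding the pointer check, as the hunks below show.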
Diffstat (limited to 'block/blk-core.c')
-rw-r--r--  block/blk-core.c  12
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 92b6b200e9fb..0b684a520a11 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -232,7 +232,7 @@ void blk_sync_queue(struct request_queue *q)
del_timer_sync(&q->timeout);
cancel_work_sync(&q->timeout_work);
- if (q->mq_ops) {
+ if (queue_is_mq(q)) {
struct blk_mq_hw_ctx *hctx;
int i;
@@ -281,7 +281,7 @@ void blk_set_queue_dying(struct request_queue *q)
*/
blk_freeze_queue_start(q);
- if (q->mq_ops)
+ if (queue_is_mq(q))
blk_mq_wake_waiters(q);
/* Make blk_queue_enter() reexamine the DYING flag. */
@@ -356,7 +356,7 @@ void blk_cleanup_queue(struct request_queue *q)
* blk_freeze_queue() should be enough for cases of passthrough
* request.
*/
- if (q->mq_ops && blk_queue_init_done(q))
+ if (queue_is_mq(q) && blk_queue_init_done(q))
blk_mq_quiesce_queue(q);
/* for synchronous bio-based driver finish in-flight integrity i/o */
@@ -374,7 +374,7 @@ void blk_cleanup_queue(struct request_queue *q)
blk_exit_queue(q);
- if (q->mq_ops)
+ if (queue_is_mq(q))
blk_mq_free_queue(q);
percpu_ref_exit(&q->q_usage_counter);
@@ -982,7 +982,7 @@ generic_make_request_checks(struct bio *bio)
* For a REQ_NOWAIT based request, return -EOPNOTSUPP
* if queue is not a request based queue.
*/
- if ((bio->bi_opf & REQ_NOWAIT) && !queue_is_rq_based(q))
+ if ((bio->bi_opf & REQ_NOWAIT) && !queue_is_mq(q))
goto not_supported;
if (should_fail_bio(bio))
@@ -1657,7 +1657,7 @@ EXPORT_SYMBOL_GPL(rq_flush_dcache_pages);
*/
int blk_lld_busy(struct request_queue *q)
{
- if (q->mq_ops && q->mq_ops->busy)
+ if (queue_is_mq(q) && q->mq_ops->busy)
return q->mq_ops->busy(q);
return 0;