path: root/block/blk-flush.c
author	Jens Axboe <axboe@fb.com>	2017-01-17 06:03:22 -0700
committer	Jens Axboe <axboe@fb.com>	2017-01-17 10:04:20 -0700
commit	bd166ef183c263c5ced656d49ef19c7da4adc774 (patch)
tree	449bbd3b4e671b370b96e3846b2281116e7089e9 /block/blk-flush.c
parent	2af8cbe30531eca73c8f3ba277f155fc0020b01a (diff)
blk-mq-sched: add framework for MQ capable IO schedulers
This adds a set of hooks that intercepts the blk-mq path of allocating/inserting/issuing/completing requests, allowing us to develop a scheduler within that framework.

We reuse the existing elevator scheduler API on the registration side, but augment that with the scheduler flagging support for the blk-mq interface, and with a separate set of ops hooks for MQ devices.

We split driver and scheduler tags, so we can run the scheduling independently of device queue depth.

Signed-off-by: Jens Axboe <axboe@fb.com>
Reviewed-by: Bart Van Assche <bart.vanassche@sandisk.com>
Reviewed-by: Omar Sandoval <osandov@fb.com>
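As a minimal, illustrative sketch (not part of this patch): under the framework described above, an MQ-capable scheduler registers through the existing elevator API but supplies a separate set of MQ ops and flags itself as blk-mq capable. The hook names, field names and signatures below are assumptions based on the commit description; the authoritative definitions live in elevator.h and blk-mq-sched.h in this tree.

#include <linux/module.h>
#include <linux/elevator.h>
#include <linux/blk-mq.h>

/* Hypothetical hooks -- placeholders standing in for a real scheduler. */
static void ex_insert_requests(struct blk_mq_hw_ctx *hctx,
			       struct list_head *list, bool at_head)
{
	/* park incoming requests on the scheduler's own lists */
}

static struct request *ex_dispatch_request(struct blk_mq_hw_ctx *hctx)
{
	/* hand the next chosen request to the driver, or NULL if none */
	return NULL;
}

static bool ex_has_work(struct blk_mq_hw_ctx *hctx)
{
	/* tell blk-mq whether this hw queue still has work queued */
	return false;
}

static struct elevator_type ex_mq_sched = {
	.ops.mq = {
		.insert_requests	= ex_insert_requests,
		.dispatch_request	= ex_dispatch_request,
		.has_work		= ex_has_work,
	},
	.uses_mq	= true,	/* assumed flag marking the scheduler as blk-mq capable */
	.elevator_name	= "example-mq",
	.elevator_owner	= THIS_MODULE,
};

static int __init ex_sched_init(void)
{
	return elv_register(&ex_mq_sched);
}

static void __exit ex_sched_exit(void)
{
	elv_unregister(&ex_mq_sched);
}

module_init(ex_sched_init);
module_exit(ex_sched_exit);
MODULE_LICENSE("GPL");

Because driver tags and scheduler tags are split, a scheduler shaped like this can hold more requests than the device queue depth exposes, handing them to the driver only when its dispatch hook is invoked.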
Diffstat (limited to 'block/blk-flush.c')
-rw-r--r--	block/blk-flush.c	12
1 file changed, 7 insertions(+), 5 deletions(-)
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 20b7c7a02f1c..d7de34ee39c2 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -74,6 +74,7 @@
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"
+#include "blk-mq-sched.h"
/* FLUSH/FUA sequences */
enum {
@@ -391,9 +392,10 @@ static void mq_flush_data_end_io(struct request *rq, int error)
* the comment in flush_end_io().
*/
spin_lock_irqsave(&fq->mq_flush_lock, flags);
- if (blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error))
- blk_mq_run_hw_queue(hctx, true);
+ blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error);
spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
+
+ blk_mq_run_hw_queue(hctx, true);
}
/**
@@ -453,9 +455,9 @@ void blk_insert_flush(struct request *rq)
*/
if ((policy & REQ_FSEQ_DATA) &&
!(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
- if (q->mq_ops) {
- blk_mq_insert_request(rq, false, true, false);
- } else
+ if (q->mq_ops)
+ blk_mq_sched_insert_request(rq, false, true, false);
+ else
list_add_tail(&rq->queuelist, &q->queue_head);
return;
}