Diffstat (limited to 'block')
-rw-r--r--	block/blk-core.c	 3
-rw-r--r--	block/blk-flush.c	11
-rw-r--r--	block/blk-mq-tag.c	 5
-rw-r--r--	block/blk-settings.c	38
4 files changed, 20 insertions(+), 37 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index c50227796a26..2475b1c72773 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1964,7 +1964,8 @@ generic_make_request_checks(struct bio *bio)
 	 * drivers without flush support don't have to worry
 	 * about them.
 	 */
-	if ((bio->bi_rw & (REQ_FLUSH | REQ_FUA)) && !q->flush_flags) {
+	if ((bio->bi_rw & (REQ_FLUSH | REQ_FUA)) &&
+	    !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
 		bio->bi_rw &= ~(REQ_FLUSH | REQ_FUA);
 		if (!nr_sectors) {
 			err = 0;
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 9c423e53324a..b1c91d229e5e 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -95,17 +95,18 @@ enum {
 static bool blk_kick_flush(struct request_queue *q,
 			   struct blk_flush_queue *fq);
 
-static unsigned int blk_flush_policy(unsigned int fflags, struct request *rq)
+static unsigned int blk_flush_policy(unsigned long fflags, struct request *rq)
 {
 	unsigned int policy = 0;
 
 	if (blk_rq_sectors(rq))
 		policy |= REQ_FSEQ_DATA;
 
-	if (fflags & REQ_FLUSH) {
+	if (fflags & (1UL << QUEUE_FLAG_WC)) {
 		if (rq->cmd_flags & REQ_FLUSH)
 			policy |= REQ_FSEQ_PREFLUSH;
-		if (!(fflags & REQ_FUA) && (rq->cmd_flags & REQ_FUA))
+		if (!(fflags & (1UL << QUEUE_FLAG_FUA)) &&
+		    (rq->cmd_flags & REQ_FUA))
 			policy |= REQ_FSEQ_POSTFLUSH;
 	}
 	return policy;
@@ -384,7 +385,7 @@ static void mq_flush_data_end_io(struct request *rq, int error)
 void blk_insert_flush(struct request *rq)
 {
 	struct request_queue *q = rq->q;
-	unsigned int fflags = q->flush_flags;	/* may change, cache */
+	unsigned long fflags = q->queue_flags;	/* may change, cache */
 	unsigned int policy = blk_flush_policy(fflags, rq);
 	struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx);
 
@@ -393,7 +394,7 @@ void blk_insert_flush(struct request *rq)
 	 * REQ_FLUSH and FUA for the driver.
 	 */
 	rq->cmd_flags &= ~REQ_FLUSH;
-	if (!(fflags & REQ_FUA))
+	if (!(fflags & (1UL << QUEUE_FLAG_FUA)))
 		rq->cmd_flags &= ~REQ_FUA;
 
 	/*
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 2fd04286f103..56a0c37a3d06 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -464,15 +464,14 @@ static void bt_tags_for_each(struct blk_mq_tags *tags,
 	}
 }
 
-void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
-		void *priv)
+static void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags,
+		busy_tag_iter_fn *fn, void *priv)
 {
 	if (tags->nr_reserved_tags)
 		bt_tags_for_each(tags, &tags->breserved_tags, 0, fn, priv, true);
 	bt_tags_for_each(tags, &tags->bitmap_tags, tags->nr_reserved_tags, fn, priv,
 			false);
 }
-EXPORT_SYMBOL(blk_mq_all_tag_busy_iter);
 
 void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
 		busy_tag_iter_fn *fn, void *priv)
diff --git a/block/blk-settings.c b/block/blk-settings.c
index c903bee43cf8..f679ae122843 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -820,29 +820,14 @@ void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
 }
 EXPORT_SYMBOL(blk_queue_update_dma_alignment);
 
-/**
- * blk_queue_flush - configure queue's cache flush capability
- * @q:		the request queue for the device
- * @flush:	0, REQ_FLUSH or REQ_FLUSH | REQ_FUA
- *
- * Tell block layer cache flush capability of @q.  If it supports
- * flushing, REQ_FLUSH should be set.  If it supports bypassing
- * write cache for individual writes, REQ_FUA should be set.
- */
-void blk_queue_flush(struct request_queue *q, unsigned int flush)
-{
-	WARN_ON_ONCE(flush & ~(REQ_FLUSH | REQ_FUA));
-
-	if (WARN_ON_ONCE(!(flush & REQ_FLUSH) && (flush & REQ_FUA)))
-		flush &= ~REQ_FUA;
-
-	q->flush_flags = flush & (REQ_FLUSH | REQ_FUA);
-}
-EXPORT_SYMBOL_GPL(blk_queue_flush);
-
 void blk_queue_flush_queueable(struct request_queue *q, bool queueable)
 {
-	q->flush_not_queueable = !queueable;
+	spin_lock_irq(q->queue_lock);
+	if (queueable)
+		clear_bit(QUEUE_FLAG_FLUSH_NQ, &q->queue_flags);
+	else
+		set_bit(QUEUE_FLAG_FLUSH_NQ, &q->queue_flags);
+	spin_unlock_irq(q->queue_lock);
 }
 EXPORT_SYMBOL_GPL(blk_queue_flush_queueable);
 
@@ -857,16 +842,13 @@ EXPORT_SYMBOL_GPL(blk_queue_flush_queueable);
 void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
 {
 	spin_lock_irq(q->queue_lock);
-	if (wc) {
+	if (wc)
 		queue_flag_set(QUEUE_FLAG_WC, q);
-		q->flush_flags = REQ_FLUSH;
-	} else
+	else
 		queue_flag_clear(QUEUE_FLAG_WC, q);
-	if (fua) {
-		if (wc)
-			q->flush_flags |= REQ_FUA;
+	if (fua)
 		queue_flag_set(QUEUE_FLAG_FUA, q);
-	} else
+	else
 		queue_flag_clear(QUEUE_FLAG_FUA, q);
 	spin_unlock_irq(q->queue_lock);
 }
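Driver-facing note: with q->flush_flags and blk_queue_flush() gone, a driver that used to pass REQ_FLUSH/REQ_FUA instead advertises its cache via blk_queue_write_cache(), and the block core tests QUEUE_FLAG_WC/QUEUE_FLAG_FUA on q->queue_flags directly. A minimal, hypothetical conversion sketch (the mydrv_* name and vwc parameter are illustrative, not from this diff):

#include <linux/blkdev.h>

/* Hypothetical driver setup fragment; vwc == device reports a
 * volatile write cache. */
static void mydrv_setup_cache(struct request_queue *q, bool vwc)
{
	/* Old interface, removed by this change:
	 *	blk_queue_flush(q, vwc ? REQ_FLUSH | REQ_FUA : 0);
	 *
	 * New interface: sets QUEUE_FLAG_WC and QUEUE_FLAG_FUA on
	 * q->queue_flags under q->queue_lock. */
	blk_queue_write_cache(q, vwc, vwc);
}

The in-kernel consumers follow suit above: generic_make_request_checks() strips REQ_FLUSH/REQ_FUA when test_bit(QUEUE_FLAG_WC, &q->queue_flags) is clear, and blk_flush_policy() now takes the cached q->queue_flags word instead of the old flush_flags.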