author | Mike Christie <mchristi@redhat.com> | 2016-06-05 14:32:25 -0500 |
---|---|---|
committer | Jens Axboe <axboe@fb.com> | 2016-06-07 13:41:38 -0600 |
commit | 28a8f0d317bf225ff15008f5dd66ae16242dd843 (patch) | |
tree | 4ed24aee241907a3612a61f8cc634acd10989c21 /block | |
parent | a418090aa88b9b531ac1f504d6bb8c0e9b04ccb7 (diff) | |
download | linux-28a8f0d317bf225ff15008f5dd66ae16242dd843.tar.gz linux-28a8f0d317bf225ff15008f5dd66ae16242dd843.tar.bz2 linux-28a8f0d317bf225ff15008f5dd66ae16242dd843.zip |
block, drivers, fs: rename REQ_FLUSH to REQ_PREFLUSH
To avoid confusion between REQ_OP_FLUSH, which is handled by
request_fn drivers, and the flag upper layers set to ask the block
layer to perform a flush sequence (possibly together with a WRITE),
this patch renames REQ_FLUSH to REQ_PREFLUSH.
Signed-off-by: Mike Christie <mchristi@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
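To make the naming distinction in the message above concrete: REQ_OP_FLUSH names the operation a request_fn driver is ultimately asked to carry out, while REQ_PREFLUSH is a modifier flag an upper layer sets on a (possibly data-carrying) request to ask the block layer for a flush sequence around it. Below is a rough, userspace-only sketch of that split; the enum values, flag bits, and the `fake_request`/`driver_handle` names are illustrative stand-ins, not the kernel's actual definitions.

```c
#include <stdio.h>

/* Stand-in "operation" codes: what a request ultimately does. */
enum req_op { REQ_OP_READ, REQ_OP_WRITE, REQ_OP_FLUSH };

/* Stand-in modifier flags set by upper layers on a request/bio. */
#define REQ_PREFLUSH (1u << 0)  /* flush the device cache before this write */
#define REQ_FUA      (1u << 1)  /* write must reach non-volatile media      */

struct fake_request {
    enum req_op  op;     /* one operation per request                   */
    unsigned int flags;  /* zero or more modifiers, e.g. REQ_PREFLUSH   */
};

/*
 * What a request_fn-style driver sees: either a pure flush operation
 * (REQ_OP_FLUSH, no data) generated by the block layer's flush machinery,
 * or a write that may still carry FUA if the hardware supports it.
 */
static void driver_handle(const struct fake_request *rq)
{
    if (rq->op == REQ_OP_FLUSH)
        printf("driver: flush the device cache\n");
    else if (rq->op == REQ_OP_WRITE)
        printf("driver: write data%s\n",
               (rq->flags & REQ_FUA) ? " (FUA)" : "");
}

int main(void)
{
    /* Upper layer: a write that asks for a preceding cache flush.  After
     * this patch the flag is spelled REQ_PREFLUSH, not REQ_FLUSH. */
    struct fake_request wr = { .op = REQ_OP_WRITE,
                               .flags = REQ_PREFLUSH | REQ_FUA };

    /* The block layer, not the driver, turns REQ_PREFLUSH into a separate
     * REQ_OP_FLUSH request ahead of the data (see blk-flush.c below). */
    struct fake_request preflush = { .op = REQ_OP_FLUSH, .flags = 0 };

    driver_handle(&preflush);
    driver_handle(&wr);
    return 0;
}
```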
Diffstat (limited to 'block')
-rw-r--r-- | block/blk-core.c  | 12
-rw-r--r-- | block/blk-flush.c | 16
-rw-r--r-- | block/blk-mq.c    |  4
3 files changed, 16 insertions, 16 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index c7d66c23a708..32a283eb7274 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1029,7 +1029,7 @@ static bool blk_rq_should_init_elevator(struct bio *bio)
 	 * Flush requests do not use the elevator so skip initialization.
 	 * This allows a request to share the flush and elevator data.
 	 */
-	if (bio->bi_rw & (REQ_FLUSH | REQ_FUA))
+	if (bio->bi_rw & (REQ_PREFLUSH | REQ_FUA))
 		return false;
 
 	return true;
@@ -1736,7 +1736,7 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
 		return BLK_QC_T_NONE;
 	}
 
-	if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) {
+	if (bio->bi_rw & (REQ_PREFLUSH | REQ_FUA)) {
 		spin_lock_irq(q->queue_lock);
 		where = ELEVATOR_INSERT_FLUSH;
 		goto get_rq;
@@ -1968,9 +1968,9 @@ generic_make_request_checks(struct bio *bio)
 	 * drivers without flush support don't have to worry
 	 * about them.
 	 */
-	if ((bio->bi_rw & (REQ_FLUSH | REQ_FUA)) &&
+	if ((bio->bi_rw & (REQ_PREFLUSH | REQ_FUA)) &&
 	    !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
-		bio->bi_rw &= ~(REQ_FLUSH | REQ_FUA);
+		bio->bi_rw &= ~(REQ_PREFLUSH | REQ_FUA);
 		if (!nr_sectors) {
 			err = 0;
 			goto end_io;
@@ -2217,7 +2217,7 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
 	 */
 	BUG_ON(blk_queued_rq(rq));
 
-	if (rq->cmd_flags & (REQ_FLUSH|REQ_FUA))
+	if (rq->cmd_flags & (REQ_PREFLUSH | REQ_FUA))
 		where = ELEVATOR_INSERT_FLUSH;
 
 	add_acct_request(q, rq, where);
@@ -3311,7 +3311,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 		/*
 		 * rq is already accounted, so use raw insert
 		 */
-		if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA))
+		if (rq->cmd_flags & (REQ_PREFLUSH | REQ_FUA))
 			__elv_add_request(q, rq, ELEVATOR_INSERT_FLUSH);
 		else
 			__elv_add_request(q, rq, ELEVATOR_INSERT_SORT_MERGE);
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 21f0d5b0d2ca..d308def812db 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -10,8 +10,8 @@
  * optional steps - PREFLUSH, DATA and POSTFLUSH - according to the request
  * properties and hardware capability.
  *
- * If a request doesn't have data, only REQ_FLUSH makes sense, which
- * indicates a simple flush request. If there is data, REQ_FLUSH indicates
+ * If a request doesn't have data, only REQ_PREFLUSH makes sense, which
+ * indicates a simple flush request. If there is data, REQ_PREFLUSH indicates
  * that the device cache should be flushed before the data is executed, and
  * REQ_FUA means that the data must be on non-volatile media on request
  * completion.
@@ -20,11 +20,11 @@
  * difference. The requests are either completed immediately if there's no
  * data or executed as normal requests otherwise.
  *
- * If the device has writeback cache and supports FUA, REQ_FLUSH is
+ * If the device has writeback cache and supports FUA, REQ_PREFLUSH is
  * translated to PREFLUSH but REQ_FUA is passed down directly with DATA.
  *
- * If the device has writeback cache and doesn't support FUA, REQ_FLUSH is
- * translated to PREFLUSH and REQ_FUA to POSTFLUSH.
+ * If the device has writeback cache and doesn't support FUA, REQ_PREFLUSH
+ * is translated to PREFLUSH and REQ_FUA to POSTFLUSH.
  *
  * The actual execution of flush is double buffered. Whenever a request
  * needs to execute PRE or POSTFLUSH, it queues at
@@ -103,7 +103,7 @@ static unsigned int blk_flush_policy(unsigned long fflags, struct request *rq)
 		policy |= REQ_FSEQ_DATA;
 
 	if (fflags & (1UL << QUEUE_FLAG_WC)) {
-		if (rq->cmd_flags & REQ_FLUSH)
+		if (rq->cmd_flags & REQ_PREFLUSH)
 			policy |= REQ_FSEQ_PREFLUSH;
 		if (!(fflags & (1UL << QUEUE_FLAG_FUA)) &&
 		    (rq->cmd_flags & REQ_FUA))
@@ -391,9 +391,9 @@ void blk_insert_flush(struct request *rq)
 
 	/*
 	 * @policy now records what operations need to be done. Adjust
-	 * REQ_FLUSH and FUA for the driver.
+	 * REQ_PREFLUSH and FUA for the driver.
 	 */
-	rq->cmd_flags &= ~REQ_FLUSH;
+	rq->cmd_flags &= ~REQ_PREFLUSH;
 	if (!(fflags & (1UL << QUEUE_FLAG_FUA)))
 		rq->cmd_flags &= ~REQ_FUA;
 
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 29bcd9c07a34..13f460368759 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1247,7 +1247,7 @@ static int blk_mq_direct_issue_request(struct request *rq, blk_qc_t *cookie)
 static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 {
 	const int is_sync = rw_is_sync(bio_op(bio), bio->bi_rw);
-	const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA);
+	const int is_flush_fua = bio->bi_rw & (REQ_PREFLUSH | REQ_FUA);
 	struct blk_map_ctx data;
 	struct request *rq;
 	unsigned int request_count = 0;
@@ -1344,7 +1344,7 @@ done:
 static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
 {
 	const int is_sync = rw_is_sync(bio_op(bio), bio->bi_rw);
-	const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA);
+	const int is_flush_fua = bio->bi_rw & (REQ_PREFLUSH | REQ_FUA);
 	struct blk_plug *plug;
 	unsigned int request_count = 0;
 	struct blk_map_ctx data;
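For readers following the blk-flush.c comment in the hunks above, the decision table it describes can be restated outside the kernel. The sketch below is a standalone, userspace paraphrase of blk_flush_policy() as it appears in this diff; the flag values, the `flush_policy` name, and the queue-capability booleans are illustrative stand-ins, not the kernel's definitions.

```c
#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the request flags touched by this patch. */
#define REQ_PREFLUSH (1u << 0)   /* flush the device cache before the data      */
#define REQ_FUA      (1u << 1)   /* data must be on non-volatile media when done */

/* Stand-ins for the flush-sequence steps used in blk-flush.c. */
#define REQ_FSEQ_PREFLUSH  (1u << 0)
#define REQ_FSEQ_DATA      (1u << 1)
#define REQ_FSEQ_POSTFLUSH (1u << 2)

/*
 * Paraphrase of blk_flush_policy(): decide which of the PREFLUSH, DATA and
 * POSTFLUSH steps a request needs, given the flags on the request and the
 * device's cache/FUA capabilities (QUEUE_FLAG_WC / QUEUE_FLAG_FUA in the
 * kernel, plain booleans here).
 */
static unsigned int flush_policy(bool has_writeback_cache, bool has_fua,
                                 unsigned int cmd_flags, unsigned int nr_sectors)
{
    unsigned int policy = 0;

    if (nr_sectors)
        policy |= REQ_FSEQ_DATA;

    if (has_writeback_cache) {
        if (cmd_flags & REQ_PREFLUSH)
            policy |= REQ_FSEQ_PREFLUSH;
        /* Without FUA support, FUA is emulated with a post-write flush. */
        if (!has_fua && (cmd_flags & REQ_FUA))
            policy |= REQ_FSEQ_POSTFLUSH;
    }
    return policy;
}

int main(void)
{
    /* A REQ_PREFLUSH|REQ_FUA write on a writeback-cache device without FUA
     * support needs all three steps: PREFLUSH, DATA, POSTFLUSH. */
    unsigned int p = flush_policy(true, false, REQ_PREFLUSH | REQ_FUA, 8);

    printf("preflush=%d data=%d postflush=%d\n",
           !!(p & REQ_FSEQ_PREFLUSH), !!(p & REQ_FSEQ_DATA),
           !!(p & REQ_FSEQ_POSTFLUSH));
    return 0;
}
```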