author    | Mike Christie <mchristi@redhat.com> | 2016-06-05 14:32:25 -0500
committer | Jens Axboe <axboe@fb.com>           | 2016-06-07 13:41:38 -0600
commit    | 28a8f0d317bf225ff15008f5dd66ae16242dd843 (patch)
tree      | 4ed24aee241907a3612a61f8cc634acd10989c21 /drivers/md
parent    | a418090aa88b9b531ac1f504d6bb8c0e9b04ccb7 (diff)
block, drivers, fs: rename REQ_FLUSH to REQ_PREFLUSH
To avoid confusion between REQ_OP_FLUSH, which is handled by
request_fn drivers, and upper layers requesting the block layer
perform a flush sequence along with possibly a WRITE, this patch
renames REQ_FLUSH to REQ_PREFLUSH.
Signed-off-by: Mike Christie <mchristi@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
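For context, a minimal hypothetical sketch (not part of this patch) of the submission-side pattern after the rename, modeled on the bcache journal hunk below; bio_set_op_attrs() and the single-argument submit_bio() are assumed from earlier patches in this same series:

#include <linux/bio.h>

/*
 * Hypothetical example, not from this patch: a write that must reach
 * stable media only after earlier completed writes have. REQ_PREFLUSH
 * asks the block layer to flush the device cache before the write;
 * REQ_FUA forces the write itself to stable media. A request_fn driver
 * never sees REQ_PREFLUSH on the data request -- the flush state
 * machine sends it a separate request tagged REQ_OP_FLUSH instead,
 * hence the rename.
 */
static void submit_stable_write(struct bio *bio)
{
	bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_PREFLUSH | REQ_FUA);
	submit_bio(bio);
}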
Diffstat (limited to 'drivers/md')
-rw-r--r-- | drivers/md/bcache/journal.c  |  2
-rw-r--r-- | drivers/md/bcache/request.c  |  8
-rw-r--r-- | drivers/md/dm-cache-target.c | 12
-rw-r--r-- | drivers/md/dm-crypt.c        |  7
-rw-r--r-- | drivers/md/dm-era-target.c   |  4
-rw-r--r-- | drivers/md/dm-io.c           |  2
-rw-r--r-- | drivers/md/dm-log-writes.c   |  2
-rw-r--r-- | drivers/md/dm-raid1.c        |  5
-rw-r--r-- | drivers/md/dm-region-hash.c  |  4
-rw-r--r-- | drivers/md/dm-snap.c         |  6
-rw-r--r-- | drivers/md/dm-stripe.c       |  2
-rw-r--r-- | drivers/md/dm-thin.c         |  8
-rw-r--r-- | drivers/md/dm.c              | 12
-rw-r--r-- | drivers/md/linear.c          |  2
-rw-r--r-- | drivers/md/md.c              |  2
-rw-r--r-- | drivers/md/md.h              |  2
-rw-r--r-- | drivers/md/multipath.c       |  2
-rw-r--r-- | drivers/md/raid0.c           |  2
-rw-r--r-- | drivers/md/raid1.c           |  3
-rw-r--r-- | drivers/md/raid10.c          |  2
-rw-r--r-- | drivers/md/raid5-cache.c     |  2
-rw-r--r-- | drivers/md/raid5.c           |  2
22 files changed, 48 insertions, 45 deletions
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index a3c3b309ff4a..6925023e12d4 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -631,7 +631,7 @@ static void journal_write_unlocked(struct closure *cl)
 		bio->bi_end_io	= journal_write_endio;
 		bio->bi_private = w;
 		bio_set_op_attrs(bio, REQ_OP_WRITE,
-				 REQ_SYNC|REQ_META|REQ_FLUSH|REQ_FUA);
+				 REQ_SYNC|REQ_META|REQ_PREFLUSH|REQ_FUA);
 		bch_bio_map(bio, w->data);
 
 		trace_bcache_journal_write(bio);
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 016b0aa7199c..69f16f43f8ab 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -205,10 +205,10 @@ static void bch_data_insert_start(struct closure *cl)
 		return bch_data_invalidate(cl);
 
 	/*
-	 * Journal writes are marked REQ_FLUSH; if the original write was a
+	 * Journal writes are marked REQ_PREFLUSH; if the original write was a
 	 * flush, it'll wait on the journal write.
 	 */
-	bio->bi_rw &= ~(REQ_FLUSH|REQ_FUA);
+	bio->bi_rw &= ~(REQ_PREFLUSH|REQ_FUA);
 
 	do {
 		unsigned i;
@@ -668,7 +668,7 @@ static inline struct search *search_alloc(struct bio *bio,
 	s->iop.write_prio	= 0;
 	s->iop.error		= 0;
 	s->iop.flags		= 0;
-	s->iop.flush_journal	= (bio->bi_rw & (REQ_FLUSH|REQ_FUA)) != 0;
+	s->iop.flush_journal	= (bio->bi_rw & (REQ_PREFLUSH|REQ_FUA)) != 0;
 	s->iop.wq		= bcache_wq;
 
 	return s;
@@ -920,7 +920,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
 		bch_writeback_add(dc);
 		s->iop.bio = bio;
 
-		if (bio->bi_rw & REQ_FLUSH) {
+		if (bio->bi_rw & REQ_PREFLUSH) {
 			/* Also need to send a flush to the backing device */
 			struct bio *flush = bio_alloc_bioset(GFP_NOIO, 0,
 							     dc->disk.bio_split);
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 540e80eb317d..718744db62df 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -788,7 +788,7 @@ static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)
 	spin_lock_irqsave(&cache->lock, flags);
 	if (cache->need_tick_bio &&
-	    !(bio->bi_rw & (REQ_FUA | REQ_FLUSH)) &&
+	    !(bio->bi_rw & (REQ_FUA | REQ_PREFLUSH)) &&
 	    bio_op(bio) != REQ_OP_DISCARD) {
 		pb->tick = true;
 		cache->need_tick_bio = false;
@@ -830,7 +830,7 @@ static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio)
 
 static int bio_triggers_commit(struct cache *cache, struct bio *bio)
 {
-	return bio->bi_rw & (REQ_FLUSH | REQ_FUA);
+	return bio->bi_rw & (REQ_PREFLUSH | REQ_FUA);
 }
 
 /*
@@ -1069,7 +1069,7 @@ static void dec_io_migrations(struct cache *cache)
 static bool discard_or_flush(struct bio *bio)
 {
 	return bio_op(bio) == REQ_OP_DISCARD ||
-	       bio->bi_rw & (REQ_FLUSH | REQ_FUA);
+	       bio->bi_rw & (REQ_PREFLUSH | REQ_FUA);
 }
 
 static void __cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell)
@@ -1614,8 +1614,8 @@ static void process_flush_bio(struct cache *cache, struct bio *bio)
 		remap_to_cache(cache, bio, 0);
 
 	/*
-	 * REQ_FLUSH is not directed at any particular block so we don't
-	 * need to inc_ds().  REQ_FUA's are split into a write + REQ_FLUSH
+	 * REQ_PREFLUSH is not directed at any particular block so we don't
+	 * need to inc_ds().  REQ_FUA's are split into a write + REQ_PREFLUSH
 	 * by dm-core.
 	 */
 	issue(cache, bio);
@@ -1980,7 +1980,7 @@ static void process_deferred_bios(struct cache *cache)
 
 		bio = bio_list_pop(&bios);
 
-		if (bio->bi_rw & REQ_FLUSH)
+		if (bio->bi_rw & REQ_PREFLUSH)
 			process_flush_bio(cache, bio);
 		else if (bio_op(bio) == REQ_OP_DISCARD)
 			process_discard_bio(cache, &structs, bio);
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 057d19b5e196..96dd5d7e454a 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1911,11 +1911,12 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
 	struct crypt_config *cc = ti->private;
 
 	/*
-	 * If bio is REQ_FLUSH or REQ_OP_DISCARD, just bypass crypt queues.
-	 * - for REQ_FLUSH device-mapper core ensures that no IO is in-flight
+	 * If bio is REQ_PREFLUSH or REQ_OP_DISCARD, just bypass crypt queues.
+	 * - for REQ_PREFLUSH device-mapper core ensures that no IO is in-flight
 	 * - for REQ_OP_DISCARD caller must use flush if IO ordering matters
 	 */
-	if (unlikely(bio->bi_rw & REQ_FLUSH || bio_op(bio) == REQ_OP_DISCARD)) {
+	if (unlikely(bio->bi_rw & REQ_PREFLUSH ||
+	    bio_op(bio) == REQ_OP_DISCARD)) {
 		bio->bi_bdev = cc->dev->bdev;
 		if (bio_sectors(bio))
 			bio->bi_iter.bi_sector = cc->start +
diff --git a/drivers/md/dm-era-target.c b/drivers/md/dm-era-target.c
index 665bf3285618..2faf49d8f4d7 100644
--- a/drivers/md/dm-era-target.c
+++ b/drivers/md/dm-era-target.c
@@ -1540,9 +1540,9 @@ static int era_map(struct dm_target *ti, struct bio *bio)
 	remap_to_origin(era, bio);
 
 	/*
-	 * REQ_FLUSH bios carry no data, so we're not interested in them.
+	 * REQ_PREFLUSH bios carry no data, so we're not interested in them.
 	 */
-	if (!(bio->bi_rw & REQ_FLUSH) &&
+	if (!(bio->bi_rw & REQ_PREFLUSH) &&
 	    (bio_data_dir(bio) == WRITE) &&
 	    !metadata_current_marked(era->md, block)) {
 		defer_bio(era, bio);
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 22e0597d631e..0e225fd4a8d1 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -380,7 +380,7 @@ static void dispatch_io(int op, int op_flags, unsigned int num_regions,
 	 */
 	for (i = 0; i < num_regions; i++) {
 		*dp = old_pages;
-		if (where[i].count || (op_flags & REQ_FLUSH))
+		if (where[i].count || (op_flags & REQ_PREFLUSH))
 			do_region(op, op_flags, i, where + i, dp, io);
 	}
 
diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c
index 0edb8ea51e46..b5dbf7a0515e 100644
--- a/drivers/md/dm-log-writes.c
+++ b/drivers/md/dm-log-writes.c
@@ -555,7 +555,7 @@ static int log_writes_map(struct dm_target *ti, struct bio *bio)
 	struct bio_vec bv;
 	size_t alloc_size;
 	int i = 0;
-	bool flush_bio = (bio->bi_rw & REQ_FLUSH);
+	bool flush_bio = (bio->bi_rw & REQ_PREFLUSH);
 	bool fua_bio = (bio->bi_rw & REQ_FUA);
 	bool discard_bio = (bio_op(bio) == REQ_OP_DISCARD);
 
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 907df2ba8fd4..9f5f460c0e92 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -704,7 +704,7 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes)
 	bio_list_init(&requeue);
 
 	while ((bio = bio_list_pop(writes))) {
-		if ((bio->bi_rw & REQ_FLUSH) ||
+		if ((bio->bi_rw & REQ_PREFLUSH) ||
 		    (bio_op(bio) == REQ_OP_DISCARD)) {
 			bio_list_add(&sync, bio);
 			continue;
@@ -1253,7 +1253,8 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
 	 * We need to dec pending if this was a write.
 	 */
 	if (rw == WRITE) {
-		if (!(bio->bi_rw & REQ_FLUSH) && bio_op(bio) != REQ_OP_DISCARD)
+		if (!(bio->bi_rw & REQ_PREFLUSH) &&
+		    bio_op(bio) != REQ_OP_DISCARD)
 			dm_rh_dec(ms->rh, bio_record->write_region);
 		return error;
 	}
diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c
index 3550ca7c6577..b11813431f31 100644
--- a/drivers/md/dm-region-hash.c
+++ b/drivers/md/dm-region-hash.c
@@ -398,7 +398,7 @@ void dm_rh_mark_nosync(struct dm_region_hash *rh, struct bio *bio)
 	region_t region = dm_rh_bio_to_region(rh, bio);
 	int recovering = 0;
 
-	if (bio->bi_rw & REQ_FLUSH) {
+	if (bio->bi_rw & REQ_PREFLUSH) {
 		rh->flush_failure = 1;
 		return;
 	}
@@ -526,7 +526,7 @@ void dm_rh_inc_pending(struct dm_region_hash *rh, struct bio_list *bios)
 	struct bio *bio;
 
 	for (bio = bios->head; bio; bio = bio->bi_next) {
-		if (bio->bi_rw & REQ_FLUSH || bio_op(bio) == REQ_OP_DISCARD)
+		if (bio->bi_rw & REQ_PREFLUSH || bio_op(bio) == REQ_OP_DISCARD)
 			continue;
 		rh_inc(rh, dm_rh_bio_to_region(rh, bio));
 	}
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 70bb0e8b62ce..69ab1ff5f5c9 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -1680,7 +1680,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
 
 	init_tracked_chunk(bio);
 
-	if (bio->bi_rw & REQ_FLUSH) {
+	if (bio->bi_rw & REQ_PREFLUSH) {
 		bio->bi_bdev = s->cow->bdev;
 		return DM_MAPIO_REMAPPED;
 	}
@@ -1799,7 +1799,7 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
 
 	init_tracked_chunk(bio);
 
-	if (bio->bi_rw & REQ_FLUSH) {
+	if (bio->bi_rw & REQ_PREFLUSH) {
 		if (!dm_bio_get_target_bio_nr(bio))
 			bio->bi_bdev = s->origin->bdev;
 		else
@@ -2285,7 +2285,7 @@ static int origin_map(struct dm_target *ti, struct bio *bio)
 
 	bio->bi_bdev = o->dev->bdev;
 
-	if (unlikely(bio->bi_rw & REQ_FLUSH))
+	if (unlikely(bio->bi_rw & REQ_PREFLUSH))
 		return DM_MAPIO_REMAPPED;
 
 	if (bio_rw(bio) != WRITE)
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index b738178ca068..48f1c01d7b9f 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -286,7 +286,7 @@ static int stripe_map(struct dm_target *ti, struct bio *bio)
 	uint32_t stripe;
 	unsigned target_bio_nr;
 
-	if (bio->bi_rw & REQ_FLUSH) {
+	if (bio->bi_rw & REQ_PREFLUSH) {
 		target_bio_nr = dm_bio_get_target_bio_nr(bio);
 		BUG_ON(target_bio_nr >= sc->stripes);
 		bio->bi_bdev = sc->stripe[target_bio_nr].dev->bdev;
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 1b684cbb9ba2..5f9e3d799d66 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -697,7 +697,7 @@ static void remap_to_origin(struct thin_c *tc, struct bio *bio)
 
 static int bio_triggers_commit(struct thin_c *tc, struct bio *bio)
 {
-	return (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) &&
+	return (bio->bi_rw & (REQ_PREFLUSH | REQ_FUA)) &&
 		dm_thin_changed_this_transaction(tc->td);
 }
 
@@ -868,7 +868,7 @@ static void __inc_remap_and_issue_cell(void *context,
 	struct bio *bio;
 
 	while ((bio = bio_list_pop(&cell->bios))) {
-		if (bio->bi_rw & (REQ_FLUSH | REQ_FUA) ||
+		if (bio->bi_rw & (REQ_PREFLUSH | REQ_FUA) ||
 		    bio_op(bio) == REQ_OP_DISCARD)
 			bio_list_add(&info->defer_bios, bio);
 		else {
@@ -1641,7 +1641,7 @@ static void __remap_and_issue_shared_cell(void *context,
 
 	while ((bio = bio_list_pop(&cell->bios))) {
 		if ((bio_data_dir(bio) == WRITE) ||
-		    (bio->bi_rw & (REQ_FLUSH | REQ_FUA) ||
+		    (bio->bi_rw & (REQ_PREFLUSH | REQ_FUA) ||
 		     bio_op(bio) == REQ_OP_DISCARD))
 			bio_list_add(&info->defer_bios, bio);
 		else {
@@ -2556,7 +2556,7 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
 		return DM_MAPIO_SUBMITTED;
 	}
 
-	if (bio->bi_rw & (REQ_FLUSH | REQ_FUA) ||
+	if (bio->bi_rw & (REQ_PREFLUSH | REQ_FUA) ||
 	    bio_op(bio) == REQ_OP_DISCARD) {
 		thin_defer_bio_with_throttle(tc, bio);
 		return DM_MAPIO_SUBMITTED;
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index fcc68c8edba0..aba7ed9abb3a 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1003,12 +1003,12 @@ static void dec_pending(struct dm_io *io, int error)
 		if (io_error == DM_ENDIO_REQUEUE)
 			return;
 
-		if ((bio->bi_rw & REQ_FLUSH) && bio->bi_iter.bi_size) {
+		if ((bio->bi_rw & REQ_PREFLUSH) && bio->bi_iter.bi_size) {
 			/*
 			 * Preflush done for flush with data, reissue
-			 * without REQ_FLUSH.
+			 * without REQ_PREFLUSH.
 			 */
-			bio->bi_rw &= ~REQ_FLUSH;
+			bio->bi_rw &= ~REQ_PREFLUSH;
 			queue_io(md, bio);
 		} else {
 			/* done with normal IO or empty flush */
@@ -1477,7 +1477,7 @@ EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);
 
 /*
  * A target may call dm_accept_partial_bio only from the map routine.  It is
- * allowed for all bio types except REQ_FLUSH.
+ * allowed for all bio types except REQ_PREFLUSH.
 *
 * dm_accept_partial_bio informs the dm that the target only wants to process
 * additional n_sectors sectors of the bio and the rest of the data should be
@@ -1507,7 +1507,7 @@ void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
 {
 	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
 	unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT;
-	BUG_ON(bio->bi_rw & REQ_FLUSH);
+	BUG_ON(bio->bi_rw & REQ_PREFLUSH);
 	BUG_ON(bi_size > *tio->len_ptr);
 	BUG_ON(n_sectors > bi_size);
 	*tio->len_ptr -= bi_size - n_sectors;
@@ -1795,7 +1795,7 @@ static void __split_and_process_bio(struct mapped_device *md,
 
 	start_io_acct(ci.io);
 
-	if (bio->bi_rw & REQ_FLUSH) {
+	if (bio->bi_rw & REQ_PREFLUSH) {
 		ci.bio = &ci.md->flush_bio;
 		ci.sector_count = 0;
 		error = __send_empty_flush(&ci);
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 1ad3f485672c..70ff888d25d0 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -221,7 +221,7 @@ static void linear_make_request(struct mddev *mddev, struct bio *bio)
 	struct bio *split;
 	sector_t start_sector, end_sector, data_offset;
 
-	if (unlikely(bio->bi_rw & REQ_FLUSH)) {
+	if (unlikely(bio->bi_rw & REQ_PREFLUSH)) {
 		md_flush_request(mddev, bio);
 		return;
 	}
diff --git a/drivers/md/md.c b/drivers/md/md.c
index bd4844fe0e98..1f123f5a29da 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -414,7 +414,7 @@ static void md_submit_flush_data(struct work_struct *ws)
 		/* an empty barrier - all done */
 		bio_endio(bio);
 	else {
-		bio->bi_rw &= ~REQ_FLUSH;
+		bio->bi_rw &= ~REQ_PREFLUSH;
 		mddev->pers->make_request(mddev, bio);
 	}
 
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 2e0918fc376d..b4f335245bd6 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -424,7 +424,7 @@ struct mddev {
 
 	/* Generic flush handling.
 	 * The last to finish preflush schedules a worker to submit
-	 * the rest of the request (without the REQ_FLUSH flag).
+	 * the rest of the request (without the REQ_PREFLUSH flag).
 	 */
 	struct bio *flush_bio;
 	atomic_t flush_pending;
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index dd483bb2e111..72ea98e89e57 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -111,7 +111,7 @@ static void multipath_make_request(struct mddev *mddev, struct bio * bio)
 	struct multipath_bh * mp_bh;
 	struct multipath_info *multipath;
 
-	if (unlikely(bio->bi_rw & REQ_FLUSH)) {
+	if (unlikely(bio->bi_rw & REQ_PREFLUSH)) {
 		md_flush_request(mddev, bio);
 		return;
 	}
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 051a10ca4e09..c3d439083212 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -458,7 +458,7 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
 	struct md_rdev *tmp_dev;
 	struct bio *split;
 
-	if (unlikely(bio->bi_rw & REQ_FLUSH)) {
+	if (unlikely(bio->bi_rw & REQ_PREFLUSH)) {
 		md_flush_request(mddev, bio);
 		return;
 	}
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index a3427230f7cf..10e53cd6a995 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1056,7 +1056,8 @@ static void raid1_make_request(struct mddev *mddev, struct bio * bio)
 	const int op = bio_op(bio);
 	const int rw = bio_data_dir(bio);
 	const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
-	const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA));
+	const unsigned long do_flush_fua = (bio->bi_rw &
+						(REQ_PREFLUSH | REQ_FUA));
 	const unsigned long do_sec = (bio->bi_rw & REQ_SECURE);
 	struct md_rdev *blocked_rdev;
 	struct blk_plug_cb *cb;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 615045a11bac..245640b50153 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1447,7 +1447,7 @@ static void raid10_make_request(struct mddev *mddev, struct bio *bio)
 
 	struct bio *split;
 
-	if (unlikely(bio->bi_rw & REQ_FLUSH)) {
+	if (unlikely(bio->bi_rw & REQ_PREFLUSH)) {
 		md_flush_request(mddev, bio);
 		return;
 	}
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index 92651381b094..5504ce2bac06 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -536,7 +536,7 @@ int r5l_handle_flush_request(struct r5l_log *log, struct bio *bio)
 		bio_endio(bio);
 		return 0;
 	}
-	bio->bi_rw &= ~REQ_FLUSH;
+	bio->bi_rw &= ~REQ_PREFLUSH;
 	return -EAGAIN;
 }
 
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index b9122e2c6aa1..7aacf5b55e15 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -5150,7 +5150,7 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi)
 	DEFINE_WAIT(w);
 	bool do_prepare;
 
-	if (unlikely(bi->bi_rw & REQ_FLUSH)) {
+	if (unlikely(bi->bi_rw & REQ_PREFLUSH)) {
 		int ret = r5l_handle_flush_request(conf->log, bi);
 
 		if (ret == 0)
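For reference, the bio-based md pattern repeated throughout the hunks above reduces to the hypothetical sketch below: divert flush bios to the existing md_flush_request() helper, which issues the preflush to every member device and later reissues the bio with REQ_PREFLUSH cleared (the md.c hunk above shows the clearing side). example_make_request() is a stand-in for the per-personality make_request functions touched by this patch, not a real driver.

/*
 * Hypothetical sketch of the md pattern above. A flush bio is handed
 * to the generic handler and the function returns; everything else
 * proceeds as a normal read or write.
 */
static void example_make_request(struct mddev *mddev, struct bio *bio)
{
	if (unlikely(bio->bi_rw & REQ_PREFLUSH)) {
		md_flush_request(mddev, bio);
		return;
	}
	/* ... normal read/write handling ... */
}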