author | Tejun Heo <tj@kernel.org> | 2008-11-28 13:32:04 +0900
---|---|---
committer | Jens Axboe <jens.axboe@oracle.com> | 2008-12-29 08:28:45 +0100
commit | f671620e7d895af221bdfeda751d54fa55ed9546 (patch)
tree | beeb843a4a356d94b6b4faec97e078b2a4ad1f09 /block/blk-barrier.c
parent | a7384677b2f4cd40948fd7ce024ba5e1821444ba (diff)
block: make every barrier action optional
In all barrier sequences, the barrier write itself was always assumed
to be issued and thus didn't have a corresponding control flag. This
patch adds QUEUE_ORDERED_DO_BAR and unifies action mask handling in
start_ordered() such that any barrier action can be skipped.

This patch doesn't introduce any visible behavior changes.
Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
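
For context, the DO_* action flags are meant to compose into ordered-mode masks. The snippet below reflects the QUEUE_ORDERED_* layout in include/linux/blkdev.h from this patch series, quoted from memory of the 2.6.29-era header, so treat the exact values as an assumption. A mode that leaves QUEUE_ORDERED_DO_BAR out of its mask now simply never issues the barrier write:

```c
/* include/linux/blkdev.h, 2.6.29-era layout (abridged, from memory) */
enum {
	/* how ordering is enforced */
	QUEUE_ORDERED_BY_DRAIN		= 0x01,
	QUEUE_ORDERED_BY_TAG		= 0x02,

	/* which actions to take, each individually optional */
	QUEUE_ORDERED_DO_PREFLUSH	= 0x10,
	QUEUE_ORDERED_DO_BAR		= 0x20,	/* added by this patch */
	QUEUE_ORDERED_DO_POSTFLUSH	= 0x40,
	QUEUE_ORDERED_DO_FUA		= 0x80,

	/* ordered modes compose the actions they need */
	QUEUE_ORDERED_DRAIN		= QUEUE_ORDERED_BY_DRAIN |
					  QUEUE_ORDERED_DO_BAR,
	QUEUE_ORDERED_DRAIN_FLUSH	= QUEUE_ORDERED_DRAIN |
					  QUEUE_ORDERED_DO_PREFLUSH |
					  QUEUE_ORDERED_DO_POSTFLUSH,
};
```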
Diffstat (limited to 'block/blk-barrier.c')
-rw-r--r-- | block/blk-barrier.c | 41
1 file changed, 24 insertions(+), 17 deletions(-)
```diff
diff --git a/block/blk-barrier.c b/block/blk-barrier.c
index 43d479a1e664..1efabf829c53 100644
--- a/block/blk-barrier.c
+++ b/block/blk-barrier.c
@@ -158,19 +158,10 @@ static inline struct request *start_ordered(struct request_queue *q,
 	q->ordered = q->next_ordered;
 	q->ordseq |= QUEUE_ORDSEQ_STARTED;
 
-	/*
-	 * Prep proxy barrier request.
-	 */
+	/* stash away the original request */
 	elv_dequeue_request(q, rq);
 	q->orig_bar_rq = rq;
-	rq = &q->bar_rq;
-	blk_rq_init(q, rq);
-	if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
-		rq->cmd_flags |= REQ_RW;
-	if (q->ordered & QUEUE_ORDERED_DO_FUA)
-		rq->cmd_flags |= REQ_FUA;
-	init_request_from_bio(rq, q->orig_bar_rq->bio);
-	rq->end_io = bar_end_io;
+	rq = NULL;
 
 	/*
 	 * Queue ordered sequence.  As we stack them at the head, we
@@ -181,12 +172,28 @@ static inline struct request *start_ordered(struct request_queue *q,
 	 * there will be no data written between the pre and post flush.
 	 * Hence a single flush will suffice.
 	 */
-	if ((q->ordered & QUEUE_ORDERED_DO_POSTFLUSH) && !blk_empty_barrier(rq))
+	if ((q->ordered & QUEUE_ORDERED_DO_POSTFLUSH) &&
+	    !blk_empty_barrier(q->orig_bar_rq)) {
 		queue_flush(q, QUEUE_ORDERED_DO_POSTFLUSH);
-	else
+		rq = &q->post_flush_rq;
+	} else
 		q->ordseq |= QUEUE_ORDSEQ_POSTFLUSH;
 
-	elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
+	if (q->ordered & QUEUE_ORDERED_DO_BAR) {
+		rq = &q->bar_rq;
+
+		/* initialize proxy request and queue it */
+		blk_rq_init(q, rq);
+		if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
+			rq->cmd_flags |= REQ_RW;
+		if (q->ordered & QUEUE_ORDERED_DO_FUA)
+			rq->cmd_flags |= REQ_FUA;
+		init_request_from_bio(rq, q->orig_bar_rq->bio);
+		rq->end_io = bar_end_io;
+
+		elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
+	} else
+		q->ordseq |= QUEUE_ORDSEQ_BAR;
 
 	if (q->ordered & QUEUE_ORDERED_DO_PREFLUSH) {
 		queue_flush(q, QUEUE_ORDERED_DO_PREFLUSH);
@@ -194,10 +201,10 @@ static inline struct request *start_ordered(struct request_queue *q,
 	} else
 		q->ordseq |= QUEUE_ORDSEQ_PREFLUSH;
 
-	if ((q->ordered & QUEUE_ORDERED_BY_TAG) || q->in_flight == 0)
-		q->ordseq |= QUEUE_ORDSEQ_DRAIN;
-	else
+	if ((q->ordered & QUEUE_ORDERED_BY_DRAIN) && q->in_flight)
 		rq = NULL;
+	else
+		q->ordseq |= QUEUE_ORDSEQ_DRAIN;
 
 	return rq;
 }
```
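
The resulting structure in start_ordered() applies the same check to every action: if the mask requests it, queue a proxy request at the head and track it as the next request to dispatch; otherwise mark that step of the sequence as already complete so the completion path never waits on a step that was never issued. Below is a minimal userspace model of that pattern; the flag values, struct queue_model, and start_ordered_model() are invented for illustration and are not kernel code.

```c
#include <stdio.h>
#include <stddef.h>

/* Illustrative stand-ins for the QUEUE_ORDERED_DO_* action mask and the
 * QUEUE_ORDSEQ_* completion bits; values are made up for this model. */
enum { DO_PREFLUSH = 0x1, DO_BAR = 0x2, DO_POSTFLUSH = 0x4 };
enum { SEQ_PREFLUSH = 0x1, SEQ_BAR = 0x2, SEQ_POSTFLUSH = 0x4 };

struct queue_model {
	unsigned ordered;	/* requested actions for this barrier */
	unsigned ordseq;	/* sequence steps already complete */
};

/* Each action is either issued or pre-completed -- the core idea of the
 * patch: a skipped step must never leave the state machine waiting. */
static void start_ordered_model(struct queue_model *q)
{
	static const struct {
		unsigned do_flag, seq_flag;
		const char *name;
	} acts[] = {
		{ DO_POSTFLUSH, SEQ_POSTFLUSH, "post-flush" },
		{ DO_BAR,       SEQ_BAR,       "barrier"    },
		{ DO_PREFLUSH,  SEQ_PREFLUSH,  "pre-flush"  },
	};

	for (size_t i = 0; i < sizeof(acts) / sizeof(acts[0]); i++) {
		if (q->ordered & acts[i].do_flag)
			printf("queue %s request at head\n", acts[i].name);
		else
			q->ordseq |= acts[i].seq_flag; /* skipped: done */
	}
}

int main(void)
{
	/* e.g. a flush-only barrier with no barrier write of its own */
	struct queue_model q = { DO_PREFLUSH | DO_POSTFLUSH, 0 };

	start_ordered_model(&q);
	printf("pre-completed steps: 0x%x\n", q.ordseq);
	return 0;
}
```

Checking post-flush, barrier, then pre-flush in that order matters in the real function because each proxy request is inserted at the front of the queue and so dispatches in reverse; the model above only mirrors the issue-or-mark decision.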