path: root/block/elevator.c
author		Christoph Hellwig <hch@lst.de>	2016-10-20 15:12:13 +0200
committer	Jens Axboe <axboe@fb.com>	2016-10-28 08:45:17 -0600
commit		e806402130c9c494e22c73ae9ead4e79d2a5811c
tree		bac59e1eb3f1b5945409bd0780a4824e9b8383f8
parent		8d2bbd4c8236e9e38e6b36ac9e2c54fdcfe5b335
block: split out request-only flags into a new namespace
A lot of the REQ_* flags are only used on struct requests, and only of
use to the block layer and a few drivers that dig into struct request
internals.

This patch adds a new req_flags_t rq_flags field to struct request for
them, and thus dramatically shrinks the number of common request flags.
It also removes the unfortunate situation where we have to fit the
fields from the same enum into 32 bits for struct bio and 64 bits for
struct request.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Shaun Tancheff <shaun.tancheff@seagate.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
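For context, the new namespace has roughly the following shape (a
minimal sketch of the corresponding include/linux/blkdev.h change,
not the verbatim hunk; bit positions, field types, and comments are
illustrative, and only the RQF_* flags touched in block/elevator.c
below are listed):

	/* sketch: request-only flags split out of the REQ_* enum shared with bios */
	typedef __u32 __bitwise req_flags_t;

	#define RQF_SORTED	((__force req_flags_t)(1 << 0))	/* elevator knows about this rq */
	#define RQF_STARTED	((__force req_flags_t)(1 << 1))	/* driver may have started it */
	#define RQF_SOFTBARRIER	((__force req_flags_t)(1 << 2))	/* may not pass the ioscheduler */
	#define RQF_ELVPRIV	((__force req_flags_t)(1 << 3))	/* elevator private data attached */
	#define RQF_PM		((__force req_flags_t)(1 << 4))	/* runtime-PM request */
	#define RQF_HASHED	((__force req_flags_t)(1 << 5))	/* on IO scheduler merge hash */

	struct request {
		/* ... */
		unsigned int cmd_flags;	/* op and common REQ_* flags, shared with bios */
		req_flags_t rq_flags;	/* request-only RQF_* flags */
		/* ... */
	};

With the two namespaces separated, the conversion in the diff below is
mechanical: every request-only test or update of the form
rq->cmd_flags & REQ_NAME becomes rq->rq_flags & RQF_NAME.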
Diffstat (limited to 'block/elevator.c')
-rw-r--r--	block/elevator.c	32
1 file changed, 15 insertions(+), 17 deletions(-)
diff --git a/block/elevator.c b/block/elevator.c
index f7d973a56fd7..ac80f89a0842 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -245,7 +245,7 @@ EXPORT_SYMBOL(elevator_exit);
 static inline void __elv_rqhash_del(struct request *rq)
 {
 	hash_del(&rq->hash);
-	rq->cmd_flags &= ~REQ_HASHED;
+	rq->rq_flags &= ~RQF_HASHED;
 }
 
 static void elv_rqhash_del(struct request_queue *q, struct request *rq)
@@ -260,7 +260,7 @@ static void elv_rqhash_add(struct request_queue *q, struct request *rq)
 
 	BUG_ON(ELV_ON_HASH(rq));
 	hash_add(e->hash, &rq->hash, rq_hash_key(rq));
-	rq->cmd_flags |= REQ_HASHED;
+	rq->rq_flags |= RQF_HASHED;
 }
 
 static void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
@@ -352,7 +352,6 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq)
 {
 	sector_t boundary;
 	struct list_head *entry;
-	int stop_flags;
 
 	if (q->last_merge == rq)
 		q->last_merge = NULL;
@@ -362,7 +361,6 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq)
 	q->nr_sorted--;
 
 	boundary = q->end_sector;
-	stop_flags = REQ_SOFTBARRIER | REQ_STARTED;
 	list_for_each_prev(entry, &q->queue_head) {
 		struct request *pos = list_entry_rq(entry);
 
@@ -370,7 +368,7 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq)
 			break;
 		if (rq_data_dir(rq) != rq_data_dir(pos))
 			break;
-		if (pos->cmd_flags & stop_flags)
+		if (pos->rq_flags & (RQF_STARTED | RQF_SOFTBARRIER))
 			break;
 		if (blk_rq_pos(rq) >= boundary) {
 			if (blk_rq_pos(pos) < boundary)
@@ -510,7 +508,7 @@ void elv_merge_requests(struct request_queue *q, struct request *rq,
 			     struct request *next)
 {
 	struct elevator_queue *e = q->elevator;
-	const int next_sorted = next->cmd_flags & REQ_SORTED;
+	const int next_sorted = next->rq_flags & RQF_SORTED;
 
 	if (next_sorted && e->type->ops.elevator_merge_req_fn)
 		e->type->ops.elevator_merge_req_fn(q, rq, next);
@@ -537,13 +535,13 @@ void elv_bio_merged(struct request_queue *q, struct request *rq,
 #ifdef CONFIG_PM
 static void blk_pm_requeue_request(struct request *rq)
 {
-	if (rq->q->dev && !(rq->cmd_flags & REQ_PM))
+	if (rq->q->dev && !(rq->rq_flags & RQF_PM))
 		rq->q->nr_pending--;
 }
 
 static void blk_pm_add_request(struct request_queue *q, struct request *rq)
 {
-	if (q->dev && !(rq->cmd_flags & REQ_PM) && q->nr_pending++ == 0 &&
+	if (q->dev && !(rq->rq_flags & RQF_PM) && q->nr_pending++ == 0 &&
 	    (q->rpm_status == RPM_SUSPENDED || q->rpm_status == RPM_SUSPENDING))
 		pm_request_resume(q->dev);
 }
@@ -563,11 +561,11 @@ void elv_requeue_request(struct request_queue *q, struct request *rq)
 	 */
 	if (blk_account_rq(rq)) {
 		q->in_flight[rq_is_sync(rq)]--;
-		if (rq->cmd_flags & REQ_SORTED)
+		if (rq->rq_flags & RQF_SORTED)
 			elv_deactivate_rq(q, rq);
 	}
 
-	rq->cmd_flags &= ~REQ_STARTED;
+	rq->rq_flags &= ~RQF_STARTED;
 
 	blk_pm_requeue_request(rq);
 
@@ -597,13 +595,13 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where)
 
 	rq->q = q;
 
-	if (rq->cmd_flags & REQ_SOFTBARRIER) {
+	if (rq->rq_flags & RQF_SOFTBARRIER) {
 		/* barriers are scheduling boundary, update end_sector */
 		if (rq->cmd_type == REQ_TYPE_FS) {
 			q->end_sector = rq_end_sector(rq);
 			q->boundary_rq = rq;
 		}
-	} else if (!(rq->cmd_flags & REQ_ELVPRIV) &&
+	} else if (!(rq->rq_flags & RQF_ELVPRIV) &&
 		    (where == ELEVATOR_INSERT_SORT ||
 		     where == ELEVATOR_INSERT_SORT_MERGE))
 		where = ELEVATOR_INSERT_BACK;
@@ -611,12 +609,12 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where)
 	switch (where) {
 	case ELEVATOR_INSERT_REQUEUE:
 	case ELEVATOR_INSERT_FRONT:
-		rq->cmd_flags |= REQ_SOFTBARRIER;
+		rq->rq_flags |= RQF_SOFTBARRIER;
 		list_add(&rq->queuelist, &q->queue_head);
 		break;
 
 	case ELEVATOR_INSERT_BACK:
-		rq->cmd_flags |= REQ_SOFTBARRIER;
+		rq->rq_flags |= RQF_SOFTBARRIER;
 		elv_drain_elevator(q);
 		list_add_tail(&rq->queuelist, &q->queue_head);
 		/*
@@ -642,7 +640,7 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where)
 			break;
 	case ELEVATOR_INSERT_SORT:
 		BUG_ON(rq->cmd_type != REQ_TYPE_FS);
-		rq->cmd_flags |= REQ_SORTED;
+		rq->rq_flags |= RQF_SORTED;
 		q->nr_sorted++;
 		if (rq_mergeable(rq)) {
 			elv_rqhash_add(q, rq);
@@ -659,7 +657,7 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where)
 		break;
 
 	case ELEVATOR_INSERT_FLUSH:
-		rq->cmd_flags |= REQ_SOFTBARRIER;
+		rq->rq_flags |= RQF_SOFTBARRIER;
 		blk_insert_flush(rq);
 		break;
 	default:
@@ -735,7 +733,7 @@ void elv_completed_request(struct request_queue *q, struct request *rq)
 	 */
 	if (blk_account_rq(rq)) {
 		q->in_flight[rq_is_sync(rq)]--;
-		if ((rq->cmd_flags & REQ_SORTED) &&
+		if ((rq->rq_flags & RQF_SORTED) &&
 		    e->type->ops.elevator_completed_req_fn)
 			e->type->ops.elevator_completed_req_fn(q, rq);
 	}