author     Tejun Heo <tj@kernel.org>                   2011-12-14 00:33:37 +0100
committer  Jens Axboe <axboe@kernel.dk>                2011-12-14 00:33:37 +0100
commit     1ba64edef6051d2ec79bb2fbd3a0c8f0df00ab55 (patch)
tree       1e8a8d5cfdfeff0f92490985bd125ab6666673b0 /block
parent     dc47ce90c3a822cd7c9e9339fe4d5f61dcb26b50 (diff)
block, sx8: kill blk_insert_request()
The only user left for blk_insert_request() is sx8 and it can be
trivially switched to use blk_execute_rq_nowait() - special requests
aren't included in io stat and sx8 doesn't use block layer tagging.
Switch sx8 and kill blk_insert_request().
This patch doesn't introduce any functional difference.
Only compile tested.
Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Jeff Garzik <jgarzik@pobox.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
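For reference, the driver-side half of the conversion looks roughly like the sketch below. This is an illustrative before/after against the ~3.2-era block API, not the actual sx8 hunk (drivers/block/sx8.c is outside the 'block' diffstat shown below); the names host->oob_q and crq follow sx8's existing blk_insert_request() call site, and the NULL gendisk/end_io arguments are assumptions for a fire-and-forget special request.

	/* Before: blk_insert_request() marked the request as
	 * REQ_TYPE_SPECIAL, stored the private data and queued it. */
	blk_insert_request(host->oob_q, crq->rq, 1, crq);

	/* After (sketch): the caller now sets the fields that
	 * blk_insert_request() used to set, then hands the request to
	 * blk_execute_rq_nowait() for asynchronous execution at the
	 * head of the queue, with no disk association or completion
	 * callback. */
	crq->rq->cmd_type = REQ_TYPE_SPECIAL;
	crq->rq->special = crq;
	blk_execute_rq_nowait(host->oob_q, NULL, crq->rq, 1, NULL);

Because special requests are not accounted in I/O statistics and sx8 does not use block-layer tagging, the iostat and blk_queue_end_tag() handling in the removed helper has no effect for this caller, which is why the switch is equivalent.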
Diffstat (limited to 'block')
-rw-r--r-- | block/blk-core.c | 48
1 file changed, 0 insertions, 48 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index ea70e6c80cd3..435af2378614 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1010,54 +1010,6 @@ static void add_acct_request(struct request_queue *q, struct request *rq,
 	__elv_add_request(q, rq, where);
 }
 
-/**
- * blk_insert_request - insert a special request into a request queue
- * @q:		request queue where request should be inserted
- * @rq:		request to be inserted
- * @at_head:	insert request at head or tail of queue
- * @data:	private data
- *
- * Description:
- *    Many block devices need to execute commands asynchronously, so they don't
- *    block the whole kernel from preemption during request execution. This is
- *    accomplished normally by inserting aritficial requests tagged as
- *    REQ_TYPE_SPECIAL in to the corresponding request queue, and letting them
- *    be scheduled for actual execution by the request queue.
- *
- *    We have the option of inserting the head or the tail of the queue.
- *    Typically we use the tail for new ioctls and so forth. We use the head
- *    of the queue for things like a QUEUE_FULL message from a device, or a
- *    host that is unable to accept a particular command.
- */
-void blk_insert_request(struct request_queue *q, struct request *rq,
-			int at_head, void *data)
-{
-	int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
-	unsigned long flags;
-
-	/*
-	 * tell I/O scheduler that this isn't a regular read/write (ie it
-	 * must not attempt merges on this) and that it acts as a soft
-	 * barrier
-	 */
-	rq->cmd_type = REQ_TYPE_SPECIAL;
-
-	rq->special = data;
-
-	spin_lock_irqsave(q->queue_lock, flags);
-
-	/*
-	 * If command is tagged, release the tag
-	 */
-	if (blk_rq_tagged(rq))
-		blk_queue_end_tag(q, rq);
-
-	add_acct_request(q, rq, where);
-	__blk_run_queue(q);
-	spin_unlock_irqrestore(q->queue_lock, flags);
-}
-EXPORT_SYMBOL(blk_insert_request);
-
 static void part_round_stats_single(int cpu, struct hd_struct *part,
 				     unsigned long now)
 {