Diffstat (limited to 'block/blk-core.c')
-rw-r--r--  block/blk-core.c  |  26
1 file changed, 24 insertions(+), 2 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index c78042975737..6b78ec56a4f2 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -462,6 +462,10 @@ static void blk_rq_timed_out_timer(struct timer_list *t)
kblockd_schedule_work(&q->timeout_work);
}
+static void blk_timeout_work(struct work_struct *work)
+{
+}
+
/**
* blk_alloc_queue_node - allocate a request queue
* @gfp_mask: memory allocation flags
@@ -505,7 +509,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
timer_setup(&q->backing_dev_info->laptop_mode_wb_timer,
laptop_mode_timer_fn, 0);
timer_setup(&q->timeout, blk_rq_timed_out_timer, 0);
- INIT_WORK(&q->timeout_work, NULL);
+ INIT_WORK(&q->timeout_work, blk_timeout_work);
INIT_LIST_HEAD(&q->icq_list);
#ifdef CONFIG_BLK_CGROUP
INIT_LIST_HEAD(&q->blkg_list);
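The two hunks above add an empty blk_timeout_work() handler and pass it to INIT_WORK() in place of the earlier NULL function pointer, so that queueing q->timeout_work never runs a NULL callback. Below is a minimal, self-contained sketch of the same workqueue pattern; the demo_* names are illustrative only and are not part of this patch.

/*
 * Sketch: a work item always needs a valid handler, even if the handler
 * does nothing, because the workqueue will call it when the item runs.
 */
#include <linux/module.h>
#include <linux/workqueue.h>

static struct work_struct demo_work;

static void demo_timeout_work(struct work_struct *work)
{
	/* Intentionally empty, mirroring blk_timeout_work() above. */
}

static int __init demo_init(void)
{
	INIT_WORK(&demo_work, demo_timeout_work);
	schedule_work(&demo_work);	/* safe: handler is non-NULL */
	return 0;
}

static void __exit demo_exit(void)
{
	cancel_work_sync(&demo_work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");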
@@ -661,7 +665,6 @@ no_merge:
* blk_attempt_plug_merge - try to merge with %current's plugged list
* @q: request_queue new bio is being queued at
* @bio: new bio being queued
- * @request_count: out parameter for number of traversed plugged requests
* @same_queue_rq: pointer to &struct request that gets filled in when
* another request associated with @q is found on the plug list
* (optional, may be %NULL)
@@ -1683,6 +1686,15 @@ EXPORT_SYMBOL(kblockd_mod_delayed_work_on);
* @plug: The &struct blk_plug that needs to be initialized
*
* Description:
+ * blk_start_plug() indicates to the block layer an intent by the caller
+ * to submit multiple I/O requests in a batch. The block layer may use
+ * this hint to defer submitting I/Os from the caller until blk_finish_plug()
+ * is called. However, the block layer may choose to submit requests
+ * before a call to blk_finish_plug() if the number of queued I/Os
+ * exceeds %BLK_MAX_REQUEST_COUNT, or if the size of the I/O is larger than
+ * %BLK_PLUG_FLUSH_SIZE. The queued I/Os may also be submitted early if
+ * the task schedules (see below).
+ *
* Tracking blk_plug inside the task_struct will help with auto-flushing the
* pending I/O should the task end up blocking between blk_start_plug() and
* blk_finish_plug(). This is important from a performance perspective, but
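The kernel-doc added above describes plugging as a batching hint. For illustration only, a hypothetical caller (not part of this patch) could batch a set of already-built bios as in the sketch below; submit_bio_batch() and its parameters are invented names.

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Hypothetical helper: submit 'nr' prepared bios as one plugged batch. */
static void submit_bio_batch(struct bio **bios, unsigned int nr)
{
	struct blk_plug plug;
	unsigned int i;

	blk_start_plug(&plug);		/* hint: a batch of I/O follows */
	for (i = 0; i < nr; i++)
		submit_bio(bios[i]);	/* may be held back on the plug list */
	blk_finish_plug(&plug);		/* flush whatever is still plugged */
}

Keeping the plug on the caller's stack, as above, matches the comment about tracking blk_plug via task_struct: if the task blocks mid-batch, the pending I/O can still be flushed on its behalf.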
@@ -1765,6 +1777,16 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
blk_mq_flush_plug_list(plug, from_schedule);
}
+/**
+ * blk_finish_plug - mark the end of a batch of submitted I/O
+ * @plug: The &struct blk_plug passed to blk_start_plug()
+ *
+ * Description:
+ * Indicate that a batch of I/O submissions is complete. This function
+ * must be paired with an initial call to blk_start_plug(). The intent
+ * is to allow the block layer to optimize I/O submission. See the
+ * documentation for blk_start_plug() for more information.
+ */
void blk_finish_plug(struct blk_plug *plug)
{
if (plug != current->plug)