author	Jens Axboe <axboe@kernel.dk>	2018-11-26 08:24:43 -0700
committer	Jens Axboe <axboe@kernel.dk>	2018-11-26 08:25:53 -0700
commit	0a1b8b87d064a47fad9ec475316002da28559207 (patch)
tree	9bc87a52b3fccc1f476d52ae94d6bb7e69e2bfd94 /block
parent	e7d943910719b44738e86f91a26a64e3b61ae419 (diff)
block: make blk_poll() take a parameter on whether to spin or not
blk_poll() has always kept spinning until it found an IO. This is fine for
SYNC polling, since we need to find one request we have pending, but in
preparation for ASYNC polling it can be beneficial to just check if we have
any entries available or not.

Existing callers are converted to pass in 'spin == true', to retain the old
behavior.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
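As a rough sketch of the converted-caller pattern (the callers themselves are
outside the 'block' diffstat below; the helper name and 'done' flag here are
hypothetical stand-ins), a synchronous polled-I/O submitter now waits roughly
like this, passing spin == true to keep the old keep-spinning behavior:

	/*
	 * Illustrative only: a sync wait loop around the new blk_poll()
	 * signature. 'done' stands in for whatever completion flag the
	 * real caller checks.
	 */
	static void wait_for_polled_bio(struct request_queue *q, blk_qc_t cookie,
					bool *done)
	{
		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (READ_ONCE(*done))
				break;
			/* spin == true: keep polling until a completion is found */
			if (!blk_poll(q, cookie, true))
				io_schedule();
		}
		__set_current_state(TASK_RUNNING);
	}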
Diffstat (limited to 'block')
-rw-r--r--	block/blk-core.c	9
-rw-r--r--	block/blk-mq.c	6
2 files changed, 9 insertions, 6 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index 03c4202b69bf..9af56dbb84f1 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1277,19 +1277,22 @@ EXPORT_SYMBOL(submit_bio);
  * blk_poll - poll for IO completions
  * @q: the queue
  * @cookie: cookie passed back at IO submission time
+ * @spin: whether to spin for completions
  *
  * Description:
  *    Poll for completions on the passed in queue. Returns number of
- *    completed entries found.
+ *    completed entries found. If @spin is true, then blk_poll will continue
+ *    looping until at least one completion is found, unless the task is
+ *    otherwise marked running (or we need to reschedule).
  */
-int blk_poll(struct request_queue *q, blk_qc_t cookie)
+int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin)
 {
 	if (!q->poll_fn || !blk_qc_t_valid(cookie))
 		return 0;
 
 	if (current->plug)
 		blk_flush_plug_list(current->plug, false);
-	return q->poll_fn(q, cookie);
+	return q->poll_fn(q, cookie, spin);
 }
 EXPORT_SYMBOL_GPL(blk_poll);
 
diff --git a/block/blk-mq.c b/block/blk-mq.c
index b66cca3ce1e5..c2751f0a3ccc 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -38,7 +38,7 @@
#include "blk-mq-sched.h"
#include "blk-rq-qos.h"
-static int blk_mq_poll(struct request_queue *q, blk_qc_t cookie);
+static int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, bool spin);
static void blk_mq_poll_stats_start(struct request_queue *q);
static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb);
@@ -3352,7 +3352,7 @@ static bool blk_mq_poll_hybrid(struct request_queue *q,
 	return blk_mq_poll_hybrid_sleep(q, hctx, rq);
 }
 
-static int blk_mq_poll(struct request_queue *q, blk_qc_t cookie)
+static int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, bool spin)
 {
 	struct blk_mq_hw_ctx *hctx;
 	long state;
@@ -3392,7 +3392,7 @@ static int blk_mq_poll(struct request_queue *q, blk_qc_t cookie)
 
 		if (current->state == TASK_RUNNING)
 			return 1;
-		if (ret < 0)
+		if (ret < 0 || !spin)
 			break;
 		cpu_relax();
 	}
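With the poll loop above breaking out when @spin is false, the ASYNC use case
this prepares for can make a single opportunistic pass instead of spinning.
A minimal sketch of that intended usage (the wrapper below is made up for
illustration and is not part of this series):

	/* Illustrative only: one non-spinning pass over the poll queue */
	static int reap_completions(struct request_queue *q, blk_qc_t cookie)
	{
		/* Returns the number of completions found in this single pass */
		return blk_poll(q, cookie, false);
	}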