author		Jens Axboe <axboe@kernel.dk>	2020-08-27 16:40:19 -0600
committer	Jens Axboe <axboe@kernel.dk>	2020-08-27 16:40:29 -0600
commit		eefdf30f3dcb5c1d47bee2b3afdb9d4d05343ff3 (patch)
tree		e5811c224ae3d95cc27d3a0d03142f07dc5fdca0 /fs
parent		56450c20fe10d4d93f58019109aa4e06fc0b9206 (diff)
io_uring: fix IOPOLL -EAGAIN retries
This normally isn't hit, as polling is mostly done on NVMe with deep queue
depths. But if we do run into request starvation, we need to ensure that
retries are properly serialized.

Reported-by: Andres Freund <andres@anarazel.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
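For context (not part of the commit): the affected configuration is an IORING_SETUP_IOPOLL ring driving O_DIRECT I/O, where the lower layers can return -EAGAIN under queue starvation and the kernel has to retry the request itself. A minimal liburing sketch of such a setup, with an illustrative device path, queue depth and block size, might look like this:

    /* Hypothetical sketch, not from this commit: one O_DIRECT read on an
     * IORING_SETUP_IOPOLL ring, the configuration whose -EAGAIN retry path
     * this patch fixes. Device path, queue depth and block size are
     * illustrative only. */
    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <liburing.h>

    int main(void)
    {
    	struct io_uring ring;
    	struct io_uring_sqe *sqe;
    	struct io_uring_cqe *cqe;
    	void *buf;
    	int fd;

    	/* IOPOLL needs O_DIRECT I/O on a file/device whose driver supports polling */
    	fd = open("/dev/nvme0n1", O_RDONLY | O_DIRECT);
    	if (fd < 0)
    		return 1;

    	if (io_uring_queue_init(64, &ring, IORING_SETUP_IOPOLL) < 0)
    		return 1;

    	if (posix_memalign(&buf, 4096, 4096))
    		return 1;

    	sqe = io_uring_get_sqe(&ring);
    	io_uring_prep_read(sqe, fd, buf, 4096, 0);
    	io_uring_submit(&ring);

    	/* with IOPOLL, waiting for the CQE polls the device for completion
    	 * instead of sleeping on an interrupt */
    	if (io_uring_wait_cqe(&ring, &cqe) == 0) {
    		printf("read returned %d\n", cqe->res);
    		io_uring_cqe_seen(&ring, cqe);
    	}

    	io_uring_queue_exit(&ring);
    	return 0;
    }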
Diffstat (limited to 'fs')
-rw-r--r--	fs/io_uring.c	| 14 +++++++++-----
1 file changed, 9 insertions(+), 5 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 6df08287c59e..8c77ad4a65f0 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1150,7 +1150,7 @@ static void io_prep_async_work(struct io_kiocb *req)
 	io_req_init_async(req);
 
 	if (req->flags & REQ_F_ISREG) {
-		if (def->hash_reg_file)
+		if (def->hash_reg_file || (req->ctx->flags & IORING_SETUP_IOPOLL))
 			io_wq_hash_work(&req->work, file_inode(req->file));
 	} else {
 		if (def->unbound_nonreg_file)
@@ -3132,6 +3132,7 @@ static int io_read(struct io_kiocb *req, bool force_nonblock,
 	ret = io_import_iovec(READ, req, &iovec, iter, !force_nonblock);
 	if (ret < 0)
 		return ret;
+	iov_count = iov_iter_count(iter);
 	io_size = ret;
 	req->result = io_size;
 	ret = 0;
@@ -3144,7 +3145,6 @@ static int io_read(struct io_kiocb *req, bool force_nonblock,
 	if (force_nonblock && !io_file_supports_async(req->file, READ))
 		goto copy_iov;
 
-	iov_count = iov_iter_count(iter);
 	ret = rw_verify_area(READ, req->file, io_kiocb_ppos(kiocb), iov_count);
 	if (unlikely(ret))
 		goto out_free;
@@ -3157,7 +3157,8 @@ static int io_read(struct io_kiocb *req, bool force_nonblock,
 		ret = 0;
 		goto out_free;
 	} else if (ret == -EAGAIN) {
-		if (!force_nonblock)
+		/* IOPOLL retry should happen for io-wq threads */
+		if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
 			goto done;
 		/* some cases will consume bytes even on error returns */
 		iov_iter_revert(iter, iov_count - iov_iter_count(iter));
@@ -3251,6 +3252,7 @@ static int io_write(struct io_kiocb *req, bool force_nonblock,
 	ret = io_import_iovec(WRITE, req, &iovec, iter, !force_nonblock);
 	if (ret < 0)
 		return ret;
+	iov_count = iov_iter_count(iter);
 	io_size = ret;
 	req->result = io_size;
 
@@ -3267,7 +3269,6 @@ static int io_write(struct io_kiocb *req, bool force_nonblock,
 	    (req->flags & REQ_F_ISREG))
 		goto copy_iov;
 
-	iov_count = iov_iter_count(iter);
 	ret = rw_verify_area(WRITE, req->file, io_kiocb_ppos(kiocb), iov_count);
 	if (unlikely(ret))
 		goto out_free;
@@ -3301,11 +3302,14 @@ static int io_write(struct io_kiocb *req, bool force_nonblock,
 	if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
 		ret2 = -EAGAIN;
 	if (!force_nonblock || ret2 != -EAGAIN) {
+		/* IOPOLL retry should happen for io-wq threads */
+		if ((req->ctx->flags & IORING_SETUP_IOPOLL) && ret2 == -EAGAIN)
+			goto copy_iov;
 		kiocb_done(kiocb, ret2, cs);
 	} else {
+copy_iov:
 		/* some cases will consume bytes even on error returns */
 		iov_iter_revert(iter, iov_count - iov_iter_count(iter));
-copy_iov:
 		ret = io_setup_async_rw(req, iovec, inline_vecs, iter, false);
 		if (!ret)
 			return -EAGAIN;
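Taken together, the hunks do two things: io_read()/io_write() now set an -EAGAIN'd IOPOLL request up for another async pass even when already running from io-wq, instead of completing it with -EAGAIN, and io_prep_async_work() hashes IOPOLL work on regular files by inode so those retries are serialized rather than racing each other. A stand-alone paraphrase of the retry decision (hypothetical helper, not the kernel code) could read:

    #include <errno.h>
    #include <stdbool.h>

    #define IORING_SETUP_IOPOLL	(1U << 0)	/* matches include/uapi/linux/io_uring.h */

    /* Hypothetical paraphrase of the check this patch adds; ctx_flags mirrors
     * req->ctx->flags and ret the low-level read/write return value. */
    static bool should_requeue_async(unsigned int ctx_flags, int ret, bool force_nonblock)
    {
    	if (ret != -EAGAIN)
    		return false;		/* completed or failed for real */
    	if (force_nonblock)
    		return true;		/* inline submission: always punt to io-wq */
    	/* io-wq worker: before this patch the request was completed with the
    	 * -EAGAIN; now an IOPOLL request goes through copy_iov for another
    	 * async pass, and the io_prep_async_work() hunk hashes that work per
    	 * inode so concurrent retries against the same file run serialized. */
    	return ctx_flags & IORING_SETUP_IOPOLL;
    }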