author	Dylan Yudaken <dylany@meta.com>	2023-01-27 02:59:11 -0800
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2023-02-01 08:34:42 +0100
commit	5e6618e28b3336928aaa402f6e6dbdeffabad837 (patch)
tree	83e9055ba3c52248125fd794fdb1eb2d8359f52e /io_uring
parent	3238146fcf26f9600be0d8f9b1c96012442e8974 (diff)
io_uring: always prep_async for drain requests
[ Upstream commit ef5c600adb1d985513d2b612cc90403a148ff287 ]

Drain requests all go through io_drain_req, which has a quick exit in case there is nothing pending (i.e. the drain is not useful). In that case it can issue the request immediately; however, for safety it queues it through task work.

The problem is that in this case the request is run asynchronously, but the async work has not been prepared through io_req_prep_async.

This has not been a problem up to now, as the task work would always run before returning to userspace, and so the user would not have a chance to race with it.

However, with IORING_SETUP_DEFER_TASKRUN this is no longer the case: the work might be deferred, giving userspace a chance to change data being referred to in the request.

Instead, _always_ prep_async for drain requests, which is simpler anyway and removes this issue.

Cc: stable@vger.kernel.org
Fixes: c0e0d6ba25f1 ("io_uring: add IORING_SETUP_DEFER_TASKRUN")
Signed-off-by: Dylan Yudaken <dylany@meta.com>
Link: https://lore.kernel.org/r/20230127105911.2420061-1-dylany@meta.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Sasha Levin <sashal@kernel.org>
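For context, a minimal userspace sketch (not from the patch) of the pattern the fix hardens against, using liburing: with IORING_SETUP_DEFER_TASKRUN, deferred task work does not run until the task re-enters the kernel, so a drained request whose data was never stabilized via io_req_prep_async could race with userspace modifying it. The file descriptor, buffer, and sizes below are illustrative assumptions.

/*
 * Hedged sketch, assuming liburing and a kernel with
 * IORING_SETUP_DEFER_TASKRUN support (6.1+). Not from the patch.
 */
#include <liburing.h>
#include <string.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	char buf[64] = "initial data";

	/* DEFER_TASKRUN requires SINGLE_ISSUER */
	if (io_uring_queue_init(8, &ring,
				IORING_SETUP_SINGLE_ISSUER |
				IORING_SETUP_DEFER_TASKRUN) < 0)
		return 1;

	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_write(sqe, 1 /* stdout */, buf, strlen(buf), 0);
	/* IOSQE_IO_DRAIN sends the request through io_drain_req() */
	sqe->flags |= IOSQE_IO_DRAIN;
	io_uring_submit(&ring);

	/*
	 * With DEFER_TASKRUN the drained request may still be pending as
	 * deferred task work here; before this fix, rewriting buf at this
	 * point could race with the kernel reading it, because
	 * io_req_prep_async had not run for the quick-exit drain path.
	 */
	memset(buf, 'X', sizeof(buf) - 1);

	io_uring_wait_cqe(&ring, &cqe);
	io_uring_cqe_seen(&ring, cqe);
	io_uring_queue_exit(&ring);
	return 0;
}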
Diffstat (limited to 'io_uring')
-rw-r--r--	io_uring/io_uring.c	18
1 file changed, 8 insertions(+), 10 deletions(-)
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 13a60f51b283..862e05e6691d 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -1634,17 +1634,12 @@ queue:
 	}
 	spin_unlock(&ctx->completion_lock);
 
-	ret = io_req_prep_async(req);
-	if (ret) {
-fail:
-		io_req_complete_failed(req, ret);
-		return;
-	}
 	io_prep_async_link(req);
 	de = kmalloc(sizeof(*de), GFP_KERNEL);
 	if (!de) {
 		ret = -ENOMEM;
-		goto fail;
+		io_req_complete_failed(req, ret);
+		return;
 	}
 
 	spin_lock(&ctx->completion_lock);
@@ -1918,13 +1913,16 @@ static void io_queue_sqe_fallback(struct io_kiocb *req)
 		req->flags &= ~REQ_F_HARDLINK;
 		req->flags |= REQ_F_LINK;
 		io_req_complete_failed(req, req->cqe.res);
-	} else if (unlikely(req->ctx->drain_active)) {
-		io_drain_req(req);
 	} else {
 		int ret = io_req_prep_async(req);
 
-		if (unlikely(ret))
+		if (unlikely(ret)) {
 			io_req_complete_failed(req, ret);
+			return;
+		}
+
+		if (unlikely(req->ctx->drain_active))
+			io_drain_req(req);
 		else
 			io_queue_iowq(req, NULL);
 	}
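After both hunks, the fallback path reads approximately as below (reconstructed from the hunk above; lines outside the hunk are abbreviated). io_req_prep_async() now always runs before the drain/io-wq decision, so io_drain_req() can safely defer the request.

	} else {
		int ret = io_req_prep_async(req);

		if (unlikely(ret)) {
			io_req_complete_failed(req, ret);
			return;
		}

		/* async data is prepared, so deferring via drain is safe */
		if (unlikely(req->ctx->drain_active))
			io_drain_req(req);
		else
			io_queue_iowq(req, NULL);
	}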