author     Pavel Begunkov <asml.silence@gmail.com>    2024-03-18 22:00:34 +0000
committer  Jens Axboe <axboe@kernel.dk>               2024-04-15 08:10:24 -0600
commit     0667db14e1f029d56243aa2509ebc5f944388200 (patch)
tree       2aa5648ca732dd782c9479858e9598d1749310db /io_uring/io_uring.c
parent     23fbdde6205d9351bb52a4b8f11ec38bdbc8561a (diff)
io_uring: refactor io_req_complete_post()
Make io_req_complete_post() push all IORING_SETUP_IOPOLL requests to task_work; it's much cleaner, and that is the path that should normally be taken anyway. We couldn't do it before because there was a possibility of looping: complete_post() -> tw -> complete_post() -> ...

Also, unexport the function and inline __io_req_complete_post().

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Tested-by: Ming Lei <ming.lei@redhat.com>
Link: https://lore.kernel.org/r/ea19c032ace3e0dd96ac4d991a063b0188037014.1710799188.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
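
For orientation, here is the resulting control flow in condensed form. It mirrors the new io_req_complete_post() in the diff below; the CQE-overflow path and request teardown are elided, so this is a sketch rather than a drop-in snippet:

    /*
     * Condensed sketch of io_req_complete_post() after this patch.
     * Types and helpers are the kernel's own; teardown is elided.
     */
    static void io_req_complete_post(struct io_kiocb *req, unsigned issue_flags)
    {
    	struct io_ring_ctx *ctx = req->ctx;

    	/*
    	 * Both special CQ sync cases now funnel through task_work:
    	 * DEFER_TASKRUN (ctx->task_complete) must complete from the
    	 * submitter task, and IOPOLL completions are protected by
    	 * uring_lock, which the task_work path takes.
    	 */
    	if (ctx->task_complete || (ctx->flags & IORING_SETUP_IOPOLL)) {
    		req->io_task_work.func = io_req_task_complete;
    		io_req_task_work_add(req);
    		return;
    	}

    	/* Everything else posts its CQE inline under the CQ lock. */
    	io_cq_lock(ctx);
    	if (!(req->flags & REQ_F_CQE_SKIP))
    		io_fill_cqe_req(ctx, req);	/* overflow handling elided */
    	/* ... rsrc node / request put, see the second hunk below ... */
    	io_cq_unlock_post(ctx);
    }

Since the same patch makes the function static, this flow is now internal to io_uring.c.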
Diffstat (limited to 'io_uring/io_uring.c')
-rw-r--r--  io_uring/io_uring.c | 29
1 file changed, 11 insertions(+), 18 deletions(-)
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index a0073625ff16..951ff3b787ab 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -920,11 +920,21 @@ bool io_req_post_cqe(struct io_kiocb *req, s32 res, u32 cflags)
return posted;
}
-static void __io_req_complete_post(struct io_kiocb *req, unsigned issue_flags)
+static void io_req_complete_post(struct io_kiocb *req, unsigned issue_flags)
{
struct io_ring_ctx *ctx = req->ctx;
struct io_rsrc_node *rsrc_node = NULL;
+ /*
+ * Handle special CQ sync cases via task_work. DEFER_TASKRUN requires
+ * the submitter task context, IOPOLL protects with uring_lock.
+ */
+ if (ctx->task_complete || (ctx->flags & IORING_SETUP_IOPOLL)) {
+ req->io_task_work.func = io_req_task_complete;
+ io_req_task_work_add(req);
+ return;
+ }
+
io_cq_lock(ctx);
if (!(req->flags & REQ_F_CQE_SKIP)) {
if (!io_fill_cqe_req(ctx, req))
@@ -968,23 +978,6 @@ static void __io_req_complete_post(struct io_kiocb *req, unsigned issue_flags)
}
}
-void io_req_complete_post(struct io_kiocb *req, unsigned issue_flags)
-{
- struct io_ring_ctx *ctx = req->ctx;
-
- if (ctx->task_complete) {
- req->io_task_work.func = io_req_task_complete;
- io_req_task_work_add(req);
- } else if (!(issue_flags & IO_URING_F_UNLOCKED) ||
- !(ctx->flags & IORING_SETUP_IOPOLL)) {
- __io_req_complete_post(req, issue_flags);
- } else {
- mutex_lock(&ctx->uring_lock);
- __io_req_complete_post(req, issue_flags & ~IO_URING_F_UNLOCKED);
- mutex_unlock(&ctx->uring_lock);
- }
-}
-
void io_req_defer_failed(struct io_kiocb *req, s32 res)
__must_hold(&ctx->uring_lock)
{