author		Pavel Begunkov <asml.silence@gmail.com>	2023-08-24 23:53:29 +0100
committer	Jens Axboe <axboe@kernel.dk>	2023-08-24 17:16:19 -0600
commit		ec26c225f06f5993f8891fa6c79fab3c92981181
tree		ac8f3e43760f70381360a290840a743955e8278a /io_uring/rw.c
parent		54927baf6c195fb512ac38b26a041ca44edb2e29
io_uring: merge iopoll and normal completion paths
io_do_iopoll() and io_submit_flush_completions() are pretty similar: both fill CQEs and then free a list of requests. Don't duplicate that; make iopoll use __io_submit_flush_completions(), which also helps with inlining and other optimisations.

For that, we first need to find all completed iopoll requests, splice them off the iopoll list, and only then pass them down. This adds one extra list traversal, which should be fine as the requests will stay hot in cache.

CQ locking is already conditional; introduce ->lockless_cq and skip locking for IOPOLL, as it's protected by ->uring_lock. We also add a wakeup optimisation for IOPOLL to __io_cq_unlock_post(), so that it works just like io_cqring_ev_posted_iopoll() did.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/3840473f5e8a960de35b77292026691880f6bdbc.1692916914.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
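
[Editor's note] The splice-then-flush idea described above is visible in the second hunk below: the leading run of completed requests is cut off the iopoll list as one contiguous sublist and handed to the shared flush path, instead of being completed and freed by an iopoll-only routine. The following is a minimal userspace sketch of that pattern; the list and function names are illustrative stand-ins, not the kernel's wq_list/io_uring helpers.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct req {
	int id;
	bool completed;
	struct req *next;
};

struct req_list {
	struct req *first;
};

/* Shared flush path (think __io_submit_flush_completions()): post a
 * completion for every request on the spliced sublist, then free it. */
static void flush_completions(struct req *head)
{
	while (head) {
		struct req *next = head->next;

		printf("CQE for request %d\n", head->id);
		free(head);
		head = next;
	}
}

/* Iopoll side: find the leading run of completed requests, splice it off
 * the poll list in one cut, and reuse the common flush path for it. */
static int poll_and_flush(struct req_list *poll_list)
{
	struct req *req = poll_list->first;
	struct req *last = NULL;
	int nr_events = 0;

	while (req && req->completed) {
		last = req;
		req = req->next;
		nr_events++;
	}
	if (!nr_events)
		return 0;

	/* Cut [first, last] out of the list; 'req' becomes the new head. */
	struct req *spliced = poll_list->first;

	last->next = NULL;
	poll_list->first = req;

	flush_completions(spliced);
	return nr_events;
}

int main(void)
{
	struct req_list poll_list = { NULL };
	struct req **tail = &poll_list.first;

	/* Two completed requests followed by one still in flight. */
	for (int i = 0; i < 3; i++) {
		struct req *r = calloc(1, sizeof(*r));

		r->id = i;
		r->completed = (i < 2);
		*tail = r;
		tail = &r->next;
	}

	printf("flushed %d events\n", poll_and_flush(&poll_list));
	free(poll_list.first);	/* the request that was not yet completed */
	return 0;
}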
Diffstat (limited to 'io_uring/rw.c')
-rw-r--r--	io_uring/rw.c | 24 +++++-------------------
1 file changed, 5 insertions(+), 19 deletions(-)
diff --git a/io_uring/rw.c b/io_uring/rw.c
index 20140d3505f1..0a1e515f0510 100644
--- a/io_uring/rw.c
+++ b/io_uring/rw.c
@@ -983,13 +983,6 @@ copy_iov:
 	return ret;
 }
 
-static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
-{
-	if (ctx->flags & IORING_SETUP_SQPOLL)
-		io_cqring_wake(ctx);
-	io_commit_cqring_flush(ctx);
-}
-
 void io_rw_fail(struct io_kiocb *req)
 {
 	int res;
@@ -1060,24 +1053,17 @@ int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
 		if (!smp_load_acquire(&req->iopoll_completed))
 			break;
 		nr_events++;
-		if (unlikely(req->flags & REQ_F_CQE_SKIP))
-			continue;
-
 		req->cqe.flags = io_put_kbuf(req, 0);
-		if (unlikely(!io_fill_cqe_req(ctx, req))) {
-			spin_lock(&ctx->completion_lock);
-			io_req_cqe_overflow(req);
-			spin_unlock(&ctx->completion_lock);
-		}
 	}
-
 	if (unlikely(!nr_events))
 		return 0;
 
-	io_commit_cqring(ctx);
-	io_cqring_ev_posted_iopoll(ctx);
 	pos = start ? start->next : ctx->iopoll_list.first;
 	wq_list_cut(&ctx->iopoll_list, prev, start);
-	io_free_batch_list(ctx, pos);
+
+	if (WARN_ON_ONCE(!wq_list_empty(&ctx->submit_state.compl_reqs)))
+		return 0;
+	ctx->submit_state.compl_reqs.first = pos;
+	__io_submit_flush_completions(ctx);
 	return nr_events;
 }
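
[Editor's note] The first hunk can drop io_cqring_ev_posted_iopoll() because, per the commit message, its behaviour (flush the CQ ring and wake waiters only for SQPOLL) moves into the common unlock/post path, which now skips ->completion_lock when ->lockless_cq is set. That side of the change lives in io_uring.c and is outside this diff, which is limited to rw.c. Below is a rough, self-contained sketch of the conditional-locking idea only; all names and types are hypothetical stand-ins, not the kernel code.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical ring context: 'lockless_cq' marks rings whose CQ is already
 * serialised by an outer lock (the IOPOLL/->uring_lock case), and 'sqpoll'
 * marks rings whose waiters still need a wakeup after posting. */
struct ring_ctx {
	pthread_mutex_t completion_lock;
	bool lockless_cq;
	bool sqpoll;
	int cq_tail;
};

/* Post a batch of completions: take completion_lock only when the ring is
 * not already protected by an outer lock, and wake waiters only when the
 * ring type needs it, mirroring what io_cqring_ev_posted_iopoll() did. */
static void post_completions(struct ring_ctx *ctx, int nr)
{
	if (!ctx->lockless_cq)
		pthread_mutex_lock(&ctx->completion_lock);

	ctx->cq_tail += nr;	/* stand-in for filling and committing CQEs */

	if (!ctx->lockless_cq)
		pthread_mutex_unlock(&ctx->completion_lock);

	/* Lockless (IOPOLL-style) rings only need a wakeup when SQPOLL is
	 * also in use; other rings always wake their waiters. */
	if (!ctx->lockless_cq || ctx->sqpoll)
		printf("wake waiters, cq_tail=%d\n", ctx->cq_tail);
}

int main(void)
{
	struct ring_ctx iopoll_ring = {
		.completion_lock = PTHREAD_MUTEX_INITIALIZER,
		.lockless_cq = true,	/* CQ already serialised by an outer lock */
	};
	struct ring_ctx normal_ring = {
		.completion_lock = PTHREAD_MUTEX_INITIALIZER,
	};

	post_completions(&iopoll_ring, 4);	/* lock skipped, no wakeup */
	post_completions(&normal_ring, 2);	/* takes the lock and wakes waiters */
	return 0;
}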