| author | Pavel Begunkov <asml.silence@gmail.com> | 2022-06-15 11:23:02 +0100 |
|---|---|---|
| committer | Jens Axboe <axboe@kernel.dk> | 2022-06-15 05:06:09 -0600 |
| commit | 91ef75a7db0d0855284b78d60d3fcec5c353ec5a (patch) | |
| tree | 827980ea8fc1202829b6402731d9207e9e720eb2 /fs/io_uring.c | |
| parent | d884b6498d2f022098502e106d5a45ab635f2e9a (diff) | |
io_uring: get rid of __io_fill_cqe{32}_req()
There are too many CQE-filling helpers; kill __io_fill_cqe{32}_req(),
use __io_fill_cqe{32}_req_filled() instead, and then rename it. This
will simplify the fixes in the following patches.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/c18e0d191014fb574f24721245e4e3fddd0b6917.1655287457.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'fs/io_uring.c')
-rw-r--r-- | fs/io_uring.c | 70 |
1 file changed, 21 insertions, 49 deletions
```diff
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 1b0b6099e717..654c2f897497 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2464,8 +2464,8 @@ static inline bool __io_fill_cqe(struct io_ring_ctx *ctx, u64 user_data,
 	return io_cqring_event_overflow(ctx, user_data, res, cflags, 0, 0);
 }
 
-static inline bool __io_fill_cqe_req_filled(struct io_ring_ctx *ctx,
-					    struct io_kiocb *req)
+static inline bool __io_fill_cqe_req(struct io_ring_ctx *ctx,
+				     struct io_kiocb *req)
 {
 	struct io_uring_cqe *cqe;
 
@@ -2486,8 +2486,8 @@ static inline bool __io_fill_cqe_req_filled(struct io_ring_ctx *ctx,
 					req->cqe.res, req->cqe.flags, 0, 0);
 }
 
-static inline bool __io_fill_cqe32_req_filled(struct io_ring_ctx *ctx,
-					      struct io_kiocb *req)
+static inline bool __io_fill_cqe32_req(struct io_ring_ctx *ctx,
+				       struct io_kiocb *req)
 {
 	struct io_uring_cqe *cqe;
 	u64 extra1 = req->extra1;
@@ -2513,44 +2513,6 @@ static inline bool __io_fill_cqe32_req_filled(struct io_ring_ctx *ctx,
 					req->cqe.flags, extra1, extra2);
 }
 
-static inline bool __io_fill_cqe_req(struct io_kiocb *req, s32 res, u32 cflags)
-{
-	trace_io_uring_complete(req->ctx, req, req->cqe.user_data, res, cflags, 0, 0);
-	return __io_fill_cqe(req->ctx, req->cqe.user_data, res, cflags);
-}
-
-static inline void __io_fill_cqe32_req(struct io_kiocb *req, s32 res, u32 cflags,
-				       u64 extra1, u64 extra2)
-{
-	struct io_ring_ctx *ctx = req->ctx;
-	struct io_uring_cqe *cqe;
-
-	if (WARN_ON_ONCE(!(ctx->flags & IORING_SETUP_CQE32)))
-		return;
-	if (req->flags & REQ_F_CQE_SKIP)
-		return;
-
-	trace_io_uring_complete(ctx, req, req->cqe.user_data, res, cflags,
-				extra1, extra2);
-
-	/*
-	 * If we can't get a cq entry, userspace overflowed the
-	 * submission (by quite a lot). Increment the overflow count in
-	 * the ring.
-	 */
-	cqe = io_get_cqe(ctx);
-	if (likely(cqe)) {
-		WRITE_ONCE(cqe->user_data, req->cqe.user_data);
-		WRITE_ONCE(cqe->res, res);
-		WRITE_ONCE(cqe->flags, cflags);
-		WRITE_ONCE(cqe->big_cqe[0], extra1);
-		WRITE_ONCE(cqe->big_cqe[1], extra2);
-		return;
-	}
-
-	io_cqring_event_overflow(ctx, req->cqe.user_data, res, cflags, extra1, extra2);
-}
-
 static noinline bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data,
 				     s32 res, u32 cflags)
 {
@@ -2593,16 +2555,24 @@ static void __io_req_complete_put(struct io_kiocb *req)
 static void __io_req_complete_post(struct io_kiocb *req, s32 res,
 				   u32 cflags)
 {
-	if (!(req->flags & REQ_F_CQE_SKIP))
-		__io_fill_cqe_req(req, res, cflags);
+	if (!(req->flags & REQ_F_CQE_SKIP)) {
+		req->cqe.res = res;
+		req->cqe.flags = cflags;
+		__io_fill_cqe_req(req->ctx, req);
+	}
 	__io_req_complete_put(req);
 }
 
 static void __io_req_complete_post32(struct io_kiocb *req, s32 res,
 				     u32 cflags, u64 extra1, u64 extra2)
 {
-	if (!(req->flags & REQ_F_CQE_SKIP))
-		__io_fill_cqe32_req(req, res, cflags, extra1, extra2);
+	if (!(req->flags & REQ_F_CQE_SKIP)) {
+		req->cqe.res = res;
+		req->cqe.flags = cflags;
+		req->extra1 = extra1;
+		req->extra2 = extra2;
+		__io_fill_cqe32_req(req->ctx, req);
+	}
 	__io_req_complete_put(req);
 }
 
@@ -3207,9 +3177,9 @@ static void __io_submit_flush_completions(struct io_ring_ctx *ctx)
 
 		if (!(req->flags & REQ_F_CQE_SKIP)) {
 			if (!(ctx->flags & IORING_SETUP_CQE32))
-				__io_fill_cqe_req_filled(ctx, req);
+				__io_fill_cqe_req(ctx, req);
 			else
-				__io_fill_cqe32_req_filled(ctx, req);
+				__io_fill_cqe32_req(ctx, req);
 		}
 	}
 
@@ -3329,7 +3299,9 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
 		nr_events++;
 		if (unlikely(req->flags & REQ_F_CQE_SKIP))
 			continue;
-		__io_fill_cqe_req(req, req->cqe.res, io_put_kbuf(req, 0));
+
+		req->cqe.flags = io_put_kbuf(req, 0);
+		__io_fill_cqe_req(req->ctx, req);
 	}
 
 	if (unlikely(!nr_events))
```
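For readers following the refactor, here is a minimal userspace sketch of the calling convention this patch converges on: callers stash the completion result in the request first, and a single helper then fills the CQE from the request alone. This is not kernel code; the types and `fill_cqe_req()` are simplified, hypothetical stand-ins for `io_kiocb` and `__io_fill_cqe_req()`.

```c
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the kernel's struct io_cqe. */
struct io_cqe {
	uint64_t user_data;
	int32_t  res;
	uint32_t flags;
};

/* Simplified stand-in for struct io_kiocb: completion state
 * lives in the request itself, not in helper arguments. */
struct io_kiocb {
	struct io_cqe cqe;
};

/* Models the surviving helper: after this patch, the one
 * __io_fill_cqe_req() reads res/flags out of req->cqe instead
 * of taking them as parameters. */
static void fill_cqe_req(struct io_kiocb *req)
{
	printf("cqe: user_data=%llu res=%d flags=%u\n",
	       (unsigned long long)req->cqe.user_data,
	       req->cqe.res, req->cqe.flags);
}

int main(void)
{
	struct io_kiocb req = { .cqe = { .user_data = 42 } };

	/* Caller stores the result first, then calls the one helper,
	 * mirroring __io_req_complete_post() after this patch. */
	req.cqe.res   = 0;
	req.cqe.flags = 0;
	fill_cqe_req(&req);
	return 0;
}
```

Centralizing the state in `req->cqe` is what lets the flush, post, and iopoll paths in the diff above share one helper, regardless of where the result was produced.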