author     Pavel Begunkov <asml.silence@gmail.com>    2021-02-10 00:03:18 +0000
committer  Jens Axboe <axboe@kernel.dk>               2021-02-10 07:28:43 -0700
commit     6ff119a6e4c3fe900e75e6667930dc086f185f2b
tree       8be5484f7b20fb5c3655c297abea4db529bcf1bf
parent     bf019da7fcbe7e42372582cc339fd1fb8e1e4fa5
io_uring: feed reqs back into alloc cache
Make io_req_free_batch(), which is used for inline-executed requests and
IOPOLL, return requests back into the allocation cache, avoiding most of
the kmalloc()/kfree() calls for those cases.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
 fs/io_uring.c | 19 ++++++++++++-------
 1 file changed, 12 insertions(+), 7 deletions(-)
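Editor's note: the sketch below illustrates the recycling pattern this patch applies, in isolation from io_uring. The names struct request, struct req_cache, cache_alloc_req() and cache_free_req() are illustrative only and do not exist in fs/io_uring.c; the kernel keeps the equivalent state in struct io_submit_state (the reqs[] array and free_reqs counter) and sizes it with IO_REQ_CACHE_SIZE.

#include <stdlib.h>

#define REQ_CACHE_SIZE 32		/* mirrors IO_REQ_CACHE_SIZE after this patch */

struct request {			/* stand-in for struct io_kiocb */
	int data;
};

/* Stand-in for the reqs[]/free_reqs pair in struct io_submit_state. */
struct req_cache {
	struct request *reqs[REQ_CACHE_SIZE];
	unsigned int free_reqs;
};

/* Allocation path: reuse a cached request before hitting the allocator. */
static struct request *cache_alloc_req(struct req_cache *c)
{
	if (c->free_reqs)
		return c->reqs[--c->free_reqs];
	return malloc(sizeof(struct request));
}

/*
 * Free path: feed the request back into the cache, as io_req_free_batch()
 * now does, and only fall back to a real free when the cache is full.
 */
static void cache_free_req(struct req_cache *c, struct request *req)
{
	if (c->free_reqs != REQ_CACHE_SIZE)
		c->reqs[c->free_reqs++] = req;
	else
		free(req);
}

int main(void)
{
	struct req_cache cache = { .free_reqs = 0 };
	struct request *req = cache_alloc_req(&cache);	/* cache empty: malloc() */

	if (!req)
		return 1;
	req->data = 1;
	cache_free_req(&cache, req);		/* recycled into the cache */
	req = cache_alloc_req(&cache);		/* same object handed back, no malloc() */
	cache_free_req(&cache, req);

	/* Drain the cache before exiting so nothing leaks. */
	while (cache.free_reqs)
		free(cache.reqs[--cache.free_reqs]);
	return 0;
}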
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 1f0b3b332d32..fe07af756186 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -266,7 +266,7 @@ struct io_sq_data {
 
 #define IO_IOPOLL_BATCH			8
 #define IO_COMPL_BATCH			32
-#define IO_REQ_CACHE_SIZE		8
+#define IO_REQ_CACHE_SIZE		32
 #define IO_REQ_ALLOC_BATCH		8
 
 struct io_comp_state {
@@ -2264,7 +2264,8 @@ static void io_req_free_batch_finish(struct io_ring_ctx *ctx,
 	percpu_ref_put_many(&ctx->refs, rb->ctx_refs);
 }
 
-static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req)
+static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req,
+			      struct io_submit_state *state)
 {
 	io_queue_next(req);
 
@@ -2278,9 +2279,13 @@ static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req)
 	rb->ctx_refs++;
 
 	io_dismantle_req(req);
-	rb->reqs[rb->to_free++] = req;
-	if (unlikely(rb->to_free == ARRAY_SIZE(rb->reqs)))
-		__io_req_free_batch_flush(req->ctx, rb);
+	if (state->free_reqs != ARRAY_SIZE(state->reqs)) {
+		state->reqs[state->free_reqs++] = req;
+	} else {
+		rb->reqs[rb->to_free++] = req;
+		if (unlikely(rb->to_free == ARRAY_SIZE(rb->reqs)))
+			__io_req_free_batch_flush(req->ctx, rb);
+	}
 }
 
 static void io_submit_flush_completions(struct io_comp_state *cs,
@@ -2305,7 +2310,7 @@ static void io_submit_flush_completions(struct io_comp_state *cs,
 
 		/* submission and completion refs */
 		if (refcount_sub_and_test(2, &req->refs))
-			io_req_free_batch(&rb, req);
+			io_req_free_batch(&rb, req, &ctx->submit_state);
 	}
 
 	io_req_free_batch_finish(ctx, &rb);
@@ -2458,7 +2463,7 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
 		(*nr_events)++;
 
 		if (refcount_dec_and_test(&req->refs))
-			io_req_free_batch(&rb, req);
+			io_req_free_batch(&rb, req, &ctx->submit_state);
 	}
 
 	io_commit_cqring(ctx);
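Editor's note on the size change: the same patch raises IO_REQ_CACHE_SIZE from 8 to 32. A plausible reading is that, with requests now being recycled on the completion side as well, matching IO_COMPL_BATCH (also 32) lets a full completion batch be fed back into the cache before any request spills over into the __io_req_free_batch_flush() path.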