author      Jens Axboe <axboe@kernel.dk>    2021-02-10 00:03:21 +0000
committer   Jens Axboe <axboe@kernel.dk>    2021-02-10 07:28:43 -0700
commit      65453d1efbd20f3825beba2a9c93ffb2ec729ece (patch)
tree        389d3828515b115544d7129a685284a46727d885 /fs/io_uring.c
parent      7cbf1722d5fc5779946ee8f338e9e38b5de15856 (diff)
io_uring: enable req cache for task_work items
task_work is run without utilizing the req alloc cache, so any deferred items don't get to take advantage of either the alloc or free side of it. With task_work now being wrapped by io_uring, we can use the ctx completion state to both use the req cache and the completion flush batching.

With this, the only request type that cannot take advantage of the req cache is IRQ driven IO for regular files / block devices. Anything else, including IOPOLL polled IO to those same types, will take advantage of it.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
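For readers skimming the diff below, here is a minimal userspace sketch of the batching pattern that __tctx_task_work() gains: completions are accumulated per ring context and flushed in one go whenever the work list switches context or runs out (in the kernel, the flush runs under ctx->uring_lock). All types and names in the sketch (struct ctx, struct work_item, flush_completions(), run_work()) are illustrative stand-ins, not the kernel API.

#include <stdio.h>

/*
 * Userspace analogue of the flush batching added to __tctx_task_work():
 * each work item belongs to a context; completions are accumulated per
 * context and flushed when the context changes or the list is exhausted.
 * In the kernel the flush posts the batched completions under
 * ctx->uring_lock; here it just reports the batch size.
 */

struct ctx {
	const char *name;
	int pending;		/* completions accumulated, not yet flushed */
};

struct work_item {
	struct ctx *ctx;
	struct work_item *next;
};

static void flush_completions(struct ctx *ctx)
{
	printf("flush %d completion(s) for %s\n", ctx->pending, ctx->name);
	ctx->pending = 0;
}

static void run_work(struct work_item *node)
{
	struct ctx *cur = NULL;

	while (node) {
		struct ctx *this_ctx = node->ctx;

		this_ctx->pending++;	/* "complete" this request */

		if (!cur) {
			cur = this_ctx;
		} else if (cur != this_ctx) {
			/* context switched: flush the previous batch */
			flush_completions(cur);
			cur = this_ctx;
		}
		node = node->next;
	}
	if (cur && cur->pending)
		flush_completions(cur);	/* flush whatever is left over */
}

int main(void)
{
	struct ctx a = { "ctx A", 0 }, b = { "ctx B", 0 };
	struct work_item w3 = { &b, NULL };
	struct work_item w2 = { &a, &w3 };
	struct work_item w1 = { &a, &w2 };

	run_work(&w1);
	return 0;
}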
Diffstat (limited to 'fs/io_uring.c')
-rw-r--r--    fs/io_uring.c    22
1 file changed, 21 insertions(+), 1 deletion(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index bfc8fcd93504..fe8921a728b0 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1051,6 +1051,8 @@ static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
const struct iovec *fast_iov,
struct iov_iter *iter, bool force);
static void io_req_task_queue(struct io_kiocb *req);
+static void io_submit_flush_completions(struct io_comp_state *cs,
+ struct io_ring_ctx *ctx);
static struct kmem_cache *req_cachep;
@@ -2139,6 +2141,7 @@ static inline struct io_kiocb *io_req_find_next(struct io_kiocb *req)
static bool __tctx_task_work(struct io_uring_task *tctx)
{
+ struct io_ring_ctx *ctx = NULL;
struct io_wq_work_list list;
struct io_wq_work_node *node;
@@ -2153,11 +2156,28 @@ static bool __tctx_task_work(struct io_uring_task *tctx)
node = list.first;
while (node) {
struct io_wq_work_node *next = node->next;
+ struct io_ring_ctx *this_ctx;
struct io_kiocb *req;
req = container_of(node, struct io_kiocb, io_task_work.node);
+ this_ctx = req->ctx;
req->task_work.func(&req->task_work);
node = next;
+
+ if (!ctx) {
+ ctx = this_ctx;
+ } else if (ctx != this_ctx) {
+ mutex_lock(&ctx->uring_lock);
+ io_submit_flush_completions(&ctx->submit_state.comp, ctx);
+ mutex_unlock(&ctx->uring_lock);
+ ctx = this_ctx;
+ }
+ }
+
+ if (ctx && ctx->submit_state.comp.nr) {
+ mutex_lock(&ctx->uring_lock);
+ io_submit_flush_completions(&ctx->submit_state.comp, ctx);
+ mutex_unlock(&ctx->uring_lock);
}
return list.first != NULL;
@@ -2280,7 +2300,7 @@ static void __io_req_task_submit(struct io_kiocb *req)
if (!ctx->sqo_dead &&
!__io_sq_thread_acquire_mm(ctx) &&
!__io_sq_thread_acquire_files(ctx))
- __io_queue_sqe(req, NULL);
+ __io_queue_sqe(req, &ctx->submit_state.comp);
else
__io_req_task_cancel(req, -EFAULT);
mutex_unlock(&ctx->uring_lock);
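
The final hunk applies the same batching to requests submitted from task_work: __io_queue_sqe() is now handed &ctx->submit_state.comp instead of NULL, so completions generated on that path can go through the ctx completion state, and hence the req cache and completion flush batching described in the commit message, with the flush itself driven by the per-context batching added to __tctx_task_work() in the previous hunk.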