author     Pavel Begunkov <asml.silence@gmail.com>   2022-11-11 16:54:09 +0000
committer  Jens Axboe <axboe@kernel.dk>              2022-11-21 07:44:21 -0700
commit  d75936062049522172a107c994242b76c89777f9 (patch)
tree    c1005454a0ac3d0b9674012bd913cc66c447f1bc /io_uring
parent  e52d2e583e4ad1d5d0b804d79c2b8752eb0e5ceb (diff)
io_uring: split tw fallback into a function
When the target process is dying and so task_work_add() is not allowed,
we push all task_work items to the fallback workqueue. Move the part
responsible for moving tw items out of __io_req_task_work_add() into a
separate function. Makes it a bit cleaner and gives the compiler a bit
of extra info.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/e503dab9d7af95470ca6b214c6de17715ae4e748.1668162751.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
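For orientation, the add path after this change reads roughly as below,
condensed from the diff that follows (the IORING_SETUP_DEFER_TASKRUN
branch and the notification-flag handling are elided here):

	void __io_req_task_work_add(struct io_kiocb *req, bool allow_local)
	{
		struct io_uring_task *tctx = req->task->io_uring;
		struct io_ring_ctx *ctx = req->ctx;

		/* ... DEFER_TASKRUN branch and flag handling elided ... */

		/* Normal case: queue the item as task_work on the target task. */
		if (likely(!task_work_add(req->task, &tctx->task_work,
					  ctx->notify_method)))
			return;

		/* The task is exiting: punt everything queued so far to the
		 * per-ctx fallback workqueue via the new helper. */
		io_fallback_tw(tctx);
	}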
Diffstat (limited to 'io_uring')
-rw-r--r--   io_uring/io_uring.c   25
1 file changed, 15 insertions, 10 deletions
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 8f452dfb4f1c..9925ac08c398 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -1105,6 +1105,20 @@ void tctx_task_work(struct callback_head *cb)
 	trace_io_uring_task_work_run(tctx, count, loops);
 }
 
+static __cold void io_fallback_tw(struct io_uring_task *tctx)
+{
+	struct llist_node *node = llist_del_all(&tctx->task_list);
+	struct io_kiocb *req;
+
+	while (node) {
+		req = container_of(node, struct io_kiocb, io_task_work.node);
+		node = node->next;
+		if (llist_add(&req->io_task_work.node,
+			      &req->ctx->fallback_llist))
+			schedule_delayed_work(&req->ctx->fallback_work, 1);
+	}
+}
+
 static void io_req_local_work_add(struct io_kiocb *req)
 {
 	struct io_ring_ctx *ctx = req->ctx;
@@ -1131,7 +1145,6 @@ void __io_req_task_work_add(struct io_kiocb *req, bool allow_local)
 {
 	struct io_uring_task *tctx = req->task->io_uring;
 	struct io_ring_ctx *ctx = req->ctx;
-	struct llist_node *node;
 
 	if (allow_local && ctx->flags & IORING_SETUP_DEFER_TASKRUN) {
 		io_req_local_work_add(req);
@@ -1148,15 +1161,7 @@ void __io_req_task_work_add(struct io_kiocb *req, bool allow_local)
 	if (likely(!task_work_add(req->task, &tctx->task_work, ctx->notify_method)))
 		return;
 
-	node = llist_del_all(&tctx->task_list);
-
-	while (node) {
-		req = container_of(node, struct io_kiocb, io_task_work.node);
-		node = node->next;
-		if (llist_add(&req->io_task_work.node,
-			      &req->ctx->fallback_llist))
-			schedule_delayed_work(&req->ctx->fallback_work, 1);
-	}
+	io_fallback_tw(tctx);
 }
 
 static void __cold io_move_task_work_from_local(struct io_ring_ctx *ctx)
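Two details of the new helper are worth noting. The __cold attribute is
the "extra info" for the compiler that the commit message mentions: it
marks the fallback path as unlikely to run, letting the compiler
optimize it for size and keep it out of the hot text. And because
llist_add() returns true only when the list was empty beforehand,
schedule_delayed_work() fires once per empty-to-non-empty transition of
a context's fallback_llist rather than once per request (scheduling an
already-pending delayed work would be a harmless no-op in any case).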