author     Jens Axboe <axboe@kernel.dk>    2024-11-03 10:23:38 -0700
committer  Jens Axboe <axboe@kernel.dk>    2024-11-06 13:55:38 -0700
commit     b6f58a3f4aa8dba424356c7a69388a81f4459300 (patch)
tree       762afa454110f88f4ef7d5e0b7530486710ad8fa /io_uring/timeout.c
parent     6ed368cc5d5d255ffffad33cfa02ecf2b77b7c44 (diff)
io_uring: move struct io_kiocb from task_struct to io_uring_task
Rather than store the task_struct itself in struct io_kiocb, store the
io_uring specific task context (struct io_uring_task). The lifetimes are
the same as far as io_uring is concerned, and this avoids some
dereferences through the task_struct. For the hot path of putting task
references, we can dereference req->tctx instead, which that function
needs anyway regardless of whether the references are local or remote.
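To make the dereference change concrete, here is a minimal illustrative
sketch of the hot path described above. put_req_task_ref() is a made-up
name used only for illustration; the real io_uring put path does more work.

/*
 * Illustrative fragment only: shows the dereference change, not the
 * actual io_uring helper.
 */
static inline void put_req_task_ref(struct io_kiocb *req)
{
	/* before this patch: two hops through the task_struct */
	/* struct io_uring_task *tctx = req->task->io_uring; */

	/* after this patch: the io_uring task context lives on the request */
	struct io_uring_task *tctx = req->tctx;

	percpu_counter_sub(&tctx->inflight, 1);
}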
This is mostly straightforward, except the original task PF_EXITING
check needs a bit of tweaking. task_work is _always_ run from the
originating task, except in the fallback case, where it is run from a
kernel thread. Replace the potentially racy (in the fallback-work case)
checks of req->task->flags with checks of current->flags. Either current
is still the original task, in which case PF_EXITING will be sane, or it
has PF_KTHREAD set, in which case it is fallback work. Both cases should
prevent moving forward with the given request.
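A minimal sketch of that replacement check, assuming a helper along the
lines of the io_should_terminate_tw() called in the diff below (the exact
in-tree implementation may differ):

/*
 * Sketch of a task_work termination check based on current->flags, as
 * described above; shown for illustration, the in-tree helper may differ.
 */
static inline bool io_should_terminate_tw(void)
{
	/*
	 * PF_EXITING: task_work is running from the (exiting) originating task.
	 * PF_KTHREAD: fallback work, running from a kernel thread.
	 * In either case the request should not proceed.
	 */
	return current->flags & (PF_KTHREAD | PF_EXITING);
}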
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'io_uring/timeout.c')
-rw-r--r--  io_uring/timeout.c  10
1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/io_uring/timeout.c b/io_uring/timeout.c
index 18286cb53a69..5b12bd6a804c 100644
--- a/io_uring/timeout.c
+++ b/io_uring/timeout.c
@@ -300,16 +300,18 @@ static void io_req_task_link_timeout(struct io_kiocb *req, struct io_tw_state *t
 {
 	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
 	struct io_kiocb *prev = timeout->prev;
-	int ret = -ENOENT;
+	int ret;
 
 	if (prev) {
-		if (!(req->task->flags & PF_EXITING)) {
+		if (!io_should_terminate_tw()) {
 			struct io_cancel_data cd = {
 				.ctx		= req->ctx,
 				.data		= prev->cqe.user_data,
 			};
 
-			ret = io_try_cancel(req->task->io_uring, &cd, 0);
+			ret = io_try_cancel(req->tctx, &cd, 0);
+		} else {
+			ret = -ECANCELED;
 		}
 		io_req_set_res(req, ret ?: -ETIME, 0);
 		io_req_task_complete(req, ts);
@@ -643,7 +645,7 @@ static bool io_match_task(struct io_kiocb *head, struct io_uring_task *tctx,
 {
 	struct io_kiocb *req;
 
-	if (tctx && head->task->io_uring != tctx)
+	if (tctx && head->tctx != tctx)
 		return false;
 	if (cancel_all)
 		return true;