path: root/io_uring/uring_cmd.c
author	Pavel Begunkov <asml.silence@gmail.com>	2024-03-18 22:00:25 +0000
committer	Jens Axboe <axboe@kernel.dk>	2024-04-15 08:10:24 -0600
commit	e1eef2e56cb0db143c731b1cdc220980256d2d99 (patch)
tree	f8cc3baae991eb9d22992f2525f49c3aa2d41acd /io_uring/uring_cmd.c
parent	6edd953b6ec758c98e9dba7234634831f1f6510d (diff)
io_uring/cmd: fix tw <-> issue_flags conversion
!IO_URING_F_UNLOCKED does not translate to availability of the deferred
completion infra; IO_URING_F_COMPLETE_DEFER does, and that is what we should
pass and look for in order to use io_req_complete_defer() and other variants.
Luckily, it's not a real problem, as two wrongs actually made it right, at
least as far as io_uring_cmd_work() goes.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Reviewed-by: Ming Lei <ming.lei@redhat.com>
Tested-by: Ming Lei <ming.lei@redhat.com>
Link: https://lore.kernel.org/r/aef76d34fe9410df8ecc42a14544fd76cd9d8b9e.1710799188.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
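For context, a minimal sketch of the completion decision this fix enforces:
only IO_URING_F_COMPLETE_DEFER signals that the deferred completion
infrastructure may be used, while the mere absence of IO_URING_F_UNLOCKED
does not. The wrapper name complete_cmd_req() is hypothetical; the helpers it
calls are the io_uring-internal ones visible in the diff below.

/*
 * Sketch only, not part of this patch: the io_uring-internal completion
 * helpers are real, the wrapper complete_cmd_req() is made up.
 */
static void complete_cmd_req(struct io_kiocb *req, unsigned int issue_flags)
{
	if (issue_flags & IO_URING_F_COMPLETE_DEFER) {
		/* caller holds ctx->uring_lock and will flush deferred completions */
		io_req_complete_defer(req);
	} else {
		/* no such guarantee: punt the completion to task_work */
		req->io_task_work.func = io_req_task_complete;
		io_req_task_work_add(req);
	}
}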
Diffstat (limited to 'io_uring/uring_cmd.c')
-rw-r--r--	io_uring/uring_cmd.c	13
1 file changed, 10 insertions, 3 deletions
diff --git a/io_uring/uring_cmd.c b/io_uring/uring_cmd.c
index 7c1c58c5837e..759f919b14a9 100644
--- a/io_uring/uring_cmd.c
+++ b/io_uring/uring_cmd.c
@@ -36,7 +36,8 @@ bool io_uring_try_cancel_uring_cmd(struct io_ring_ctx *ctx,
 			/* ->sqe isn't available if no async data */
 			if (!req_has_async_data(req))
 				cmd->sqe = NULL;
-			file->f_op->uring_cmd(cmd, IO_URING_F_CANCEL);
+			file->f_op->uring_cmd(cmd, IO_URING_F_CANCEL |
+						   IO_URING_F_COMPLETE_DEFER);
 			ret = true;
 		}
 	}
@@ -86,7 +87,11 @@ EXPORT_SYMBOL_GPL(io_uring_cmd_mark_cancelable);
 static void io_uring_cmd_work(struct io_kiocb *req, struct io_tw_state *ts)
 {
 	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
-	unsigned issue_flags = ts->locked ? 0 : IO_URING_F_UNLOCKED;
+	unsigned issue_flags = IO_URING_F_UNLOCKED;
+
+	/* locked task_work executor checks the deferred list completion */
+	if (ts->locked)
+		issue_flags = IO_URING_F_COMPLETE_DEFER;
 
 	ioucmd->task_work_cb(ioucmd, issue_flags);
 }
@@ -130,7 +135,9 @@ void io_uring_cmd_done(struct io_uring_cmd *ioucmd, ssize_t ret, ssize_t res2,
 	if (req->ctx->flags & IORING_SETUP_IOPOLL) {
 		/* order with io_iopoll_req_issued() checking ->iopoll_complete */
 		smp_store_release(&req->iopoll_completed, 1);
-	} else if (!(issue_flags & IO_URING_F_UNLOCKED)) {
+	} else if (issue_flags & IO_URING_F_COMPLETE_DEFER) {
+		if (WARN_ON_ONCE(issue_flags & IO_URING_F_UNLOCKED))
+			return;
 		io_req_complete_defer(req);
 	} else {
 		req->io_task_work.func = io_req_task_complete;
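With the first hunk, the cancellation loop now also passes
IO_URING_F_COMPLETE_DEFER, so a command implementation that completes from its
->uring_cmd callback can simply forward issue_flags and let io_uring_cmd_done()
pick the right completion path. A hypothetical handler sketch, not from this
patch (example_uring_cmd() and its cancel handling are illustrative only):

/* Hypothetical ->uring_cmd handler; only the cancel branch is shown. */
static int example_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
{
	if (issue_flags & IO_URING_F_CANCEL) {
		/*
		 * issue_flags now carries IO_URING_F_COMPLETE_DEFER from
		 * io_uring_try_cancel_uring_cmd(), so io_uring_cmd_done()
		 * may complete via the deferred path.
		 */
		io_uring_cmd_done(cmd, -ECANCELED, 0, issue_flags);
		return 0;
	}
	/* normal issue path elided */
	return -EIOCBQUEUED;
}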