author		Pavel Begunkov <asml.silence@gmail.com>	2020-11-05 14:06:19 +0000
committer	Jens Axboe <axboe@kernel.dk>			2020-11-05 09:15:24 -0700
commit		ef9865a442286e2737f37f56eb54c12ef8465905 (patch)
tree		453e5741e011199802f722057090d626b0807741 /fs
parent		99b328084f6a98bcee9fcd423c82ccfd52115da5 (diff)
io_uring: don't forget to task-cancel drained reqs
If a long-standing request from one task blocks the execution of deferred
requests, and the defer list contains requests from another task (all of
them file-less), then a call to __io_uring_task_cancel() by that other
task will sleep until the long-standing request completes, and that may
take a long time.

E.g.
tsk1: req1/read(empty_pipe) -> tsk2: req(DRAIN)

Then __io_uring_task_cancel(tsk2) waits for req1's completion. It even
seems possible to manufacture a complicated case with many tasks sharing
many rings that locks them up forever.

Cancel deferred requests for __io_uring_task_cancel() as well.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
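To make the failure mode concrete, here is a minimal userspace sketch of
the scenario above, written against liburing. It is not part of this
commit; the NOP opcode, pipe, and thread layout are illustrative
assumptions. Two threads share one ring: tsk1 (main) queues a read on an
empty pipe, and tsk2 queues a request marked IOSQE_IO_DRAIN before
exiting.

/*
 * Hypothetical reproducer sketch (not from this commit), assuming
 * liburing. tsk1's read of an empty pipe never completes, so tsk2's
 * drained NOP sits in the defer list; before this patch, tsk2's
 * exit-time __io_uring_task_cancel() slept until the read finished.
 */
#include <liburing.h>
#include <pthread.h>
#include <unistd.h>

static struct io_uring ring;
static int pipefd[2];
static char buf[16];

static void *tsk2(void *arg)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);

	(void)arg;
	io_uring_prep_nop(sqe);
	sqe->flags |= IOSQE_IO_DRAIN;	/* deferred behind tsk1's read */
	io_uring_submit(&ring);
	return NULL;	/* thread exit triggers __io_uring_task_cancel() */
}

int main(void)
{
	struct io_uring_sqe *sqe;
	pthread_t t;

	pipe(pipefd);
	io_uring_queue_init(8, &ring, 0);

	/* tsk1 (main): read on an empty pipe, blocks indefinitely */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_read(sqe, pipefd[0], buf, sizeof(buf), 0);
	io_uring_submit(&ring);

	pthread_create(&t, NULL, tsk2, NULL);
	pthread_join(&t, NULL);	/* pre-patch: stuck until the read completes */

	write(pipefd[1], "x", 1);	/* would unblock the read */
	io_uring_queue_exit(&ring);
	return 0;
}

With this change, tsk2's exit cancels its own deferred request instead of
waiting on tsk1's read.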
Diffstat (limited to 'fs')
-rw-r--r--	fs/io_uring.c	| 10 ++++++++--
1 file changed, 8 insertions(+), 2 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index d6f7f8b3837f..3d489cf31926 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -8496,6 +8496,7 @@ static void io_attempt_cancel(struct io_ring_ctx *ctx, struct io_kiocb *req)
 }
 
 static void io_cancel_defer_files(struct io_ring_ctx *ctx,
+				  struct task_struct *task,
 				  struct files_struct *files)
 {
 	struct io_defer_entry *de = NULL;
@@ -8503,7 +8504,8 @@ static void io_cancel_defer_files(struct io_ring_ctx *ctx,
 
 	spin_lock_irq(&ctx->completion_lock);
 	list_for_each_entry_reverse(de, &ctx->defer_list, list) {
-		if (io_match_files(de->req, files)) {
+		if (io_task_match(de->req, task) &&
+		    io_match_files(de->req, files)) {
 			list_cut_position(&list, &ctx->defer_list, &de->list);
 			break;
 		}
@@ -8529,7 +8531,6 @@ static bool io_uring_cancel_files(struct io_ring_ctx *ctx,
 	if (list_empty_careful(&ctx->inflight_list))
 		return false;
 
-	io_cancel_defer_files(ctx, files);
 	/* cancel all at once, should be faster than doing it one by one*/
 	io_wq_cancel_cb(ctx->io_wq, io_wq_files_match, files, true);
 
@@ -8621,6 +8622,11 @@ static void io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
 		io_sq_thread_park(ctx->sq_data);
 	}
 
+	if (files)
+		io_cancel_defer_files(ctx, NULL, files);
+	else
+		io_cancel_defer_files(ctx, task, NULL);
+
 	io_cqring_overflow_flush(ctx, true, task, files);
 	while (__io_uring_cancel_task_requests(ctx, task, files)) {