author		Jens Axboe <axboe@kernel.dk>	2020-07-05 15:04:17 -0600
committer	Jens Axboe <axboe@kernel.dk>	2020-07-05 15:04:17 -0600
commit		58c6a581decbcdd7f49eb7cab27ee14cef247fd5 (patch)
tree		8fc9e7bfb99cf1b5cdd59e4e5df8af91c0c83f40 /fs/io_uring.c
parent		8eb06d7e8dd853d70668617dda57de4f6cebe651 (diff)
parent		b7db41c9e03b5189bc94993bd50e4506ac9e34c1 (diff)
Merge branch 'io_uring-5.8' into for-5.9/io_uring
Pull in task_work changes from the 5.8 series, as we'll need to apply
the same kind of changes to other parts in the 5.9 branch.

* io_uring-5.8:
  io_uring: fix regression with always ignoring signals in io_cqring_wait()
  io_uring: use signal based task_work running
  task_work: teach task_work_add() to do signal_wake_up()
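For context on the notification modes used in the diff below: the new helper picks a plain wakeup for SQPOLL rings and escalates to TWA_SIGNAL only when an eventfd is registered for CQ notifications. The following is a minimal userspace sketch of those two ring configurations, assuming liburing is available and that SQPOLL is permitted for the calling user; it is illustrative only and not part of this commit.

/*
 * Illustrative userspace sketch only (not part of this commit); assumes
 * liburing is installed and the kernel allows SQPOLL for this user.
 */
#include <liburing.h>
#include <sys/eventfd.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct io_uring sqpoll_ring, evfd_ring;
	int efd, ret;

	/* SQPOLL ring: the kernel SQ thread drives the ring, so task_work
	 * queued for it only needs a plain wakeup (notify = 0 below). */
	ret = io_uring_queue_init(8, &sqpoll_ring, IORING_SETUP_SQPOLL);
	if (ret < 0)
		fprintf(stderr, "SQPOLL setup failed (%d), may need privileges\n", ret);
	else
		io_uring_queue_exit(&sqpoll_ring);

	/* Ring with a registered eventfd: completions can gate other kernel
	 * wait conditions, which is the case the helper uses TWA_SIGNAL for. */
	ret = io_uring_queue_init(8, &evfd_ring, 0);
	if (ret < 0)
		return 1;
	efd = eventfd(0, 0);
	if (efd >= 0)
		io_uring_register_eventfd(&evfd_ring, efd);

	if (efd >= 0)
		close(efd);
	io_uring_queue_exit(&evfd_ring);
	return 0;
}

With neither SQPOLL nor an eventfd, the helper keeps the default TWA_RESUME, since request completions then have no dependencies on other kernel wait conditions.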
Diffstat (limited to 'fs/io_uring.c')
-rw-r--r--	fs/io_uring.c	43
1 file changed, 37 insertions(+), 6 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index cfad2acd4d86..7426e4f23f9b 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -4417,6 +4417,29 @@ struct io_poll_table {
 	int error;
 };
 
+static int io_req_task_work_add(struct io_kiocb *req, struct callback_head *cb)
+{
+	struct task_struct *tsk = req->task;
+	struct io_ring_ctx *ctx = req->ctx;
+	int ret, notify = TWA_RESUME;
+
+	/*
+	 * SQPOLL kernel thread doesn't need notification, just a wakeup.
+	 * If we're not using an eventfd, then TWA_RESUME is always fine,
+	 * as we won't have dependencies between request completions for
+	 * other kernel wait conditions.
+	 */
+	if (ctx->flags & IORING_SETUP_SQPOLL)
+		notify = 0;
+	else if (ctx->cq_ev_fd)
+		notify = TWA_SIGNAL;
+
+	ret = task_work_add(tsk, cb, notify);
+	if (!ret)
+		wake_up_process(tsk);
+	return ret;
+}
+
 static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
 			   __poll_t mask, task_work_func_t func)
 {
@@ -4440,13 +4463,13 @@ static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
 	 * of executing it. We can't safely execute it anyway, as we may not
 	 * have the needed state needed for it anyway.
 	 */
-	ret = task_work_add(tsk, &req->task_work, true);
+	ret = io_req_task_work_add(req, &req->task_work);
 	if (unlikely(ret)) {
 		WRITE_ONCE(poll->canceled, true);
 		tsk = io_wq_get_task(req->ctx->io_wq);
-		task_work_add(tsk, &req->task_work, true);
+		task_work_add(tsk, &req->task_work, 0);
+		wake_up_process(tsk);
 	}
-	wake_up_process(tsk);
 	return 1;
 }
@@ -6486,15 +6509,23 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
 	do {
 		prepare_to_wait_exclusive(&ctx->wait, &iowq.wq,
 						TASK_INTERRUPTIBLE);
+		/* make sure we run task_work before checking for signals */
 		if (current->task_works)
 			task_work_run();
-		if (io_should_wake(&iowq, false))
-			break;
-		schedule();
 		if (signal_pending(current)) {
+			if (current->jobctl & JOBCTL_TASK_WORK) {
+				spin_lock_irq(&current->sighand->siglock);
+				current->jobctl &= ~JOBCTL_TASK_WORK;
+				recalc_sigpending();
+				spin_unlock_irq(&current->sighand->siglock);
+				continue;
+			}
 			ret = -EINTR;
 			break;
 		}
+		if (io_should_wake(&iowq, false))
+			break;
+		schedule();
 	} while (1);
 	finish_wait(&ctx->wait, &iowq.wq);