diff options
author | Jens Axboe <axboe@kernel.dk> | 2020-08-23 11:00:37 -0600 |
---|---|---|
committer | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2020-09-03 11:29:50 +0200 |
commit | b7e397bcba0a0f8cc7aee8c8d9b5aa9a82c4fbe7 (patch) | |
tree | 4c478e6ba50c7554d80c91e935918fa60285f363 | |
parent | b727b92fd33b7d09b986cfb043d71a47ae362633 (diff) | |
download | linux-stable-b7e397bcba0a0f8cc7aee8c8d9b5aa9a82c4fbe7.tar.gz linux-stable-b7e397bcba0a0f8cc7aee8c8d9b5aa9a82c4fbe7.tar.bz2 linux-stable-b7e397bcba0a0f8cc7aee8c8d9b5aa9a82c4fbe7.zip |
io_uring: don't recurse on tsk->sighand->siglock with signalfd
[ Upstream commit fd7d6de2241453fc7d042336d366a939a25bc5a9 ]
If an application is doing reads on signalfd, and we arm the poll handler
because there's no data available, then the wakeup can recurse on the
task's sighand->siglock as the signal delivery from task_work_add() will
use TWA_SIGNAL and that attempts to lock it again.
We can detect the signalfd case pretty easily by comparing the poll->head
wait_queue_head_t with the target task signalfd wait queue. Just use
normal task wakeup for this case.
Cc: stable@vger.kernel.org # v5.7+
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Sasha Levin <sashal@kernel.org>
-rw-r--r-- | fs/io_uring.c | 16 |
1 file changed, 13 insertions, 3 deletions
diff --git a/fs/io_uring.c b/fs/io_uring.c index b966e2b8a77d..c384caad6466 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -4114,7 +4114,8 @@ struct io_poll_table { int error; }; -static int io_req_task_work_add(struct io_kiocb *req, struct callback_head *cb) +static int io_req_task_work_add(struct io_kiocb *req, struct callback_head *cb, + bool twa_signal_ok) { struct task_struct *tsk = req->task; struct io_ring_ctx *ctx = req->ctx; @@ -4127,7 +4128,7 @@ static int io_req_task_work_add(struct io_kiocb *req, struct callback_head *cb) * will do the job. */ notify = 0; - if (!(ctx->flags & IORING_SETUP_SQPOLL)) + if (!(ctx->flags & IORING_SETUP_SQPOLL) && twa_signal_ok) notify = TWA_SIGNAL; ret = task_work_add(tsk, cb, notify); @@ -4141,6 +4142,7 @@ static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll, __poll_t mask, task_work_func_t func) { struct task_struct *tsk; + bool twa_signal_ok; int ret; /* for instances that support it check for an event match first: */ @@ -4157,12 +4159,20 @@ static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll, percpu_ref_get(&req->ctx->refs); /* + * If we using the signalfd wait_queue_head for this wakeup, then + * it's not safe to use TWA_SIGNAL as we could be recursing on the + * tsk->sighand->siglock on doing the wakeup. Should not be needed + * either, as the normal wakeup will suffice. + */ + twa_signal_ok = (poll->head != &req->task->sighand->signalfd_wqh); + + /* * If this fails, then the task is exiting. When a task exits, the * work gets canceled, so just cancel this request as well instead * of executing it. We can't safely execute it anyway, as we may not * have the needed state needed for it anyway. */ - ret = io_req_task_work_add(req, &req->task_work); + ret = io_req_task_work_add(req, &req->task_work, twa_signal_ok); if (unlikely(ret)) { WRITE_ONCE(poll->canceled, true); tsk = io_wq_get_task(req->ctx->io_wq); |