author     Jens Axboe <axboe@kernel.dk>  2022-03-22 14:12:33 -0600
committer  Jens Axboe <axboe@kernel.dk>  2022-03-23 06:26:06 -0600
commit     4d55f238f8b89124f73e50abbd05e413def514fe
tree       708d8aab0fded171847c4c9a15689e25d42ab145
parent     d89a4fac0fbc6fe5fc24d1c9a889440dcf410368
io_uring: don't recycle provided buffer if punted to async worker
We only really need to recycle the buffer when going async for a file type that has an indefinite response time (e.g. non-file/bdev). And for files that need to arm poll, the async worker will arm poll anyway and the buffer will get recycled there.

In that latter case, we're not holding ctx->uring_lock. Ensure we take the issue_flags into account and acquire it if we need to.

Fixes: b1c62645758e ("io_uring: recycle provided buffers if request goes async")
Reported-by: Stefan Roesch <shr@fb.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
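The fix follows a standard conditional-locking pattern: the callee receives the caller's issue_flags and takes the lock itself only when IO_URING_F_UNLOCKED says the caller is not already holding it. A minimal userspace sketch of that pattern, not kernel code (RECYCLE_F_UNLOCKED, recycle_buffer(), and the pthread mutex are illustrative stand-ins for IO_URING_F_UNLOCKED, io_kbuf_recycle(), and ctx->uring_lock):

#include <pthread.h>
#include <stdio.h>

#define RECYCLE_F_UNLOCKED (1U << 0)	/* caller does NOT hold list_lock */

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static int buffers_on_list;

/* Analogue of io_kbuf_recycle(): lock only if the caller has not. */
static void recycle_buffer(unsigned flags)
{
	if (flags & RECYCLE_F_UNLOCKED)
		pthread_mutex_lock(&list_lock);

	buffers_on_list++;	/* stands in for list_add(&buf->list, ...) */

	if (flags & RECYCLE_F_UNLOCKED)
		pthread_mutex_unlock(&list_lock);
}

int main(void)
{
	/* Locked context: caller already holds the lock (submit path). */
	pthread_mutex_lock(&list_lock);
	recycle_buffer(0);
	pthread_mutex_unlock(&list_lock);

	/* Unlocked context: callee must lock itself (async worker path). */
	recycle_buffer(RECYCLE_F_UNLOCKED);

	printf("%d buffers recycled\n", buffers_on_list);
	return 0;
}

The same idea is why io_arm_poll_handler() below forwards its own issue_flags instead of assuming a lock state.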
Diffstat (limited to 'fs')
-rw-r--r--  fs/io_uring.c | 11 ++++++++---
1 file changed, 8 insertions(+), 3 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 6395393eaf9e..f41d91ce1fd0 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1383,7 +1383,7 @@ static struct io_buffer_list *io_buffer_get_list(struct io_ring_ctx *ctx,
 	return NULL;
 }
 
-static void io_kbuf_recycle(struct io_kiocb *req)
+static void io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
 {
 	struct io_ring_ctx *ctx = req->ctx;
 	struct io_buffer_list *bl;
@@ -1392,6 +1392,9 @@ static void io_kbuf_recycle(struct io_kiocb *req)
 	if (likely(!(req->flags & REQ_F_BUFFER_SELECTED)))
 		return;
 
+	if (issue_flags & IO_URING_F_UNLOCKED)
+		mutex_lock(&ctx->uring_lock);
+
 	lockdep_assert_held(&ctx->uring_lock);
 
 	buf = req->kbuf;
@@ -1399,6 +1402,9 @@ static void io_kbuf_recycle(struct io_kiocb *req)
 	list_add(&buf->list, &bl->buf_list);
 	req->flags &= ~REQ_F_BUFFER_SELECTED;
 	req->kbuf = NULL;
+
+	if (issue_flags & IO_URING_F_UNLOCKED)
+		mutex_unlock(&ctx->uring_lock);
 }
 
 static bool io_match_task(struct io_kiocb *head, struct task_struct *task,
@@ -6254,7 +6260,7 @@ static int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags)
 	req->flags |= REQ_F_POLLED;
 	ipt.pt._qproc = io_async_queue_proc;
 
-	io_kbuf_recycle(req);
+	io_kbuf_recycle(req, issue_flags);
 
 	ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask);
 	if (ret || ipt.error)
@@ -7504,7 +7510,6 @@ static void io_queue_sqe_arm_apoll(struct io_kiocb *req)
 		 * Queued up for async execution, worker will release
		 * submit reference when the iocb is actually submitted.
		 */
-		io_kbuf_recycle(req);
		io_queue_async_work(req, NULL);
		break;
	case IO_APOLL_OK:
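
For reference, io_kbuf_recycle() as it should read with the patch applied, stitched together from the hunks above. The declaration of buf and the io_buffer_get_list() call sit in the elided context between hunks, so those two lines are inferred rather than quoted:

static void io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	struct io_buffer *buf;				/* inferred */

	if (likely(!(req->flags & REQ_F_BUFFER_SELECTED)))
		return;

	/* Async worker path: not holding ctx->uring_lock, so take it. */
	if (issue_flags & IO_URING_F_UNLOCKED)
		mutex_lock(&ctx->uring_lock);

	lockdep_assert_held(&ctx->uring_lock);

	buf = req->kbuf;
	bl = io_buffer_get_list(ctx, buf->bgid);	/* inferred */
	list_add(&buf->list, &bl->buf_list);
	req->flags &= ~REQ_F_BUFFER_SELECTED;
	req->kbuf = NULL;

	if (issue_flags & IO_URING_F_UNLOCKED)
		mutex_unlock(&ctx->uring_lock);
}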