author    Jens Axboe <axboe@kernel.dk>    2023-11-27 17:02:48 -0700
committer Jens Axboe <axboe@kernel.dk>    2023-11-28 11:45:02 -0700
commit    07d6063d3d3beb3168d3ac9fdef7bca81254d983 (patch)
tree      7cbaed7cd0f7bad2e5cde263bd40094a9dc5d259 /io_uring
parent    b10b73c102a2eab91e1cd62a03d6446f1dfecc64 (diff)
io_uring/kbuf: prune deferred locked cache when tearing down
We used to just use our page list for final teardown, which would ensure
that we got all the buffers, even the ones that were not on the normal
cached list. But with the move to slab for the io_buffers, we now only
prune that list, not the deferred locked list that we also have. This can
leak memory if the workload ends up using the intermediate locked list.

Fix this by always pruning both lists when tearing down.

Fixes: b3a4dbc89d40 ("io_uring/kbuf: Use slab for struct io_buffer objects")
Signed-off-by: Jens Axboe <axboe@kernel.dk>
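To make the leak concrete, here is a minimal userspace sketch (not the kernel code) of the pattern the fix enforces: buffers can sit on either of two lists, a cache and a deferred "completion" list, and a teardown that prunes only the cache leaks whatever is parked on the other list. The names buf_cache/buf_comp and the singly linked list are illustrative stand-ins, not kernel identifiers.

	#include <stdio.h>
	#include <stdlib.h>

	struct buf {
		struct buf *next;
		int id;
	};

	/* Push a buffer onto a singly linked list head. */
	static void push(struct buf **head, int id)
	{
		struct buf *b = malloc(sizeof(*b));

		b->id = id;
		b->next = *head;
		*head = b;
	}

	/* Free every entry on a list, mirroring the kmem_cache_free() loop. */
	static void prune(struct buf **head)
	{
		while (*head) {
			struct buf *b = *head;

			*head = b->next;
			printf("freeing buf %d\n", b->id);
			free(b);
		}
	}

	int main(void)
	{
		struct buf *buf_cache = NULL;	/* ctx->io_buffers_cache analogue */
		struct buf *buf_comp = NULL;	/* ctx->io_buffers_comp analogue */

		push(&buf_cache, 1);
		push(&buf_comp, 2);	/* parked on the deferred locked list */

		/*
		 * The buggy teardown pruned only buf_cache, leaking buf 2.
		 * The fix splices the deferred list into the cache first,
		 * then prunes once (stand-in for list_splice_init()).
		 */
		while (buf_comp) {
			struct buf *b = buf_comp;

			buf_comp = b->next;
			b->next = buf_cache;
			buf_cache = b;
		}
		prune(&buf_cache);
		return 0;
	}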
Diffstat (limited to 'io_uring')
-rw-r--r--  io_uring/kbuf.c | 8 ++++++++
1 file changed, 8 insertions(+), 0 deletions(-)
diff --git a/io_uring/kbuf.c b/io_uring/kbuf.c
index 325ca7f8b0a0..39d15a27eb92 100644
--- a/io_uring/kbuf.c
+++ b/io_uring/kbuf.c
@@ -306,6 +306,14 @@ void io_destroy_buffers(struct io_ring_ctx *ctx)
 		kfree(bl);
 	}
 
+	/*
+	 * Move deferred locked entries to cache before pruning
+	 */
+	spin_lock(&ctx->completion_lock);
+	if (!list_empty(&ctx->io_buffers_comp))
+		list_splice_init(&ctx->io_buffers_comp, &ctx->io_buffers_cache);
+	spin_unlock(&ctx->completion_lock);
+
 	list_for_each_safe(item, tmp, &ctx->io_buffers_cache) {
 		buf = list_entry(item, struct io_buffer, list);
 		kmem_cache_free(io_buf_cachep, buf);
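The splice runs under ctx->completion_lock, the same lock that guards additions to io_buffers_comp on the completion path, so no entries can be added while teardown drains the list. For reference, here is a minimal userspace re-implementation showing the list_splice_init() semantics the fix relies on: every entry moves from the source list to the destination, and the source is left re-initialized (empty), so the prune loop that follows sees all buffers on io_buffers_cache. This is an illustrative sketch, not the <linux/list.h> code.

	#include <stdio.h>

	struct list_head {
		struct list_head *next, *prev;
	};

	static void INIT_LIST_HEAD(struct list_head *h)
	{
		h->next = h->prev = h;
	}

	static int list_empty(const struct list_head *h)
	{
		return h->next == h;
	}

	/* Insert @n right after @h. */
	static void list_add(struct list_head *n, struct list_head *h)
	{
		n->next = h->next;
		n->prev = h;
		h->next->prev = n;
		h->next = n;
	}

	/* Move all of @src's entries to the front of @dst, then re-init @src. */
	static void list_splice_init(struct list_head *src, struct list_head *dst)
	{
		if (!list_empty(src)) {
			struct list_head *first = src->next, *last = src->prev;

			first->prev = dst;
			last->next = dst->next;
			dst->next->prev = last;
			dst->next = first;
			INIT_LIST_HEAD(src);
		}
	}

	int main(void)
	{
		struct list_head comp, cache, a, b;

		INIT_LIST_HEAD(&comp);
		INIT_LIST_HEAD(&cache);
		list_add(&a, &comp);	/* two dummy deferred entries */
		list_add(&b, &comp);

		list_splice_init(&comp, &cache);
		printf("comp empty: %d, cache empty: %d\n",
		       list_empty(&comp), list_empty(&cache));
		return 0;	/* prints: comp empty: 1, cache empty: 0 */
	}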