author    Pavel Begunkov <asml.silence@gmail.com>    2024-04-10 02:26:55 +0100
committer Jens Axboe <axboe@kernel.dk>               2024-04-15 08:10:27 -0600
commit    6b231248e97fc37d4205449d48747b5a3b4c2fcc (patch)
tree      386a6ce9353516b3a591220bfdc5a0d66271e9bd /io_uring/io_uring.c
parent    8d09a88ef9d3cb7d21d45c39b7b7c31298d23998 (diff)
io_uring: consolidate overflow flushing
Consolidate __io_cqring_overflow_flush() and io_cqring_overflow_kill() into a single function, as it once was; it's easier to work with it this way.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/986b42c35e76a6be7aa0cdcda0a236a2222da3a7.1712708261.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
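For orientation, this is roughly the shape of the consolidated helper after the patch, pieced together from the hunks below. The parts the excerpt does not show (entry checks, CQ locking, clearing of the overflow flag) are only indicated by comments and are an assumption about the surrounding code, not part of this diff:

static void __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool dying)
{
        size_t cqe_size = sizeof(struct io_uring_cqe);

        /* entry checks and io_cq_lock() are not shown in this excerpt */

        while (!list_empty(&ctx->cq_overflow_list)) {
                struct io_uring_cqe *cqe;
                struct io_overflow_cqe *ocqe;

                ocqe = list_first_entry(&ctx->cq_overflow_list,
                                        struct io_overflow_cqe, list);
                if (!dying) {
                        /* normal flush: stop once the CQ ring is full */
                        if (!io_get_cqe_overflow(ctx, &cqe, true))
                                break;
                        memcpy(cqe, &ocqe->cqe, cqe_size);
                }
                /* when dying, entries are simply unlinked and freed */
                list_del(&ocqe->list);
                kfree(ocqe);
        }

        /* overflow-flag clearing and io_cq_unlock_post() are not shown here */
}

static void io_cqring_overflow_kill(struct io_ring_ctx *ctx)
{
        if (ctx->rings)
                __io_cqring_overflow_flush(ctx, true);
}

The teardown path, io_cqring_overflow_kill(), thus becomes a thin wrapper that reuses the same loop but discards the queued overflow entries instead of copying them into the CQ ring.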
Diffstat (limited to 'io_uring/io_uring.c')
-rw-r--r--   io_uring/io_uring.c   40
1 file changed, 15 insertions(+), 25 deletions(-)
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 92ac9c0fc597..c4419eef7e63 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -668,26 +668,7 @@ static void io_cq_unlock_post(struct io_ring_ctx *ctx)
 	io_commit_cqring_flush(ctx);
 }
 
-static void io_cqring_overflow_kill(struct io_ring_ctx *ctx)
-{
-	struct io_overflow_cqe *ocqe;
-	LIST_HEAD(list);
-
-	lockdep_assert_held(&ctx->uring_lock);
-
-	spin_lock(&ctx->completion_lock);
-	list_splice_init(&ctx->cq_overflow_list, &list);
-	clear_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq);
-	spin_unlock(&ctx->completion_lock);
-
-	while (!list_empty(&list)) {
-		ocqe = list_first_entry(&list, struct io_overflow_cqe, list);
-		list_del(&ocqe->list);
-		kfree(ocqe);
-	}
-}
-
-static void __io_cqring_overflow_flush(struct io_ring_ctx *ctx)
+static void __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool dying)
 {
 	size_t cqe_size = sizeof(struct io_uring_cqe);
 
@@ -704,11 +685,14 @@ static void __io_cqring_overflow_flush(struct io_ring_ctx *ctx)
 		struct io_uring_cqe *cqe;
 		struct io_overflow_cqe *ocqe;
 
-		if (!io_get_cqe_overflow(ctx, &cqe, true))
-			break;
 		ocqe = list_first_entry(&ctx->cq_overflow_list,
 					struct io_overflow_cqe, list);
-		memcpy(cqe, &ocqe->cqe, cqe_size);
+
+		if (!dying) {
+			if (!io_get_cqe_overflow(ctx, &cqe, true))
+				break;
+			memcpy(cqe, &ocqe->cqe, cqe_size);
+		}
 		list_del(&ocqe->list);
 		kfree(ocqe);
 	}
@@ -720,10 +704,16 @@ static void __io_cqring_overflow_flush(struct io_ring_ctx *ctx)
 	io_cq_unlock_post(ctx);
 }
 
+static void io_cqring_overflow_kill(struct io_ring_ctx *ctx)
+{
+	if (ctx->rings)
+		__io_cqring_overflow_flush(ctx, true);
+}
+
 static void io_cqring_do_overflow_flush(struct io_ring_ctx *ctx)
 {
 	mutex_lock(&ctx->uring_lock);
-	__io_cqring_overflow_flush(ctx);
+	__io_cqring_overflow_flush(ctx, false);
 	mutex_unlock(&ctx->uring_lock);
 }
 
@@ -1531,7 +1521,7 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
 	check_cq = READ_ONCE(ctx->check_cq);
 	if (unlikely(check_cq)) {
 		if (check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT))
-			__io_cqring_overflow_flush(ctx);
+			__io_cqring_overflow_flush(ctx, false);
 		/*
 		 * Similarly do not spin if we have not informed the user of any
 		 * dropped CQE.