| author | Stefan Roesch <shr@fb.com> | 2022-04-26 11:21:28 -0700 |
|---|---|---|
| committer | Jens Axboe <axboe@kernel.dk> | 2022-05-09 06:35:34 -0600 |
| commit | 2fee6bc6407848043798698116b8fd81d1fe470a | |
| tree | 51eb8a3548bbb234e98788c090f0a9b19fb9fce7 /fs/io_uring.c | |
| parent | effcf8bdeb03aa726e9db834325c650e1700b041 | |
io_uring: modify io_get_cqe for CQE32
Modify accesses to the CQE array to take large CQEs into account. With
IORING_SETUP_CQE32 each completion is 32 bytes and occupies two slots of
the regular 16-byte CQE array, so the array index needs to be shifted
left by one (doubled) for large CQEs.
Signed-off-by: Stefan Roesch <shr@fb.com>
Reviewed-by: Kanchan Joshi <joshi.k@samsung.com>
Link: https://lore.kernel.org/r/20220426182134.136504-7-shr@fb.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
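The index arithmetic is the heart of the change. As a minimal illustrative sketch (plain C, not kernel code; the helper name and parameters are invented for this example), the mapping from the cached tail to an array slot looks like this:

```c
/*
 * Illustrative sketch only, not kernel code.  Shows how the patch maps a
 * logical CQ index to a slot in the CQE array: with IORING_SETUP_CQE32
 * every completion is 32 bytes, i.e. two entries of the regular 16-byte
 * CQE array, so the offset is shifted left by one (doubled).
 */
#include <stdbool.h>
#include <stdint.h>

static inline uint32_t cqe_array_index(uint32_t cached_cq_tail,
					uint32_t cq_entries, bool cqe32)
{
	uint32_t shift = cqe32 ? 1 : 0;
	/* cq_entries is a power of two, so masking handles the ring wrap */
	uint32_t off = cached_cq_tail & (cq_entries - 1);

	return off << shift;	/* index into rings->cqes[] */
}
```

The cqe_cached/cqe_sentinel bookkeeping keeps counting in regular-size entries; only the pointer handed back to the caller is recomputed with the doubled offset.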
Diffstat (limited to 'fs/io_uring.c')
-rw-r--r-- | fs/io_uring.c | 19
1 file changed, 17 insertions(+), 2 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 5ec7a4b82aa5..3915251bfeca 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2069,8 +2069,12 @@ static noinline struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx)
 {
 	struct io_rings *rings = ctx->rings;
 	unsigned int off = ctx->cached_cq_tail & (ctx->cq_entries - 1);
+	unsigned int shift = 0;
 	unsigned int free, queued, len;
 
+	if (ctx->flags & IORING_SETUP_CQE32)
+		shift = 1;
+
 	/* userspace may cheat modifying the tail, be safe and do min */
 	queued = min(__io_cqring_events(ctx), ctx->cq_entries);
 	free = ctx->cq_entries - queued;
@@ -2082,15 +2086,26 @@ static noinline struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx)
 	ctx->cached_cq_tail++;
 	ctx->cqe_cached = &rings->cqes[off];
 	ctx->cqe_sentinel = ctx->cqe_cached + len;
-	return ctx->cqe_cached++;
+	ctx->cqe_cached++;
+	return &rings->cqes[off << shift];
 }
 
 static inline struct io_uring_cqe *io_get_cqe(struct io_ring_ctx *ctx)
 {
 	if (likely(ctx->cqe_cached < ctx->cqe_sentinel)) {
+		struct io_uring_cqe *cqe = ctx->cqe_cached;
+
+		if (ctx->flags & IORING_SETUP_CQE32) {
+			unsigned int off = ctx->cqe_cached - ctx->rings->cqes;
+
+			cqe += off;
+		}
+
 		ctx->cached_cq_tail++;
-		return ctx->cqe_cached++;
+		ctx->cqe_cached++;
+		return cqe;
 	}
+
 	return __io_get_cqe(ctx);
 }
 
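For context, a userspace ring opts into the doubled layout with IORING_SETUP_CQE32 at setup time; the kernel-side index math above exists to serve such rings. A hedged sketch using liburing (assuming a liburing release with CQE32 support; error handling trimmed, and the extra-payload layout is defined by the uapi header, not by this patch):

```c
/*
 * Sketch only: set up a big-CQE ring with liburing and reap one completion.
 * Assumes a liburing version that understands IORING_SETUP_CQE32.
 */
#include <liburing.h>
#include <stdio.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_params p = { .flags = IORING_SETUP_CQE32 };
	struct io_uring_cqe *cqe;

	if (io_uring_queue_init_params(8, &ring, &p) < 0)
		return 1;

	/* Submit a no-op just to generate a completion. */
	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
	io_uring_prep_nop(sqe);
	io_uring_submit(&ring);

	if (io_uring_wait_cqe(&ring, &cqe) == 0) {
		/* res/user_data as usual; an extra 16 bytes follow each CQE */
		printf("res=%d\n", cqe->res);
		io_uring_cqe_seen(&ring, cqe);
	}

	io_uring_queue_exit(&ring);
	return 0;
}
```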