author	Pavel Begunkov <asml.silence@gmail.com>	2022-03-17 02:03:40 +0000
committer	Jens Axboe <axboe@kernel.dk>	2022-03-16 20:26:32 -0600
commit	66fc25ca6b7ec4124606e0d59c71c6bcf14e05bb (patch)
tree	2fecf2834ba462bf61468b1b5799eebd7f210dc7 /fs/io_uring.c
parent	0f84747177b962c32243a57cb454193bdba4fe8d (diff)
download	linux-66fc25ca6b7ec4124606e0d59c71c6bcf14e05bb.tar.gz
	linux-66fc25ca6b7ec4124606e0d59c71c6bcf14e05bb.tar.bz2
	linux-66fc25ca6b7ec4124606e0d59c71c6bcf14e05bb.zip
io_uring: shuffle io_eventfd_signal() bits around
A preparation patch, which moves a fast ->io_ev_fd check out of
io_eventfd_signal() into ev_posted*(). Compilers are smart enough for it
to not change anything, but we will need it later.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/ec4091ac76d43912b73917e8db651c2dac4b7b01.1647481208.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
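The change is a classic fast-path hoist: the caller does a cheap, lockless
"is an eventfd registered at all?" test and only pays for the function call
and the RCU read section when one is. Below is a minimal userspace sketch of
that pattern, with C11 atomics standing in for rcu_dereference_raw() and all
names (ring_ctx, ev_fd, eventfd_signal_slow, cqring_ev_posted) hypothetical
stand-ins for the kernel's:

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-ins for the kernel structures. */
struct ev_fd { int fd; };

struct ring_ctx {
	/* RCU-protected pointer in the kernel; a plain atomic here. */
	_Atomic(struct ev_fd *) io_ev_fd;
};

/* Slow path: only reached once the caller saw a non-NULL pointer. */
static void eventfd_signal_slow(struct ring_ctx *ctx)
{
	struct ev_fd *ev_fd = atomic_load_explicit(&ctx->io_ev_fd,
						   memory_order_acquire);
	if (!ev_fd)	/* re-check: it may have gone away meanwhile */
		return;
	printf("signalling eventfd %d\n", ev_fd->fd);
}

/* Caller-side fast check, as the patch moves into ev_posted*(). */
static void cqring_ev_posted(struct ring_ctx *ctx)
{
	/* the kernel also wakes cq_wait sleepers here */
	if (atomic_load_explicit(&ctx->io_ev_fd, memory_order_relaxed))
		eventfd_signal_slow(ctx);	/* unlikely() in the kernel */
}

int main(void)
{
	struct ring_ctx ctx = { .io_ev_fd = NULL };

	cqring_ev_posted(&ctx);		/* fast path: no call, no signal */

	struct ev_fd ev = { .fd = 42 };
	atomic_store_explicit(&ctx.io_ev_fd, &ev, memory_order_release);
	cqring_ev_posted(&ctx);		/* slow path: prints */
	return 0;
}

The win is that the common no-eventfd case now costs one predicted-not-taken
branch in the caller instead of a call plus rcu_read_lock()/unlock().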
Diffstat (limited to 'fs/io_uring.c')
-rw-r--r--	fs/io_uring.c	13
1 file changed, 5 insertions(+), 8 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 692dbe7b98e9..31c625f61fd8 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1828,10 +1828,6 @@ static void io_eventfd_signal(struct io_ring_ctx *ctx)
 {
 	struct io_ev_fd *ev_fd;
 
-	/* Return quickly if ctx->io_ev_fd doesn't exist */
-	if (likely(!rcu_dereference_raw(ctx->io_ev_fd)))
-		return;
-
 	rcu_read_lock();
 	/*
 	 * rcu_dereference ctx->io_ev_fd once and use it for both for checking
@@ -1851,7 +1847,6 @@ static void io_eventfd_signal(struct io_ring_ctx *ctx)
 
 	if (!ev_fd->eventfd_async || io_wq_current_is_worker())
 		eventfd_signal(ev_fd->cq_ev_fd, 1);
-
 out:
 	rcu_read_unlock();
 }
@@ -1863,7 +1858,7 @@ out:
  * 1:1 relationship between how many times this function is called (and
  * hence the eventfd count) and number of CQEs posted to the CQ ring.
  */
-static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
+static inline void io_cqring_ev_posted(struct io_ring_ctx *ctx)
 {
 	/*
 	 * wake_up_all() may seem excessive, but io_wake_function() and
@@ -1872,7 +1867,8 @@ static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
 	 */
 	if (wq_has_sleeper(&ctx->cq_wait))
 		wake_up_all(&ctx->cq_wait);
-	io_eventfd_signal(ctx);
+	if (unlikely(rcu_dereference_raw(ctx->io_ev_fd)))
+		io_eventfd_signal(ctx);
 }
 
 static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
@@ -1881,7 +1877,8 @@ static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
 		if (wq_has_sleeper(&ctx->cq_wait))
 			wake_up_all(&ctx->cq_wait);
 	}
-	io_eventfd_signal(ctx);
+	if (unlikely(rcu_dereference_raw(ctx->io_ev_fd)))
+		io_eventfd_signal(ctx);
 }
 
 /* Returns true if there are no backlogged entries after the flush */
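The untouched comment in io_eventfd_signal() ("rcu_dereference ctx->io_ev_fd
once and use it for both ...") is the half of the pattern that stays behind:
inside the read-side critical section, take a single snapshot of the
protected pointer and use that one snapshot for both the NULL check and the
access. Here is a sketch of that rule using liburcu, the userspace RCU
library (assumed installed; build with -lurcu), with illustrative rather
than kernel types:

#include <urcu.h>		/* liburcu: rcu_read_lock() and friends */
#include <stdio.h>
#include <stdlib.h>

struct ev_fd { int fd; };

static struct ev_fd *io_ev_fd;	/* RCU-protected, like ctx->io_ev_fd */

static void eventfd_signal_demo(void)
{
	struct ev_fd *ev;

	rcu_read_lock();
	/*
	 * Dereference once; reusing the same snapshot for the check and
	 * the use means the pointer cannot change between the two.
	 */
	ev = rcu_dereference(io_ev_fd);
	if (ev)
		printf("signal fd %d\n", ev->fd);
	rcu_read_unlock();
}

int main(void)
{
	rcu_register_thread();

	eventfd_signal_demo();			/* NULL: nothing to do */

	struct ev_fd *ev = malloc(sizeof(*ev));
	ev->fd = 42;
	rcu_assign_pointer(io_ev_fd, ev);
	eventfd_signal_demo();			/* prints */

	rcu_assign_pointer(io_ev_fd, NULL);
	synchronize_rcu();			/* wait out readers */
	free(ev);

	rcu_unregister_thread();
	return 0;
}

rcu_dereference_raw() in the new callers is the variant that skips lockdep's
read-section check; that is safe there because the value is only tested
against NULL, never dereferenced, and the real dereference still happens
under rcu_read_lock() inside io_eventfd_signal().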