author     Jens Axboe <axboe@kernel.dk>    2019-08-20 11:03:11 -0600
committer  Jens Axboe <axboe@kernel.dk>    2019-08-20 11:03:11 -0600
commit     a3a0e43fd77013819e4b6f55e37e0efe8e35d805
tree       ee2773bfcdb36cbd4f721a2e79d325f4b5605c72 /fs
parent     cb32de1b7e2591f844f18a5513fde8e2bd49bce0
io_uring: don't enter poll loop if we have CQEs pending
We need to check whether we have CQEs pending before starting a poll loop,
as those could be the very events we would be spinning for (and hence would
never find). This can happen if a CQE is posted for an error, or if it is
found by, e.g., an IRQ before we get a chance to find it through polling.
Signed-off-by: Jens Axboe <axboe@kernel.dk>
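The shape of the fix is easy to mock up outside the kernel: before busy-polling for completions, first ask whether the completion ring already holds entries, and bail out if it does. Below is a minimal userspace sketch of that pattern, using invented names (struct cq_ring, cq_pending) and C11 atomics in place of the kernel's smp_rmb()/READ_ONCE(); it is an analogue of the logic, not the actual io_uring code.

#include <stdatomic.h>
#include <stdio.h>

/* Simplified completion ring: head/tail are free-running unsigned indices. */
struct cq_ring {
	_Atomic unsigned head;	/* consumer: next entry to reap */
	_Atomic unsigned tail;	/* producer: next entry to fill */
};

/* Completions posted but not yet reaped (analogue of io_cqring_events()). */
static unsigned cq_pending(struct cq_ring *ring)
{
	/* acquire pairs with the producer's release store to tail */
	unsigned tail = atomic_load_explicit(&ring->tail, memory_order_acquire);
	unsigned head = atomic_load_explicit(&ring->head, memory_order_relaxed);
	return tail - head;	/* unsigned wraparound keeps this correct */
}

int main(void)
{
	struct cq_ring ring = { .head = 0, .tail = 0 };

	/* Pretend an IRQ (or an error path) already posted two CQEs. */
	atomic_store_explicit(&ring.tail, 2, memory_order_release);

	/*
	 * Same shape as the fix in io_iopoll_check(): if completions are
	 * already pending, skip the busy-poll loop and let them be reaped.
	 */
	if (cq_pending(&ring)) {
		printf("%u CQEs already pending, skip the poll loop\n",
		       cq_pending(&ring));
		return 0;
	}

	/* ... otherwise we would start actively polling for completions ... */
	return 0;
}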
Diffstat (limited to 'fs')
 fs/io_uring.c | 22 +++++++++++++++-------
 1 file changed, 15 insertions(+), 7 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 36f04d0b197b..e7a43a354d91 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -679,6 +679,13 @@ static void io_put_req(struct io_kiocb *req)
 		io_free_req(req);
 }
 
+static unsigned io_cqring_events(struct io_cq_ring *ring)
+{
+	/* See comment at the top of this file */
+	smp_rmb();
+	return READ_ONCE(ring->r.tail) - READ_ONCE(ring->r.head);
+}
+
 /*
  * Find and free completed poll iocbs
  */
@@ -819,6 +826,14 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
 		int tmin = 0;
 
 		/*
+		 * Don't enter poll loop if we already have events pending.
+		 * If we do, we can potentially be spinning for commands that
+		 * already triggered a CQE (eg in error).
+		 */
+		if (io_cqring_events(ctx->cq_ring))
+			break;
+
+		/*
 		 * If a submit got punted to a workqueue, we can have the
 		 * application entering polling for a command before it gets
 		 * issued. That app will hold the uring_lock for the duration
@@ -2449,13 +2464,6 @@ static int io_ring_submit(struct io_ring_ctx *ctx, unsigned int to_submit)
 	return submit;
 }
 
-static unsigned io_cqring_events(struct io_cq_ring *ring)
-{
-	/* See comment at the top of this file */
-	smp_rmb();
-	return READ_ONCE(ring->r.tail) - READ_ONCE(ring->r.head);
-}
-
 /*
  * Wait until events become available, if we don't already have some. The
  * application must reap them itself, as they reside on the shared cq ring.
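One detail worth noting about io_cqring_events(): the cq ring's head and tail are free-running unsigned counters (masked only when indexing entries), so tail - head stays correct even after the 32-bit indices wrap around, by modular arithmetic. A tiny standalone check of that property (plain C, values picked arbitrarily for illustration):

#include <assert.h>
#include <stdio.h>

int main(void)
{
	/* head just below UINT_MAX, tail already wrapped past zero */
	unsigned head = 0xfffffffeu;
	unsigned tail = 0x00000003u;

	/* five completions are pending; unsigned subtraction wraps the same way */
	assert(tail - head == 5u);
	printf("pending = %u\n", tail - head);
	return 0;
}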