author     Jens Axboe <axboe@kernel.dk>                        2023-08-01 08:42:37 -0600
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>     2023-08-11 12:08:08 +0200
commit     c7920f992840779383cbff15a2e860515eec7644
tree       71eae4122666e2acdfc51053b2161e6bc54f1380 /io_uring
parent     0a4a7855302d56a1d75cec3aa9a6914a3af9c6af
io_uring: gate iowait schedule on having pending requests
Commit 7b72d661f1f2f950ab8c12de7e2bc48bdac8ed69 upstream.

A previous commit made all cqring waits marked as iowait, as a way to
improve performance for short schedules with pending IO. However, for
use cases that have a special reaper thread that does nothing but wait
on events on the ring, this causes a cosmetic issue where we now have
one core marked as being "busy" with 100% iowait.

While this isn't a grave issue, it is confusing to users. Rather than
always mark us as being in iowait, gate setting of current->in_iowait
to 1 by whether or not the waiting task has pending requests.

Cc: stable@vger.kernel.org
Link: https://lore.kernel.org/io-uring/CAMEGJJ2RxopfNQ7GNLhr7X9=bHXKo+G5OOe0LUq=+UgLXsv1Xg@mail.gmail.com/
Link: https://bugzilla.kernel.org/show_bug.cgi?id=217699
Link: https://bugzilla.kernel.org/show_bug.cgi?id=217700
Reported-by: Oleksandr Natalenko <oleksandr@natalenko.name>
Reported-by: Phil Elwell <phil@raspberrypi.com>
Tested-by: Andres Freund <andres@anarazel.de>
Fixes: 8a796565cec3 ("io_uring: Use io_schedule* in cqring wait")
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
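[For context, a minimal userspace sketch of the reaper-thread pattern the
reports describe; this is illustrative only, not part of the commit, and
assumes liburing is available. A thread like this blocks in
io_cqring_wait_schedule() in the kernel; before this patch that wait was
always accounted as iowait, so the core looked 100% "busy" even with
nothing inflight.]

/*
 * Illustrative sketch, not part of this commit. Build with -luring.
 * The reaper only waits for and consumes completions; submissions are
 * assumed to happen on other threads sharing the ring.
 */
#include <liburing.h>

static void reap_loop(struct io_uring *ring)
{
	struct io_uring_cqe *cqe;

	for (;;) {
		/* blocks in the kernel's cqring wait path */
		if (io_uring_wait_cqe(ring, &cqe) < 0)
			break;
		/* handle cqe->user_data / cqe->res here */
		io_uring_cqe_seen(ring, cqe);
	}
}

int main(void)
{
	struct io_uring ring;

	if (io_uring_queue_init(8, &ring, 0) < 0)
		return 1;
	reap_loop(&ring);	/* submissions happen elsewhere */
	io_uring_queue_exit(&ring);
	return 0;
}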
Diffstat (limited to 'io_uring')
-rw-r--r--   io_uring/io_uring.c   23
1 file changed, 17 insertions(+), 6 deletions(-)
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index f091153bc854..ed8e9deae284 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -2349,12 +2349,21 @@ int io_run_task_work_sig(struct io_ring_ctx *ctx)
 	return 0;
 }
 
+static bool current_pending_io(void)
+{
+	struct io_uring_task *tctx = current->io_uring;
+
+	if (!tctx)
+		return false;
+	return percpu_counter_read_positive(&tctx->inflight);
+}
+
 /* when returns >0, the caller should retry */
 static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
 					  struct io_wait_queue *iowq,
 					  ktime_t *timeout)
 {
-	int token, ret;
+	int io_wait, ret;
 	unsigned long check_cq;
 
 	/* make sure we run task_work before checking for signals */
@@ -2372,15 +2381,17 @@ static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
 	}
 
 	/*
-	 * Use io_schedule_prepare/finish, so cpufreq can take into account
-	 * that the task is waiting for IO - turns out to be important for low
-	 * QD IO.
+	 * Mark us as being in io_wait if we have pending requests, so cpufreq
+	 * can take into account that the task is waiting for IO - turns out
+	 * to be important for low QD IO.
 	 */
-	token = io_schedule_prepare();
+	io_wait = current->in_iowait;
+	if (current_pending_io())
+		current->in_iowait = 1;
 	ret = 1;
 	if (!schedule_hrtimeout(timeout, HRTIMER_MODE_ABS))
 		ret = -ETIME;
-	io_schedule_finish(token);
+	current->in_iowait = io_wait;
 	return ret;
 }
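[One way to observe the behavior change, again illustrative and not from
the commit: sample the aggregate iowait counter from /proc/stat. With a
pure reaper loop on a pre-patch kernel this counter climbs continuously;
with this patch it only rises while the waiting task actually has
requests inflight.]

/*
 * Observation aid, not part of the patch: iowait is field 5 of the
 * "cpu" line in /proc/stat, reported in USER_HZ ticks.
 */
#include <stdio.h>

int main(void)
{
	unsigned long long user, nice, sys, idle, iowait;
	FILE *f = fopen("/proc/stat", "r");

	if (!f)
		return 1;
	if (fscanf(f, "cpu %llu %llu %llu %llu %llu",
		   &user, &nice, &sys, &idle, &iowait) == 5)
		printf("iowait ticks: %llu\n", iowait);
	fclose(f);
	return 0;
}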