author     Linus Torvalds <torvalds@linux-foundation.org>  2022-12-29 16:48:21 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2022-12-29 16:48:21 -0800
commit     ac787ffa5a246e53675ae93294420ea948600818
tree       8b67b4ecddc0125f1c6ed163c4bc79070ddac2aa
parent     69fb073b5ba6d7c9358a04115ed61b78c73790ce
parent     9eb803402a2a83400c6c6afd900e3b7c87c06816
Merge tag 'io_uring-6.2-2022-12-29' of git://git.kernel.dk/linux
Pull io_uring fixes from Jens Axboe:
- Two fixes for grabbing the ctx mutex while the task state is !=
  TASK_RUNNING (me); see the locking-order sketch after the diff
- Check for invalid opcode in io_uring_register() a bit earlier, to
avoid going through the quiesce machinery just to return -EINVAL
later in the process (me)
- Fix for the uapi io_uring header, allowing linux/time_types.h to be
  skipped when it is not available (Stefan); a consumer-side sketch
  follows this list
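
The header change below introduces an opt-out macro for consumers that
lack linux/time_types.h. A minimal consumer-side sketch of how it might
be used; the __has_include() probe is an assumption standing in for
whatever detection liburing actually performs:

/*
 * Sketch only: skip the linux/time_types.h include when the header is
 * absent. The macro name is taken from the patch below; the
 * __has_include() probe is an illustrative assumption.
 */
#ifdef __has_include
# if !__has_include(<linux/time_types.h>)
#  define UAPI_LINUX_IO_URING_H_SKIP_LINUX_TIME_TYPES_H
# endif
#endif
#include <linux/io_uring.h>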
* tag 'io_uring-6.2-2022-12-29' of git://git.kernel.dk/linux:
  uapi:io_uring.h: allow linux/time_types.h to be skipped
  io_uring: check for valid register opcode earlier
  io_uring/cancel: re-grab ctx mutex after finishing wait
  io_uring: finish waiting before flushing overflow entries
 include/uapi/linux/io_uring.h |  8 ++++++++
 io_uring/cancel.c             |  9 ++++-----
 io_uring/io_uring.c           | 30 +++++++++++++++++++-----------
 3 files changed, 31 insertions(+), 16 deletions(-)
diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index 9d4c4078e8d0..2780bce62faf 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -10,7 +10,15 @@
 
 #include <linux/fs.h>
 #include <linux/types.h>
+/*
+ * this file is shared with liburing and that has to autodetect
+ * if linux/time_types.h is available or not, it can
+ * define UAPI_LINUX_IO_URING_H_SKIP_LINUX_TIME_TYPES_H
+ * if linux/time_types.h is not available
+ */
+#ifndef UAPI_LINUX_IO_URING_H_SKIP_LINUX_TIME_TYPES_H
 #include <linux/time_types.h>
+#endif
 
 #ifdef __cplusplus
 extern "C" {
diff --git a/io_uring/cancel.c b/io_uring/cancel.c
index 2291a53cdabd..b4f5dfacc0c3 100644
--- a/io_uring/cancel.c
+++ b/io_uring/cancel.c
@@ -288,24 +288,23 @@ int io_sync_cancel(struct io_ring_ctx *ctx, void __user *arg)
 
 		ret = __io_sync_cancel(current->io_uring, &cd, sc.fd);
 
+		mutex_unlock(&ctx->uring_lock);
 		if (ret != -EALREADY)
 			break;
 
-		mutex_unlock(&ctx->uring_lock);
 		ret = io_run_task_work_sig(ctx);
-		if (ret < 0) {
-			mutex_lock(&ctx->uring_lock);
+		if (ret < 0)
 			break;
-		}
 		ret = schedule_hrtimeout(&timeout, HRTIMER_MODE_ABS);
-		mutex_lock(&ctx->uring_lock);
 		if (!ret) {
 			ret = -ETIME;
 			break;
 		}
+		mutex_lock(&ctx->uring_lock);
 	} while (1);
 
 	finish_wait(&ctx->cq_wait, &wait);
+	mutex_lock(&ctx->uring_lock);
 
 	if (ret == -ENOENT || ret > 0)
 		ret = 0;
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index ff2bbac1a10f..58ac13b69dc8 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -677,16 +677,20 @@ static void __io_cqring_overflow_flush(struct io_ring_ctx *ctx)
 	io_cq_unlock_post(ctx);
 }
 
+static void io_cqring_do_overflow_flush(struct io_ring_ctx *ctx)
+{
+	/* iopoll syncs against uring_lock, not completion_lock */
+	if (ctx->flags & IORING_SETUP_IOPOLL)
+		mutex_lock(&ctx->uring_lock);
+	__io_cqring_overflow_flush(ctx);
+	if (ctx->flags & IORING_SETUP_IOPOLL)
+		mutex_unlock(&ctx->uring_lock);
+}
+
 static void io_cqring_overflow_flush(struct io_ring_ctx *ctx)
 {
-	if (test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq)) {
-		/* iopoll syncs against uring_lock, not completion_lock */
-		if (ctx->flags & IORING_SETUP_IOPOLL)
-			mutex_lock(&ctx->uring_lock);
-		__io_cqring_overflow_flush(ctx);
-		if (ctx->flags & IORING_SETUP_IOPOLL)
-			mutex_unlock(&ctx->uring_lock);
-	}
+	if (test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq))
+		io_cqring_do_overflow_flush(ctx);
 }
 
 void __io_put_task(struct task_struct *task, int nr)
@@ -2549,7 +2553,10 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
 
 	trace_io_uring_cqring_wait(ctx, min_events);
 	do {
-		io_cqring_overflow_flush(ctx);
+		if (test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq)) {
+			finish_wait(&ctx->cq_wait, &iowq.wq);
+			io_cqring_do_overflow_flush(ctx);
+		}
 		prepare_to_wait_exclusive(&ctx->cq_wait, &iowq.wq,
 						TASK_INTERRUPTIBLE);
 		ret = io_cqring_wait_schedule(ctx, &iowq, timeout);
@@ -4013,8 +4020,6 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
 		return -EEXIST;
 
 	if (ctx->restricted) {
-		if (opcode >= IORING_REGISTER_LAST)
-			return -EINVAL;
 		opcode = array_index_nospec(opcode, IORING_REGISTER_LAST);
 		if (!test_bit(opcode, ctx->restrictions.register_op))
 			return -EACCES;
@@ -4170,6 +4175,9 @@ SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
 	long ret = -EBADF;
 	struct fd f;
 
+	if (opcode >= IORING_REGISTER_LAST)
+		return -EINVAL;
+
 	f = fdget(fd);
 	if (!f.file)
 		return -EBADF;
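
For background on the two mutex fixes: a sleeping lock must not be taken
between prepare_to_wait*() and finish_wait(), because the task state is
no longer TASK_RUNNING in that window and mutex_lock() may sleep. A
minimal kernel-style sketch of the corrected ordering, using hypothetical
identifiers (demo_wq, demo_lock) rather than io_uring's:

#include <linux/wait.h>
#include <linux/mutex.h>
#include <linux/sched.h>

/* Hypothetical wait queue and lock, for illustration only. */
static DECLARE_WAIT_QUEUE_HEAD(demo_wq);
static DEFINE_MUTEX(demo_lock);

static void demo_wait_then_lock(void)
{
	DEFINE_WAIT(wait);

	prepare_to_wait(&demo_wq, &wait, TASK_INTERRUPTIBLE);
	schedule();	/* sleeps until demo_wq is woken or a signal arrives */
	/*
	 * Taking demo_lock here would block with the task state still
	 * TASK_INTERRUPTIBLE and trip the "do not call blocking ops
	 * when !TASK_RUNNING" debug warning.
	 */
	finish_wait(&demo_wq, &wait);	/* restores TASK_RUNNING */
	mutex_lock(&demo_lock);		/* now safe to sleep */
	mutex_unlock(&demo_lock);
}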