From f688944cfb810986c626cb13d95bc666e5c8a36c Mon Sep 17 00:00:00 2001
From: Jens Axboe <axboe@kernel.dk>
Date: Mon, 6 Nov 2023 07:43:16 -0700
Subject: io_uring/rw: add separate prep handler for fixed read/write

Rather than sprinkle opcode checks in the generic read/write prep
handler, have a separate prep handler for the fixed read/write.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
 io_uring/opdef.c |  4 ++--
 io_uring/rw.c    | 30 ++++++++++++++++++------------
 io_uring/rw.h    |  1 +
 3 files changed, 21 insertions(+), 14 deletions(-)

(limited to 'io_uring')

diff --git a/io_uring/opdef.c b/io_uring/opdef.c
index 0521a26bc6cd..799db44283c7 100644
--- a/io_uring/opdef.c
+++ b/io_uring/opdef.c
@@ -98,7 +98,7 @@ const struct io_issue_def io_issue_defs[] = {
 		.ioprio			= 1,
 		.iopoll			= 1,
 		.iopoll_queue		= 1,
-		.prep			= io_prep_rw,
+		.prep			= io_prep_rw_fixed,
 		.issue			= io_read,
 	},
 	[IORING_OP_WRITE_FIXED] = {
@@ -111,7 +111,7 @@ const struct io_issue_def io_issue_defs[] = {
 		.ioprio			= 1,
 		.iopoll			= 1,
 		.iopoll_queue		= 1,
-		.prep			= io_prep_rw,
+		.prep			= io_prep_rw_fixed,
 		.issue			= io_write,
 	},
 	[IORING_OP_POLL_ADD] = {
diff --git a/io_uring/rw.c b/io_uring/rw.c
index 63d343bae762..9e3e56b74e35 100644
--- a/io_uring/rw.c
+++ b/io_uring/rw.c
@@ -83,18 +83,6 @@ int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	/* used for fixed read/write too - just read unconditionally */
 	req->buf_index = READ_ONCE(sqe->buf_index);
 
-	if (req->opcode == IORING_OP_READ_FIXED ||
-	    req->opcode == IORING_OP_WRITE_FIXED) {
-		struct io_ring_ctx *ctx = req->ctx;
-		u16 index;
-
-		if (unlikely(req->buf_index >= ctx->nr_user_bufs))
-			return -EFAULT;
-		index = array_index_nospec(req->buf_index, ctx->nr_user_bufs);
-		req->imu = ctx->user_bufs[index];
-		io_req_set_rsrc_node(req, ctx, 0);
-	}
-
 	ioprio = READ_ONCE(sqe->ioprio);
 	if (ioprio) {
 		ret = ioprio_check_cap(ioprio);
@@ -131,6 +119,24 @@ int io_prep_rwv(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	return 0;
 }
 
+int io_prep_rw_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+{
+	struct io_ring_ctx *ctx = req->ctx;
+	u16 index;
+	int ret;
+
+	ret = io_prep_rw(req, sqe);
+	if (unlikely(ret))
+		return ret;
+
+	if (unlikely(req->buf_index >= ctx->nr_user_bufs))
+		return -EFAULT;
+	index = array_index_nospec(req->buf_index, ctx->nr_user_bufs);
+	req->imu = ctx->user_bufs[index];
+	io_req_set_rsrc_node(req, ctx, 0);
+	return 0;
+}
+
 /*
  * Multishot read is prepared just like a normal read/write request, only
  * difference is that we set the MULTISHOT flag.
diff --git a/io_uring/rw.h b/io_uring/rw.h
index 32aa7937513a..f9e89b4fe4da 100644
--- a/io_uring/rw.h
+++ b/io_uring/rw.h
@@ -17,6 +17,7 @@ struct io_async_rw {
 
 int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe);
 int io_prep_rwv(struct io_kiocb *req, const struct io_uring_sqe *sqe);
+int io_prep_rw_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe);
 int io_read(struct io_kiocb *req, unsigned int issue_flags);
 int io_readv_prep_async(struct io_kiocb *req);
 int io_write(struct io_kiocb *req, unsigned int issue_flags);
-- 
cgit v1.2.3
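
For reference, below is a minimal userspace sketch (not part of the patch) of
the fixed-read path that io_prep_rw_fixed() now prepares. It uses liburing;
the file path and buffer size are arbitrary choices for illustration. The
buf_index placed in the SQE must refer to a buffer previously registered on
the ring, otherwise the new prep handler rejects the request with -EFAULT.

/* Minimal liburing sketch of a fixed (registered-buffer) read.
 * The buf_index argument to io_uring_prep_read_fixed() is the value
 * that io_prep_rw_fixed() validates against ctx->nr_user_bufs.
 */
#include <liburing.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct iovec iov;
	int fd, ret;

	if (io_uring_queue_init(8, &ring, 0) < 0)
		return 1;

	/* Register one buffer; it becomes registered buffer index 0. */
	iov.iov_len = 4096;
	iov.iov_base = malloc(iov.iov_len);
	if (!iov.iov_base || io_uring_register_buffers(&ring, &iov, 1) < 0)
		return 1;

	fd = open("/etc/hostname", O_RDONLY);	/* arbitrary example file */
	if (fd < 0)
		return 1;

	/* IORING_OP_READ_FIXED: the last argument is the registered
	 * buf_index. An index >= the number of registered buffers is
	 * refused by io_prep_rw_fixed() with -EFAULT.
	 */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_read_fixed(sqe, fd, iov.iov_base, iov.iov_len, 0, 0);
	io_uring_submit(&ring);

	ret = io_uring_wait_cqe(&ring, &cqe);
	if (!ret && cqe->res >= 0)
		printf("read %d bytes\n", cqe->res);
	io_uring_cqe_seen(&ring, cqe);

	io_uring_queue_exit(&ring);
	close(fd);
	return 0;
}

With the patch applied, the registered-buffer index check and the
array_index_nospec() lookup run only for IORING_OP_READ_FIXED and
IORING_OP_WRITE_FIXED via their dedicated prep handler, instead of being
branched on inside io_prep_rw() for every read/write preparation.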