author    Jens Axboe <axboe@kernel.dk>    2024-03-18 07:36:03 -0600
committer Jens Axboe <axboe@kernel.dk>    2024-04-15 08:10:25 -0600
commit    c6f32c7d9e09bf1368447e9a29e869193ecbb756 (patch)
tree      941e0c2eaa18d1b8f663d842659d07f00a5d6a49 /io_uring/net.c
parent    3ba8345aec886a3a01331e944a6a8568bf94bd10 (diff)
io_uring/net: get rid of ->prep_async() for receive side
Move the io_async_msghdr out of the issue path and into prep handling, since it's now done unconditionally and hence does not need to be part of the issue path. This reduces the footprint of the multishot fast path, where ->issue() is invoked multiple times per prep, and also means that ->prep_async() can be dropped for recvmsg, as this is now done via setup on the prep side.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
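In short, all io_async_msghdr setup for the receive side now happens at prep time, and the issue path just picks up the result. Below is a minimal before/after sketch condensed from the hunks that follow; the kernel-internal helpers it references (io_msg_alloc_async(), io_recvmsg_copy_hdr(), req_has_async_data()) are only named, not defined, so this is an illustration rather than a standalone compile unit:

/*
 * Before: io_recvmsg()/io_recv() set up the async msghdr lazily on the
 * first pass through ->issue(), guarded by req_has_async_data().
 */
	if (req_has_async_data(req)) {
		kmsg = req->async_data;
	} else {
		kmsg = io_msg_alloc_async(req, issue_flags);
		if (unlikely(!kmsg))
			return -ENOMEM;
		ret = io_recvmsg_copy_hdr(req, kmsg);
		if (ret)
			return ret;
	}

/*
 * After: io_recvmsg_prep() ends with io_recvmsg_prep_setup(req), which
 * allocates req->async_data and copies/initializes the msghdr exactly once.
 * The issue path, which may run many times for a multishot request, only
 * dereferences it.
 */
	struct io_async_msghdr *kmsg = req->async_data;

Because prep runs exactly once per request with the ring lock held, the new helper can call io_msg_alloc_async(req, 0) instead of threading issue_flags through, as the "always locked for prep" comment in the diff notes.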
Diffstat (limited to 'io_uring/net.c')
-rw-r--r--   io_uring/net.c   71
1 file changed, 28 insertions(+), 43 deletions(-)
diff --git a/io_uring/net.c b/io_uring/net.c
index b08c0ae5951a..7cd93cd8b8c4 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -595,17 +595,36 @@ static int io_recvmsg_copy_hdr(struct io_kiocb *req,
 					      msg.msg_controllen);
 }
 
-int io_recvmsg_prep_async(struct io_kiocb *req)
+static int io_recvmsg_prep_setup(struct io_kiocb *req)
 {
 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
-	struct io_async_msghdr *iomsg;
+	struct io_async_msghdr *kmsg;
 	int ret;
 
-	sr->done_io = 0;
-	if (!io_msg_alloc_async_prep(req))
+	/* always locked for prep */
+	kmsg = io_msg_alloc_async(req, 0);
+	if (unlikely(!kmsg))
 		return -ENOMEM;
-	iomsg = req->async_data;
-	ret = io_recvmsg_copy_hdr(req, iomsg);
+
+	if (req->opcode == IORING_OP_RECV) {
+		kmsg->msg.msg_name = NULL;
+		kmsg->msg.msg_namelen = 0;
+		kmsg->msg.msg_control = NULL;
+		kmsg->msg.msg_get_inq = 1;
+		kmsg->msg.msg_controllen = 0;
+		kmsg->msg.msg_iocb = NULL;
+		kmsg->msg.msg_ubuf = NULL;
+
+		if (!io_do_buffer_select(req)) {
+			ret = import_ubuf(ITER_DEST, sr->buf, sr->len,
+					  &kmsg->msg.msg_iter);
+			if (unlikely(ret))
+				return ret;
+		}
+		return 0;
+	}
+
+	ret = io_recvmsg_copy_hdr(req, kmsg);
 	if (!ret)
 		req->flags |= REQ_F_NEED_CLEANUP;
 	return ret;
@@ -656,7 +675,7 @@ int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 		sr->msg_flags |= MSG_CMSG_COMPAT;
 #endif
 	sr->nr_multishot_loops = 0;
-	return 0;
+	return io_recvmsg_prep_setup(req);
 }
 
 static inline void io_recv_prep_retry(struct io_kiocb *req,
@@ -814,7 +833,7 @@ static int io_recvmsg_multishot(struct socket *sock, struct io_sr_msg *io,
 int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
 {
 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
-	struct io_async_msghdr *kmsg;
+	struct io_async_msghdr *kmsg = req->async_data;
 	struct socket *sock;
 	unsigned flags;
 	int ret, min_ret = 0;
@@ -825,17 +844,6 @@ int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
 	if (unlikely(!sock))
 		return -ENOTSOCK;
 
-	if (req_has_async_data(req)) {
-		kmsg = req->async_data;
-	} else {
-		kmsg = io_msg_alloc_async(req, issue_flags);
-		if (unlikely(!kmsg))
-			return -ENOMEM;
-		ret = io_recvmsg_copy_hdr(req, kmsg);
-		if (ret)
-			return ret;
-	}
-
 	if (!(req->flags & REQ_F_POLLED) &&
 	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
 		return -EAGAIN;
@@ -914,36 +922,13 @@ retry_multishot:
 int io_recv(struct io_kiocb *req, unsigned int issue_flags)
 {
 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
-	struct io_async_msghdr *kmsg;
+	struct io_async_msghdr *kmsg = req->async_data;
 	struct socket *sock;
 	unsigned flags;
 	int ret, min_ret = 0;
 	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
 	size_t len = sr->len;
 
-	if (req_has_async_data(req)) {
-		kmsg = req->async_data;
-	} else {
-		kmsg = io_msg_alloc_async(req, issue_flags);
-		if (unlikely(!kmsg))
-			return -ENOMEM;
-		kmsg->free_iov = NULL;
-		kmsg->msg.msg_name = NULL;
-		kmsg->msg.msg_namelen = 0;
-		kmsg->msg.msg_control = NULL;
-		kmsg->msg.msg_get_inq = 1;
-		kmsg->msg.msg_controllen = 0;
-		kmsg->msg.msg_iocb = NULL;
-		kmsg->msg.msg_ubuf = NULL;
-
-		if (!io_do_buffer_select(req)) {
-			ret = import_ubuf(ITER_DEST, sr->buf, sr->len,
-					  &kmsg->msg.msg_iter);
-			if (unlikely(ret))
-				return ret;
-		}
-	}
-
 	if (!(req->flags & REQ_F_POLLED) &&
 	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
 		return -EAGAIN;