author    | Jens Axboe <axboe@kernel.dk> | 2019-03-13 12:15:01 -0600
committer | Jens Axboe <axboe@kernel.dk> | 2019-03-14 22:24:00 -0600
commit    | d530a402a114efcf6d2b88d7f628856dade5b90b (patch)
tree      | 7007b61671e49a097c0728a79b2c7cdb9f4d00a2 /fs/io_uring.c
parent    | e0c5c576d5074b5bb7b1b4b59848c25ceb521331 (diff)
io_uring: add prepped flag
We currently use the fact that if ->ki_filp is already set, then we've
done the prep. In preparation for moving the file assignment earlier,
use a separate flag to tell whether the request has been prepped for
IO or not.
Signed-off-by: Jens Axboe <axboe@kernel.dk>
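The change boils down to tracking "prep already done" with an explicit flag bit instead of inferring it from ->ki_filp being non-NULL. Below is a minimal standalone sketch of that pattern; it is not the kernel code, and the names (my_req, MY_F_PREPPED, my_prep) are hypothetical, chosen only for illustration.

/*
 * Minimal sketch of the pattern this commit switches to: an explicit
 * "prepped" flag bit instead of testing a pointer member for non-NULL.
 * Hypothetical names (my_req, MY_F_PREPPED, my_prep), not kernel code.
 */
#include <stdio.h>

#define MY_F_PREPPED	16	/* mirrors REQ_F_PREPPED: prep already done */

struct my_req {
	unsigned int flags;
	void *filp;		/* stand-in for ->ki_filp; may be assigned before prep */
};

static int my_prep(struct my_req *req)
{
	/* On a retry, everything is already prepped: return early. */
	if (req->flags & MY_F_PREPPED)
		return 0;

	/* ... one-time preparation work would go here ... */

	req->flags |= MY_F_PREPPED;	/* mark prep as done exactly once */
	return 0;
}

int main(void)
{
	struct my_req req = { 0 };

	my_prep(&req);	/* first call performs the prep */
	my_prep(&req);	/* retry path returns immediately */
	printf("flags = %u\n", req.flags);
	return 0;
}

The point of the separate bit is that the prep state no longer depends on when the file pointer happens to be assigned, which is what allows the file assignment to move earlier in a follow-up change.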
Diffstat (limited to 'fs/io_uring.c')
-rw-r--r-- | fs/io_uring.c | 11
1 file changed, 6 insertions, 5 deletions
diff --git a/fs/io_uring.c b/fs/io_uring.c
index caf39663466f..d259e8a6cb2e 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -214,6 +214,7 @@ struct io_kiocb {
 #define REQ_F_IOPOLL_COMPLETED	2	/* polled IO has completed */
 #define REQ_F_FIXED_FILE	4	/* ctx owns file */
 #define REQ_F_SEQ_PREV		8	/* sequential with previous */
+#define REQ_F_PREPPED		16	/* prep already done */
 	u64		user_data;
 	u64		error;
 
@@ -741,7 +742,7 @@ static int io_prep_rw(struct io_kiocb *req, const struct sqe_submit *s,
 	int fd, ret;
 
 	/* For -EAGAIN retry, everything is already prepped */
-	if (kiocb->ki_filp)
+	if (req->flags & REQ_F_PREPPED)
 		return 0;
 
 	flags = READ_ONCE(sqe->flags);
@@ -799,6 +800,7 @@ static int io_prep_rw(struct io_kiocb *req, const struct sqe_submit *s,
 		}
 		kiocb->ki_complete = io_complete_rw;
 	}
+	req->flags |= REQ_F_PREPPED;
 	return 0;
 out_fput:
 	if (!(flags & IOSQE_FIXED_FILE)) {
@@ -1099,8 +1101,8 @@ static int io_prep_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	unsigned flags;
 	int fd;
 
-	/* Prep already done */
-	if (req->rw.ki_filp)
+	/* Prep already done (EAGAIN retry) */
+	if (req->flags & REQ_F_PREPPED)
 		return 0;
 
 	if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
@@ -1122,6 +1124,7 @@ static int io_prep_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 			return -EBADF;
 	}
 
+	req->flags |= REQ_F_PREPPED;
 	return 0;
 }
 
@@ -1632,8 +1635,6 @@ static int io_submit_sqe(struct io_ring_ctx *ctx, struct sqe_submit *s,
 	if (unlikely(!req))
 		return -EAGAIN;
 
-	req->rw.ki_filp = NULL;
-
 	ret = __io_submit_sqe(ctx, req, s, true, state);
 	if (ret == -EAGAIN) {
 		struct io_uring_sqe *sqe_copy;