path: root/fs/io_uring.c
author	Mark Brown <broonie@kernel.org>	2020-10-06 16:19:24 +0100
committer	Mark Brown <broonie@kernel.org>	2020-10-06 16:19:24 +0100
commit	fd6b519a30a7179026d22c98d6bf10bb5ca8ca27 (patch)
tree	e4a6bc4d8548a5b8db7148a18eb257b84e72d48b /fs/io_uring.c
parent	43499134f50a77844f0503df2c995f43b858f4c3 (diff)
parent	856deb866d16e29bd65952e0289066f6078af773 (diff)
Merge tag 'v5.9-rc5' into asoc-5.10
Linux 5.9-rc5
Diffstat (limited to 'fs/io_uring.c')
-rw-r--r--	fs/io_uring.c	169
1 file changed, 122 insertions(+), 47 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 91e2cc8414f9..3790c7fe9fee 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1150,7 +1150,7 @@ static void io_prep_async_work(struct io_kiocb *req)
io_req_init_async(req);
if (req->flags & REQ_F_ISREG) {
- if (def->hash_reg_file)
+ if (def->hash_reg_file || (req->ctx->flags & IORING_SETUP_IOPOLL))
io_wq_hash_work(&req->work, file_inode(req->file));
} else {
if (def->unbound_nonreg_file)
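The hunk above widens the hashing condition: on rings created with IORING_SETUP_IOPOLL, all regular-file work is now serialized per inode, not only opcodes whose definition sets hash_reg_file. A minimal standalone sketch of the resulting predicate (the bool parameters are stand-ins for def->hash_reg_file and the ctx flag test; names are illustrative):

#include <stdbool.h>

/* Models the post-patch decision in io_prep_async_work() for a
 * regular file (REQ_F_ISREG already checked by the caller). */
static bool should_hash_reg_file_work(bool hash_reg_file, bool iopoll)
{
	/* Serialize per inode if the opcode asks for it, or whenever
	 * the ring uses IORING_SETUP_IOPOLL. */
	return hash_reg_file || iopoll;
}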
@@ -1746,7 +1746,8 @@ static struct io_kiocb *io_req_find_next(struct io_kiocb *req)
return __io_req_find_next(req);
}
-static int io_req_task_work_add(struct io_kiocb *req, struct callback_head *cb)
+static int io_req_task_work_add(struct io_kiocb *req, struct callback_head *cb,
+ bool twa_signal_ok)
{
struct task_struct *tsk = req->task;
struct io_ring_ctx *ctx = req->ctx;
@@ -1759,7 +1760,7 @@ static int io_req_task_work_add(struct io_kiocb *req, struct callback_head *cb)
* will do the job.
*/
notify = 0;
- if (!(ctx->flags & IORING_SETUP_SQPOLL))
+ if (!(ctx->flags & IORING_SETUP_SQPOLL) && twa_signal_ok)
notify = TWA_SIGNAL;
ret = task_work_add(tsk, cb, notify);
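io_req_task_work_add() now takes a twa_signal_ok flag so callers can veto TWA_SIGNAL when raising a signal would be unsafe (see the signalfd hunk in __io_async_wake() below). A toy model of the notify selection (illustrative enum, not the kernel task_work API):

#include <stdbool.h>

enum toy_notify { TOY_NOTIFY_NONE, TOY_NOTIFY_SIGNAL };

/* SQPOLL rings have a dedicated thread that reaps task_work without
 * signals; otherwise signal only when the caller says it is safe. */
static enum toy_notify toy_pick_notify(bool sqpoll, bool twa_signal_ok)
{
	if (!sqpoll && twa_signal_ok)
		return TOY_NOTIFY_SIGNAL;
	return TOY_NOTIFY_NONE;
}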
@@ -1819,7 +1820,7 @@ static void io_req_task_queue(struct io_kiocb *req)
init_task_work(&req->task_work, io_req_task_submit);
percpu_ref_get(&req->ctx->refs);
- ret = io_req_task_work_add(req, &req->task_work);
+ ret = io_req_task_work_add(req, &req->task_work, true);
if (unlikely(ret)) {
struct task_struct *tsk;
@@ -2048,6 +2049,7 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
req = list_first_entry(done, struct io_kiocb, inflight_entry);
if (READ_ONCE(req->result) == -EAGAIN) {
+ req->result = 0;
req->iopoll_completed = 0;
list_move_tail(&req->inflight_entry, &again);
continue;
@@ -2293,38 +2295,27 @@ end_req:
io_req_complete(req, ret);
return false;
}
-
-static void io_rw_resubmit(struct callback_head *cb)
-{
- struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
- struct io_ring_ctx *ctx = req->ctx;
- int err;
-
- err = io_sq_thread_acquire_mm(ctx, req);
-
- if (io_resubmit_prep(req, err)) {
- refcount_inc(&req->refs);
- io_queue_async_work(req);
- }
-
- percpu_ref_put(&ctx->refs);
-}
#endif
static bool io_rw_reissue(struct io_kiocb *req, long res)
{
#ifdef CONFIG_BLOCK
+ umode_t mode = file_inode(req->file)->i_mode;
int ret;
+ if (!S_ISBLK(mode) && !S_ISREG(mode))
+ return false;
if ((res != -EAGAIN && res != -EOPNOTSUPP) || io_wq_current_is_worker())
return false;
- init_task_work(&req->task_work, io_rw_resubmit);
- percpu_ref_get(&req->ctx->refs);
+ ret = io_sq_thread_acquire_mm(req->ctx, req);
- ret = io_req_task_work_add(req, &req->task_work);
- if (!ret)
+ if (io_resubmit_prep(req, ret)) {
+ refcount_inc(&req->refs);
+ io_queue_async_work(req);
return true;
+ }
+
#endif
return false;
}
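This hunk drops the io_rw_resubmit() task_work bounce and reissues inline, and restricts reissue to block devices and regular files. A compact userspace sketch of the gate, mirroring the checks above:

#include <stdbool.h>
#include <errno.h>
#include <sys/stat.h>

/* Only S_ISBLK/S_ISREG files, only on -EAGAIN or -EOPNOTSUPP, and
 * never from an io-wq worker, which already runs in blocking context. */
static bool may_reissue(mode_t mode, long res, bool is_wq_worker)
{
	if (!S_ISBLK(mode) && !S_ISREG(mode))
		return false;
	if ((res != -EAGAIN && res != -EOPNOTSUPP) || is_wq_worker)
		return false;
	return true;
}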
@@ -2563,7 +2554,7 @@ static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
* IO with EINTR.
*/
ret = -EINTR;
- /* fall through */
+ fallthrough;
default:
kiocb->ki_complete(kiocb, ret, 0);
}
@@ -2865,6 +2856,11 @@ static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
return iov_iter_count(&req->io->rw.iter);
}
+static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb)
+{
+ return kiocb->ki_filp->f_mode & FMODE_STREAM ? NULL : &kiocb->ki_pos;
+}
+
/*
* For files that don't have ->read_iter() and ->write_iter(), handle them
* by looping over ->read() or ->write() manually.
@@ -2900,10 +2896,10 @@ static ssize_t loop_rw_iter(int rw, struct file *file, struct kiocb *kiocb,
if (rw == READ) {
nr = file->f_op->read(file, iovec.iov_base,
- iovec.iov_len, &kiocb->ki_pos);
+ iovec.iov_len, io_kiocb_ppos(kiocb));
} else {
nr = file->f_op->write(file, iovec.iov_base,
- iovec.iov_len, &kiocb->ki_pos);
+ iovec.iov_len, io_kiocb_ppos(kiocb));
}
if (iov_iter_is_bvec(iter))
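io_kiocb_ppos(), added above, centralizes the stream-file rule used here: files opened with FMODE_STREAM have no meaningful position, so ->read()/->write() must receive a NULL position pointer instead of &ki_pos. The contract in miniature (toy struct, hypothetical names):

#include <stddef.h>
#include <sys/types.h>

struct toy_kiocb {
	int	is_stream;	/* models f_mode & FMODE_STREAM */
	off_t	pos;		/* models ki_pos */
};

/* Stream files get NULL so the implicit file offset handling applies. */
static off_t *toy_kiocb_ppos(struct toy_kiocb *k)
{
	return k->is_stream ? NULL : &k->pos;
}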
@@ -2984,14 +2980,15 @@ static inline int io_rw_prep_async(struct io_kiocb *req, int rw,
bool force_nonblock)
{
struct io_async_rw *iorw = &req->io->rw;
+ struct iovec *iov;
ssize_t ret;
- iorw->iter.iov = iorw->fast_iov;
- ret = __io_import_iovec(rw, req, (struct iovec **) &iorw->iter.iov,
- &iorw->iter, !force_nonblock);
+ iorw->iter.iov = iov = iorw->fast_iov;
+ ret = __io_import_iovec(rw, req, &iov, &iorw->iter, !force_nonblock);
if (unlikely(ret < 0))
return ret;
+ iorw->iter.iov = iov;
io_req_map_rw(req, iorw->iter.iov, iorw->fast_iov, &iorw->iter);
return 0;
}
@@ -3044,7 +3041,7 @@ static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
/* submit ref gets dropped, acquire a new one */
refcount_inc(&req->refs);
- ret = io_req_task_work_add(req, &req->task_work);
+ ret = io_req_task_work_add(req, &req->task_work, true);
if (unlikely(ret)) {
struct task_struct *tsk;
@@ -3125,6 +3122,7 @@ static int io_read(struct io_kiocb *req, bool force_nonblock,
ret = io_import_iovec(READ, req, &iovec, iter, !force_nonblock);
if (ret < 0)
return ret;
+ iov_count = iov_iter_count(iter);
io_size = ret;
req->result = io_size;
ret = 0;
@@ -3137,8 +3135,7 @@ static int io_read(struct io_kiocb *req, bool force_nonblock,
if (force_nonblock && !io_file_supports_async(req->file, READ))
goto copy_iov;
- iov_count = iov_iter_count(iter);
- ret = rw_verify_area(READ, req->file, &kiocb->ki_pos, iov_count);
+ ret = rw_verify_area(READ, req->file, io_kiocb_ppos(kiocb), iov_count);
if (unlikely(ret))
goto out_free;
@@ -3150,14 +3147,21 @@ static int io_read(struct io_kiocb *req, bool force_nonblock,
ret = 0;
goto out_free;
} else if (ret == -EAGAIN) {
- if (!force_nonblock)
+ /* IOPOLL retry should happen for io-wq threads */
+ if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
+ goto done;
+ /* no retry on NONBLOCK marked file */
+ if (req->file->f_flags & O_NONBLOCK)
goto done;
+ /* some cases will consume bytes even on error returns */
+ iov_iter_revert(iter, iov_count - iov_iter_count(iter));
ret = io_setup_async_rw(req, iovec, inline_vecs, iter, false);
if (ret)
goto out_free;
return -EAGAIN;
} else if (ret < 0) {
- goto out_free;
+ /* make sure -ERESTARTSYS -> -EINTR is done */
+ goto done;
}
/* read it all, or we did blocking attempt. no retry. */
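Three behavioural fixes land in this read path: after a blocking attempt on a non-IOPOLL ring, -EAGAIN completes the request rather than retrying; O_NONBLOCK files get -EAGAIN surfaced directly; and the iterator is rewound before async setup because some ->read_iter() implementations consume bytes even on failure. A sketch of the resulting punt decision (illustrative helper, not the kernel function):

#include <stdbool.h>

/* true: revert the iter and queue an async retry; false: complete now
 * with whatever the inline attempt returned. */
static bool should_punt_async(bool force_nonblock, bool iopoll,
			      bool file_nonblock)
{
	if (!force_nonblock && !iopoll)
		return false;	/* a blocking attempt was already made */
	if (file_nonblock)
		return false;	/* honor O_NONBLOCK: return -EAGAIN */
	return true;
}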
@@ -3241,6 +3245,7 @@ static int io_write(struct io_kiocb *req, bool force_nonblock,
ret = io_import_iovec(WRITE, req, &iovec, iter, !force_nonblock);
if (ret < 0)
return ret;
+ iov_count = iov_iter_count(iter);
io_size = ret;
req->result = io_size;
@@ -3257,8 +3262,7 @@ static int io_write(struct io_kiocb *req, bool force_nonblock,
(req->flags & REQ_F_ISREG))
goto copy_iov;
- iov_count = iov_iter_count(iter);
- ret = rw_verify_area(WRITE, req->file, &kiocb->ki_pos, iov_count);
+ ret = rw_verify_area(WRITE, req->file, io_kiocb_ppos(kiocb), iov_count);
if (unlikely(ret))
goto out_free;
@@ -3290,10 +3294,19 @@ static int io_write(struct io_kiocb *req, bool force_nonblock,
*/
if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
ret2 = -EAGAIN;
+ /* no retry on NONBLOCK marked file */
+ if (ret2 == -EAGAIN && (req->file->f_flags & O_NONBLOCK))
+ goto done;
if (!force_nonblock || ret2 != -EAGAIN) {
+ /* IOPOLL retry should happen for io-wq threads */
+ if ((req->ctx->flags & IORING_SETUP_IOPOLL) && ret2 == -EAGAIN)
+ goto copy_iov;
+done:
kiocb_done(kiocb, ret2, cs);
} else {
copy_iov:
+ /* some cases will consume bytes even on error returns */
+ iov_iter_revert(iter, iov_count - iov_iter_count(iter));
ret = io_setup_async_rw(req, iovec, inline_vecs, iter, false);
if (!ret)
return -EAGAIN;
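The write path gains the same O_NONBLOCK and IOPOLL guards, plus the shared iov_iter_revert() fix: a failed ->write_iter() may already have advanced the iterator, so it is rewound by initial-count-minus-remaining bytes before the async copy is set up. In miniature (toy iterator, hypothetical names):

#include <stddef.h>

struct toy_iter { size_t remaining; };

/* Models iov_iter_revert(): give back bytes a failed attempt consumed. */
static void toy_iter_revert(struct toy_iter *it, size_t bytes)
{
	it->remaining += bytes;
}

/* Usage, matching the hunk above:
 *	toy_iter_revert(&it, initial_count - it.remaining);
 * so the queued retry sees the full original range. */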
@@ -4566,6 +4579,7 @@ struct io_poll_table {
static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
__poll_t mask, task_work_func_t func)
{
+ bool twa_signal_ok;
int ret;
/* for instances that support it check for an event match first: */
@@ -4581,12 +4595,20 @@ static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
percpu_ref_get(&req->ctx->refs);
/*
+ * If we are using the signalfd wait_queue_head for this wakeup, then
+ * it's not safe to use TWA_SIGNAL as we could be recursing on the
+ * tsk->sighand->siglock on doing the wakeup. Should not be needed
+ * either, as the normal wakeup will suffice.
+ */
+ twa_signal_ok = (poll->head != &req->task->sighand->signalfd_wqh);
+
+ /*
* If this fails, then the task is exiting. When a task exits, the
* work gets canceled, so just cancel this request as well instead
* of executing it. We can't safely execute it anyway, as we may not
* have the state needed for it anyway.
*/
- ret = io_req_task_work_add(req, &req->task_work);
+ ret = io_req_task_work_add(req, &req->task_work, twa_signal_ok);
if (unlikely(ret)) {
struct task_struct *tsk;
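The comment in this hunk is the heart of the fix: a poll wakeup delivered from the task's own signalfd wait queue can run with sighand->siglock held, and TWA_SIGNAL takes that same lock, so signalled task_work would deadlock. The safety check reduces to a pointer comparison; a one-function sketch (void pointers stand in for the kernel wait_queue_head):

#include <stdbool.h>

/* TWA_SIGNAL is safe only when the waking queue head is not the
 * task's signalfd wait queue. */
static bool twa_signal_safe(const void *wake_head, const void *signalfd_wqh)
{
	return wake_head != signalfd_wqh;
}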
@@ -4875,12 +4897,20 @@ static bool io_arm_poll_handler(struct io_kiocb *req)
struct async_poll *apoll;
struct io_poll_table ipt;
__poll_t mask, ret;
+ int rw;
if (!req->file || !file_can_poll(req->file))
return false;
if (req->flags & REQ_F_POLLED)
return false;
- if (!def->pollin && !def->pollout)
+ if (def->pollin)
+ rw = READ;
+ else if (def->pollout)
+ rw = WRITE;
+ else
+ return false;
+ /* if we can't nonblock try, then no point in arming a poll handler */
+ if (!io_file_supports_async(req->file, rw))
return false;
apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
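Arming async poll is pointless if a later nonblocking attempt cannot succeed, so the hunk derives the I/O direction from the opcode's pollin/pollout flags and bails out unless io_file_supports_async() agrees. The direction mapping as a sketch (toy enum instead of the kernel READ/WRITE constants):

#include <stdbool.h>

enum toy_rw { TOY_READ, TOY_WRITE };

/* Non-pollable opcodes get no poll handler at all. */
static bool toy_poll_direction(bool pollin, bool pollout, enum toy_rw *rw)
{
	if (pollin)
		*rw = TOY_READ;
	else if (pollout)
		*rw = TOY_WRITE;
	else
		return false;
	return true;
}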
@@ -7305,7 +7335,7 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx,
table = &ctx->file_data->table[i >> IORING_FILE_TABLE_SHIFT];
index = i & IORING_FILE_TABLE_MASK;
if (table->files[index]) {
- file = io_file_from_index(ctx, index);
+ file = table->files[index];
err = io_queue_file_removal(data, file);
if (err)
break;
@@ -7334,6 +7364,7 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx,
table->files[index] = file;
err = io_sqe_file_register(ctx, file, i);
if (err) {
+ table->files[index] = NULL;
fput(file);
break;
}
@@ -7433,9 +7464,6 @@ static int io_sq_offload_start(struct io_ring_ctx *ctx,
{
int ret;
- mmgrab(current->mm);
- ctx->sqo_mm = current->mm;
-
if (ctx->flags & IORING_SETUP_SQPOLL) {
ret = -EPERM;
if (!capable(CAP_SYS_ADMIN))
@@ -7480,10 +7508,6 @@ static int io_sq_offload_start(struct io_ring_ctx *ctx,
return 0;
err:
io_finish_async(ctx);
- if (ctx->sqo_mm) {
- mmdrop(ctx->sqo_mm);
- ctx->sqo_mm = NULL;
- }
return ret;
}
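Together with the final hunk below, this moves the sqo_mm reference from SQPOLL setup to ring creation, so the mm lives exactly as long as the ctx and the setup error path needs no drop of its own. The lifetime pattern in miniature (toy refcount, illustrative names):

#include <stddef.h>

struct toy_mm { int refs; };
struct toy_ctx { struct toy_mm *sqo_mm; };

/* Grab at creation: one reference, owned by the ctx from birth. */
static void toy_ctx_create(struct toy_ctx *ctx, struct toy_mm *mm)
{
	mm->refs++;		/* models mmgrab(current->mm) */
	ctx->sqo_mm = mm;
}

/* Drop only on ctx teardown, never in setup error paths. */
static void toy_ctx_free(struct toy_ctx *ctx)
{
	if (ctx->sqo_mm) {
		ctx->sqo_mm->refs--;	/* models mmdrop() */
		ctx->sqo_mm = NULL;
	}
}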
@@ -8000,6 +8024,28 @@ static bool io_match_link(struct io_kiocb *preq, struct io_kiocb *req)
return false;
}
+static inline bool io_match_files(struct io_kiocb *req,
+ struct files_struct *files)
+{
+ return (req->flags & REQ_F_WORK_INITIALIZED) && req->work.files == files;
+}
+
+static bool io_match_link_files(struct io_kiocb *req,
+ struct files_struct *files)
+{
+ struct io_kiocb *link;
+
+ if (io_match_files(req, files))
+ return true;
+ if (req->flags & REQ_F_LINK_HEAD) {
+ list_for_each_entry(link, &req->link_list, link_list) {
+ if (io_match_files(link, files))
+ return true;
+ }
+ }
+ return false;
+}
+
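io_match_link_files() extends the files match across a request's link chain: the head matches if it, or any request linked behind it, was work-initialized with the same files_struct. The same walk over a plain singly linked list (toy types, not the kernel list_head):

#include <stdbool.h>
#include <stddef.h>

struct toy_req {
	const void	*files;		/* models req->work.files */
	bool		work_init;	/* models REQ_F_WORK_INITIALIZED */
	struct toy_req	*next_link;	/* models the link_list chain */
};

static bool toy_match_files(const struct toy_req *req, const void *files)
{
	return req->work_init && req->files == files;
}

/* Match the head itself or any linked request behind it. */
static bool toy_match_link_files(const struct toy_req *req, const void *files)
{
	for (; req; req = req->next_link)
		if (toy_match_files(req, files))
			return true;
	return false;
}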
/*
* We're looking to cancel 'req' because it's holding on to our files, but
* 'req' could be a link to another request. See if it is, and cancel that
@@ -8074,12 +8120,38 @@ static void io_attempt_cancel(struct io_ring_ctx *ctx, struct io_kiocb *req)
io_timeout_remove_link(ctx, req);
}
+static void io_cancel_defer_files(struct io_ring_ctx *ctx,
+ struct files_struct *files)
+{
+ struct io_defer_entry *de = NULL;
+ LIST_HEAD(list);
+
+ spin_lock_irq(&ctx->completion_lock);
+ list_for_each_entry_reverse(de, &ctx->defer_list, list) {
+ if (io_match_link_files(de->req, files)) {
+ list_cut_position(&list, &ctx->defer_list, &de->list);
+ break;
+ }
+ }
+ spin_unlock_irq(&ctx->completion_lock);
+
+ while (!list_empty(&list)) {
+ de = list_first_entry(&list, struct io_defer_entry, list);
+ list_del_init(&de->list);
+ req_set_fail_links(de->req);
+ io_put_req(de->req);
+ io_req_complete(de->req, -ECANCELED);
+ kfree(de);
+ }
+}
+
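io_cancel_defer_files() uses a common two-phase pattern: detach the matching entries onto a private list while holding completion_lock, then fail, complete, and free them with the lock dropped. The pattern in miniature, assuming a toy singly linked list and a caller-supplied cancel callback:

#include <stdlib.h>

struct toy_node { struct toy_node *next; };

/* Phase 1 (under the lock): unlink in O(1). Phase 2 (lock dropped):
 * cancel and free each entry, mirroring req_set_fail_links() +
 * io_req_complete(req, -ECANCELED) + kfree(de) above. */
static void toy_cancel_all(struct toy_node **head,
			   void (*cancel)(struct toy_node *))
{
	struct toy_node *batch;

	/* lock(); */
	batch = *head;
	*head = NULL;
	/* unlock(); */

	while (batch) {
		struct toy_node *n = batch;

		batch = n->next;
		cancel(n);
		free(n);
	}
}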
static void io_uring_cancel_files(struct io_ring_ctx *ctx,
struct files_struct *files)
{
if (list_empty_careful(&ctx->inflight_list))
return;
+ io_cancel_defer_files(ctx, files);
/* cancel all at once, should be faster than doing it one by one */
io_wq_cancel_cb(ctx->io_wq, io_wq_files_match, files, true);
@@ -8533,6 +8605,9 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p,
ctx->user = user;
ctx->creds = get_current_cred();
+ mmgrab(current->mm);
+ ctx->sqo_mm = current->mm;
+
/*
* Account memory _before_ installing the file descriptor. Once
* the descriptor is installed, it can get closed at any time. Also