path: root/fs/aio.c
author		Kent Overstreet <koverstreet@google.com>	2013-05-13 14:45:08 -0700
committer	Benjamin LaHaise <bcrl@kvack.org>		2013-07-30 11:53:11 -0400
commit		bec68faaf3ba74ed0dcd5dc3a881b30aec542973 (patch)
tree		fc8704d99fea0108346de7c1ca2500ac7199f1ae /fs/aio.c
parent		723be6e39d14254bb5bb9f422b434566d359fa6e (diff)
aio: io_cancel() no longer returns the io_event
Originally, io_cancel() was documented to return the io_event if cancellation succeeded - the io_event wouldn't be delivered via the ring buffer like it normally would. But this isn't what the implementation was actually doing; the only driver implementing cancellation, the usb gadget code, never returned an io_event in its cancel function. And aio_complete() was recently changed to no longer suppress event delivery if the kiocb had been cancelled.

This gets rid of the unused io_event argument to kiocb_cancel() and kiocb->ki_cancel(), and changes io_cancel() to return -EINPROGRESS if kiocb->ki_cancel() returned success.

Also tweak the refcounting in kiocb_cancel() to make more sense.

Signed-off-by: Kent Overstreet <koverstreet@google.com>
Cc: Zach Brown <zab@redhat.com>
Cc: Felipe Balbi <balbi@ti.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Mark Fasheh <mfasheh@suse.com>
Cc: Joel Becker <jlbec@evilplan.org>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Asai Thambi S P <asamymuthupa@micron.com>
Cc: Selvan Mani <smani@micron.com>
Cc: Sam Bradshaw <sbradshaw@micron.com>
Cc: Jeff Moyer <jmoyer@redhat.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Benjamin LaHaise <bcrl@kvack.org>
Signed-off-by: Benjamin LaHaise <bcrl@kvack.org>
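For driver authors the visible change is the kiocb_cancel_fn prototype: the struct io_event argument is gone, and the hook only reports whether the request could be torn down. Below is a minimal sketch of a cancel hook under the new prototype; my_cancel, my_submit and my_driver_abort_request are illustrative names, not taken from this patch, and the -EAGAIN failure value is just one plausible choice.

	#include <linux/aio.h>		/* struct kiocb, kiocb_set_cancel_fn() */
	#include <linux/errno.h>

	/* Hypothetical driver helper, for illustration only. */
	extern int my_driver_abort_request(struct kiocb *iocb);

	/*
	 * New-style cancel hook: no io_event argument.  It only tears the
	 * request down and returns 0 on success (any errno on failure); the
	 * io_event for the cancelled request is still delivered through
	 * aio_complete() and the ring buffer, so there is nothing to fill
	 * in here.
	 */
	static int my_cancel(struct kiocb *iocb)
	{
		return my_driver_abort_request(iocb) ? -EAGAIN : 0;
	}

	/* Registration at submission time is unchanged by this patch. */
	static void my_submit(struct kiocb *iocb)
	{
		kiocb_set_cancel_fn(iocb, my_cancel);
		/* ... start the I/O ... */
	}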
Diffstat (limited to 'fs/aio.c')
-rw-r--r--	fs/aio.c	40
1 file changed, 10 insertions, 30 deletions
diff --git a/fs/aio.c b/fs/aio.c
index 7b470bfbf891..12b37689dd2c 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -358,8 +358,7 @@ void kiocb_set_cancel_fn(struct kiocb *req, kiocb_cancel_fn *cancel)
}
EXPORT_SYMBOL(kiocb_set_cancel_fn);
-static int kiocb_cancel(struct kioctx *ctx, struct kiocb *kiocb,
- struct io_event *res)
+static int kiocb_cancel(struct kioctx *ctx, struct kiocb *kiocb)
{
kiocb_cancel_fn *old, *cancel;
int ret = -EINVAL;
@@ -381,12 +380,10 @@ static int kiocb_cancel(struct kioctx *ctx, struct kiocb *kiocb,
atomic_inc(&kiocb->ki_users);
spin_unlock_irq(&ctx->ctx_lock);
- memset(res, 0, sizeof(*res));
- res->obj = (u64)(unsigned long)kiocb->ki_obj.user;
- res->data = kiocb->ki_user_data;
- ret = cancel(kiocb, res);
+ ret = cancel(kiocb);
spin_lock_irq(&ctx->ctx_lock);
+ aio_put_req(kiocb);
return ret;
}
@@ -408,7 +405,6 @@ static void free_ioctx(struct work_struct *work)
{
struct kioctx *ctx = container_of(work, struct kioctx, free_work);
struct aio_ring *ring;
- struct io_event res;
struct kiocb *req;
unsigned cpu, head, avail;
@@ -419,7 +415,7 @@ static void free_ioctx(struct work_struct *work)
struct kiocb, ki_list);
list_del_init(&req->ki_list);
- kiocb_cancel(ctx, req, &res);
+ kiocb_cancel(ctx, req);
}
spin_unlock_irq(&ctx->ctx_lock);
@@ -796,21 +792,6 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
}
/*
- * cancelled requests don't get events, userland was given one
- * when the event got cancelled.
- */
- if (unlikely(xchg(&iocb->ki_cancel,
- KIOCB_CANCELLED) == KIOCB_CANCELLED)) {
- /*
- * Can't use the percpu reqs_available here - could race with
- * free_ioctx()
- */
- atomic_inc(&ctx->reqs_available);
- /* Still need the wake_up in case free_ioctx is waiting */
- goto put_rq;
- }
-
- /*
* Add a completion event to the ring buffer. Must be done holding
* ctx->completion_lock to prevent other code from messing with the tail
* pointer since we might be called from irq context.
@@ -862,7 +843,6 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
if (iocb->ki_eventfd != NULL)
eventfd_signal(iocb->ki_eventfd, 1);
-put_rq:
/* everything turned out well, dispose of the aiocb. */
aio_put_req(iocb);
@@ -1439,7 +1419,6 @@ static struct kiocb *lookup_kiocb(struct kioctx *ctx, struct iocb __user *iocb,
SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
struct io_event __user *, result)
{
- struct io_event res;
struct kioctx *ctx;
struct kiocb *kiocb;
u32 key;
@@ -1457,18 +1436,19 @@ SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
kiocb = lookup_kiocb(ctx, iocb, key);
if (kiocb)
- ret = kiocb_cancel(ctx, kiocb, &res);
+ ret = kiocb_cancel(ctx, kiocb);
else
ret = -EINVAL;
spin_unlock_irq(&ctx->ctx_lock);
if (!ret) {
- /* Cancellation succeeded -- copy the result
- * into the user's buffer.
+ /*
+ * The result argument is no longer used - the io_event is
+ * always delivered via the ring buffer. -EINPROGRESS indicates
+ * cancellation is in progress:
*/
- if (copy_to_user(result, &res, sizeof(res)))
- ret = -EFAULT;
+ ret = -EINPROGRESS;
}
percpu_ref_put(&ctx->users);
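For completeness, a minimal caller-side sketch of the new contract (not part of the patch; it assumes raw syscalls via <linux/aio_abi.h> and an iocb that was already submitted with io_submit()): a successful cancellation now shows up as -EINPROGRESS, and the io_event arrives through io_getevents() rather than through io_cancel()'s result pointer.

	#include <errno.h>
	#include <stdio.h>
	#include <string.h>
	#include <time.h>
	#include <sys/syscall.h>
	#include <unistd.h>
	#include <linux/aio_abi.h>

	/* Try to cancel a previously submitted iocb, then reap its event. */
	static void cancel_and_reap(aio_context_t ctx, struct iocb *iocb)
	{
		struct io_event dummy;	/* no longer filled in by the kernel */
		struct io_event ev;
		struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
		long ret = syscall(SYS_io_cancel, ctx, iocb, &dummy);

		if (ret < 0 && errno == EINPROGRESS) {
			/*
			 * Cancellation was accepted; the completion for the
			 * cancelled request is delivered via the ring buffer.
			 */
			if (syscall(SYS_io_getevents, ctx, 1L, 1L, &ev, &ts) == 1)
				printf("event: res=%lld res2=%lld\n",
				       (long long)ev.res, (long long)ev.res2);
		} else {
			fprintf(stderr, "io_cancel: %s\n",
				ret < 0 ? strerror(errno) : "unexpected return");
		}
	}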