author	Andrea Arcangeli <aarcange@redhat.com>	2015-09-04 15:47:23 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-09-04 16:54:41 -0700
commit	2c5b7e1be74ff0175dedbbd325abe9f0dbbb09ae (patch)
tree	e93f6aa423faeb6fbcd46103694d629b0a7bb82c /fs
parent	230c92a8797e0e717c6732de0fffdd5726c0f48f (diff)
download	linux-stable-2c5b7e1be74ff0175dedbbd325abe9f0dbbb09ae.tar.gz
	linux-stable-2c5b7e1be74ff0175dedbbd325abe9f0dbbb09ae.tar.bz2
	linux-stable-2c5b7e1be74ff0175dedbbd325abe9f0dbbb09ae.zip
userfaultfd: avoid missing wakeups during refile in userfaultfd_read
During the refile in userfaultfd_read both waitqueues could look empty to the lockless wake_userfault(). Use a seqcount to prevent this false negative that could leave a userfault blocked.

Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Cc: Pavel Emelyanov <xemul@parallels.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
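The race the patch closes: userfaultfd_ctx_read() refiles a waiter by removing it from fault_pending_wqh before adding it to fault_wqh, so a lockless waitqueue_active() check in wake_userfault() that lands between the two steps can see both queues empty and skip the wakeup. Below is a minimal userspace sketch of the seqcount read/retry pattern the patch relies on, not the kernel code: toy_seqcount, refile(), need_wakeup(), on_pending and on_done are hypothetical names standing in for ctx->refile_seq, the refile in userfaultfd_ctx_read(), the check in wake_userfault(), fault_pending_wqh and fault_wqh.

/*
 * Minimal userspace sketch of the seqcount pattern, using C11 atomics.
 * All names here are made up; they only mirror the roles played by
 * ctx->refile_seq, fault_pending_wqh and fault_wqh in the patch.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_seqcount {
	atomic_uint seq;	/* even: queues stable, odd: refile in progress */
};

static void toy_write_begin(struct toy_seqcount *s)
{
	atomic_fetch_add_explicit(&s->seq, 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_release);	/* odd count visible before the refile stores */
}

static void toy_write_end(struct toy_seqcount *s)
{
	atomic_thread_fence(memory_order_release);	/* refile stores visible before the even count */
	atomic_fetch_add_explicit(&s->seq, 1, memory_order_relaxed);
}

static unsigned toy_read_begin(struct toy_seqcount *s)
{
	/* mask the low bit so an in-progress refile always forces a retry */
	return atomic_load_explicit(&s->seq, memory_order_acquire) & ~1u;
}

static bool toy_read_retry(struct toy_seqcount *s, unsigned seq)
{
	atomic_thread_fence(memory_order_acquire);	/* order the queue checks before the re-read */
	return atomic_load_explicit(&s->seq, memory_order_relaxed) != seq;
}

/* one waiter, initially sitting on the "pending" queue */
static struct toy_seqcount refile_seq = { 0 };
static atomic_int on_pending = 1;
static atomic_int on_done;

/* writer side: what the refile in the read path does */
static void refile(void)
{
	toy_write_begin(&refile_seq);
	atomic_store(&on_pending, 0);	/* list_del(): waiter leaves the pending queue */
	atomic_store(&on_done, 1);	/* __add_wait_queue(): waiter enters the other queue */
	toy_write_end(&refile_seq);
}

/* reader side: the lockless emptiness check, now repeated on retry */
static bool need_wakeup(void)
{
	unsigned seq;
	bool nonempty;

	do {
		seq = toy_read_begin(&refile_seq);
		nonempty = atomic_load(&on_pending) || atomic_load(&on_done);
	} while (toy_read_retry(&refile_seq, seq));	/* a refile raced with us? look again */
	return nonempty;
}

int main(void)
{
	refile();
	printf("need_wakeup: %d\n", need_wakeup());	/* prints 1: the waiter is never missed */
	return 0;
}

The kernel version additionally calls cond_resched() inside the retry loop and performs the refile under fault_pending_wqh.lock; the sketch leaves both out since it only illustrates why the reader must re-check after a concurrent refile.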
Diffstat (limited to 'fs')
-rw-r--r--	fs/userfaultfd.c	26
1 file changed, 24 insertions(+), 2 deletions(-)
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index a14d63e945f4..634e676072cb 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -45,6 +45,8 @@ struct userfaultfd_ctx {
 	wait_queue_head_t fault_wqh;
 	/* waitqueue head for the pseudo fd to wakeup poll/read */
 	wait_queue_head_t fd_wqh;
+	/* a refile sequence protected by fault_pending_wqh lock */
+	struct seqcount refile_seq;
 	/* pseudo fd refcounting */
 	atomic_t refcount;
 	/* userfaultfd syscall flags */
@@ -547,6 +549,15 @@ static ssize_t userfaultfd_ctx_read(struct userfaultfd_ctx *ctx, int no_wait,
 		uwq = find_userfault(ctx);
 		if (uwq) {
 			/*
+			 * Use a seqcount to repeat the lockless check
+			 * in wake_userfault() to avoid missing
+			 * wakeups because during the refile both
+			 * waitqueue could become empty if this is the
+			 * only userfault.
+			 */
+			write_seqcount_begin(&ctx->refile_seq);
+
+			/*
 			 * The fault_pending_wqh.lock prevents the uwq
 			 * to disappear from under us.
 			 *
@@ -570,6 +581,8 @@ static ssize_t userfaultfd_ctx_read(struct userfaultfd_ctx *ctx, int no_wait,
 			 */
 			list_del(&uwq->wq.task_list);
 			__add_wait_queue(&ctx->fault_wqh, &uwq->wq);
+			write_seqcount_end(&ctx->refile_seq);
+
 			/* careful to always initialize msg if ret == 0 */
 			*msg = uwq->msg;
 			spin_unlock(&ctx->fault_pending_wqh.lock);
@@ -647,6 +660,9 @@ static void __wake_userfault(struct userfaultfd_ctx *ctx,
 static __always_inline void wake_userfault(struct userfaultfd_ctx *ctx,
 					   struct userfaultfd_wake_range *range)
 {
+	unsigned seq;
+	bool need_wakeup;
+
 	/*
 	 * To be sure waitqueue_active() is not reordered by the CPU
 	 * before the pagetable update, use an explicit SMP memory
@@ -662,8 +678,13 @@ static __always_inline void wake_userfault(struct userfaultfd_ctx *ctx,
 	 * userfaults yet. So we take the spinlock only when we're
 	 * sure we've userfaults to wake.
 	 */
-	if (waitqueue_active(&ctx->fault_pending_wqh) ||
-	    waitqueue_active(&ctx->fault_wqh))
+	do {
+		seq = read_seqcount_begin(&ctx->refile_seq);
+		need_wakeup = waitqueue_active(&ctx->fault_pending_wqh) ||
+			waitqueue_active(&ctx->fault_wqh);
+		cond_resched();
+	} while (read_seqcount_retry(&ctx->refile_seq, seq));
+	if (need_wakeup)
 		__wake_userfault(ctx, range);
 }
@@ -1219,6 +1240,7 @@ static void init_once_userfaultfd_ctx(void *mem)
 	init_waitqueue_head(&ctx->fault_pending_wqh);
 	init_waitqueue_head(&ctx->fault_wqh);
 	init_waitqueue_head(&ctx->fd_wqh);
+	seqcount_init(&ctx->refile_seq);
 }

 /**