author    Andrea Arcangeli <aarcange@redhat.com>    2015-09-22 14:58:49 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>    2015-09-22 15:09:53 -0700
commit    ac5be6b47e8bd25b62bed2c82cda7398999f59e9 (patch)
tree      e658ea2695b55e0b6726df10a47ac7d22837a3c1 /fs/userfaultfd.c
parent    bcee19f424a0d8c26ecf2607b73c690802658b29 (diff)
userfaultfd: revert "userfaultfd: waitqueue: add nr wake parameter to __wake_up_locked_key"
This reverts commit 51360155eccb907ff8635bd10fc7de876408c2e0 and adapts fs/userfaultfd.c to use the old version of that function.

Calling __wake_up_common with "nr == 1" did not look robust when we absolutely require wake-all semantics, but we have full control over what gets inserted into the two waitqueue heads used for blocked userfaults. No exclusive waitqueue entry can end up in those two heads, so we may as well stick with the "nr == 1" of the old code and rely purely on the fact that no waitqueue entry inserted into either of the two heads we must treat as wake-all has WQ_FLAG_EXCLUSIVE set in wait->flags.

Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Cc: Dr. David Alan Gilbert <dgilbert@redhat.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Shuah Khan <shuahkh@osg.samsung.com>
Cc: Thierry Reding <treding@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
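[Editor's note: for orientation, a paraphrased sketch of the generic wake-up loop as it looked in kernels of this era (kernel/sched/wait.c); it is not part of this patch. The nr_exclusive count is only decremented for entries carrying WQ_FLAG_EXCLUSIVE, so when no exclusive waiters are queued the loop never breaks early and "nr == 1" still wakes every entry.]

    /*
     * Simplified sketch of __wake_up_common() from kernels of this era,
     * shown for illustration only.
     */
    static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
                                 int nr_exclusive, int wake_flags, void *key)
    {
            wait_queue_t *curr, *next;

            list_for_each_entry_safe(curr, next, &q->task_list, task_list) {
                    unsigned flags = curr->flags;

                    /*
                     * The early break only triggers for WQ_FLAG_EXCLUSIVE
                     * entries: if none are queued, nr_exclusive is never
                     * decremented and every waiter gets woken.
                     */
                    if (curr->func(curr, mode, wake_flags, key) &&
                        (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
                            break;
            }
    }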
Diffstat (limited to 'fs/userfaultfd.c')
-rw-r--r--  fs/userfaultfd.c | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index f9aeb40a7197..50311703135b 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -467,8 +467,8 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
* the fault_*wqh.
*/
spin_lock(&ctx->fault_pending_wqh.lock);
- __wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL, 0, &range);
- __wake_up_locked_key(&ctx->fault_wqh, TASK_NORMAL, 0, &range);
+ __wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL, &range);
+ __wake_up_locked_key(&ctx->fault_wqh, TASK_NORMAL, &range);
spin_unlock(&ctx->fault_pending_wqh.lock);
wake_up_poll(&ctx->fd_wqh, POLLHUP);
@@ -650,10 +650,10 @@ static void __wake_userfault(struct userfaultfd_ctx *ctx,
spin_lock(&ctx->fault_pending_wqh.lock);
/* wake all in the range and autoremove */
if (waitqueue_active(&ctx->fault_pending_wqh))
- __wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL, 0,
+ __wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL,
range);
if (waitqueue_active(&ctx->fault_wqh))
- __wake_up_locked_key(&ctx->fault_wqh, TASK_NORMAL, 0, range);
+ __wake_up_locked_key(&ctx->fault_wqh, TASK_NORMAL, range);
spin_unlock(&ctx->fault_pending_wqh.lock);
}
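[Editor's note: after the revert, the callers above match the original three-argument helper again. A paraphrased sketch of that wrapper as it existed before the reverted commit (kernel/sched/wait.c), shown for orientation and not part of this diff:]

    /*
     * Pre-revert-target form of the helper: the hard-coded nr_exclusive
     * of 1 is harmless for userfaultfd because its waitqueues never
     * contain WQ_FLAG_EXCLUSIVE entries.
     */
    void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key)
    {
            __wake_up_common(q, mode, 1, 0, key);
    }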