author     James Hogan <james.hogan@imgtec.com>	2017-03-31 13:35:01 +0100
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2017-04-12 12:38:34 +0200
commit     dde6f22c1e122907717f45405cbc2c6227e259e5 (patch)
tree       7cfef880dbf7ba5918be6893926c274e9a33407d /arch
parent     ae781dee56e4805311f0615ca04ea226bfbcafcd (diff)
metag/usercopy: Add early abort to copy_to_user
commit fb8ea062a8f2e85256e13f55696c5c5f0dfdcc8b upstream.

When copying to userland on Meta, if any faults are encountered
immediately abort the copy instead of continuing on and repeatedly
faulting, and worse potentially copying further bytes successfully to
subsequent valid pages.

Fixes: 373cd784d0fc ("metag: Memory handling")
Reported-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: James Hogan <james.hogan@imgtec.com>
Cc: linux-metag@vger.kernel.org
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
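To make the fix easier to follow, here is a minimal user-space sketch of the
same early-abort pattern. It is not the kernel code: copy_chunk and
copy_with_early_abort are hypothetical names, and the chunk copy here never
actually faults. The point is the retn + n contract visible in the hunks
below: as soon as a chunk fails, return the number of bytes that were not
copied instead of continuing to fault on the rest of the buffer.

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/*
 * Hypothetical stand-in for a per-chunk copy primitive that can fault.
 * Returns 0 on success, or the number of bytes of this chunk that could
 * not be copied (the role played by retn in __copy_user).
 */
static size_t copy_chunk(char *dst, const char *src, size_t len)
{
	memcpy(dst, src, len);	/* a real primitive could stop part-way */
	return 0;
}

/*
 * Copy n bytes in 4-byte chunks plus a byte tail, aborting on the first
 * failing chunk. The return value is the number of bytes NOT copied, so
 * 0 means complete success.
 */
static size_t copy_with_early_abort(char *dst, const char *src, size_t n)
{
	size_t retn;

	while (n >= 4) {
		retn = copy_chunk(dst, src, 4);
		dst += 4;
		src += 4;
		n -= 4;
		if (retn)
			return retn + n;	/* abort: report remaining bytes */
	}
	while (n > 0) {
		retn = copy_chunk(dst, src, 1);
		dst++;
		src++;
		n--;
		if (retn)
			return retn + n;
	}
	return 0;
}

int main(void)
{
	char src[16] = "example payload";
	char dst[16];

	printf("bytes not copied: %zu\n",
	       copy_with_early_abort(dst, src, sizeof(src)));
	return 0;
}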
Diffstat (limited to 'arch')
-rw-r--r--	arch/metag/lib/usercopy.c | 20
1 file changed, 20 insertions(+), 0 deletions(-)
diff --git a/arch/metag/lib/usercopy.c b/arch/metag/lib/usercopy.c
index a6ced9691ddb..714d8562aa20 100644
--- a/arch/metag/lib/usercopy.c
+++ b/arch/metag/lib/usercopy.c
@@ -538,23 +538,31 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
if ((unsigned long) src & 1) {
__asm_copy_to_user_1(dst, src, retn);
n--;
+ if (retn)
+ return retn + n;
}
if ((unsigned long) dst & 1) {
/* Worst case - byte copy */
while (n > 0) {
__asm_copy_to_user_1(dst, src, retn);
n--;
+ if (retn)
+ return retn + n;
}
}
if (((unsigned long) src & 2) && n >= 2) {
__asm_copy_to_user_2(dst, src, retn);
n -= 2;
+ if (retn)
+ return retn + n;
}
if ((unsigned long) dst & 2) {
/* Second worst case - word copy */
while (n >= 2) {
__asm_copy_to_user_2(dst, src, retn);
n -= 2;
+ if (retn)
+ return retn + n;
}
}
@@ -569,6 +577,8 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
while (n >= 8) {
__asm_copy_to_user_8x64(dst, src, retn);
n -= 8;
+ if (retn)
+ return retn + n;
}
}
if (n >= RAPF_MIN_BUF_SIZE) {
@@ -581,6 +591,8 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
while (n >= 8) {
__asm_copy_to_user_8x64(dst, src, retn);
n -= 8;
+ if (retn)
+ return retn + n;
}
}
#endif
@@ -588,11 +600,15 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
while (n >= 16) {
__asm_copy_to_user_16(dst, src, retn);
n -= 16;
+ if (retn)
+ return retn + n;
}
while (n >= 4) {
__asm_copy_to_user_4(dst, src, retn);
n -= 4;
+ if (retn)
+ return retn + n;
}
switch (n) {
@@ -609,6 +625,10 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
break;
}
+ /*
+ * If we get here, retn correctly reflects the number of failing
+ * bytes.
+ */
return retn;
}
EXPORT_SYMBOL(__copy_user);
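For context, this is how the return value is normally consumed by callers. The
sketch below is a hypothetical caller, not part of this patch, and would only
build inside a kernel tree; it relies on the standard convention that
copy_to_user() returns the number of bytes left uncopied, which the early
aborts above now report promptly instead of after walking every remaining page.

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uaccess.h>

/* Hypothetical helper: hand 'len' bytes of kernel data back to user space. */
static ssize_t example_copy_out(char __user *ubuf, const void *kbuf, size_t len)
{
	/* Nonzero means some bytes were not copied; report -EFAULT as usual. */
	if (copy_to_user(ubuf, kbuf, len))
		return -EFAULT;
	return len;
}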