author     Andrey Ryabinin <aryabinin@virtuozzo.com>       2016-05-20 16:59:31 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2016-05-20 17:58:30 -0700
commit     1771c6e1a567ea0ba2cccc0a4ffe68a1419fd8ef (patch)
tree       5d3110f86fb5068fcffeebae794b49ddb2cd00d6
parent     64f8ebaf115bcddc4aaa902f981c57ba6506bc42 (diff)
x86/kasan: instrument user memory access API
Exchange of data between user and kernel memory is coded in assembly, which means that such accesses won't be spotted by KASAN, since the compiler instruments only C code. Add explicit KASAN checks to the user memory access API to ensure that userspace writes to (or reads from) valid kernel memory.

Note: unlike the others, strncpy_from_user() is written mostly in C, so KASAN sees the memory accesses in it. However, it still makes sense to add an explicit check for all @count bytes that *potentially* could be written to the kernel.

[aryabinin@virtuozzo.com: move kasan check under the condition]
Link: http://lkml.kernel.org/r/1462869209-21096-1-git-send-email-aryabinin@virtuozzo.com
Link: http://lkml.kernel.org/r/1462538722-1574-4-git-send-email-aryabinin@virtuozzo.com
Signed-off-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
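For illustration, a minimal sketch of the pattern this patch applies, with hypothetical names (example_copy_from_user() and asm_copy_impl() are not from the patch): the C wrapper checks the kernel-side buffer with kasan_check_write() before handing it to the uninstrumented assembly routine, so KASAN can validate the destination even though the compiler never sees the actual stores.

#include <linux/kasan-checks.h>

/*
 * Hypothetical wrapper: asm_copy_impl() is implemented in assembly,
 * so the compiler cannot instrument its stores.  The explicit check
 * lets KASAN validate the whole destination buffer up front.
 */
static inline unsigned long
example_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	kasan_check_write(to, n);		/* reports if 'to' is not valid kernel memory */
	return asm_copy_impl(to, from, n);	/* uninstrumented assembly copy */
}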
-rw-r--r--  arch/x86/include/asm/uaccess.h     | 5 +++++
-rw-r--r--  arch/x86/include/asm/uaccess_64.h  | 7 +++++++
-rw-r--r--  lib/strncpy_from_user.c            | 2 ++
3 files changed, 14 insertions(+), 0 deletions(-)
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 12f9653bde8d..2982387ba817 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -5,6 +5,7 @@
  */
 #include <linux/errno.h>
 #include <linux/compiler.h>
+#include <linux/kasan-checks.h>
 #include <linux/thread_info.h>
 #include <linux/string.h>
 #include <asm/asm.h>
@@ -721,6 +722,8 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
 
 	might_fault();
 
+	kasan_check_write(to, n);
+
 	/*
 	 * While we would like to have the compiler do the checking for us
 	 * even in the non-constant size case, any false positives there are
@@ -754,6 +757,8 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
 {
 	int sz = __compiletime_object_size(from);
 
+	kasan_check_read(from, n);
+
 	might_fault();
 
 	/* See the comment in copy_from_user() above. */
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index 307698688fa1..2eac2aa3e37f 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -7,6 +7,7 @@
 #include <linux/compiler.h>
 #include <linux/errno.h>
 #include <linux/lockdep.h>
+#include <linux/kasan-checks.h>
 #include <asm/alternative.h>
 #include <asm/cpufeatures.h>
 #include <asm/page.h>
@@ -109,6 +110,7 @@ static __always_inline __must_check
 int __copy_from_user(void *dst, const void __user *src, unsigned size)
 {
 	might_fault();
+	kasan_check_write(dst, size);
 	return __copy_from_user_nocheck(dst, src, size);
 }
 
@@ -175,6 +177,7 @@ static __always_inline __must_check
 int __copy_to_user(void __user *dst, const void *src, unsigned size)
 {
 	might_fault();
+	kasan_check_read(src, size);
 	return __copy_to_user_nocheck(dst, src, size);
 }
 
@@ -242,12 +245,14 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
 static __must_check __always_inline int
 __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
 {
+	kasan_check_write(dst, size);
 	return __copy_from_user_nocheck(dst, src, size);
 }
 
 static __must_check __always_inline int
 __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
 {
+	kasan_check_read(src, size);
 	return __copy_to_user_nocheck(dst, src, size);
 }
 
@@ -258,6 +263,7 @@ static inline int
 __copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
 {
 	might_fault();
+	kasan_check_write(dst, size);
 	return __copy_user_nocache(dst, src, size, 1);
 }
 
@@ -265,6 +271,7 @@ static inline int
 __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
 				  unsigned size)
 {
+	kasan_check_write(dst, size);
 	return __copy_user_nocache(dst, src, size, 0);
 }
diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
index 33840324138c..33f655ef48cd 100644
--- a/lib/strncpy_from_user.c
+++ b/lib/strncpy_from_user.c
@@ -1,5 +1,6 @@
 #include <linux/compiler.h>
 #include <linux/export.h>
+#include <linux/kasan-checks.h>
 #include <linux/uaccess.h>
 #include <linux/kernel.h>
 #include <linux/errno.h>
@@ -109,6 +110,7 @@ long strncpy_from_user(char *dst, const char __user *src, long count)
 		unsigned long max = max_addr - src_addr;
 		long retval;
 
+		kasan_check_write(dst, count);
 		user_access_begin();
 		retval = do_strncpy_from_user(dst, src, count, max);
 		user_access_end();
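As a usage sketch of what the strncpy_from_user() check above catches (buffer, pointer, and sizes invented for illustration): the destination is checked for all @count bytes, so an undersized buffer is reported even when the copied string itself would have fit.

/*
 * Hypothetical caller: 'uptr' is a userspace string.  count (32)
 * exceeds the 16-byte allocation; since kasan_check_write(dst, count)
 * covers all 32 potentially-written bytes, KASAN reports the bad
 * destination even if the string copied is short enough to fit.
 */
char *buf = kmalloc(16, GFP_KERNEL);	/* needs <linux/slab.h> */
long len = strncpy_from_user(buf, uptr, 32);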