author	Linus Torvalds <torvalds@linux-foundation.org>	2016-08-08 14:48:14 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-08-08 14:48:14 -0700
commit	1eccfa090eaea22558570054bbdc147817e1df5e (patch)
tree	a0adfdb87319abef88f575ee34314649193b7e92 /include/linux/slab.h
parent	1bd4403d86a1c06cb6cc9ac87664a0c9d3413d51 (diff)
parent	ed18adc1cdd00a5c55a20fbdaed4804660772281 (diff)
Merge tag 'usercopy-v4.8' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux
Pull usercopy protection from Kees Cook:
 "This implements HARDENED_USERCOPY verification of copy_to_user and
  copy_from_user bounds checking for most architectures on SLAB and
  SLUB"

* tag 'usercopy-v4.8' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux:
  mm: SLUB hardened usercopy support
  mm: SLAB hardened usercopy support
  s390/uaccess: Enable hardened usercopy
  sparc/uaccess: Enable hardened usercopy
  powerpc/uaccess: Enable hardened usercopy
  ia64/uaccess: Enable hardened usercopy
  arm64/uaccess: Enable hardened usercopy
  ARM: uaccess: Enable hardened usercopy
  x86/uaccess: Enable hardened usercopy
  mm: Hardened usercopy
  mm: Implement stack frame object validation
  mm: Add is_migrate_cma_page
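For a sense of what the verification amounts to, here is a minimal userspace sketch, not the kernel code; the check_span() name and the explicit object_base/object_size parameters are made up for illustration. Before an n-byte copy_to_user()/copy_from_user() is allowed, the span starting at ptr must lie entirely within the heap object that contains ptr; in the kernel the object bounds come from the slab allocator's metadata.

	#include <stdio.h>

	/*
	 * Illustrative analogue of the hardened-usercopy span check: return
	 * NULL if an n-byte copy starting at ptr stays inside the object whose
	 * bounds are given, or a short reason string if it would spill out.
	 */
	static const char *check_span(const void *ptr, unsigned long n,
				      const void *object_base,
				      unsigned long object_size)
	{
		const char *p = ptr;
		const char *base = object_base;
		unsigned long offset;

		if (p < base)
			return "pointer below object start";
		offset = (unsigned long)(p - base);
		if (offset > object_size || n > object_size - offset)
			return "span exceeds object bounds";
		return NULL;
	}

	int main(void)
	{
		char buf[16];
		const char *err;

		err = check_span(buf + 4, 8, buf, sizeof(buf));	/* fits in buf */
		printf("copy of 8 bytes at offset 4:  %s\n", err ? err : "ok");

		err = check_span(buf + 12, 8, buf, sizeof(buf));	/* overruns buf */
		printf("copy of 8 bytes at offset 12: %s\n", err ? err : "ok");
		return 0;
	}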
Diffstat (limited to 'include/linux/slab.h')
-rw-r--r--	include/linux/slab.h	12
1 file changed, 12 insertions(+), 0 deletions(-)
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 1a4ea551aae5..4293808d8cfb 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -155,6 +155,18 @@ void kfree(const void *);
void kzfree(const void *);
size_t ksize(const void *);

+#ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
+const char *__check_heap_object(const void *ptr, unsigned long n,
+				struct page *page);
+#else
+static inline const char *__check_heap_object(const void *ptr,
+					      unsigned long n,
+					      struct page *page)
+{
+	return NULL;
+}
+#endif
+
/*
* Some archs want to perform DMA into kmalloc caches and need a guaranteed
* alignment larger than the alignment of a 64-bit integer.
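Reading the signature added above, the contract of __check_heap_object() is: return NULL when the n-byte span at ptr lies within a single allocated object on the given slab page, and a short description of the problem otherwise; the inline stub for the !CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR case always returns NULL, so slab pointers pass through unchecked. A sketch of how a caller might consume that return value, using the stub variant so it builds standalone; span_is_ok() and its reporting are illustrative, not taken from the kernel sources:

	#include <stdio.h>

	struct page;	/* opaque here; only passed through */

	/* Stand-in mirroring the !CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR stub above. */
	static inline const char *__check_heap_object(const void *ptr,
						      unsigned long n,
						      struct page *page)
	{
		(void)ptr;
		(void)n;
		(void)page;
		return NULL;	/* allocator offers no opinion, so nothing is rejected */
	}

	/*
	 * Hypothetical caller (the real consumer is the generic hardened-usercopy
	 * code, which is not part of this diff): NULL means the allocator found
	 * nothing wrong with the span; any other return is a reason to reject
	 * the copy.
	 */
	static int span_is_ok(const void *ptr, unsigned long n, struct page *page)
	{
		const char *err = __check_heap_object(ptr, n, page);

		if (err)
			fprintf(stderr, "usercopy rejected: %s\n", err);
		return err == NULL;
	}

	int main(void)
	{
		char buf[32];

		printf("span accepted: %d\n", span_is_ok(buf, sizeof(buf), NULL));
		return 0;
	}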