author     Linus Torvalds <torvalds@linux-foundation.org>  2018-02-03 16:25:42 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2018-02-03 16:25:42 -0800
commit     617aebe6a97efa539cc4b8a52adccd89596e6be0 (patch)
tree       51c7753c940fd3727b8cc3e93553c57f89d1d9d2 /mm/usercopy.c
parent     0771ad44a20bc512d1123bac728d3a89ea6febe6 (diff)
parent     e47e311843dece8073146f3606871280ee9beb87 (diff)
Merge tag 'usercopy-v4.16-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux
Pull hardened usercopy whitelisting from Kees Cook:
 "Currently, hardened usercopy performs dynamic bounds checking on slab
  cache objects. This is good, but still leaves a lot of kernel memory
  available to be copied to/from userspace in the face of bugs. To
  further restrict what memory is available for copying, this creates a
  way to whitelist specific areas of a given slab cache object for
  copying to/from userspace, allowing much finer granularity of access
  control. Slab caches that are never exposed to userspace can declare
  no whitelist for their objects, thereby keeping them unavailable to
  userspace via dynamic copy operations. (Note, an implicit form of
  whitelisting is the use of constant sizes in usercopy operations and
  get_user()/put_user(); these bypass all hardened usercopy checks
  since these sizes cannot change at runtime.)

  This new check is WARN-by-default, so any mistakes can be found over
  the next several releases without breaking anyone's system.

  The series has roughly the following sections:
  - remove %p and improve reporting with offset
  - prepare infrastructure and whitelist kmalloc
  - update VFS subsystem with whitelists
  - update SCSI subsystem with whitelists
  - update network subsystem with whitelists
  - update process memory with whitelists
  - update per-architecture thread_struct with whitelists
  - update KVM with whitelists and fix ioctl bug
  - mark all other allocations as not whitelisted
  - update lkdtm for more sensible test coverage"

* tag 'usercopy-v4.16-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux: (38 commits)
  lkdtm: Update usercopy tests for whitelisting
  usercopy: Restrict non-usercopy caches to size 0
  kvm: x86: fix KVM_XEN_HVM_CONFIG ioctl
  kvm: whitelist struct kvm_vcpu_arch
  arm: Implement thread_struct whitelist for hardened usercopy
  arm64: Implement thread_struct whitelist for hardened usercopy
  x86: Implement thread_struct whitelist for hardened usercopy
  fork: Provide usercopy whitelisting for task_struct
  fork: Define usercopy region in thread_stack slab caches
  fork: Define usercopy region in mm_struct slab caches
  net: Restrict unwhitelisted proto caches to size 0
  sctp: Copy struct sctp_sock.autoclose to userspace using put_user()
  sctp: Define usercopy region in SCTP proto slab cache
  caif: Define usercopy region in caif proto slab cache
  ip: Define usercopy region in IP proto slab cache
  net: Define usercopy region in struct proto slab cache
  scsi: Define usercopy region in scsi_sense_cache slab cache
  cifs: Define usercopy region in cifs_request slab cache
  vxfs: Define usercopy region in vxfs_inode slab cache
  ufs: Define usercopy region in ufs_inode_cache slab cache
  ...
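The core primitive the series adds is kmem_cache_create_usercopy(), which
extends kmem_cache_create() with a useroffset/usersize pair naming the only
byte range of each object that may be handed to copy_to_user() or
copy_from_user(). A minimal sketch of a caller, using the signature this
series introduces (struct example, its fields, and the cache name are
hypothetical):

	#include <linux/init.h>
	#include <linux/slab.h>
	#include <linux/spinlock.h>
	#include <linux/stddef.h>

	/* Hypothetical object mixing internal state with one userspace-visible buffer. */
	struct example {
		spinlock_t lock;	/* kernel-internal: never copied to userspace */
		char data[64];		/* the only region whitelisted for usercopy */
	};

	static struct kmem_cache *example_cachep;

	static int __init example_cache_init(void)
	{
		/*
		 * Whitelist only 'data'. A dynamically-sized copy touching
		 * bytes outside [useroffset, useroffset + usersize) of one
		 * of these objects now trips the usercopy_warn()/
		 * usercopy_abort() reporting added in the diff below.
		 */
		example_cachep = kmem_cache_create_usercopy("example_cache",
				sizeof(struct example), 0, SLAB_HWCACHE_ALIGN,
				offsetof(struct example, data),	      /* useroffset */
				sizeof(((struct example *)0)->data),  /* usersize */
				NULL);
		return example_cachep ? 0 : -ENOMEM;
	}

Caches created with plain kmem_cache_create() keep a zero-size whitelist,
which is what the "Restrict non-usercopy caches to size 0" commits in the
shortlog above refer to.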
Diffstat (limited to 'mm/usercopy.c')
-rw-r--r--  mm/usercopy.c | 133
1 file changed, 74 insertions(+), 59 deletions(-)
diff --git a/mm/usercopy.c b/mm/usercopy.c
index a9852b24715d..e9e9325f7638 100644
--- a/mm/usercopy.c
+++ b/mm/usercopy.c
@@ -58,12 +58,40 @@ static noinline int check_stack_object(const void *obj, unsigned long len)
return GOOD_STACK;
}
-static void report_usercopy(const void *ptr, unsigned long len,
- bool to_user, const char *type)
+/*
+ * If these functions are reached, then CONFIG_HARDENED_USERCOPY has found
+ * an unexpected state during a copy_from_user() or copy_to_user() call.
+ * There are several checks being performed on the buffer by the
+ * __check_object_size() function. Normal stack buffer usage should never
+ * trip the checks, and kernel text addressing will always trip the check.
+ * For cache objects, it is checking that only the whitelisted range of
+ * bytes for a given cache is being accessed (via the cache's usersize and
+ * useroffset fields). To adjust a cache whitelist, use the usercopy-aware
+ * kmem_cache_create_usercopy() function to create the cache (and
+ * carefully audit the whitelist range).
+ */
+void usercopy_warn(const char *name, const char *detail, bool to_user,
+ unsigned long offset, unsigned long len)
+{
+ WARN_ONCE(1, "Bad or missing usercopy whitelist? Kernel memory %s attempt detected %s %s%s%s%s (offset %lu, size %lu)!\n",
+ to_user ? "exposure" : "overwrite",
+ to_user ? "from" : "to",
+ name ? : "unknown?!",
+ detail ? " '" : "", detail ? : "", detail ? "'" : "",
+ offset, len);
+}
+
+void __noreturn usercopy_abort(const char *name, const char *detail,
+ bool to_user, unsigned long offset,
+ unsigned long len)
{
- pr_emerg("kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
- to_user ? "exposure" : "overwrite",
- to_user ? "from" : "to", ptr, type ? : "unknown", len);
+ pr_emerg("Kernel memory %s attempt detected %s %s%s%s%s (offset %lu, size %lu)!\n",
+ to_user ? "exposure" : "overwrite",
+ to_user ? "from" : "to",
+ name ? : "unknown?!",
+ detail ? " '" : "", detail ? : "", detail ? "'" : "",
+ offset, len);
+
/*
* For greater effect, it would be nice to do do_group_exit(),
* but BUG() actually hooks all the lock-breaking and per-arch
@@ -73,10 +101,10 @@ static void report_usercopy(const void *ptr, unsigned long len,
}
/* Returns true if any portion of [ptr,ptr+n) overlaps with [low,high). */
-static bool overlaps(const void *ptr, unsigned long n, unsigned long low,
- unsigned long high)
+static bool overlaps(const unsigned long ptr, unsigned long n,
+ unsigned long low, unsigned long high)
{
- unsigned long check_low = (uintptr_t)ptr;
+ const unsigned long check_low = ptr;
unsigned long check_high = check_low + n;
/* Does not overlap if entirely above or entirely below. */
@@ -87,15 +115,15 @@ static bool overlaps(const void *ptr, unsigned long n, unsigned long low,
}
/* Is this address range in the kernel text area? */
-static inline const char *check_kernel_text_object(const void *ptr,
- unsigned long n)
+static inline void check_kernel_text_object(const unsigned long ptr,
+ unsigned long n, bool to_user)
{
unsigned long textlow = (unsigned long)_stext;
unsigned long texthigh = (unsigned long)_etext;
unsigned long textlow_linear, texthigh_linear;
if (overlaps(ptr, n, textlow, texthigh))
- return "<kernel text>";
+ usercopy_abort("kernel text", NULL, to_user, ptr - textlow, n);
/*
* Some architectures have virtual memory mappings with a secondary
@@ -108,32 +136,30 @@ static inline const char *check_kernel_text_object(const void *ptr,
textlow_linear = (unsigned long)lm_alias(textlow);
/* No different mapping: we're done. */
if (textlow_linear == textlow)
- return NULL;
+ return;
/* Check the secondary mapping... */
texthigh_linear = (unsigned long)lm_alias(texthigh);
if (overlaps(ptr, n, textlow_linear, texthigh_linear))
- return "<linear kernel text>";
-
- return NULL;
+ usercopy_abort("linear kernel text", NULL, to_user,
+ ptr - textlow_linear, n);
}
-static inline const char *check_bogus_address(const void *ptr, unsigned long n)
+static inline void check_bogus_address(const unsigned long ptr, unsigned long n,
+ bool to_user)
{
/* Reject if object wraps past end of memory. */
- if ((unsigned long)ptr + n < (unsigned long)ptr)
- return "<wrapped address>";
+ if (ptr + n < ptr)
+ usercopy_abort("wrapped address", NULL, to_user, 0, ptr + n);
/* Reject if NULL or ZERO-allocation. */
if (ZERO_OR_NULL_PTR(ptr))
- return "<null>";
-
- return NULL;
+ usercopy_abort("null address", NULL, to_user, ptr, n);
}
/* Checks for allocs that are marked in some way as spanning multiple pages. */
-static inline const char *check_page_span(const void *ptr, unsigned long n,
- struct page *page, bool to_user)
+static inline void check_page_span(const void *ptr, unsigned long n,
+ struct page *page, bool to_user)
{
#ifdef CONFIG_HARDENED_USERCOPY_PAGESPAN
const void *end = ptr + n - 1;
@@ -150,28 +176,28 @@ static inline const char *check_page_span(const void *ptr, unsigned long n,
if (ptr >= (const void *)__start_rodata &&
end <= (const void *)__end_rodata) {
if (!to_user)
- return "<rodata>";
- return NULL;
+ usercopy_abort("rodata", NULL, to_user, 0, n);
+ return;
}
/* Allow kernel data region (if not marked as Reserved). */
if (ptr >= (const void *)_sdata && end <= (const void *)_edata)
- return NULL;
+ return;
/* Allow kernel bss region (if not marked as Reserved). */
if (ptr >= (const void *)__bss_start &&
end <= (const void *)__bss_stop)
- return NULL;
+ return;
/* Is the object wholly within one base page? */
if (likely(((unsigned long)ptr & (unsigned long)PAGE_MASK) ==
((unsigned long)end & (unsigned long)PAGE_MASK)))
- return NULL;
+ return;
/* Allow if fully inside the same compound (__GFP_COMP) page. */
endpage = virt_to_head_page(end);
if (likely(endpage == page))
- return NULL;
+ return;
/*
* Reject if range is entirely either Reserved (i.e. special or
@@ -181,36 +207,37 @@ static inline const char *check_page_span(const void *ptr, unsigned long n,
is_reserved = PageReserved(page);
is_cma = is_migrate_cma_page(page);
if (!is_reserved && !is_cma)
- return "<spans multiple pages>";
+ usercopy_abort("spans multiple pages", NULL, to_user, 0, n);
for (ptr += PAGE_SIZE; ptr <= end; ptr += PAGE_SIZE) {
page = virt_to_head_page(ptr);
if (is_reserved && !PageReserved(page))
- return "<spans Reserved and non-Reserved pages>";
+ usercopy_abort("spans Reserved and non-Reserved pages",
+ NULL, to_user, 0, n);
if (is_cma && !is_migrate_cma_page(page))
- return "<spans CMA and non-CMA pages>";
+ usercopy_abort("spans CMA and non-CMA pages", NULL,
+ to_user, 0, n);
}
#endif
-
- return NULL;
}
-static inline const char *check_heap_object(const void *ptr, unsigned long n,
- bool to_user)
+static inline void check_heap_object(const void *ptr, unsigned long n,
+ bool to_user)
{
struct page *page;
if (!virt_addr_valid(ptr))
- return NULL;
+ return;
page = virt_to_head_page(ptr);
- /* Check slab allocator for flags and size. */
- if (PageSlab(page))
- return __check_heap_object(ptr, n, page);
-
- /* Verify object does not incorrectly span multiple pages. */
- return check_page_span(ptr, n, page, to_user);
+ if (PageSlab(page)) {
+ /* Check slab allocator for flags and size. */
+ __check_heap_object(ptr, n, page, to_user);
+ } else {
+ /* Verify object does not incorrectly span multiple pages. */
+ check_page_span(ptr, n, page, to_user);
+ }
}
/*
@@ -221,21 +248,15 @@ static inline const char *check_heap_object(const void *ptr, unsigned long n,
*/
void __check_object_size(const void *ptr, unsigned long n, bool to_user)
{
- const char *err;
-
/* Skip all tests if size is zero. */
if (!n)
return;
/* Check for invalid addresses. */
- err = check_bogus_address(ptr, n);
- if (err)
- goto report;
+ check_bogus_address((const unsigned long)ptr, n, to_user);
/* Check for bad heap object. */
- err = check_heap_object(ptr, n, to_user);
- if (err)
- goto report;
+ check_heap_object(ptr, n, to_user);
/* Check for bad stack object. */
switch (check_stack_object(ptr, n)) {
@@ -251,16 +272,10 @@ void __check_object_size(const void *ptr, unsigned long n, bool to_user)
*/
return;
default:
- err = "<process stack>";
- goto report;
+ usercopy_abort("process stack", NULL, to_user, 0, n);
}
/* Check for object in kernel to avoid text exposure. */
- err = check_kernel_text_object(ptr, n);
- if (!err)
- return;
-
-report:
- report_usercopy(ptr, n, to_user, err);
+ check_kernel_text_object((const unsigned long)ptr, n, to_user);
}
EXPORT_SYMBOL(__check_object_size);
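For context, __check_object_size() is not called directly by drivers; it is
reached from the uaccess helpers whenever the copy size is not a
compile-time constant. A condensed sketch of that wiring (simplified from
the include/linux/thread_info.h and include/linux/uaccess.h of this era,
not verbatim):

	/*
	 * Constant sizes skip the hardened checks entirely -- the
	 * "implicit whitelisting" the commit message mentions.
	 */
	static __always_inline void check_object_size(const void *ptr,
						      unsigned long n,
						      bool to_user)
	{
		if (!__builtin_constant_p(n))
			__check_object_size(ptr, n, to_user);
	}

	static __always_inline unsigned long
	copy_to_user(void __user *to, const void *from, unsigned long n)
	{
		check_object_size(from, n, true);  /* may warn or abort */
		return raw_copy_to_user(to, from, n);
	}

This is why copies with constant sizes, and get_user()/put_user(), bypass
all hardened usercopy checks: their sizes cannot change at runtime, so
there is no dynamic bound to validate.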