path: root/drivers/misc
author     Linus Torvalds <torvalds@linux-foundation.org>   2018-02-03 16:25:42 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>   2018-02-03 16:25:42 -0800
commit     617aebe6a97efa539cc4b8a52adccd89596e6be0 (patch)
tree       51c7753c940fd3727b8cc3e93553c57f89d1d9d2 /drivers/misc
parent     0771ad44a20bc512d1123bac728d3a89ea6febe6 (diff)
parent     e47e311843dece8073146f3606871280ee9beb87 (diff)
Merge tag 'usercopy-v4.16-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux
Pull hardened usercopy whitelisting from Kees Cook:
 "Currently, hardened usercopy performs dynamic bounds checking on slab
  cache objects. This is good, but still leaves a lot of kernel memory
  available to be copied to/from userspace in the face of bugs.

  To further restrict what memory is available for copying, this creates
  a way to whitelist specific areas of a given slab cache object for
  copying to/from userspace, allowing much finer granularity of access
  control. Slab caches that are never exposed to userspace can declare
  no whitelist for their objects, thereby keeping them unavailable to
  userspace via dynamic copy operations. (Note, an implicit form of
  whitelisting is the use of constant sizes in usercopy operations and
  get_user()/put_user(); these bypass all hardened usercopy checks since
  these sizes cannot change at runtime.)

  This new check is WARN-by-default, so any mistakes can be found over
  the next several releases without breaking anyone's system.

  The series has roughly the following sections:

   - remove %p and improve reporting with offset
   - prepare infrastructure and whitelist kmalloc
   - update VFS subsystem with whitelists
   - update SCSI subsystem with whitelists
   - update network subsystem with whitelists
   - update process memory with whitelists
   - update per-architecture thread_struct with whitelists
   - update KVM with whitelists and fix ioctl bug
   - mark all other allocations as not whitelisted
   - update lkdtm for more sensible test overage"

* tag 'usercopy-v4.16-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux: (38 commits)
  lkdtm: Update usercopy tests for whitelisting
  usercopy: Restrict non-usercopy caches to size 0
  kvm: x86: fix KVM_XEN_HVM_CONFIG ioctl
  kvm: whitelist struct kvm_vcpu_arch
  arm: Implement thread_struct whitelist for hardened usercopy
  arm64: Implement thread_struct whitelist for hardened usercopy
  x86: Implement thread_struct whitelist for hardened usercopy
  fork: Provide usercopy whitelisting for task_struct
  fork: Define usercopy region in thread_stack slab caches
  fork: Define usercopy region in mm_struct slab caches
  net: Restrict unwhitelisted proto caches to size 0
  sctp: Copy struct sctp_sock.autoclose to userspace using put_user()
  sctp: Define usercopy region in SCTP proto slab cache
  caif: Define usercopy region in caif proto slab cache
  ip: Define usercopy region in IP proto slab cache
  net: Define usercopy region in struct proto slab cache
  scsi: Define usercopy region in scsi_sense_cache slab cache
  cifs: Define usercopy region in cifs_request slab cache
  vxfs: Define usercopy region in vxfs_inode slab cache
  ufs: Define usercopy region in ufs_inode_cache slab cache
  ...
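For context, the whitelisting API exercised by the lkdtm changes below is
kmem_cache_create_usercopy(), which takes a user-copy offset and size in
addition to the usual kmem_cache_create() arguments. A minimal sketch of how
a cache owner might declare a single whitelisted field; the structure, cache
name, and helper here are hypothetical illustrations, not code from this
series:

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/uaccess.h>

/* Hypothetical object: only the `nick` field may ever reach userspace. */
struct example_session {
	unsigned long flags;	/* kernel-internal */
	char nick[32];		/* the only user-visible window */
	void *private_data;	/* kernel-internal */
};

static struct kmem_cache *session_cache;

static int __init example_cache_init(void)
{
	/* Whitelist only the `nick` window for hardened usercopy. */
	session_cache = kmem_cache_create_usercopy("example-session",
				sizeof(struct example_session), 0, 0,
				offsetof(struct example_session, nick),
				sizeof_field(struct example_session, nick),
				NULL);
	return session_cache ? 0 : -ENOMEM;
}

static int example_copy_nick(struct example_session *s,
			     char __user *ubuf, size_t len)
{
	/*
	 * A runtime-sized copy like this is checked against the
	 * whitelist; a copy reaching `flags` or `private_data` (or any
	 * copy from a cache created with no whitelist) would now trigger
	 * the WARN-by-default usercopy report.
	 */
	if (len > sizeof(s->nick))
		len = sizeof(s->nick);
	if (copy_to_user(ubuf, s->nick, len))
		return -EFAULT;
	return 0;
}

The lkdtm diff below follows the same pattern: it creates a cache whose
whitelist window starts at cache_size / 4 and spans cache_size / 16, then
performs one copy inside that window and one straddling its edge.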
Diffstat (limited to 'drivers/misc')
-rw-r--r--   drivers/misc/lkdtm.h              4
-rw-r--r--   drivers/misc/lkdtm_core.c         4
-rw-r--r--   drivers/misc/lkdtm_usercopy.c   101
3 files changed, 62 insertions(+), 47 deletions(-)
diff --git a/drivers/misc/lkdtm.h b/drivers/misc/lkdtm.h
index 687a0dbbe199..9e513dcfd809 100644
--- a/drivers/misc/lkdtm.h
+++ b/drivers/misc/lkdtm.h
@@ -76,8 +76,8 @@ void __init lkdtm_usercopy_init(void);
void __exit lkdtm_usercopy_exit(void);
void lkdtm_USERCOPY_HEAP_SIZE_TO(void);
void lkdtm_USERCOPY_HEAP_SIZE_FROM(void);
-void lkdtm_USERCOPY_HEAP_FLAG_TO(void);
-void lkdtm_USERCOPY_HEAP_FLAG_FROM(void);
+void lkdtm_USERCOPY_HEAP_WHITELIST_TO(void);
+void lkdtm_USERCOPY_HEAP_WHITELIST_FROM(void);
void lkdtm_USERCOPY_STACK_FRAME_TO(void);
void lkdtm_USERCOPY_STACK_FRAME_FROM(void);
void lkdtm_USERCOPY_STACK_BEYOND(void);
diff --git a/drivers/misc/lkdtm_core.c b/drivers/misc/lkdtm_core.c
index 4942da93d066..2154d1bfd18b 100644
--- a/drivers/misc/lkdtm_core.c
+++ b/drivers/misc/lkdtm_core.c
@@ -177,8 +177,8 @@ static const struct crashtype crashtypes[] = {
CRASHTYPE(ATOMIC_TIMING),
CRASHTYPE(USERCOPY_HEAP_SIZE_TO),
CRASHTYPE(USERCOPY_HEAP_SIZE_FROM),
- CRASHTYPE(USERCOPY_HEAP_FLAG_TO),
- CRASHTYPE(USERCOPY_HEAP_FLAG_FROM),
+ CRASHTYPE(USERCOPY_HEAP_WHITELIST_TO),
+ CRASHTYPE(USERCOPY_HEAP_WHITELIST_FROM),
CRASHTYPE(USERCOPY_STACK_FRAME_TO),
CRASHTYPE(USERCOPY_STACK_FRAME_FROM),
CRASHTYPE(USERCOPY_STACK_BEYOND),
diff --git a/drivers/misc/lkdtm_usercopy.c b/drivers/misc/lkdtm_usercopy.c
index a64372cc148d..9725aed305bb 100644
--- a/drivers/misc/lkdtm_usercopy.c
+++ b/drivers/misc/lkdtm_usercopy.c
@@ -20,7 +20,7 @@
*/
static volatile size_t unconst = 0;
static volatile size_t cache_size = 1024;
-static struct kmem_cache *bad_cache;
+static struct kmem_cache *whitelist_cache;
static const unsigned char test_text[] = "This is a test.\n";
@@ -115,10 +115,16 @@ free_user:
vm_munmap(user_addr, PAGE_SIZE);
}
+/*
+ * This checks for whole-object size validation with hardened usercopy,
+ * with or without usercopy whitelisting.
+ */
static void do_usercopy_heap_size(bool to_user)
{
unsigned long user_addr;
unsigned char *one, *two;
+ void __user *test_user_addr;
+ void *test_kern_addr;
size_t size = unconst + 1024;
one = kmalloc(size, GFP_KERNEL);
@@ -139,27 +145,30 @@ static void do_usercopy_heap_size(bool to_user)
memset(one, 'A', size);
memset(two, 'B', size);
+ test_user_addr = (void __user *)(user_addr + 16);
+ test_kern_addr = one + 16;
+
if (to_user) {
pr_info("attempting good copy_to_user of correct size\n");
- if (copy_to_user((void __user *)user_addr, one, size)) {
+ if (copy_to_user(test_user_addr, test_kern_addr, size / 2)) {
pr_warn("copy_to_user failed unexpectedly?!\n");
goto free_user;
}
pr_info("attempting bad copy_to_user of too large size\n");
- if (copy_to_user((void __user *)user_addr, one, 2 * size)) {
+ if (copy_to_user(test_user_addr, test_kern_addr, size)) {
pr_warn("copy_to_user failed, but lacked Oops\n");
goto free_user;
}
} else {
pr_info("attempting good copy_from_user of correct size\n");
- if (copy_from_user(one, (void __user *)user_addr, size)) {
+ if (copy_from_user(test_kern_addr, test_user_addr, size / 2)) {
pr_warn("copy_from_user failed unexpectedly?!\n");
goto free_user;
}
pr_info("attempting bad copy_from_user of too large size\n");
- if (copy_from_user(one, (void __user *)user_addr, 2 * size)) {
+ if (copy_from_user(test_kern_addr, test_user_addr, size)) {
pr_warn("copy_from_user failed, but lacked Oops\n");
goto free_user;
}
@@ -172,77 +181,79 @@ free_kernel:
kfree(two);
}
-static void do_usercopy_heap_flag(bool to_user)
+/*
+ * This checks for the specific whitelist window within an object. If this
+ * test passes, then do_usercopy_heap_size() tests will pass too.
+ */
+static void do_usercopy_heap_whitelist(bool to_user)
{
- unsigned long user_addr;
- unsigned char *good_buf = NULL;
- unsigned char *bad_buf = NULL;
+ unsigned long user_alloc;
+ unsigned char *buf = NULL;
+ unsigned char __user *user_addr;
+ size_t offset, size;
/* Make sure cache was prepared. */
- if (!bad_cache) {
+ if (!whitelist_cache) {
pr_warn("Failed to allocate kernel cache\n");
return;
}
/*
- * Allocate one buffer from each cache (kmalloc will have the
- * SLAB_USERCOPY flag already, but "bad_cache" won't).
+ * Allocate a buffer with a whitelisted window in the buffer.
*/
- good_buf = kmalloc(cache_size, GFP_KERNEL);
- bad_buf = kmem_cache_alloc(bad_cache, GFP_KERNEL);
- if (!good_buf || !bad_buf) {
- pr_warn("Failed to allocate buffers from caches\n");
+ buf = kmem_cache_alloc(whitelist_cache, GFP_KERNEL);
+ if (!buf) {
+ pr_warn("Failed to allocate buffer from whitelist cache\n");
goto free_alloc;
}
/* Allocate user memory we'll poke at. */
- user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
+ user_alloc = vm_mmap(NULL, 0, PAGE_SIZE,
PROT_READ | PROT_WRITE | PROT_EXEC,
MAP_ANONYMOUS | MAP_PRIVATE, 0);
- if (user_addr >= TASK_SIZE) {
+ if (user_alloc >= TASK_SIZE) {
pr_warn("Failed to allocate user memory\n");
goto free_alloc;
}
+ user_addr = (void __user *)user_alloc;
- memset(good_buf, 'A', cache_size);
- memset(bad_buf, 'B', cache_size);
+ memset(buf, 'B', cache_size);
+
+ /* Whitelisted window in buffer, from kmem_cache_create_usercopy. */
+ offset = (cache_size / 4) + unconst;
+ size = (cache_size / 16) + unconst;
if (to_user) {
- pr_info("attempting good copy_to_user with SLAB_USERCOPY\n");
- if (copy_to_user((void __user *)user_addr, good_buf,
- cache_size)) {
+ pr_info("attempting good copy_to_user inside whitelist\n");
+ if (copy_to_user(user_addr, buf + offset, size)) {
pr_warn("copy_to_user failed unexpectedly?!\n");
goto free_user;
}
- pr_info("attempting bad copy_to_user w/o SLAB_USERCOPY\n");
- if (copy_to_user((void __user *)user_addr, bad_buf,
- cache_size)) {
+ pr_info("attempting bad copy_to_user outside whitelist\n");
+ if (copy_to_user(user_addr, buf + offset - 1, size)) {
pr_warn("copy_to_user failed, but lacked Oops\n");
goto free_user;
}
} else {
- pr_info("attempting good copy_from_user with SLAB_USERCOPY\n");
- if (copy_from_user(good_buf, (void __user *)user_addr,
- cache_size)) {
+ pr_info("attempting good copy_from_user inside whitelist\n");
+ if (copy_from_user(buf + offset, user_addr, size)) {
pr_warn("copy_from_user failed unexpectedly?!\n");
goto free_user;
}
- pr_info("attempting bad copy_from_user w/o SLAB_USERCOPY\n");
- if (copy_from_user(bad_buf, (void __user *)user_addr,
- cache_size)) {
+ pr_info("attempting bad copy_from_user outside whitelist\n");
+ if (copy_from_user(buf + offset - 1, user_addr, size)) {
pr_warn("copy_from_user failed, but lacked Oops\n");
goto free_user;
}
}
free_user:
- vm_munmap(user_addr, PAGE_SIZE);
+ vm_munmap(user_alloc, PAGE_SIZE);
free_alloc:
- if (bad_buf)
- kmem_cache_free(bad_cache, bad_buf);
- kfree(good_buf);
+ if (buf)
+ kmem_cache_free(whitelist_cache, buf);
}
/* Callable tests. */
@@ -256,14 +267,14 @@ void lkdtm_USERCOPY_HEAP_SIZE_FROM(void)
do_usercopy_heap_size(false);
}
-void lkdtm_USERCOPY_HEAP_FLAG_TO(void)
+void lkdtm_USERCOPY_HEAP_WHITELIST_TO(void)
{
- do_usercopy_heap_flag(true);
+ do_usercopy_heap_whitelist(true);
}
-void lkdtm_USERCOPY_HEAP_FLAG_FROM(void)
+void lkdtm_USERCOPY_HEAP_WHITELIST_FROM(void)
{
- do_usercopy_heap_flag(false);
+ do_usercopy_heap_whitelist(false);
}
void lkdtm_USERCOPY_STACK_FRAME_TO(void)
@@ -314,11 +325,15 @@ free_user:
void __init lkdtm_usercopy_init(void)
{
/* Prepare cache that lacks SLAB_USERCOPY flag. */
- bad_cache = kmem_cache_create("lkdtm-no-usercopy", cache_size, 0,
- 0, NULL);
+ whitelist_cache =
+ kmem_cache_create_usercopy("lkdtm-usercopy", cache_size,
+ 0, 0,
+ cache_size / 4,
+ cache_size / 16,
+ NULL);
}
void __exit lkdtm_usercopy_exit(void)
{
- kmem_cache_destroy(bad_cache);
+ kmem_cache_destroy(whitelist_cache);
}
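As a footnote on the implicit-whitelisting point in the pull message: copies
with compile-time-constant sizes and get_user()/put_user() skip the hardened
usercopy check, which is why the "sctp: Copy struct sctp_sock.autoclose to
userspace using put_user()" commit can keep its proto cache unwhitelisted. A
rough sketch of that pattern; the structure and helper names are made up for
illustration and are not the actual sctp code:

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uaccess.h>

/* Hypothetical per-socket state living in a cache with no whitelist. */
struct example_sock_state {
	u32 autoclose;
	/* ... other fields that must never be exposed ... */
};

static int example_get_autoclose(struct example_sock_state *sp,
				 u32 __user *optval, int len)
{
	if (len < (int)sizeof(u32))
		return -EINVAL;

	/*
	 * copy_to_user(optval, &sp->autoclose, len) with a runtime `len`
	 * would be flagged once this cache whitelists nothing;
	 * put_user() moves a fixed-size scalar and is not subject to
	 * the hardened usercopy check.
	 */
	if (put_user(sp->autoclose, optval))
		return -EFAULT;
	return 0;
}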