author     Andrey Ryabinin <a.ryabinin@samsung.com>        2015-02-13 14:39:42 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2015-02-13 21:21:41 -0800
commit     0316bec22ec95ea2faca6406437b0b5950553b7c (patch)
tree       6a278e1515188e738df2b04e9ada375215b3df22 /mm/kasan/report.c
parent     a79316c6178ca419e35feef47d47f50b4e0ee9f2 (diff)
download   linux-0316bec22ec95ea2faca6406437b0b5950553b7c.tar.gz
           linux-0316bec22ec95ea2faca6406437b0b5950553b7c.tar.bz2
           linux-0316bec22ec95ea2faca6406437b0b5950553b7c.zip
mm: slub: add kernel address sanitizer support for slub allocator
With this patch kasan is able to catch bugs in memory allocated by
slub. Initially, all objects in a newly allocated slab page are marked as
redzone. Later, when a slub object is allocated, the number of bytes
requested by the caller is marked as accessible, and the rest of the object
(including slub's metadata) is marked as redzone (inaccessible).
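
The allocation-path behaviour described above boils down to unpoisoning the
caller-requested bytes and re-poisoning the tail of the object. A minimal
sketch of such a hook, assuming the kasan_unpoison_shadow()/kasan_poison_shadow()
helpers and the KASAN_KMALLOC_REDZONE shadow value from mm/kasan/ (the exact
code in the patch may differ):

	void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size)
	{
		unsigned long redzone_start;
		unsigned long redzone_end;

		if (unlikely(object == NULL))
			return;

		/* Redzone begins at the first shadow granule past the requested size... */
		redzone_start = round_up((unsigned long)(object + size),
					KASAN_SHADOW_SCALE_SIZE);
		/* ...and runs to the end of the object proper. */
		redzone_end = round_up((unsigned long)object + cache->object_size,
					KASAN_SHADOW_SCALE_SIZE);

		/* Caller-visible bytes become accessible, the tail becomes redzone. */
		kasan_unpoison_shadow(object, size);
		kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
				KASAN_KMALLOC_REDZONE);
	}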
An object is also marked as fully accessible if ksize() has been called on it.
There are some places in the kernel where ksize() is called to find out the
size of the actually allocated area. Such callers may validly access the
whole allocated memory, so it has to be marked as accessible.
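
A sketch of the corresponding ksize() change for slub, assuming a
kasan_krealloc()-style helper that unpoisons the reported area (the helper
name and exact placement are assumptions, not a literal copy of the patch):

	size_t ksize(const void *object)
	{
		struct page *page;
		size_t size;

		if (unlikely(object == ZERO_SIZE_PTR))
			return 0;

		page = virt_to_head_page(object);
		if (unlikely(!PageSlab(page)))
			return PAGE_SIZE << compound_order(page);

		size = slab_ksize(page->slab_cache);

		/* ksize() callers may touch the whole reported area, so unpoison it. */
		kasan_krealloc(object, size);

		return size;
	}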
Code in the slub.c and slab_common.c files may validly access an object's
metadata, so instrumentation for these files is disabled.
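
The kernel build system lets individual objects opt out of KASAN
instrumentation, so disabling it for these two files is presumably done with
per-object flags along these lines in mm/Makefile (shown as an illustration,
not a quote of the patch):

	KASAN_SANITIZE_slab_common.o := n
	KASAN_SANITIZE_slub.o := n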
Signed-off-by: Andrey Ryabinin <a.ryabinin@samsung.com>
Signed-off-by: Dmitry Chernenkov <dmitryc@google.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Konstantin Serebryany <kcc@google.com>
Signed-off-by: Andrey Konovalov <adech.fo@gmail.com>
Cc: Yuri Gribov <tetra2005@gmail.com>
Cc: Konstantin Khlebnikov <koct9i@gmail.com>
Cc: Sasha Levin <sasha.levin@oracle.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/kasan/report.c')
-rw-r--r--   mm/kasan/report.c   21
1 file changed, 21 insertions, 0 deletions
diff --git a/mm/kasan/report.c b/mm/kasan/report.c
index fab8e7882ff1..2760edb4d0a8 100644
--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -24,6 +24,7 @@
 #include <linux/kasan.h>
 
 #include "kasan.h"
+#include "../slab.h"
 
 /* Shadow layout customization. */
 #define SHADOW_BYTES_PER_BLOCK 1
@@ -55,8 +56,11 @@ static void print_error_description(struct kasan_access_info *info)
 
 	switch (shadow_val) {
 	case KASAN_FREE_PAGE:
+	case KASAN_KMALLOC_FREE:
 		bug_type = "use after free";
 		break;
+	case KASAN_PAGE_REDZONE:
+	case KASAN_KMALLOC_REDZONE:
 	case 0 ... KASAN_SHADOW_SCALE_SIZE - 1:
 		bug_type = "out of bounds access";
 		break;
@@ -77,6 +81,23 @@ static void print_address_description(struct kasan_access_info *info)
 
 	if ((addr >= (void *)PAGE_OFFSET) &&
 		(addr < high_memory)) {
 		struct page *page = virt_to_head_page(addr);
+
+		if (PageSlab(page)) {
+			void *object;
+			struct kmem_cache *cache = page->slab_cache;
+			void *last_object;
+
+			object = virt_to_obj(cache, page_address(page), addr);
+			last_object = page_address(page) +
+				page->objects * cache->size;
+
+			if (unlikely(object > last_object))
+				object = last_object; /* we hit into padding */
+
+			object_err(cache, page, object,
+				"kasan: bad access detected");
+			return;
+		}
 		dump_page(page, "kasan: bad access detected");
 	}
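
The virt_to_obj() helper used in the hunk above is introduced elsewhere in
this patch; conceptually it rounds an address inside a slab page down to the
start of the object containing it, given that slub packs objects at
cache->size strides. A sketch of that arithmetic (illustrative, not quoted
from the patch):

	static inline void *virt_to_obj(struct kmem_cache *s, void *slab_page, void *x)
	{
		/* Offset of x within the slab page, truncated to an object boundary. */
		return x - ((x - slab_page) % s->size);
	}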