| field | value | date |
|---|---|---|
| author | Andrey Konovalov <andreyknvl@google.com> | 2021-02-24 12:05:50 -0800 |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2021-02-24 13:38:31 -0800 |
| commit | 611806b4bf8dd97a4f3d73f5cf3c2c7730c51eb2 (patch) | |
| tree | bc3e29573db25c2b8c21c03f03b5e9544aaa6013 /lib/test_kasan.c | |
| parent | 027b37b552f326aa94ef06c7ea77088b16c41e6e (diff) | |
kasan: fix bug detection via ksize for HW_TAGS mode
The currently existing kasan_check_read/write() annotations are intended
to be used for kernel modules that have KASAN compiler instrumentation
disabled. Thus, they are only relevant for the software KASAN modes that
rely on compiler instrumentation.
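For reference, a condensed sketch of how these wrappers behave is shown below (paraphrased from include/linux/kasan-checks.h, not the verbatim header): in a compilation unit built without compiler instrumentation, the annotations collapse into no-op stubs that always report success.

```c
/*
 * Condensed sketch, not the verbatim kernel header: kasan_check_read/write()
 * only do real work when the compilation unit is instrumented.
 */
#include <linux/types.h>

#ifdef __SANITIZE_ADDRESS__
/* Instrumented units: forward to the out-of-line KASAN checks. */
#define kasan_check_read	__kasan_check_read
#define kasan_check_write	__kasan_check_write
#else
/* Uninstrumented units: the annotations compile away to "always OK". */
static inline bool kasan_check_read(const volatile void *p, unsigned int size)
{
	return true;
}
static inline bool kasan_check_write(const volatile void *p, unsigned int size)
{
	return true;
}
#endif
```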
However, there's another use case for these annotations: ksize() checks
that the object passed to it is indeed accessible before unpoisoning the
whole object. This is currently done via __kasan_check_read(), which is
compiled away for the hardware tag-based mode that doesn't rely on
compiler instrumentation. This leads to KASAN missing some memory
corruptions.
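Roughly, the pre-patch check looks as follows (a condensed sketch of ksize() in mm/slab_common.c; the body is abridged and helper names may differ slightly between kernel versions):

```c
/* Condensed sketch of the pre-patch ksize(); not the verbatim source. */
size_t ksize(const void *objp)
{
	size_t size;

	/*
	 * Check that the object is accessible before unpoisoning it.
	 * For the hardware tag-based mode this check is compiled away,
	 * so a freed or otherwise invalid object is not reported here.
	 */
	if (unlikely(ZERO_OR_NULL_PTR(objp)) || !__kasan_check_read(objp, 1))
		return 0;

	size = __ksize(objp);
	/* Callers may use the whole allocated area, so unpoison all of it. */
	kasan_unpoison_range(objp, size);
	return size;
}
```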
Provide another annotation called kasan_check_byte() that is available
for all KASAN modes. For its implementation, rename and reuse
kasan_check_invalid_free(). Use this new annotation in ksize().
To avoid having ksize() as the top frame in the reported stack trace,
pass _RET_IP_ to __kasan_check_byte().
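A condensed sketch of the new annotation is shown below; the full patch adds it in include/linux/kasan.h and mm/kasan/common.c, and this is abridged rather than verbatim.

```c
/*
 * Condensed sketch of the new annotation; abridged from the full patch
 * (include/linux/kasan.h and mm/kasan/common.c), not verbatim.
 */
bool __kasan_check_byte(const void *addr, unsigned long ip);

/* Available for all KASAN modes, including the hardware tag-based one. */
static __always_inline bool kasan_check_byte(const void *addr)
{
	/*
	 * Inlined into its caller (e.g. ksize()), so _RET_IP_ resolves to
	 * that caller's return address and the report is not attributed to
	 * ksize() itself as the top frame.
	 */
	return __kasan_check_byte(addr, _RET_IP_);
}

/* Out-of-line part: reuses the renamed kasan_check_invalid_free() logic. */
bool __kasan_check_byte(const void *addr, unsigned long ip)
{
	if (!kasan_byte_accessible(addr)) {
		kasan_report((unsigned long)addr, 1, false, ip);
		return false;
	}
	return true;
}
```

With this in place, ksize() checks a single byte of the object via kasan_check_byte() before unpoisoning it, instead of the instrumentation-only __kasan_check_read() call shown in the earlier sketch.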
Also add a new ksize_uaf() test that checks that a use-after-free is
detected via ksize() itself, and via plain accesses that happen later.
Link: https://linux-review.googlesource.com/id/Iaabf771881d0f9ce1b969f2a62938e99d3308ec5
Link: https://lkml.kernel.org/r/f32ad74a60b28d8402482a38476f02bb7600f620.1610733117.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Reviewed-by: Marco Elver <elver@google.com>
Reviewed-by: Alexander Potapenko <glider@google.com>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Branislav Rankov <Branislav.Rankov@arm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Evgenii Stepanov <eugenis@google.com>
Cc: Kevin Brodsky <kevin.brodsky@arm.com>
Cc: Peter Collingbourne <pcc@google.com>
Cc: Vincenzo Frascino <vincenzo.frascino@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'lib/test_kasan.c')
-rw-r--r-- | lib/test_kasan.c | 20 |
1 file changed, 20 insertions, 0 deletions
```diff
diff --git a/lib/test_kasan.c b/lib/test_kasan.c
index e59f185b8075..3f771fabd0ec 100644
--- a/lib/test_kasan.c
+++ b/lib/test_kasan.c
@@ -496,6 +496,7 @@ static void kasan_global_oob(struct kunit *test)
 	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
 }
 
+/* Check that ksize() makes the whole object accessible. */
 static void ksize_unpoisons_memory(struct kunit *test)
 {
 	char *ptr;
@@ -514,6 +515,24 @@ static void ksize_unpoisons_memory(struct kunit *test)
 	kfree(ptr);
 }
 
+/*
+ * Check that a use-after-free is detected by ksize() and via normal accesses
+ * after it.
+ */
+static void ksize_uaf(struct kunit *test)
+{
+	char *ptr;
+	int size = 128 - KASAN_GRANULE_SIZE;
+
+	ptr = kmalloc(size, GFP_KERNEL);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
+	kfree(ptr);
+
+	KUNIT_EXPECT_KASAN_FAIL(test, ksize(ptr));
+	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = *ptr);
+	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = *(ptr + size));
+}
+
 static void kasan_stack_oob(struct kunit *test)
 {
 	char stack_array[10];
@@ -907,6 +926,7 @@ static struct kunit_case kasan_kunit_test_cases[] = {
 	KUNIT_CASE(kasan_alloca_oob_left),
 	KUNIT_CASE(kasan_alloca_oob_right),
 	KUNIT_CASE(ksize_unpoisons_memory),
+	KUNIT_CASE(ksize_uaf),
 	KUNIT_CASE(kmem_cache_double_free),
 	KUNIT_CASE(kmem_cache_invalid_free),
 	KUNIT_CASE(kasan_memchr),
```