author     Peter Collingbourne <pcc@google.com>      2021-06-02 16:52:28 -0700
committer  Will Deacon <will@kernel.org>             2021-06-04 19:32:21 +0100
commit     7a3b835371883558eb63e069d891bd87f562380d (patch)
tree       ce4fb6dc7bc68fd8047f7d65b0b55caa310c1b68 /include/linux/kasan.h
parent     92638b4e1b47f97d7269e74465dedf73096f777d (diff)
kasan: use separate (un)poison implementation for integrated init
Currently, with integrated init, page_alloc.c needs to know whether
kasan_alloc_pages() will zero-initialize memory, but this will become
more complicated once tag initialization support for user pages is
added. To avoid page_alloc.c needing to know more details of what
integrated init will do, move the unpoisoning logic for integrated
init into the HW tags implementation. The logic is currently identical
on both sides, but it will diverge in subsequent patches.
For symmetry, do the same for poisoning, although that logic will be
unaffected by subsequent patches.
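
As a rough illustration of the direction (a sketch only; the actual
mm/kasan/hw_tags.c change is outside the diffstat shown below, and the
exact condition is an assumption here), the HW tags implementation can
compute the init decision itself instead of receiving it from
page_alloc.c:

	void kasan_alloc_pages(struct page *page, unsigned int order,
			       gfp_t flags)
	{
		/*
		 * Sketch: mirror the zero-init decision that
		 * page_alloc.c previously computed before calling
		 * into KASAN.
		 */
		bool init = !want_init_on_free() && want_init_on_alloc(flags);

		kasan_unpoison_pages(page, order, init);
	}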
Signed-off-by: Peter Collingbourne <pcc@google.com>
Reviewed-by: Andrey Konovalov <andreyknvl@gmail.com>
Link: https://linux-review.googlesource.com/id/I2c550234c6c4a893c48c18ff0c6ce658c7c67056
Link: https://lore.kernel.org/r/20210602235230.3928842-3-pcc@google.com
Signed-off-by: Will Deacon <will@kernel.org>
Diffstat (limited to 'include/linux/kasan.h')
-rw-r--r--   include/linux/kasan.h | 64
1 file changed, 38 insertions(+), 26 deletions(-)
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index b1678a61e6a7..a1c7ce5f3e4f 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -2,6 +2,7 @@
 #ifndef _LINUX_KASAN_H
 #define _LINUX_KASAN_H
 
+#include <linux/bug.h>
 #include <linux/static_key.h>
 #include <linux/types.h>
 
@@ -79,14 +80,6 @@ static inline void kasan_disable_current(void) {}
 
 #endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
 
-#ifdef CONFIG_KASAN
-
-struct kasan_cache {
-        int alloc_meta_offset;
-        int free_meta_offset;
-        bool is_kmalloc;
-};
-
 #ifdef CONFIG_KASAN_HW_TAGS
 
 DECLARE_STATIC_KEY_FALSE(kasan_flag_enabled);
@@ -101,11 +94,14 @@ static inline bool kasan_has_integrated_init(void)
         return kasan_enabled();
 }
 
+void kasan_alloc_pages(struct page *page, unsigned int order, gfp_t flags);
+void kasan_free_pages(struct page *page, unsigned int order);
+
 #else /* CONFIG_KASAN_HW_TAGS */
 
 static inline bool kasan_enabled(void)
 {
-        return true;
+        return IS_ENABLED(CONFIG_KASAN);
 }
 
@@ -113,8 +109,30 @@ static inline bool kasan_has_integrated_init(void)
         return false;
 }
 
+static __always_inline void kasan_alloc_pages(struct page *page,
+                                              unsigned int order, gfp_t flags)
+{
+        /* Only available for integrated init. */
+        BUILD_BUG();
+}
+
+static __always_inline void kasan_free_pages(struct page *page,
+                                             unsigned int order)
+{
+        /* Only available for integrated init. */
+        BUILD_BUG();
+}
+
 #endif /* CONFIG_KASAN_HW_TAGS */
 
+#ifdef CONFIG_KASAN
+
+struct kasan_cache {
+        int alloc_meta_offset;
+        int free_meta_offset;
+        bool is_kmalloc;
+};
+
 slab_flags_t __kasan_never_merge(void);
 static __always_inline slab_flags_t kasan_never_merge(void)
 {
@@ -130,20 +148,20 @@ static __always_inline void kasan_unpoison_range(const void *addr, size_t size)
                 __kasan_unpoison_range(addr, size);
 }
 
-void __kasan_alloc_pages(struct page *page, unsigned int order, bool init);
-static __always_inline void kasan_alloc_pages(struct page *page,
+void __kasan_poison_pages(struct page *page, unsigned int order, bool init);
+static __always_inline void kasan_poison_pages(struct page *page,
                                                 unsigned int order, bool init)
 {
         if (kasan_enabled())
-                __kasan_alloc_pages(page, order, init);
+                __kasan_poison_pages(page, order, init);
 }
 
-void __kasan_free_pages(struct page *page, unsigned int order, bool init);
-static __always_inline void kasan_free_pages(struct page *page,
-                                             unsigned int order, bool init)
+void __kasan_unpoison_pages(struct page *page, unsigned int order, bool init);
+static __always_inline void kasan_unpoison_pages(struct page *page,
+                                                 unsigned int order, bool init)
 {
         if (kasan_enabled())
-                __kasan_free_pages(page, order, init);
+                __kasan_unpoison_pages(page, order, init);
 }
 
 void __kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
@@ -285,21 +303,15 @@ void kasan_restore_multi_shot(bool enabled);
 
 #else /* CONFIG_KASAN */
 
-static inline bool kasan_enabled(void)
-{
-        return false;
-}
-static inline bool kasan_has_integrated_init(void)
-{
-        return false;
-}
 static inline slab_flags_t kasan_never_merge(void)
 {
         return 0;
 }
 static inline void kasan_unpoison_range(const void *address, size_t size) {}
-static inline void kasan_alloc_pages(struct page *page, unsigned int order, bool init) {}
-static inline void kasan_free_pages(struct page *page, unsigned int order, bool init) {}
+static inline void kasan_poison_pages(struct page *page, unsigned int order,
+                                      bool init) {}
+static inline void kasan_unpoison_pages(struct page *page, unsigned int order,
+                                        bool init) {}
 static inline void kasan_cache_create(struct kmem_cache *cache,
                                       unsigned int *size,
                                       slab_flags_t *flags) {}
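
Note on the BUILD_BUG() stubs: outside CONFIG_KASAN_HW_TAGS,
kasan_has_integrated_init() is constant false, so any call to
kasan_alloc_pages()/kasan_free_pages() must sit behind that check for
the dead branch (and its BUILD_BUG()) to be discarded at compile time.
A minimal sketch of the intended call pattern in an allocation path
such as post_alloc_hook() (simplified and assumed, not the verbatim
page_alloc.c hunk):

	if (kasan_has_integrated_init()) {
		/* HW tags: unpoisoning and zero/tag init in one step. */
		kasan_alloc_pages(page, order, gfp_flags);
	} else {
		bool init = want_init_on_alloc(gfp_flags);

		kasan_unpoison_pages(page, order, init);
		if (init)
			kernel_init_free_pages(page, 1 << order);
	}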