Diffstat (limited to 'mm/kasan/kasan.h')
-rw-r--r--	mm/kasan/kasan.h	173
1 file changed, 140 insertions, 33 deletions
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index ac499456740f..cc4d9e1d49b1 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -5,8 +5,32 @@
 #include <linux/kasan.h>
 #include <linux/stackdepot.h>
 
-#define KASAN_SHADOW_SCALE_SIZE	(1UL << KASAN_SHADOW_SCALE_SHIFT)
-#define KASAN_SHADOW_MASK	(KASAN_SHADOW_SCALE_SIZE - 1)
+#ifdef CONFIG_KASAN_HW_TAGS
+#include <linux/static_key.h>
+DECLARE_STATIC_KEY_FALSE(kasan_flag_stacktrace);
+static inline bool kasan_stack_collection_enabled(void)
+{
+	return static_branch_unlikely(&kasan_flag_stacktrace);
+}
+#else
+static inline bool kasan_stack_collection_enabled(void)
+{
+	return true;
+}
+#endif
+
+extern bool kasan_flag_panic __ro_after_init;
+
+#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
+#define KASAN_GRANULE_SIZE	(1UL << KASAN_SHADOW_SCALE_SHIFT)
+#else
+#include <asm/mte-kasan.h>
+#define KASAN_GRANULE_SIZE	MTE_GRANULE_SIZE
+#endif
+
+#define KASAN_GRANULE_MASK	(KASAN_GRANULE_SIZE - 1)
+
+#define KASAN_MEMORY_PER_SHADOW_PAGE	(KASAN_GRANULE_SIZE << PAGE_SHIFT)
 
 #define KASAN_TAG_KERNEL	0xFF /* native kernel pointers tag */
 #define KASAN_TAG_INVALID	0xFE /* inaccessible memory tag */
@@ -56,6 +80,13 @@
 #define KASAN_ABI_VERSION 1
 #endif
 
+/* Metadata layout customization. */
+#define META_BYTES_PER_BLOCK 1
+#define META_BLOCKS_PER_ROW 16
+#define META_BYTES_PER_ROW (META_BLOCKS_PER_ROW * META_BYTES_PER_BLOCK)
+#define META_MEM_BYTES_PER_ROW (META_BYTES_PER_ROW * KASAN_GRANULE_SIZE)
+#define META_ROWS_AROUND_ADDR 2
+
 struct kasan_access_info {
 	const void *access_addr;
 	const void *first_bad_addr;
@@ -124,20 +155,33 @@ struct kasan_alloc_meta {
 struct qlist_node {
 	struct qlist_node *next;
 };
+
+/*
+ * Generic mode either stores free meta in the object itself or in the redzone
+ * after the object. In the former case free meta offset is 0, in the latter
+ * case it has some sane value smaller than INT_MAX. Use INT_MAX as free meta
+ * offset when free meta isn't present.
+ */
+#define KASAN_NO_FREE_META INT_MAX
+
 struct kasan_free_meta {
+#ifdef CONFIG_KASAN_GENERIC
 	/* This field is used while the object is in the quarantine.
 	 * Otherwise it might be used for the allocator freelist.
 	 */
 	struct qlist_node quarantine_link;
-#ifdef CONFIG_KASAN_GENERIC
 	struct kasan_track free_track;
 #endif
 };
 
-struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache,
-					const void *object);
-struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
-					const void *object);
+struct kasan_alloc_meta *kasan_get_alloc_meta(struct kmem_cache *cache,
+						const void *object);
+#ifdef CONFIG_KASAN_GENERIC
+struct kasan_free_meta *kasan_get_free_meta(struct kmem_cache *cache,
+						const void *object);
+#endif
+
+#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
 
 static inline const void *kasan_shadow_to_mem(const void *shadow_addr)
 {
@@ -145,13 +189,11 @@ static inline const void *kasan_shadow_to_mem(const void *shadow_addr)
 		<< KASAN_SHADOW_SCALE_SHIFT);
 }
 
-static inline bool addr_has_shadow(const void *addr)
+static inline bool addr_has_metadata(const void *addr)
 {
 	return (addr >= kasan_shadow_to_mem((void *)KASAN_SHADOW_START));
 }
 
-void kasan_poison_shadow(const void *address, size_t size, u8 value);
-
 /**
  * check_memory_region - Check memory region, and report if invalid access.
  * @addr: the accessed address
@@ -163,8 +205,30 @@ void kasan_poison_shadow(const void *address, size_t size, u8 value);
 bool check_memory_region(unsigned long addr, size_t size, bool write,
 				unsigned long ret_ip);
 
+#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
+
+static inline bool addr_has_metadata(const void *addr)
+{
+	return true;
+}
+
+#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
+
+#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)
+void print_tags(u8 addr_tag, const void *addr);
+#else
+static inline void print_tags(u8 addr_tag, const void *addr) { }
+#endif
+
 void *find_first_bad_addr(void *addr, size_t size);
 const char *get_bug_type(struct kasan_access_info *info);
+void metadata_fetch_row(char *buffer, void *row);
+
+#if defined(CONFIG_KASAN_GENERIC) && CONFIG_KASAN_STACK
+void print_address_stack_frame(const void *addr);
+#else
+static inline void print_address_stack_frame(const void *addr) { }
+#endif
 
 bool kasan_report(unsigned long addr, size_t size, bool is_write,
 		unsigned long ip);
@@ -180,49 +244,92 @@ struct kasan_track *kasan_get_free_track(struct kmem_cache *cache,
 
 #if defined(CONFIG_KASAN_GENERIC) && \
 	(defined(CONFIG_SLAB) || defined(CONFIG_SLUB))
-void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache);
+bool quarantine_put(struct kmem_cache *cache, void *object);
 void quarantine_reduce(void);
 void quarantine_remove_cache(struct kmem_cache *cache);
 #else
-static inline void quarantine_put(struct kasan_free_meta *info,
-					struct kmem_cache *cache) { }
+static inline bool quarantine_put(struct kmem_cache *cache, void *object) { return false; }
 static inline void quarantine_reduce(void) { }
 static inline void quarantine_remove_cache(struct kmem_cache *cache) { }
 #endif
 
-#ifdef CONFIG_KASAN_SW_TAGS
+#ifndef arch_kasan_set_tag
+static inline const void *arch_kasan_set_tag(const void *addr, u8 tag)
+{
+	return addr;
+}
+#endif
+#ifndef arch_kasan_get_tag
+#define arch_kasan_get_tag(addr)	0
+#endif
 
-void print_tags(u8 addr_tag, const void *addr);
+#define set_tag(addr, tag)	((void *)arch_kasan_set_tag((addr), (tag)))
+#define get_tag(addr)		arch_kasan_get_tag(addr)
 
-u8 random_tag(void);
+#ifdef CONFIG_KASAN_HW_TAGS
+
+#ifndef arch_enable_tagging
+#define arch_enable_tagging()
+#endif
+#ifndef arch_init_tags
+#define arch_init_tags(max_tag)
+#endif
+#ifndef arch_get_random_tag
+#define arch_get_random_tag()	(0xFF)
+#endif
+#ifndef arch_get_mem_tag
+#define arch_get_mem_tag(addr)	(0xFF)
+#endif
+#ifndef arch_set_mem_tag_range
+#define arch_set_mem_tag_range(addr, size, tag)	((void *)(addr))
+#endif
+
+#define hw_enable_tagging()			arch_enable_tagging()
+#define hw_init_tags(max_tag)			arch_init_tags(max_tag)
+#define hw_get_random_tag()			arch_get_random_tag()
+#define hw_get_mem_tag(addr)			arch_get_mem_tag(addr)
+#define hw_set_mem_tag_range(addr, size, tag)	arch_set_mem_tag_range((addr), (size), (tag))
+#endif /* CONFIG_KASAN_HW_TAGS */
+
+#ifdef CONFIG_KASAN_SW_TAGS
+u8 random_tag(void);
+#elif defined(CONFIG_KASAN_HW_TAGS)
+static inline u8 random_tag(void) { return hw_get_random_tag(); }
 #else
+static inline u8 random_tag(void) { return 0; }
+#endif
 
-static inline void print_tags(u8 addr_tag, const void *addr) { }
+#ifdef CONFIG_KASAN_HW_TAGS
 
-static inline u8 random_tag(void)
+static inline void poison_range(const void *address, size_t size, u8 value)
 {
-	return 0;
+	hw_set_mem_tag_range(kasan_reset_tag(address),
+			round_up(size, KASAN_GRANULE_SIZE), value);
 }
 
-#endif
+static inline void unpoison_range(const void *address, size_t size)
+{
+	hw_set_mem_tag_range(kasan_reset_tag(address),
+			round_up(size, KASAN_GRANULE_SIZE), get_tag(address));
+}
 
-#ifndef arch_kasan_set_tag
-static inline const void *arch_kasan_set_tag(const void *addr, u8 tag)
+static inline bool check_invalid_free(void *addr)
 {
-	return addr;
+	u8 ptr_tag = get_tag(addr);
+	u8 mem_tag = hw_get_mem_tag(addr);
+
+	return (mem_tag == KASAN_TAG_INVALID) ||
+		(ptr_tag != KASAN_TAG_KERNEL && ptr_tag != mem_tag);
 }
-#endif
-#ifndef arch_kasan_reset_tag
-#define arch_kasan_reset_tag(addr)	((void *)(addr))
-#endif
-#ifndef arch_kasan_get_tag
-#define arch_kasan_get_tag(addr)	0
-#endif
 
-#define set_tag(addr, tag)	((void *)arch_kasan_set_tag((addr), (tag)))
-#define reset_tag(addr)		((void *)arch_kasan_reset_tag(addr))
-#define get_tag(addr)		arch_kasan_get_tag(addr)
+#else /* CONFIG_KASAN_HW_TAGS */
+
+void poison_range(const void *address, size_t size, u8 value);
+void unpoison_range(const void *address, size_t size);
+bool check_invalid_free(void *addr);
+
+#endif /* CONFIG_KASAN_HW_TAGS */
 
 /*
  * Exported functions for interfaces called from assembly or from generated
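
As a side note for readers of this patch: the sketch below is a standalone userspace illustration, not kernel code, of two pieces of logic the diff adds. It shows how a poisoned range is rounded up to KASAN_GRANULE_SIZE (as the HW_TAGS poison_range()/unpoison_range() helpers do via round_up()) and how the check_invalid_free() comparison of pointer tag against memory tag works. The round_up_granule() helper, the fixed 16-byte granule, and the sample tag values are assumptions made for the example only.

/*
 * Userspace illustration of the granule rounding and tag-check logic
 * added by this patch. KASAN_GRANULE_SIZE and round_up_granule() are
 * simplified stand-ins, not the kernel's definitions.
 */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define KASAN_GRANULE_SIZE	16UL	/* stand-in for MTE_GRANULE_SIZE */
#define KASAN_GRANULE_MASK	(KASAN_GRANULE_SIZE - 1)

#define KASAN_TAG_KERNEL	0xFF	/* native kernel pointers tag */
#define KASAN_TAG_INVALID	0xFE	/* inaccessible memory tag */

/* Minimal stand-in for the kernel's round_up() as used by poison_range(). */
static unsigned long round_up_granule(unsigned long size)
{
	return (size + KASAN_GRANULE_MASK) & ~KASAN_GRANULE_MASK;
}

/* Mirrors the HW_TAGS check_invalid_free() comparison from the patch. */
static bool is_invalid_free(uint8_t ptr_tag, uint8_t mem_tag)
{
	return (mem_tag == KASAN_TAG_INVALID) ||
	       (ptr_tag != KASAN_TAG_KERNEL && ptr_tag != mem_tag);
}

int main(void)
{
	/* A 20-byte object is poisoned as two full 16-byte granules. */
	printf("poison size for 20 bytes: %lu\n", round_up_granule(20));

	/* Matching tags: the free is considered valid. */
	printf("ptr 0x2a vs mem 0x2a -> invalid free? %d\n",
	       is_invalid_free(0x2a, 0x2a));

	/* Mismatched tags: wrong pointer or corrupted access. */
	printf("ptr 0x2a vs mem 0x17 -> invalid free? %d\n",
	       is_invalid_free(0x2a, 0x17));

	/* Memory already marked invalid, e.g. already freed. */
	printf("ptr 0x2a vs mem 0xfe -> invalid free? %d\n",
	       is_invalid_free(0x2a, KASAN_TAG_INVALID));

	return 0;
}

Compiling and running this prints the rounded size (32) and the three verdicts, which is the intuition behind why a mismatched tag or an already-invalid memory tag is reported as an invalid free.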