author     Alexander Potapenko <glider@google.com>          2016-03-25 14:22:08 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2016-03-25 16:37:42 -0700
commit     cd11016e5f5212c13c0cec7384a525edc93b4921 (patch)
tree       31e2efd8d8eb6be398ccd0789bb9e865b299dc68 /mm
parent     be7635e7287e0e8013af3c89a6354a9e0182594c (diff)
mm, kasan: stackdepot implementation. Enable stackdepot for SLAB
Implement the stack depot and provide CONFIG_STACKDEPOT. The stack depot
allows KASAN to store allocation/deallocation stack traces for memory
chunks. The stack traces are kept in a hash table and referenced by
handles which reside in the kasan_alloc_meta and kasan_free_meta
structures in the allocated memory chunks. IRQ stack traces are cut
below the IRQ entry point to avoid unnecessary duplication.

Right now stackdepot support is only enabled for the SLAB allocator.
Once KASAN features in SLAB are on par with those in SLUB we can switch
SLUB to stackdepot as well, thus removing the dependency on SLUB stack
bookkeeping, which wastes a lot of memory.

This patch is based on the "mm: kasan: stack depots" patch originally
prepared by Dmitry Chernenkov.

Joonsoo has said that he plans to reuse the stackdepot code for the
mm/page_owner.c debugging facility.

[akpm@linux-foundation.org: s/depot_stack_handle/depot_stack_handle_t]
[aryabinin@virtuozzo.com: comment style fixes]
Signed-off-by: Alexander Potapenko <glider@google.com>
Signed-off-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Andrey Konovalov <adech.fo@gmail.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Konstantin Serebryany <kcc@google.com>
Cc: Dmitry Chernenkov <dmitryc@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
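For context, here is a minimal sketch of how a stackdepot consumer might use the interface this patch relies on. It assumes only the depot_save_stack()/depot_fetch_stack() and save_stack_trace()/print_stack_trace() calls that appear in the diff below; record_caller_stack(), report_caller_stack() and DEMO_STACK_DEPTH are hypothetical names used purely for illustration and are not part of this patch.

#include <linux/gfp.h>
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>

#define DEMO_STACK_DEPTH 16	/* illustrative depth, not the KASAN value */

/* Capture the current stack and deduplicate it into the depot. */
static depot_stack_handle_t record_caller_stack(gfp_t flags)
{
	unsigned long entries[DEMO_STACK_DEPTH];
	struct stack_trace trace = {
		.nr_entries = 0,
		.entries = entries,
		.max_entries = DEMO_STACK_DEPTH,
		.skip = 0	/* capture from the top, as KASAN's save_stack() does */
	};

	save_stack_trace(&trace);
	/* A zero handle means the depot could not store the trace. */
	return depot_save_stack(&trace, flags);
}

/* Rehydrate a previously saved trace from its handle and print it. */
static void report_caller_stack(depot_stack_handle_t handle)
{
	struct stack_trace trace;

	if (!handle)
		return;
	depot_fetch_stack(handle, &trace);
	print_stack_trace(&trace, 0);
}

The set_track()/print_track() changes in this patch follow the same save-then-fetch pattern, parking the handle in struct kasan_track.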
Diffstat (limited to 'mm')
-rw-r--r--  mm/kasan/kasan.c    55
-rw-r--r--  mm/kasan/kasan.h    11
-rw-r--r--  mm/kasan/report.c   12
3 files changed, 66 insertions(+), 12 deletions(-)
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index cb998e0ec9d3..acb3b6c4dd89 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -17,7 +17,9 @@
#define DISABLE_BRANCH_PROFILING
#include <linux/export.h>
+#include <linux/interrupt.h>
#include <linux/init.h>
+#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/linkage.h>
@@ -32,7 +34,6 @@
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
-#include <linux/kasan.h>
#include "kasan.h"
#include "../slab.h"
@@ -413,23 +414,65 @@ void kasan_poison_object_data(struct kmem_cache *cache, void *object)
#endif
}
-static inline void set_track(struct kasan_track *track)
+#ifdef CONFIG_SLAB
+static inline int in_irqentry_text(unsigned long ptr)
+{
+        return (ptr >= (unsigned long)&__irqentry_text_start &&
+                ptr < (unsigned long)&__irqentry_text_end) ||
+                (ptr >= (unsigned long)&__softirqentry_text_start &&
+                 ptr < (unsigned long)&__softirqentry_text_end);
+}
+
+static inline void filter_irq_stacks(struct stack_trace *trace)
+{
+        int i;
+
+        if (!trace->nr_entries)
+                return;
+        for (i = 0; i < trace->nr_entries; i++)
+                if (in_irqentry_text(trace->entries[i])) {
+                        /* Include the irqentry function into the stack. */
+                        trace->nr_entries = i + 1;
+                        break;
+                }
+}
+
+static inline depot_stack_handle_t save_stack(gfp_t flags)
+{
+        unsigned long entries[KASAN_STACK_DEPTH];
+        struct stack_trace trace = {
+                .nr_entries = 0,
+                .entries = entries,
+                .max_entries = KASAN_STACK_DEPTH,
+                .skip = 0
+        };
+
+        save_stack_trace(&trace);
+        filter_irq_stacks(&trace);
+        if (trace.nr_entries != 0 &&
+            trace.entries[trace.nr_entries-1] == ULONG_MAX)
+                trace.nr_entries--;
+
+        return depot_save_stack(&trace, flags);
+}
+
+static inline void set_track(struct kasan_track *track, gfp_t flags)
{
- track->cpu = raw_smp_processor_id();
track->pid = current->pid;
- track->when = jiffies;
+ track->stack = save_stack(flags);
}
-#ifdef CONFIG_SLAB
struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache,
const void *object)
{
+ BUILD_BUG_ON(sizeof(struct kasan_alloc_meta) > 32);
return (void *)object + cache->kasan_info.alloc_meta_offset;
}
struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
const void *object)
{
+ BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
return (void *)object + cache->kasan_info.free_meta_offset;
}
#endif
@@ -486,7 +529,7 @@ void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
alloc_info->state = KASAN_STATE_ALLOC;
alloc_info->alloc_size = size;
- set_track(&alloc_info->track);
+ set_track(&alloc_info->track, flags);
}
#endif
}
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index 7b9e4ab9b66b..30a2f0ba0e09 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -2,6 +2,7 @@
#define __MM_KASAN_KASAN_H
#include <linux/kasan.h>
+#include <linux/stackdepot.h>
#define KASAN_SHADOW_SCALE_SIZE (1UL << KASAN_SHADOW_SCALE_SHIFT)
#define KASAN_SHADOW_MASK (KASAN_SHADOW_SCALE_SIZE - 1)
@@ -64,16 +65,18 @@ enum kasan_state {
KASAN_STATE_FREE
};
+#define KASAN_STACK_DEPTH 64
+
struct kasan_track {
- u64 cpu : 6; /* for NR_CPUS = 64 */
- u64 pid : 16; /* 65536 processes */
- u64 when : 42; /* ~140 years */
+ u32 pid;
+ depot_stack_handle_t stack;
};
struct kasan_alloc_meta {
+ struct kasan_track track;
u32 state : 2; /* enum kasan_state */
u32 alloc_size : 30;
- struct kasan_track track;
+ u32 reserved;
};
struct kasan_free_meta {
diff --git a/mm/kasan/report.c b/mm/kasan/report.c
index 3e3385cc97ac..60869a5a0124 100644
--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -18,6 +18,7 @@
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/slab.h>
+#include <linux/stackdepot.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
@@ -118,8 +119,15 @@ static inline bool init_task_stack_addr(const void *addr)
#ifdef CONFIG_SLAB
static void print_track(struct kasan_track *track)
{
-        pr_err("PID = %u, CPU = %u, timestamp = %lu\n", track->pid,
-                track->cpu, (unsigned long)track->when);
+        pr_err("PID = %u\n", track->pid);
+        if (track->stack) {
+                struct stack_trace trace;
+
+                depot_fetch_stack(track->stack, &trace);
+                print_stack_trace(&trace, 0);
+        } else {
+                pr_err("(stack is not available)\n");
+        }
}
static void object_err(struct kmem_cache *cache, struct page *page,