author     Marco Elver <elver@google.com>                   2022-01-21 22:14:31 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>   2022-01-22 08:33:38 +0200
commit     e940066089490efde86abc519593be84362f4e53 (patch)
tree       ffd6a2977d1065ee08ec5fe54123afc455e1e841
parent     2dba5eb1c73b6ba2988ced07250edeac0f8cbf5a (diff)
lib/stackdepot: always do filter_irq_stacks() in stack_depot_save()
The non-interrupt portion of interrupt stack traces before interrupt entry is
usually arbitrary.  Therefore, saving stack traces of interrupts (that include
entries before interrupt entry) to stack depot leads to unbounded stackdepot
growth.

As such, use of filter_irq_stacks() is a requirement to ensure stackdepot can
efficiently deduplicate interrupt stacks.

Looking through all current users of stack_depot_save(), none (except KASAN)
pass the stack trace through filter_irq_stacks() before passing it on to
stack_depot_save().

Rather than adding filter_irq_stacks() to all current users of
stack_depot_save(), it became clear that stack_depot_save() should simply do
filter_irq_stacks().

Link: https://lkml.kernel.org/r/20211130095727.2378739-1-elver@google.com
Signed-off-by: Marco Elver <elver@google.com>
Reviewed-by: Alexander Potapenko <glider@google.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Andrey Konovalov <andreyknvl@gmail.com>
Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Vijayanand Jitta <vjitta@codeaurora.org>
Cc: "Gustavo A. R. Silva" <gustavoars@kernel.org>
Cc: Imran Khan <imran.f.khan@oracle.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Jani Nikula <jani.nikula@intel.com>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--  lib/stackdepot.c   | 13
-rw-r--r--  mm/kasan/common.c  |  1
2 files changed, 13 insertions, 1 deletion
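For context, a minimal sketch of what a caller looks like after this change
(hypothetical function name, modelled on the mm/kasan/common.c hunk below):
the caller captures a raw stack trace and hands it straight to
__stack_depot_save(), which now applies filter_irq_stacks() internally.

static depot_stack_handle_t save_stack_sketch(gfp_t flags, bool can_alloc)
{
	unsigned long entries[64];
	unsigned int nr_entries;

	/* Capture the current stack trace... */
	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
	/* ...and let stack depot filter interrupt frames and deduplicate. */
	return __stack_depot_save(entries, nr_entries, flags, can_alloc);
}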
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index 00ccb106f1a8..bf5ba9af0500 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -328,6 +328,9 @@ EXPORT_SYMBOL_GPL(stack_depot_fetch);
* (allocates using GFP flags of @alloc_flags). If @can_alloc is %false, avoids
* any allocations and will fail if no space is left to store the stack trace.
*
+ * If the stack trace in @entries is from an interrupt, only the portion up to
+ * interrupt entry is saved.
+ *
* Context: Any context, but setting @can_alloc to %false is required if
* alloc_pages() cannot be used from the current context. Currently
* this is the case from contexts where neither %GFP_ATOMIC nor
@@ -346,6 +349,16 @@ depot_stack_handle_t __stack_depot_save(unsigned long *entries,
unsigned long flags;
u32 hash;
+ /*
+ * If this stack trace is from an interrupt, including anything before
+ * interrupt entry usually leads to unbounded stackdepot growth.
+ *
+ * Because use of filter_irq_stacks() is a requirement to ensure
+ * stackdepot can efficiently deduplicate interrupt stacks, always
+ * filter_irq_stacks() to simplify all callers' use of stackdepot.
+ */
+ nr_entries = filter_irq_stacks(entries, nr_entries);
+
if (unlikely(nr_entries == 0) || stack_depot_disable)
goto fast_exit;
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 7c06db78a76c..92196562687b 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -36,7 +36,6 @@ depot_stack_handle_t kasan_save_stack(gfp_t flags, bool can_alloc)
unsigned int nr_entries;
nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
- nr_entries = filter_irq_stacks(entries, nr_entries);
return __stack_depot_save(entries, nr_entries, flags, can_alloc);
}
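For readers unfamiliar with filter_irq_stacks(), a simplified sketch of its
behaviour follows (not the exact kernel implementation; in_irqentry_text()
stands in here for the check against the __irqentry_text/__softirqentry_text
section bounds).  The trace is truncated right after the first interrupt-entry
frame, so traces that differ only in the interrupted task's frames below
interrupt entry hash to the same depot entry.

static unsigned int filter_irq_stacks_sketch(unsigned long *entries,
					     unsigned int nr_entries)
{
	unsigned int i;

	for (i = 0; i < nr_entries; i++) {
		/* First frame inside irq/softirq entry text? */
		if (in_irqentry_text(entries[i]))
			return i + 1;	/* keep the entry frame, drop the rest */
	}
	return nr_entries;
}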