author	Andrey Konovalov <andreyknvl@google.com>	2023-02-10 22:16:03 +0100
committer	Andrew Morton <akpm@linux-foundation.org>	2023-02-16 20:43:51 -0800
commit	36aa1e6779c3c6f8e0d4552544214f5cffe3c287 (patch)
tree	1639d4bb581f0baab0ebed256c8e2af3a0fa6c5e /mm/kmsan
parent	d11a5621f3252120dfc7cef7600a90bd8e605caf (diff)
lib/stacktrace, kasan, kmsan: rework extra_bits interface
The current implementation of the extra_bits interface is confusing: passing extra_bits to __stack_depot_save makes it seem that the extra bits are somehow stored in stack depot. In reality, they are only embedded into a stack depot handle and are not used within stack depot.

Drop the extra_bits argument from __stack_depot_save and instead provide a new stack_depot_set_extra_bits function (similar to the existing stack_depot_get_extra_bits) that saves extra bits into a stack depot handle.

Update the callers of __stack_depot_save to use the new interface.

This change also fixes a minor issue in the old code: __stack_depot_save does not return NULL if saving the stack trace fails and extra_bits is used.

Link: https://lkml.kernel.org/r/317123b5c05e2f82854fc55d8b285e0869d3cb77.1676063693.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Reviewed-by: Alexander Potapenko <glider@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
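For orientation, the shape of the change for a caller is sketched below. __stack_depot_save(), stack_depot_set_extra_bits() and stack_depot_get_extra_bits() are the stack depot functions this patch touches; the wrapper function itself is a hypothetical example, not code from this commit.

#include <linux/gfp.h>
#include <linux/stackdepot.h>

/* Hypothetical caller: save a trace, then tag its handle with extra bits. */
static depot_stack_handle_t save_tagged_stack(unsigned long *entries,
					      unsigned int nr_entries,
					      unsigned int extra, gfp_t flags)
{
	depot_stack_handle_t handle;

	/* Old interface: __stack_depot_save(entries, nr_entries, extra, flags, true); */

	/* New interface: save first, then embed the extra bits in the handle. */
	handle = __stack_depot_save(entries, nr_entries, flags, true);
	return stack_depot_set_extra_bits(handle, extra);
}

Retrieval is unchanged: stack_depot_get_extra_bits(handle) still returns the bits embedded in the handle.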
Diffstat (limited to 'mm/kmsan')
-rw-r--r--	mm/kmsan/core.c	10
1 file changed, 7 insertions(+), 3 deletions(-)
diff --git a/mm/kmsan/core.c b/mm/kmsan/core.c
index 112dce135c7f..f710257d6867 100644
--- a/mm/kmsan/core.c
+++ b/mm/kmsan/core.c
@@ -69,13 +69,15 @@ depot_stack_handle_t kmsan_save_stack_with_flags(gfp_t flags,
 {
 	unsigned long entries[KMSAN_STACK_DEPTH];
 	unsigned int nr_entries;
+	depot_stack_handle_t handle;
 
 	nr_entries = stack_trace_save(entries, KMSAN_STACK_DEPTH, 0);
 
 	/* Don't sleep (see might_sleep_if() in __alloc_pages_nodemask()). */
 	flags &= ~__GFP_DIRECT_RECLAIM;
 
-	return __stack_depot_save(entries, nr_entries, extra, flags, true);
+	handle = __stack_depot_save(entries, nr_entries, flags, true);
+	return stack_depot_set_extra_bits(handle, extra);
 }
 
 /* Copy the metadata following the memmove() behavior. */
@@ -215,6 +217,7 @@ depot_stack_handle_t kmsan_internal_chain_origin(depot_stack_handle_t id)
 	u32 extra_bits;
 	int depth;
 	bool uaf;
+	depot_stack_handle_t handle;
 
 	if (!id)
 		return id;
@@ -250,8 +253,9 @@ depot_stack_handle_t kmsan_internal_chain_origin(depot_stack_handle_t id)
 	 * positives when __stack_depot_save() passes it to instrumented code.
 	 */
 	kmsan_internal_unpoison_memory(entries, sizeof(entries), false);
-	return __stack_depot_save(entries, ARRAY_SIZE(entries), extra_bits,
-				  GFP_ATOMIC, true);
+	handle = __stack_depot_save(entries, ARRAY_SIZE(entries), GFP_ATOMIC,
+				    true);
+	return stack_depot_set_extra_bits(handle, extra_bits);
 }
 
 void kmsan_internal_set_shadow_origin(void *addr, size_t size, int b,
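The lib/stackdepot.c side of the change is not shown here (the diffstat above is limited to mm/kmsan). To illustrate what the commit message means by extra bits being embedded in the handle rather than stored in stack depot, here is a simplified, self-contained toy sketch; it is not the actual stackdepot code, and the real handle layout and bit widths differ.

#include <stdint.h>

/* Toy model: reserve the low 5 bits of a 32-bit handle for caller data. */
#define TOY_EXTRA_BITS	5u
#define TOY_EXTRA_MASK	((1u << TOY_EXTRA_BITS) - 1)

typedef uint32_t toy_handle_t;

/* Fold extra bits into the handle; a zero (failed) handle stays zero. */
static toy_handle_t toy_set_extra_bits(toy_handle_t handle, unsigned int extra)
{
	if (!handle)
		return 0;
	return (handle & ~TOY_EXTRA_MASK) | (extra & TOY_EXTRA_MASK);
}

/* Recover the extra bits later, e.g. when reporting. */
static unsigned int toy_get_extra_bits(toy_handle_t handle)
{
	return handle & TOY_EXTRA_MASK;
}

Separating the tagging step from the save is also what lets the commit address the minor issue it mentions: a failed save now yields a plain zero handle, which the caller (or a helper like the toy above) can leave untagged.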