path: root/lib/stackdepot.c
author     Vlastimil Babka <vbabka@suse.cz>   2022-03-02 12:02:22 +0100
committer  Vlastimil Babka <vbabka@suse.cz>   2022-04-06 10:55:50 +0200
commit     a5f1783be29adae15666fd803efd7d2979130869 (patch)
tree       46c74857d0f8e38bb5a7f79af1b94c62257718de /lib/stackdepot.c
parent     3123109284176b1532874591f7c81f3837bbdc17 (diff)
lib/stackdepot: allow requesting early initialization dynamically
In a later patch we want to add stackdepot support for object owner tracking in slub caches, which is enabled by the slub_debug boot parameter. This creates a bootstrap problem as some caches are created early in boot when slab_is_available() is false and thus stack_depot_init() tries to use memblock. But, as reported by Hyeonggon Yoo [1], we are already beyond memblock_free_all(). Ideally the memblock allocation should fail, yet it succeeds, and the system crashes later; that is a separately handled issue.

To resolve this bootstrap issue in a robust way, this patch adds another way to request stack_depot_early_init(), which happens at a well-defined point in time. In addition to the build-time CONFIG_STACKDEPOT_ALWAYS_INIT, code that is e.g. processing boot parameters (which happens early enough) can call a new function, stack_depot_want_early_init(), which sets a flag that stack_depot_early_init() will check.

In this patch we also convert page_owner to this approach. While it doesn't have the same bootstrap issue as slub, it is also functionality enabled by a boot parameter and can thus request stack_depot_early_init() with memblock allocation instead of later initialization with kvmalloc().

As suggested by Mike, make stack_depot_early_init() only attempt memblock allocation and stack_depot_init() only attempt kvmalloc(). Also change the latter to kvcalloc(). In both cases we can drop the explicit array zeroing, since the allocations already zero the memory.

As suggested by Marco, provide empty implementations of the init functions for !CONFIG_STACKDEPOT builds to simplify the callers.

[1] https://lore.kernel.org/all/YhnUcqyeMgCrWZbd@ip-172-31-19-208.ap-northeast-1.compute.internal/

Reported-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Suggested-by: Mike Rapoport <rppt@linux.ibm.com>
Suggested-by: Marco Elver <elver@google.com>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Marco Elver <elver@google.com>
Reviewed-and-tested-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Reviewed-by: Mike Rapoport <rppt@linux.ibm.com>
Acked-by: David Rientjes <rientjes@google.com>
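The request flow described above has two halves: an early caller sets a flag via stack_depot_want_early_init(), and mm_init() later calls stack_depot_early_init(), which performs the memblock allocation only if that flag is set. A minimal sketch of the caller side, modeled on the page_owner conversion mentioned in the message, might look like the following; the handler name and the page_owner_enabled flag are illustrative, since the actual page_owner hunk is outside the lib/stackdepot.c scope shown below.

/*
 * Sketch of a boot-parameter handler requesting early stack depot init.
 * early_param() handlers run before mm_init() calls stack_depot_early_init(),
 * so setting the request flag here is early enough. Names are illustrative,
 * not the verbatim page_owner code from this series.
 */
static bool page_owner_enabled __initdata;

static int __init early_page_owner_param(char *buf)
{
	int ret = kstrtobool(buf, &page_owner_enabled);

	if (page_owner_enabled)
		stack_depot_want_early_init();

	return ret;
}
early_param("page_owner", early_page_owner_param);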
Diffstat (limited to 'lib/stackdepot.c')
-rw-r--r--  lib/stackdepot.c  67
1 file changed, 45 insertions(+), 22 deletions(-)
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index bf5ba9af0500..5ca0d086ef4a 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -66,6 +66,9 @@ struct stack_record {
unsigned long entries[]; /* Variable-sized array of entries. */
};
+static bool __stack_depot_want_early_init __initdata = IS_ENABLED(CONFIG_STACKDEPOT_ALWAYS_INIT);
+static bool __stack_depot_early_init_passed __initdata;
+
static void *stack_slabs[STACK_ALLOC_MAX_SLABS];
static int depot_index;
@@ -162,38 +165,58 @@ static int __init is_stack_depot_disabled(char *str)
}
early_param("stack_depot_disable", is_stack_depot_disabled);
-/*
- * __ref because of memblock_alloc(), which will not be actually called after
- * the __init code is gone, because at that point slab_is_available() is true
- */
-__ref int stack_depot_init(void)
+void __init stack_depot_want_early_init(void)
+{
+ /* Too late to request early init now */
+ WARN_ON(__stack_depot_early_init_passed);
+
+ __stack_depot_want_early_init = true;
+}
+
+int __init stack_depot_early_init(void)
+{
+ size_t size;
+
+ /* This is supposed to be called only once, from mm_init() */
+ if (WARN_ON(__stack_depot_early_init_passed))
+ return 0;
+
+ __stack_depot_early_init_passed = true;
+
+ if (!__stack_depot_want_early_init || stack_depot_disable)
+ return 0;
+
+ size = (STACK_HASH_SIZE * sizeof(struct stack_record *));
+ pr_info("Stack Depot early init allocating hash table with memblock_alloc, %zu bytes\n",
+ size);
+ stack_table = memblock_alloc(size, SMP_CACHE_BYTES);
+
+ if (!stack_table) {
+ pr_err("Stack Depot hash table allocation failed, disabling\n");
+ stack_depot_disable = true;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+int stack_depot_init(void)
{
static DEFINE_MUTEX(stack_depot_init_mutex);
+ int ret = 0;
mutex_lock(&stack_depot_init_mutex);
if (!stack_depot_disable && !stack_table) {
- size_t size = (STACK_HASH_SIZE * sizeof(struct stack_record *));
- int i;
-
- if (slab_is_available()) {
- pr_info("Stack Depot allocating hash table with kvmalloc\n");
- stack_table = kvmalloc(size, GFP_KERNEL);
- } else {
- pr_info("Stack Depot allocating hash table with memblock_alloc\n");
- stack_table = memblock_alloc(size, SMP_CACHE_BYTES);
- }
- if (stack_table) {
- for (i = 0; i < STACK_HASH_SIZE; i++)
- stack_table[i] = NULL;
- } else {
+ pr_info("Stack Depot allocating hash table with kvcalloc\n");
+ stack_table = kvcalloc(STACK_HASH_SIZE, sizeof(struct stack_record *), GFP_KERNEL);
+ if (!stack_table) {
pr_err("Stack Depot hash table allocation failed, disabling\n");
stack_depot_disable = true;
- mutex_unlock(&stack_depot_init_mutex);
- return -ENOMEM;
+ ret = -ENOMEM;
}
}
mutex_unlock(&stack_depot_init_mutex);
- return 0;
+ return ret;
}
EXPORT_SYMBOL_GPL(stack_depot_init);
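For completeness, the empty implementations for !CONFIG_STACKDEPOT builds mentioned in the commit message live in include/linux/stackdepot.h rather than in this file. A simplified sketch of that shape, assuming the usual CONFIG_STACKDEPOT split and not the verbatim header hunk, is:

/*
 * Simplified sketch of the header-side interface: real declarations when
 * stack depot is built in, empty inline stubs otherwise, so callers such as
 * page_owner and the slub_debug path need no #ifdef guards.
 */
#ifdef CONFIG_STACKDEPOT
int stack_depot_init(void);
int stack_depot_early_init(void);
void stack_depot_want_early_init(void);
#else
static inline int stack_depot_init(void) { return 0; }
static inline int stack_depot_early_init(void) { return 0; }
static inline void stack_depot_want_early_init(void) { }
#endif

With this split, stack_depot_early_init() stays the single early call site (invoked once from mm_init(), as the WARN_ON in the patch enforces), while stack_depot_init() remains the lazy, mutex-protected path for callers that only discover late that they need the depot.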