-rw-r--r--  init/main.c         2
-rw-r--r--  lib/debugobjects.c  8
2 files changed, 4 insertions, 6 deletions
diff --git a/init/main.c b/init/main.c
index 954d9b6c62c6..f6e901ec6b78 100644
--- a/init/main.c
+++ b/init/main.c
@@ -521,6 +521,7 @@ static void __init mm_init(void)
 	mem_init();
 	kmem_cache_init();
 	pgtable_init();
+	debug_objects_mem_init();
 	vmalloc_init();
 	ioremap_huge_init();
 	/* Should be run before the first non-init thread is created */
@@ -697,7 +698,6 @@ asmlinkage __visible void __init start_kernel(void)
 #endif
 	page_ext_init();
 	kmemleak_init();
-	debug_objects_mem_init();
 	setup_per_cpu_pageset();
 	numa_policy_init();
 	acpi_early_init();
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 14afeeb7d6ef..55437fd5128b 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -1131,11 +1131,10 @@ static int __init debug_objects_replace_static_objects(void)
 	}
 
 	/*
-	 * When debug_objects_mem_init() is called we know that only
-	 * one CPU is up, so disabling interrupts is enough
-	 * protection. This avoids the lockdep hell of lock ordering.
+	 * debug_objects_mem_init() is now called early, when only one CPU is up
+	 * and interrupts have been disabled, so it is safe to replace the
+	 * active object references.
 	 */
-	local_irq_disable();
 
 	/* Remove the statically allocated objects from the pool */
 	hlist_for_each_entry_safe(obj, tmp, &obj_pool, node)
@@ -1156,7 +1155,6 @@ static int __init debug_objects_replace_static_objects(void)
 			cnt++;
 		}
 	}
-	local_irq_enable();
 
 	pr_debug("%d of %d active objects replaced\n",
 		 cnt, obj_pool_used);
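
The removal of the local_irq_disable()/local_irq_enable() pair relies on the new call site: debug_objects_mem_init() now runs from mm_init(), before vmalloc_init() and before any secondary CPU or additional thread exists, so nothing can race with the list walk that swaps the statically allocated tracking objects for dynamically allocated copies. The stand-alone C sketch below illustrates that replace-in-place pattern only; it is not the kernel code, and every name in it (struct obj, replace_static_objects(), and so on) is invented for the illustration.

/*
 * Hypothetical user-space sketch of the replace-static-objects pattern.
 * It runs single-threaded, so no locking or interrupt disabling is
 * needed around the list walk, mirroring the early-boot guarantee the
 * patched kernel function relies on.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Minimal intrusive singly linked list node (stand-in, not struct debug_obj). */
struct obj {
	struct obj *next;
	int is_static;	/* 1 if the node lives in static storage */
	int payload;
};

/* Objects tracked before the allocator was usable. */
static struct obj static_objs[3] = {
	{ NULL, 1, 10 }, { NULL, 1, 20 }, { NULL, 1, 30 },
};
static struct obj *active_list;

static void install_static_objs(void)
{
	for (int i = 0; i < 3; i++) {
		static_objs[i].next = active_list;
		active_list = &static_objs[i];
	}
}

/* Swap every static node for a heap-allocated copy, counting replacements. */
static int replace_static_objects(void)
{
	int cnt = 0, total = 0;
	struct obj **pp;

	for (pp = &active_list; *pp; pp = &(*pp)->next) {
		struct obj *old = *pp;
		struct obj *new;

		total++;
		if (!old->is_static)
			continue;

		new = malloc(sizeof(*new));
		if (!new)
			return -1;
		memcpy(new, old, sizeof(*new));	/* copy object data */
		new->is_static = 0;
		*pp = new;	/* unlink the static node, link the copy */
		cnt++;
	}
	printf("%d of %d active objects replaced\n", cnt, total);
	return 0;
}

int main(void)
{
	install_static_objs();
	return replace_static_objects();
}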