author     Mike Rapoport (IBM) <rppt@kernel.org>       2023-03-21 19:05:08 +0200
committer  Andrew Morton <akpm@linux-foundation.org>   2023-04-05 19:42:54 -0700
commit     f2fc4b44ec2bb94c51c7ae1af9b1177d72705992 (patch)
tree       a83ba868392eb11ef980295f813541be4ff63d59 /mm
parent     4cd1e9edf60efb20fad35cf5e9ade7ad75b34cd1 (diff)
mm: move init_mem_debugging_and_hardening() to mm/mm_init.c
init_mem_debugging_and_hardening() is only called from mm_core_init().
Move it close to the caller, make it static and rename it to
mem_debugging_and_hardening_init() for consistency with surrounding
convention.

Link: https://lkml.kernel.org/r/20230321170513.2401534-10-rppt@kernel.org
Signed-off-by: Mike Rapoport (IBM) <rppt@kernel.org>
Acked-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Doug Berger <opendmb@gmail.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
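For readers unfamiliar with the pattern being moved: check_pages_enabled is a
static key. DECLARE_STATIC_KEY_MAYBE() makes the key default to enabled only
when the named Kconfig option (here CONFIG_DEBUG_VM) is set, and
static_branch_unlikely() compiles to a single patchable jump, so the disabled
case is essentially free on the page alloc/free fast paths. A minimal sketch
of the pattern as it lands in mm/internal.h; the example_free_check() caller
is hypothetical and not part of this patch:

    #include <linux/jump_label.h>
    #include <linux/printk.h>

    /* From mm/internal.h: on by default only under CONFIG_DEBUG_VM */
    DECLARE_STATIC_KEY_MAYBE(CONFIG_DEBUG_VM, check_pages_enabled);

    static inline bool is_check_pages_enabled(void)
    {
            return static_branch_unlikely(&check_pages_enabled);
    }

    /* Hypothetical caller: when the key is off, the branch is patched out */
    static void example_free_check(void)
    {
            if (is_check_pages_enabled())
                    pr_info("struct page sanity checks are active\n");
    }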
Diffstat (limited to 'mm')
-rw-r--r--  mm/internal.h     8
-rw-r--r--  mm/mm_init.c     91
-rw-r--r--  mm/page_alloc.c  95
3 files changed, 98 insertions(+), 96 deletions(-)
diff --git a/mm/internal.h b/mm/internal.h
index 22f1410a0ee3..a2934c610182 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -204,6 +204,14 @@ pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);
extern char * const zone_names[MAX_NR_ZONES];
+/* perform sanity checks on struct pages being allocated or freed */
+DECLARE_STATIC_KEY_MAYBE(CONFIG_DEBUG_VM, check_pages_enabled);
+
+static inline bool is_check_pages_enabled(void)
+{
+ return static_branch_unlikely(&check_pages_enabled);
+}
+
/*
* Structure for holding the mostly immutable allocation parameters passed
* between functions involved in allocations, including the alloc_pages*
diff --git a/mm/mm_init.c b/mm/mm_init.c
index f1475413394d..43f6d3ed24ef 100644
--- a/mm/mm_init.c
+++ b/mm/mm_init.c
@@ -2531,6 +2531,95 @@ void __init memblock_free_pages(struct page *page, unsigned long pfn,
__free_pages_core(page, order);
}
+static bool _init_on_alloc_enabled_early __read_mostly
+ = IS_ENABLED(CONFIG_INIT_ON_ALLOC_DEFAULT_ON);
+static int __init early_init_on_alloc(char *buf)
+{
+
+ return kstrtobool(buf, &_init_on_alloc_enabled_early);
+}
+early_param("init_on_alloc", early_init_on_alloc);
+
+static bool _init_on_free_enabled_early __read_mostly
+ = IS_ENABLED(CONFIG_INIT_ON_FREE_DEFAULT_ON);
+static int __init early_init_on_free(char *buf)
+{
+ return kstrtobool(buf, &_init_on_free_enabled_early);
+}
+early_param("init_on_free", early_init_on_free);
+
+DEFINE_STATIC_KEY_MAYBE(CONFIG_DEBUG_VM, check_pages_enabled);
+
+/*
+ * Enable static keys related to various memory debugging and hardening options.
+ * Some override others, and depend on early params that are evaluated in the
+ * order of appearance. So we need to first gather the full picture of what was
+ * enabled, and then make decisions.
+ */
+static void __init mem_debugging_and_hardening_init(void)
+{
+ bool page_poisoning_requested = false;
+ bool want_check_pages = false;
+
+#ifdef CONFIG_PAGE_POISONING
+ /*
+ * Page poisoning is debug page alloc for some arches. If
+ * either of those options are enabled, enable poisoning.
+ */
+ if (page_poisoning_enabled() ||
+ (!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) &&
+ debug_pagealloc_enabled())) {
+ static_branch_enable(&_page_poisoning_enabled);
+ page_poisoning_requested = true;
+ want_check_pages = true;
+ }
+#endif
+
+ if ((_init_on_alloc_enabled_early || _init_on_free_enabled_early) &&
+ page_poisoning_requested) {
+ pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, "
+ "will take precedence over init_on_alloc and init_on_free\n");
+ _init_on_alloc_enabled_early = false;
+ _init_on_free_enabled_early = false;
+ }
+
+ if (_init_on_alloc_enabled_early) {
+ want_check_pages = true;
+ static_branch_enable(&init_on_alloc);
+ } else {
+ static_branch_disable(&init_on_alloc);
+ }
+
+ if (_init_on_free_enabled_early) {
+ want_check_pages = true;
+ static_branch_enable(&init_on_free);
+ } else {
+ static_branch_disable(&init_on_free);
+ }
+
+ if (IS_ENABLED(CONFIG_KMSAN) &&
+ (_init_on_alloc_enabled_early || _init_on_free_enabled_early))
+ pr_info("mem auto-init: please make sure init_on_alloc and init_on_free are disabled when running KMSAN\n");
+
+#ifdef CONFIG_DEBUG_PAGEALLOC
+ if (debug_pagealloc_enabled()) {
+ want_check_pages = true;
+ static_branch_enable(&_debug_pagealloc_enabled);
+
+ if (debug_guardpage_minorder())
+ static_branch_enable(&_debug_guardpage_enabled);
+ }
+#endif
+
+ /*
+ * Any page debugging or hardening option also enables sanity checking
+ * of struct pages being allocated or freed. With CONFIG_DEBUG_VM it's
+ * enabled already.
+ */
+ if (!IS_ENABLED(CONFIG_DEBUG_VM) && want_check_pages)
+ static_branch_enable(&check_pages_enabled);
+}
+
/* Report memory auto-initialization states for this boot. */
static void __init report_meminit(void)
{
@@ -2570,7 +2659,7 @@ void __init mm_core_init(void)
* bigger than MAX_ORDER unless SPARSEMEM.
*/
page_ext_init_flatmem();
- init_mem_debugging_and_hardening();
+ mem_debugging_and_hardening_init();
kfence_alloc_pool();
report_meminit();
kmsan_init_shadow();
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 1131b87586d5..94bf3b7b5c49 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -240,31 +240,6 @@ EXPORT_SYMBOL(init_on_alloc);
DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_FREE_DEFAULT_ON, init_on_free);
EXPORT_SYMBOL(init_on_free);
-/* perform sanity checks on struct pages being allocated or freed */
-static DEFINE_STATIC_KEY_MAYBE(CONFIG_DEBUG_VM, check_pages_enabled);
-
-static inline bool is_check_pages_enabled(void)
-{
- return static_branch_unlikely(&check_pages_enabled);
-}
-
-static bool _init_on_alloc_enabled_early __read_mostly
- = IS_ENABLED(CONFIG_INIT_ON_ALLOC_DEFAULT_ON);
-static int __init early_init_on_alloc(char *buf)
-{
-
- return kstrtobool(buf, &_init_on_alloc_enabled_early);
-}
-early_param("init_on_alloc", early_init_on_alloc);
-
-static bool _init_on_free_enabled_early __read_mostly
- = IS_ENABLED(CONFIG_INIT_ON_FREE_DEFAULT_ON);
-static int __init early_init_on_free(char *buf)
-{
- return kstrtobool(buf, &_init_on_free_enabled_early);
-}
-early_param("init_on_free", early_init_on_free);
-
/*
* A cached value of the page's pageblock's migratetype, used when the page is
* put on a pcplist. Used to avoid the pageblock migratetype lookup when
@@ -798,76 +773,6 @@ static inline void clear_page_guard(struct zone *zone, struct page *page,
unsigned int order, int migratetype) {}
#endif
-/*
- * Enable static keys related to various memory debugging and hardening options.
- * Some override others, and depend on early params that are evaluated in the
- * order of appearance. So we need to first gather the full picture of what was
- * enabled, and then make decisions.
- */
-void __init init_mem_debugging_and_hardening(void)
-{
- bool page_poisoning_requested = false;
- bool want_check_pages = false;
-
-#ifdef CONFIG_PAGE_POISONING
- /*
- * Page poisoning is debug page alloc for some arches. If
- * either of those options are enabled, enable poisoning.
- */
- if (page_poisoning_enabled() ||
- (!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) &&
- debug_pagealloc_enabled())) {
- static_branch_enable(&_page_poisoning_enabled);
- page_poisoning_requested = true;
- want_check_pages = true;
- }
-#endif
-
- if ((_init_on_alloc_enabled_early || _init_on_free_enabled_early) &&
- page_poisoning_requested) {
- pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, "
- "will take precedence over init_on_alloc and init_on_free\n");
- _init_on_alloc_enabled_early = false;
- _init_on_free_enabled_early = false;
- }
-
- if (_init_on_alloc_enabled_early) {
- want_check_pages = true;
- static_branch_enable(&init_on_alloc);
- } else {
- static_branch_disable(&init_on_alloc);
- }
-
- if (_init_on_free_enabled_early) {
- want_check_pages = true;
- static_branch_enable(&init_on_free);
- } else {
- static_branch_disable(&init_on_free);
- }
-
- if (IS_ENABLED(CONFIG_KMSAN) &&
- (_init_on_alloc_enabled_early || _init_on_free_enabled_early))
- pr_info("mem auto-init: please make sure init_on_alloc and init_on_free are disabled when running KMSAN\n");
-
-#ifdef CONFIG_DEBUG_PAGEALLOC
- if (debug_pagealloc_enabled()) {
- want_check_pages = true;
- static_branch_enable(&_debug_pagealloc_enabled);
-
- if (debug_guardpage_minorder())
- static_branch_enable(&_debug_guardpage_enabled);
- }
-#endif
-
- /*
- * Any page debugging or hardening option also enables sanity checking
- * of struct pages being allocated or freed. With CONFIG_DEBUG_VM it's
- * enabled already.
- */
- if (!IS_ENABLED(CONFIG_DEBUG_VM) && want_check_pages)
- static_branch_enable(&check_pages_enabled);
-}
-
static inline void set_buddy_order(struct page *page, unsigned int order)
{
set_page_private(page, order);
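
A note on the decision order that mem_debugging_and_hardening_init()
implements (unchanged by this move): page poisoning takes precedence over
init_on_alloc/init_on_free, and any debug or hardening option also requests
the struct page sanity checks unless CONFIG_DEBUG_VM enabled them already.
The standalone userspace model below mirrors that ordering for illustration
only; the boolean inputs stand in for the early params and are assumptions,
not kernel API:

    #include <stdbool.h>
    #include <stdio.h>

    /* Userspace model of the precedence rules in
     * mem_debugging_and_hardening_init(); illustration only. */
    int main(void)
    {
            bool page_poisoning_requested = true;  /* e.g. page_poison=on */
            bool init_on_alloc = true;             /* e.g. init_on_alloc=1 */
            bool init_on_free = false;             /* e.g. init_on_free=0 */
            bool want_check_pages = false;

            if (page_poisoning_requested) {
                    /* poisoning overrides the auto-init options */
                    init_on_alloc = false;
                    init_on_free = false;
                    want_check_pages = true;
            }
            if (init_on_alloc || init_on_free)
                    want_check_pages = true;

            printf("poisoning=%d init_on_alloc=%d init_on_free=%d check_pages=%d\n",
                   page_poisoning_requested, init_on_alloc, init_on_free,
                   want_check_pages);
            return 0;
    }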