path: root/mm
author	Baoquan He <bhe@redhat.com>	2020-04-06 20:07:06 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2020-04-07 10:43:40 -0700
commit	0a9f9f62316606ee827fa3318e95a1c489d9acf5 (patch)
tree	d27f453193214dc27066da1919693ba7db4b6339 /mm
parent	37bc15020a96035cb157be5864b958672fe02d7c (diff)
download	linux-0a9f9f62316606ee827fa3318e95a1c489d9acf5.tar.gz
linux-0a9f9f62316606ee827fa3318e95a1c489d9acf5.tar.bz2
linux-0a9f9f62316606ee827fa3318e95a1c489d9acf5.zip
mm/sparse.c: only use subsection map in VMEMMAP case
Currently, to support subsection-aligned memory region hotadd for pmem, a subsection map is maintained to track which subsections are present.

However, config ZONE_DEVICE depends on SPARSEMEM_VMEMMAP, which means the subsection map only makes sense when SPARSEMEM_VMEMMAP is enabled. For classic sparse it is meaningless, and worse, it may confuse people reading the classic-sparse code paths.

As for classic sparse not supporting subsection hotplug, Dan said that is mostly because the effort and maintenance burden outweigh the benefit. Besides, all current 64-bit architectures enable SPARSEMEM_VMEMMAP_ENABLE by default.

For these reasons, there is no need to provide the subsection map and its handling for classic sparse. Remove them.

Signed-off-by: Baoquan He <bhe@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: David Hildenbrand <david@redhat.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Pankaj Gupta <pankaj.gupta.linux@gmail.com>
Cc: Wei Yang <richard.weiyang@gmail.com>
Link: http://lkml.kernel.org/r/20200312124414.439-4-bhe@redhat.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
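The patch below applies the common kernel idiom for config-dependent helpers: compile the real implementation under #ifdef CONFIG_SPARSEMEM_VMEMMAP and supply same-signature no-op stubs in the #else branch, so call sites such as section_activate() and section_deactivate() need no #ifdef of their own. Here is a minimal user-space sketch of that idiom; the function names mirror the patch, but the bodies and constants are illustrative stand-ins, not the kernel implementation.

#include <stdbool.h>
#include <stdio.h>

/* Build with or without the feature:
 *   cc demo.c                             -> classic-sparse stubs
 *   cc -DCONFIG_SPARSEMEM_VMEMMAP demo.c  -> real tracking path
 */
#ifdef CONFIG_SPARSEMEM_VMEMMAP
/* Toy subsection map: one bit per subsection, 512 pages each. */
static unsigned long subsection_map;

static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages)
{
	unsigned long i;

	for (i = 0; i < nr_pages; i += 512)
		subsection_map |= 1UL << (((pfn + i) / 512) % 64);
	return 0;
}

static bool is_subsection_map_empty(void)
{
	return subsection_map == 0;
}
#else
/* Classic sparse: no subsection map, so the stubs keep callers working
 * unchanged by reporting success and an always-empty map. */
static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages)
{
	return 0;
}

static bool is_subsection_map_empty(void)
{
	return true;
}
#endif

int main(void)
{
	/* The caller is identical in both configurations. */
	if (fill_subsection_map(0x10000, 1024))
		fprintf(stderr, "subsections already populated\n");
	printf("subsection map empty: %s\n",
	       is_subsection_map_empty() ? "yes" : "no");
	return 0;
}

The payoff is at the call sites: because both branches export the same signatures, none of the callers in mm/sparse.c grow configuration checks of their own.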
Diffstat (limited to 'mm')
-rw-r--r--	mm/sparse.c	25
1 file changed, 25 insertions(+), 0 deletions(-)
diff --git a/mm/sparse.c b/mm/sparse.c
index 01204c3b4649..095ecf5bb6d3 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -209,6 +209,7 @@ static inline unsigned long first_present_section_nr(void)
return next_present_section_nr(-1);
}
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
static void subsection_mask_set(unsigned long *map, unsigned long pfn,
unsigned long nr_pages)
{
@@ -243,6 +244,11 @@ void __init subsection_map_init(unsigned long pfn, unsigned long nr_pages)
nr_pages -= pfns;
}
}
+#else
+void __init subsection_map_init(unsigned long pfn, unsigned long nr_pages)
+{
+}
+#endif
/* Record a memory area against a node. */
void __init memory_present(int nid, unsigned long start, unsigned long end)
@@ -705,6 +711,7 @@ static void free_map_bootmem(struct page *memmap)
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
static int clear_subsection_map(unsigned long pfn, unsigned long nr_pages)
{
DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 };
@@ -731,6 +738,17 @@ static bool is_subsection_map_empty(struct mem_section *ms)
return bitmap_empty(&ms->usage->subsection_map[0],
SUBSECTIONS_PER_SECTION);
}
+#else
+static int clear_subsection_map(unsigned long pfn, unsigned long nr_pages)
+{
+ return 0;
+}
+
+static bool is_subsection_map_empty(struct mem_section *ms)
+{
+ return true;
+}
+#endif
static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
struct vmem_altmap *altmap)
@@ -792,6 +810,7 @@ static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
ms->section_mem_map = (unsigned long)NULL;
}
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages)
{
struct mem_section *ms = __pfn_to_section(pfn);
@@ -813,6 +832,12 @@ static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages)
return rc;
}
+#else
+static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages)
+{
+ return 0;
+}
+#endif
static struct page * __meminit section_activate(int nid, unsigned long pfn,
unsigned long nr_pages, struct vmem_altmap *altmap)
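On the teardown side, the effect of the classic-sparse stubs is that clear_subsection_map() always reports success and is_subsection_map_empty() always reports an empty map, so a whole section is torn down at a time rather than individual subsections. Below is a simplified, self-contained sketch of a caller shaped like section_deactivate(); the control flow is paraphrased for illustration and is not the kernel code (the kernel's is_subsection_map_empty() takes a struct mem_section *, elided here).

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins mirroring the patch's classic-sparse stubs. */
static int clear_subsection_map(unsigned long pfn, unsigned long nr_pages)
{
	return 0;	/* nothing to clear without a subsection map */
}

static bool is_subsection_map_empty(void)
{
	return true;	/* the section always looks fully vacated */
}

/* Hypothetical caller shaped like section_deactivate(). */
static void demo_deactivate(unsigned long pfn, unsigned long nr_pages)
{
	if (clear_subsection_map(pfn, nr_pages))
		return;		/* range was not properly populated; bail out */

	if (is_subsection_map_empty())
		printf("deactivating the whole section\n");
}

int main(void)
{
	demo_deactivate(0x10000, 512);
	return 0;
}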