Diffstat (limited to 'mm/sparse.c')
-rw-r--r--	mm/sparse.c	57
1 file changed, 30 insertions(+), 27 deletions(-)
diff --git a/mm/sparse.c b/mm/sparse.c
index aed0951b87fa..dc38539f8560 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -14,7 +14,7 @@
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/bootmem_info.h>
-
+#include <linux/vmstat.h>
#include "internal.h"
#include <asm/dma.h>
@@ -129,7 +129,7 @@ static inline int sparse_early_nid(struct mem_section *section)
static void __meminit mminit_validate_memmodel_limits(unsigned long *start_pfn,
unsigned long *end_pfn)
{
- unsigned long max_sparsemem_pfn = 1UL << (MAX_PHYSMEM_BITS-PAGE_SHIFT);
+ unsigned long max_sparsemem_pfn = (PHYSMEM_END + 1) >> PAGE_SHIFT;
/*
* Sanity checks - do not allow an architecture to pass
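
For orientation on the bound change above: a minimal user-space sketch, with illustrative constants rather than any real architecture's values, showing that the new (PHYSMEM_END + 1) >> PAGE_SHIFT reduces to the old 1UL << (MAX_PHYSMEM_BITS - PAGE_SHIFT) whenever PHYSMEM_END keeps its default of (1UL << MAX_PHYSMEM_BITS) - 1; the two bounds only diverge on architectures that report a smaller end of the usable physical address space.

#include <stdio.h>

#define MAX_PHYSMEM_BITS	46	/* illustrative value */
#define PAGE_SHIFT		12
#define PHYSMEM_END		((1UL << MAX_PHYSMEM_BITS) - 1)	/* assumed default */

int main(void)
{
	unsigned long old_bound = 1UL << (MAX_PHYSMEM_BITS - PAGE_SHIFT);
	unsigned long new_bound = (PHYSMEM_END + 1) >> PAGE_SHIFT;

	/* equal with the default PHYSMEM_END; new_bound shrinks with it */
	printf("old=%lu new=%lu\n", old_bound, new_bound);
	return 0;
}
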
@@ -192,13 +192,10 @@ static void subsection_mask_set(unsigned long *map, unsigned long pfn,
void __init subsection_map_init(unsigned long pfn, unsigned long nr_pages)
{
- int end_sec = pfn_to_section_nr(pfn + nr_pages - 1);
- unsigned long nr, start_sec = pfn_to_section_nr(pfn);
-
- if (!nr_pages)
- return;
+ int end_sec_nr = pfn_to_section_nr(pfn + nr_pages - 1);
+ unsigned long nr, start_sec_nr = pfn_to_section_nr(pfn);
- for (nr = start_sec; nr <= end_sec; nr++) {
+ for (nr = start_sec_nr; nr <= end_sec_nr; nr++) {
struct mem_section *ms;
unsigned long pfns;
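
For readers tracking the *_nr renames above: a section number is just the pfn shifted down by the section-to-page distance. A stand-alone sketch, assuming x86_64-like constants (SECTION_SIZE_BITS = 27, PAGE_SHIFT = 12, i.e. 128 MiB sections of 4 KiB pages); the end section is computed from pfn + nr_pages - 1 because that is the last pfn actually covered by the range.

#include <stdio.h>

#define PAGE_SHIFT		12	/* assumed */
#define SECTION_SIZE_BITS	27	/* assumed */
#define PFN_SECTION_SHIFT	(SECTION_SIZE_BITS - PAGE_SHIFT)

static unsigned long pfn_to_section_nr(unsigned long pfn)
{
	return pfn >> PFN_SECTION_SHIFT;
}

int main(void)
{
	unsigned long pfn = 0x8000, nr_pages = 0x40000;

	/* prints start_sec_nr=1 end_sec_nr=8 with these constants */
	printf("start_sec_nr=%lu end_sec_nr=%lu\n",
	       pfn_to_section_nr(pfn),
	       pfn_to_section_nr(pfn + nr_pages - 1));
	return 0;
}
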
@@ -226,33 +223,20 @@ static void __init memory_present(int nid, unsigned long start, unsigned long en
{
unsigned long pfn;
-#ifdef CONFIG_SPARSEMEM_EXTREME
- if (unlikely(!mem_section)) {
- unsigned long size, align;
-
- size = sizeof(struct mem_section *) * NR_SECTION_ROOTS;
- align = 1 << (INTERNODE_CACHE_SHIFT);
- mem_section = memblock_alloc(size, align);
- if (!mem_section)
- panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
- __func__, size, align);
- }
-#endif
-
start &= PAGE_SECTION_MASK;
mminit_validate_memmodel_limits(&start, &end);
for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
- unsigned long section = pfn_to_section_nr(pfn);
+ unsigned long section_nr = pfn_to_section_nr(pfn);
struct mem_section *ms;
- sparse_index_init(section, nid);
- set_section_nid(section, nid);
+ sparse_index_init(section_nr, nid);
+ set_section_nid(section_nr, nid);
- ms = __nr_to_section(section);
+ ms = __nr_to_section(section_nr);
if (!ms->section_mem_map) {
ms->section_mem_map = sparse_encode_early_nid(nid) |
SECTION_IS_ONLINE;
- __section_mark_present(ms, section);
+ __section_mark_present(ms, section_nr);
}
}
}
@@ -267,6 +251,19 @@ static void __init memblocks_present(void)
unsigned long start, end;
int i, nid;
+#ifdef CONFIG_SPARSEMEM_EXTREME
+ if (unlikely(!mem_section)) {
+ unsigned long size, align;
+
+ size = sizeof(struct mem_section *) * NR_SECTION_ROOTS;
+ align = 1 << (INTERNODE_CACHE_SHIFT);
+ mem_section = memblock_alloc(size, align);
+ if (!mem_section)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+ __func__, size, align);
+ }
+#endif
+
for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid)
memory_present(nid, start, end);
}
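
The hunk above defers allocating the mem_section root array until memblocks_present(), the first point that registers sections. As a rough user-space model of the two-level lookup that array serves under CONFIG_SPARSEMEM_EXTREME (the struct layout and the NR_SECTION_ROOTS value are illustrative, not the kernel's): each boot-allocated root covers SECTIONS_PER_ROOT consecutive sections, and the mask-based indexing is what the BUILD_BUG_ON added further down keeps valid.

#include <stdlib.h>

struct mem_section { unsigned long section_mem_map; unsigned long pad; };

#define PAGE_SIZE		4096UL	/* assumed */
#define SECTIONS_PER_ROOT	(PAGE_SIZE / sizeof(struct mem_section))
#define NR_SECTION_ROOTS	1024UL	/* illustrative */
#define SECTION_NR_TO_ROOT(nr)	((nr) / SECTIONS_PER_ROOT)
#define SECTION_ROOT_MASK	(SECTIONS_PER_ROOT - 1)

static struct mem_section **mem_section;	/* root array, allocated once */

static struct mem_section *nr_to_section(unsigned long nr)
{
	unsigned long root = SECTION_NR_TO_ROOT(nr);

	if (root >= NR_SECTION_ROOTS || !mem_section || !mem_section[root])
		return NULL;
	return &mem_section[root][nr & SECTION_ROOT_MASK];
}

int main(void)
{
	mem_section = calloc(NR_SECTION_ROOTS, sizeof(*mem_section));
	if (!mem_section)
		return 1;
	mem_section[0] = calloc(SECTIONS_PER_ROOT, sizeof(struct mem_section));
	return nr_to_section(5) ? 0 : 1;	/* section 5 lives in root 0 */
}
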
@@ -351,7 +348,7 @@ sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
again:
usage = memblock_alloc_try_nid(size, SMP_CACHE_BYTES, goal, limit, nid);
if (!usage && limit) {
- limit = 0;
+ limit = MEMBLOCK_ALLOC_ACCESSIBLE;
goto again;
}
return usage;
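
A note on the replacement above: MEMBLOCK_ALLOC_ACCESSIBLE is defined as 0 in include/linux/memblock.h, so spelling it out changes readability, not behaviour; a zero limit has always meant "no upper bound beyond what is accessible". The retry shape, modelled in user space (alloc_below() is a hypothetical stand-in for memblock_alloc_try_nid(), not a kernel API):

#include <stddef.h>
#include <stdlib.h>

#define MEMBLOCK_ALLOC_ACCESSIBLE	0UL	/* matches the kernel's value */

/* hypothetical constrained allocator standing in for memblock */
static void *alloc_below(size_t size, unsigned long limit)
{
	(void)limit;	/* a real allocator would honour the bound */
	return malloc(size);
}

static void *alloc_with_fallback(size_t size, unsigned long limit)
{
	void *p;
again:
	p = alloc_below(size, limit);
	if (!p && limit) {	/* constrained attempt failed: drop the bound */
		limit = MEMBLOCK_ALLOC_ACCESSIBLE;
		goto again;
	}
	return p;
}

int main(void)
{
	free(alloc_with_fallback(64, 1UL << 20));
	return 0;
}
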
@@ -465,6 +462,9 @@ static void __init sparse_buffer_init(unsigned long size, int nid)
*/
sparsemap_buf = memmap_alloc(size, section_map_size(), addr, nid, true);
sparsemap_buf_end = sparsemap_buf + size;
+#ifndef CONFIG_SPARSEMEM_VMEMMAP
+ memmap_boot_pages_add(DIV_ROUND_UP(size, PAGE_SIZE));
+#endif
}
static void __init sparse_buffer_fini(void)
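
Worked numbers for the accounting just added, under illustrative constants (128 MiB sections, 4 KiB pages, 64-byte struct page): the early memmap buffer for one section is 2 MiB, so memmap_boot_pages_add() records 512 pages per section; depopulate_section_memmap() further down subtracts the equivalent amount when a section is torn down.

#include <stdio.h>

#define PAGE_SIZE		4096UL	/* assumed */
#define PAGES_PER_SECTION	32768UL	/* 128 MiB / 4 KiB, assumed */
#define STRUCT_PAGE_SIZE	64UL	/* illustrative */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long size = PAGES_PER_SECTION * STRUCT_PAGE_SIZE;

	/* 2097152 bytes = 512 pages of memmap per 128 MiB section */
	printf("memmap per section: %lu bytes = %lu pages\n",
	       size, DIV_ROUND_UP(size, PAGE_SIZE));
	return 0;
}
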
@@ -560,6 +560,8 @@ void __init sparse_init(void)
unsigned long pnum_end, pnum_begin, map_count = 1;
int nid_begin;
+ /* see include/linux/mmzone.h 'struct mem_section' definition */
+ BUILD_BUG_ON(!is_power_of_2(sizeof(struct mem_section)));
memblocks_present();
pnum_begin = first_present_section_nr();
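
A user-space analogue of the new BUILD_BUG_ON (the padded struct is illustrative, not the kernel's layout): with sizeof(struct mem_section) a power of two, PAGE_SIZE / sizeof(struct mem_section) is one as well, so the nr & SECTION_ROOT_MASK indexing sketched earlier stays equivalent to a modulo.

struct mem_section { unsigned long section_mem_map; unsigned long pad; };

_Static_assert((sizeof(struct mem_section) &
		(sizeof(struct mem_section) - 1)) == 0,
	       "sizeof(struct mem_section) must be a power of 2");
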
@@ -641,6 +643,7 @@ static void depopulate_section_memmap(unsigned long pfn, unsigned long nr_pages,
unsigned long start = (unsigned long) pfn_to_page(pfn);
unsigned long end = start + nr_pages * sizeof(struct page);
+ memmap_pages_add(-1L * (DIV_ROUND_UP(end - start, PAGE_SIZE)));
vmemmap_free(start, end, altmap);
}

static void free_map_bootmem(struct page *memmap)