From f1c2c19c498e27de48bf0dc4221e6e31b1823169 Mon Sep 17 00:00:00 2001
From: Benjamin Herrenschmidt
Date: Wed, 4 Aug 2010 14:17:17 +1000
Subject: memblock: Fix memblock_is_region_reserved() to return a boolean

All callers expect a boolean result which is true if the region
overlaps a reserved region. However, the implementation actually
returns -1 if there is no overlap, and a region index (0 based)
if there is.

Make it behave as callers (and common sense) expect.

Signed-off-by: Benjamin Herrenschmidt
---
 mm/memblock.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'mm')

diff --git a/mm/memblock.c b/mm/memblock.c
index 3024eb30fc27..43840b305ecb 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -504,7 +504,7 @@ int __init memblock_is_reserved(u64 addr)
 
 int memblock_is_region_reserved(u64 base, u64 size)
 {
-	return memblock_overlaps_region(&memblock.reserved, base, size);
+	return memblock_overlaps_region(&memblock.reserved, base, size) >= 0;
 }
 
 /*
--
cgit v1.2.3
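With the return value now a boolean, call sites can test it directly instead of
comparing indices. A minimal sketch of the intended calling pattern (the helper
below is hypothetical, not part of the series):

	/* Hypothetical caller: claim a fixed range only if it is still free. */
	static long __init claim_fixed_range(u64 base, u64 size)
	{
		if (memblock_is_region_reserved(base, size))
			return -1;	/* overlaps an existing reservation */
		return memblock_reserve(base, size);
	}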
From e3239ff92a17976ac5d26fa0fe40ef3a9daf2523 Mon Sep 17 00:00:00 2001
From: Benjamin Herrenschmidt
Date: Wed, 4 Aug 2010 14:06:41 +1000
Subject: memblock: Rename memblock_region to memblock_type and
 memblock_property to memblock_region

Signed-off-by: Benjamin Herrenschmidt
---
 mm/memblock.c | 168 +++++++++++++++++++++++++++++-----------------------------
 1 file changed, 83 insertions(+), 85 deletions(-)

(limited to 'mm')

diff --git a/mm/memblock.c b/mm/memblock.c
index 43840b305ecb..6f407ccf604e 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -29,7 +29,7 @@ static int __init early_memblock(char *p)
 }
 early_param("memblock", early_memblock);
 
-static void memblock_dump(struct memblock_region *region, char *name)
+static void memblock_dump(struct memblock_type *region, char *name)
 {
 	unsigned long long base, size;
 	int i;
@@ -37,8 +37,8 @@ static void memblock_dump(struct memblock_region *region, char *name)
 	pr_info(" %s.cnt = 0x%lx\n", name, region->cnt);
 
 	for (i = 0; i < region->cnt; i++) {
-		base = region->region[i].base;
-		size = region->region[i].size;
+		base = region->regions[i].base;
+		size = region->regions[i].size;
 
 		pr_info(" %s[0x%x]\t0x%016llx - 0x%016llx, 0x%llx bytes\n",
 		    name, i, base, base + size - 1, size);
@@ -74,34 +74,34 @@ static long memblock_addrs_adjacent(u64 base1, u64 size1, u64 base2, u64 size2)
 	return 0;
 }
 
-static long memblock_regions_adjacent(struct memblock_region *rgn,
+static long memblock_regions_adjacent(struct memblock_type *type,
 				unsigned long r1, unsigned long r2)
 {
-	u64 base1 = rgn->region[r1].base;
-	u64 size1 = rgn->region[r1].size;
-	u64 base2 = rgn->region[r2].base;
-	u64 size2 = rgn->region[r2].size;
+	u64 base1 = type->regions[r1].base;
+	u64 size1 = type->regions[r1].size;
+	u64 base2 = type->regions[r2].base;
+	u64 size2 = type->regions[r2].size;
 
 	return memblock_addrs_adjacent(base1, size1, base2, size2);
 }
 
-static void memblock_remove_region(struct memblock_region *rgn, unsigned long r)
+static void memblock_remove_region(struct memblock_type *type, unsigned long r)
 {
 	unsigned long i;
 
-	for (i = r; i < rgn->cnt - 1; i++) {
-		rgn->region[i].base = rgn->region[i + 1].base;
-		rgn->region[i].size = rgn->region[i + 1].size;
+	for (i = r; i < type->cnt - 1; i++) {
+		type->regions[i].base = type->regions[i + 1].base;
+		type->regions[i].size = type->regions[i + 1].size;
 	}
-	rgn->cnt--;
+	type->cnt--;
 }
 
 /* Assumption: base addr of region 1 < base addr of region 2 */
-static void memblock_coalesce_regions(struct memblock_region *rgn,
+static void memblock_coalesce_regions(struct memblock_type *type,
 		unsigned long r1, unsigned long r2)
 {
-	rgn->region[r1].size += rgn->region[r2].size;
-	memblock_remove_region(rgn, r2);
+	type->regions[r1].size += type->regions[r2].size;
+	memblock_remove_region(type, r2);
 }
 
 void __init memblock_init(void)
@@ -109,13 +109,13 @@ void __init memblock_init(void)
 	/* Create a dummy zero size MEMBLOCK which will get coalesced away later.
 	 * This simplifies the memblock_add() code below...
 	 */
-	memblock.memory.region[0].base = 0;
-	memblock.memory.region[0].size = 0;
+	memblock.memory.regions[0].base = 0;
+	memblock.memory.regions[0].size = 0;
 	memblock.memory.cnt = 1;
 
 	/* Ditto. */
-	memblock.reserved.region[0].base = 0;
-	memblock.reserved.region[0].size = 0;
+	memblock.reserved.regions[0].base = 0;
+	memblock.reserved.regions[0].size = 0;
 	memblock.reserved.cnt = 1;
 }
 
@@ -126,24 +126,24 @@ void __init memblock_analyze(void)
 	memblock.memory.size = 0;
 
 	for (i = 0; i < memblock.memory.cnt; i++)
-		memblock.memory.size += memblock.memory.region[i].size;
+		memblock.memory.size += memblock.memory.regions[i].size;
 }
 
-static long memblock_add_region(struct memblock_region *rgn, u64 base, u64 size)
+static long memblock_add_region(struct memblock_type *type, u64 base, u64 size)
 {
 	unsigned long coalesced = 0;
 	long adjacent, i;
 
-	if ((rgn->cnt == 1) && (rgn->region[0].size == 0)) {
-		rgn->region[0].base = base;
-		rgn->region[0].size = size;
+	if ((type->cnt == 1) && (type->regions[0].size == 0)) {
+		type->regions[0].base = base;
+		type->regions[0].size = size;
 		return 0;
 	}
 
 	/* First try and coalesce this MEMBLOCK with another. */
-	for (i = 0; i < rgn->cnt; i++) {
-		u64 rgnbase = rgn->region[i].base;
-		u64 rgnsize = rgn->region[i].size;
+	for (i = 0; i < type->cnt; i++) {
+		u64 rgnbase = type->regions[i].base;
+		u64 rgnsize = type->regions[i].size;
 
 		if ((rgnbase == base) && (rgnsize == size))
 			/* Already have this region, so we're done */
@@ -151,61 +151,59 @@ static long memblock_add_region(struct memblock_region *rgn, u64 base, u64 size)
 		adjacent = memblock_addrs_adjacent(base, size, rgnbase, rgnsize);
 		if (adjacent > 0) {
-			rgn->region[i].base -= size;
-			rgn->region[i].size += size;
+			type->regions[i].base -= size;
+			type->regions[i].size += size;
 			coalesced++;
 			break;
 		} else if (adjacent < 0) {
-			rgn->region[i].size += size;
+			type->regions[i].size += size;
 			coalesced++;
 			break;
 		}
 	}
 
-	if ((i < rgn->cnt - 1) && memblock_regions_adjacent(rgn, i, i+1)) {
-		memblock_coalesce_regions(rgn, i, i+1);
+	if ((i < type->cnt - 1) && memblock_regions_adjacent(type, i, i+1)) {
+		memblock_coalesce_regions(type, i, i+1);
 		coalesced++;
 	}
 
 	if (coalesced)
 		return coalesced;
-	if (rgn->cnt >= MAX_MEMBLOCK_REGIONS)
+	if (type->cnt >= MAX_MEMBLOCK_REGIONS)
 		return -1;
 
 	/* Couldn't coalesce the MEMBLOCK, so add it to the sorted table. */
-	for (i = rgn->cnt - 1; i >= 0; i--) {
-		if (base < rgn->region[i].base) {
-			rgn->region[i+1].base = rgn->region[i].base;
-			rgn->region[i+1].size = rgn->region[i].size;
+	for (i = type->cnt - 1; i >= 0; i--) {
+		if (base < type->regions[i].base) {
+			type->regions[i+1].base = type->regions[i].base;
+			type->regions[i+1].size = type->regions[i].size;
 		} else {
-			rgn->region[i+1].base = base;
-			rgn->region[i+1].size = size;
+			type->regions[i+1].base = base;
+			type->regions[i+1].size = size;
 			break;
 		}
 	}
 
-	if (base < rgn->region[0].base) {
-		rgn->region[0].base = base;
-		rgn->region[0].size = size;
+	if (base < type->regions[0].base) {
+		type->regions[0].base = base;
+		type->regions[0].size = size;
 	}
-	rgn->cnt++;
+	type->cnt++;
 
 	return 0;
 }
 
 long memblock_add(u64 base, u64 size)
 {
-	struct memblock_region *_rgn = &memblock.memory;
-
 	/* On pSeries LPAR systems, the first MEMBLOCK is our RMO region. */
 	if (base == 0)
 		memblock.rmo_size = size;
 
-	return memblock_add_region(_rgn, base, size);
+	return memblock_add_region(&memblock.memory, base, size);
 
 }
 
-static long __memblock_remove(struct memblock_region *rgn, u64 base, u64 size)
+static long __memblock_remove(struct memblock_type *type, u64 base, u64 size)
 {
 	u64 rgnbegin, rgnend;
 	u64 end = base + size;
@@ -214,34 +212,34 @@ static long __memblock_remove(struct memblock_region *rgn, u64 base, u64 size)
 	rgnbegin = rgnend = 0; /* supress gcc warnings */
 
 	/* Find the region where (base, size) belongs to */
-	for (i=0; i < rgn->cnt; i++) {
-		rgnbegin = rgn->region[i].base;
-		rgnend = rgnbegin + rgn->region[i].size;
+	for (i=0; i < type->cnt; i++) {
+		rgnbegin = type->regions[i].base;
+		rgnend = rgnbegin + type->regions[i].size;
 
 		if ((rgnbegin <= base) && (end <= rgnend))
 			break;
 	}
 
 	/* Didn't find the region */
-	if (i == rgn->cnt)
+	if (i == type->cnt)
 		return -1;
 
 	/* Check to see if we are removing entire region */
 	if ((rgnbegin == base) && (rgnend == end)) {
-		memblock_remove_region(rgn, i);
+		memblock_remove_region(type, i);
 		return 0;
 	}
 
 	/* Check to see if region is matching at the front */
 	if (rgnbegin == base) {
-		rgn->region[i].base = end;
-		rgn->region[i].size -= size;
+		type->regions[i].base = end;
+		type->regions[i].size -= size;
 		return 0;
 	}
 
 	/* Check to see if the region is matching at the end */
 	if (rgnend == end) {
-		rgn->region[i].size -= size;
+		type->regions[i].size -= size;
 		return 0;
 	}
 
@@ -249,8 +247,8 @@ static long __memblock_remove(struct memblock_region *rgn, u64 base, u64 size)
 	 * We need to split the entry - adjust the current one to the
 	 * beginging of the hole and add the region after hole.
 	 */
-	rgn->region[i].size = base - rgn->region[i].base;
-	return memblock_add_region(rgn, end, rgnend - end);
+	type->regions[i].size = base - type->regions[i].base;
+	return memblock_add_region(type, end, rgnend - end);
 }
 
 long memblock_remove(u64 base, u64 size)
@@ -265,25 +263,25 @@ long __init memblock_free(u64 base, u64 size)
 
 long __init memblock_reserve(u64 base, u64 size)
 {
-	struct memblock_region *_rgn = &memblock.reserved;
+	struct memblock_type *_rgn = &memblock.reserved;
 
 	BUG_ON(0 == size);
 
 	return memblock_add_region(_rgn, base, size);
 }
 
-long memblock_overlaps_region(struct memblock_region *rgn, u64 base, u64 size)
+long memblock_overlaps_region(struct memblock_type *type, u64 base, u64 size)
 {
 	unsigned long i;
 
-	for (i = 0; i < rgn->cnt; i++) {
-		u64 rgnbase = rgn->region[i].base;
-		u64 rgnsize = rgn->region[i].size;
+	for (i = 0; i < type->cnt; i++) {
+		u64 rgnbase = type->regions[i].base;
+		u64 rgnsize = type->regions[i].size;
 		if (memblock_addrs_overlap(base, size, rgnbase, rgnsize))
 			break;
 	}
 
-	return (i < rgn->cnt) ? i : -1;
+	return (i < type->cnt) ? i : -1;
 }
 
 static u64 memblock_align_down(u64 addr, u64 size)
@@ -311,7 +309,7 @@ static u64 __init memblock_alloc_nid_unreserved(u64 start, u64 end,
 			base = ~(u64)0;
 			return base;
 		}
-		res_base = memblock.reserved.region[j].base;
+		res_base = memblock.reserved.regions[j].base;
 		if (res_base < size)
 			break;
 		base = memblock_align_down(res_base - size, align);
@@ -320,7 +318,7 @@ static u64 __init memblock_alloc_nid_unreserved(u64 start, u64 end,
 	return ~(u64)0;
 }
 
-static u64 __init memblock_alloc_nid_region(struct memblock_property *mp,
+static u64 __init memblock_alloc_nid_region(struct memblock_region *mp,
 				       u64 (*nid_range)(u64, u64, int *),
 				       u64 size, u64 align, int nid)
 {
@@ -350,7 +348,7 @@ static u64 __init memblock_alloc_nid_region(struct memblock_property *mp,
 u64 __init memblock_alloc_nid(u64 size, u64 align, int nid,
 				u64 (*nid_range)(u64 start, u64 end, int *nid))
 {
-	struct memblock_region *mem = &memblock.memory;
+	struct memblock_type *mem = &memblock.memory;
 	int i;
 
 	BUG_ON(0 == size);
@@ -358,7 +356,7 @@ u64 __init memblock_alloc_nid(u64 size, u64 align, int nid,
 	size = memblock_align_up(size, align);
 
 	for (i = 0; i < mem->cnt; i++) {
-		u64 ret = memblock_alloc_nid_region(&mem->region[i],
+		u64 ret = memblock_alloc_nid_region(&mem->regions[i],
 					       nid_range,
 					       size, align, nid);
 		if (ret != ~(u64)0)
@@ -402,8 +400,8 @@ u64 __init __memblock_alloc_base(u64 size, u64 align, u64 max_addr)
 		max_addr = MEMBLOCK_REAL_LIMIT;
 
 	for (i = memblock.memory.cnt - 1; i >= 0; i--) {
-		u64 memblockbase = memblock.memory.region[i].base;
-		u64 memblocksize = memblock.memory.region[i].size;
+		u64 memblockbase = memblock.memory.regions[i].base;
+		u64 memblocksize = memblock.memory.regions[i].size;
 
 		if (memblocksize < size)
 			continue;
@@ -423,7 +421,7 @@ u64 __init __memblock_alloc_base(u64 size, u64 align, u64 max_addr)
 				return 0;
 			return base;
 		}
-		res_base = memblock.reserved.region[j].base;
+		res_base = memblock.reserved.regions[j].base;
 		if (res_base < size)
 			break;
 		base = memblock_align_down(res_base - size, align);
@@ -442,7 +440,7 @@ u64 memblock_end_of_DRAM(void)
 {
 	int idx = memblock.memory.cnt - 1;
 
-	return (memblock.memory.region[idx].base + memblock.memory.region[idx].size);
+	return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
 }
 
 /* You must call memblock_analyze() after this. */
@@ -450,7 +448,7 @@ void __init memblock_enforce_memory_limit(u64 memory_limit)
 {
 	unsigned long i;
 	u64 limit;
-	struct memblock_property *p;
+	struct memblock_region *p;
 
 	if (!memory_limit)
 		return;
@@ -458,24 +456,24 @@ void __init memblock_enforce_memory_limit(u64 memory_limit)
 	/* Truncate the memblock regions to satisfy the memory limit. */
 	limit = memory_limit;
 	for (i = 0; i < memblock.memory.cnt; i++) {
-		if (limit > memblock.memory.region[i].size) {
-			limit -= memblock.memory.region[i].size;
+		if (limit > memblock.memory.regions[i].size) {
+			limit -= memblock.memory.regions[i].size;
 			continue;
 		}
 
-		memblock.memory.region[i].size = limit;
+		memblock.memory.regions[i].size = limit;
 		memblock.memory.cnt = i + 1;
 		break;
 	}
 
-	if (memblock.memory.region[0].size < memblock.rmo_size)
-		memblock.rmo_size = memblock.memory.region[0].size;
+	if (memblock.memory.regions[0].size < memblock.rmo_size)
+		memblock.rmo_size = memblock.memory.regions[0].size;
 
 	memory_limit = memblock_end_of_DRAM();
 
 	/* And truncate any reserves above the limit also. */
 	for (i = 0; i < memblock.reserved.cnt; i++) {
-		p = &memblock.reserved.region[i];
+		p = &memblock.reserved.regions[i];
 
 		if (p->base > memory_limit)
 			p->size = 0;
@@ -494,9 +492,9 @@ int __init memblock_is_reserved(u64 addr)
 	int i;
 
 	for (i = 0; i < memblock.reserved.cnt; i++) {
-		u64 upper = memblock.reserved.region[i].base +
-			memblock.reserved.region[i].size - 1;
-		if ((addr >= memblock.reserved.region[i].base) && (addr <= upper))
+		u64 upper = memblock.reserved.regions[i].base +
+			memblock.reserved.regions[i].size - 1;
+		if ((addr >= memblock.reserved.regions[i].base) && (addr <= upper))
 			return 1;
 	}
 	return 0;
@@ -511,7 +509,7 @@ int memblock_is_region_reserved(u64 base, u64 size)
  * Given a <base, len>, find which memory regions belong to this range.
  * Adjust the request and return a contiguous chunk.
  */
-int memblock_find(struct memblock_property *res)
+int memblock_find(struct memblock_region *res)
 {
 	int i;
 	u64 rstart, rend;
@@ -520,8 +518,8 @@ int memblock_find(struct memblock_property *res)
 	rend = rstart + res->size - 1;
 
 	for (i = 0; i < memblock.memory.cnt; i++) {
-		u64 start = memblock.memory.region[i].base;
-		u64 end = start + memblock.memory.region[i].size - 1;
+		u64 start = memblock.memory.regions[i].base;
+		u64 end = start + memblock.memory.regions[i].size - 1;
 
 		if (start > rend)
 			return -1;
--
cgit v1.2.3
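The rename is purely mechanical, but it fixes the vocabulary for everything that
follows: a "region" is now a single (base, size) pair and a "type" is an array
of them. Assuming the include/linux/memblock.h side of this commit (not shown in
the mm/ diff above), the structures end up looking roughly like:

	struct memblock_region {	/* was: struct memblock_property */
		u64 base;
		u64 size;
	};

	struct memblock_type {		/* was: struct memblock_region */
		unsigned long cnt;
		u64 size;
		struct memblock_region regions[MAX_MEMBLOCK_REGIONS + 1];
	};

	struct memblock {
		u64 rmo_size;
		struct memblock_type memory;
		struct memblock_type reserved;
	};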
From 72d4b0b4e0e7fa858767e03972771a9f7c02b689 Mon Sep 17 00:00:00 2001
From: Benjamin Herrenschmidt
Date: Wed, 4 Aug 2010 14:38:47 +1000
Subject: memblock: Implement memblock_is_memory and memblock_is_region_memory

To make it fast, we steal ARM's binary search for memblock_is_memory()
and we use that to also replace the existing implementation of
memblock_is_reserved().

Signed-off-by: Benjamin Herrenschmidt
---
 mm/memblock.c | 42 ++++++++++++++++++++++++++++++++++--------
 1 file changed, 34 insertions(+), 8 deletions(-)

(limited to 'mm')

diff --git a/mm/memblock.c b/mm/memblock.c
index 6f407ccf604e..aa88c62bce7f 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -487,17 +487,43 @@ void __init memblock_enforce_memory_limit(u64 memory_limit)
 	}
 }
 
+static int memblock_search(struct memblock_type *type, u64 addr)
+{
+	unsigned int left = 0, right = type->cnt;
+
+	do {
+		unsigned int mid = (right + left) / 2;
+
+		if (addr < type->regions[mid].base)
+			right = mid;
+		else if (addr >= (type->regions[mid].base +
+				  type->regions[mid].size))
+			left = mid + 1;
+		else
+			return mid;
+	} while (left < right);
+	return -1;
+}
+
 int __init memblock_is_reserved(u64 addr)
 {
-	int i;
+	return memblock_search(&memblock.reserved, addr) != -1;
+}
 
-	for (i = 0; i < memblock.reserved.cnt; i++) {
-		u64 upper = memblock.reserved.regions[i].base +
-			memblock.reserved.regions[i].size - 1;
-		if ((addr >= memblock.reserved.regions[i].base) && (addr <= upper))
-			return 1;
-	}
-	return 0;
+int memblock_is_memory(u64 addr)
+{
+	return memblock_search(&memblock.memory, addr) != -1;
+}
+
+int memblock_is_region_memory(u64 base, u64 size)
+{
+	int idx = memblock_search(&memblock.reserved, base);
+
+	if (idx == -1)
+		return 0;
+	return memblock.reserved.regions[idx].base <= base &&
+		(memblock.reserved.regions[idx].base +
+		 memblock.reserved.regions[idx].size) >= (base + size);
 }
 
 int memblock_is_region_reserved(u64 base, u64 size)
--
cgit v1.2.3
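The search is valid because each memblock_type keeps its regions sorted by base
and non-overlapping, so point membership reduces to an ordinary binary search. A
self-contained sketch of the same algorithm, compilable outside the kernel:

	#include <stdint.h>

	struct range { uint64_t base, size; };

	/* Return the index of the range containing addr, or -1. Assumes the
	 * array is sorted by base and the ranges do not overlap. */
	static int range_search(const struct range *r, unsigned int cnt,
				uint64_t addr)
	{
		unsigned int left = 0, right = cnt;

		while (left < right) {
			unsigned int mid = (left + right) / 2;

			if (addr < r[mid].base)
				right = mid;
			else if (addr >= r[mid].base + r[mid].size)
				left = mid + 1;
			else
				return mid;
		}
		return -1;
	}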
From b693fffb189fbfe7e1e8317ce5838808be8666a0 Mon Sep 17 00:00:00 2001
From: Benjamin Herrenschmidt
Date: Wed, 4 Aug 2010 13:52:55 +1000
Subject: memblock: Remove memblock_find()

Nobody uses it anymore. Its semantics were ... weird.

Signed-off-by: Benjamin Herrenschmidt
---
 mm/memblock.c | 32 --------------------------------
 1 file changed, 32 deletions(-)

(limited to 'mm')

diff --git a/mm/memblock.c b/mm/memblock.c
index aa88c62bce7f..8a118b71cbec 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -531,35 +531,3 @@ int memblock_is_region_reserved(u64 base, u64 size)
 	return memblock_overlaps_region(&memblock.reserved, base, size) >= 0;
 }
 
-/*
- * Given a <base, len>, find which memory regions belong to this range.
- * Adjust the request and return a contiguous chunk.
- */
-int memblock_find(struct memblock_region *res)
-{
-	int i;
-	u64 rstart, rend;
-
-	rstart = res->base;
-	rend = rstart + res->size - 1;
-
-	for (i = 0; i < memblock.memory.cnt; i++) {
-		u64 start = memblock.memory.regions[i].base;
-		u64 end = start + memblock.memory.regions[i].size - 1;
-
-		if (start > rend)
-			return -1;
-
-		if ((end >= rstart) && (start < rend)) {
-			/* adjust the request */
-			if (rstart < start)
-				rstart = start;
-			if (rend > end)
-				rend = end;
-			res->base = rstart;
-			res->size = rend - rstart + 1;
-			return 0;
-		}
-	}
-	return -1;
-}
--
cgit v1.2.3
From 35a1f0bd07015dde66501b47cfb6ddc72ebe7346 Mon Sep 17 00:00:00 2001
From: Benjamin Herrenschmidt
Date: Tue, 6 Jul 2010 15:38:58 -0700
Subject: memblock: Remove nid_range argument, arch provides
 memblock_nid_range() instead

Signed-off-by: Benjamin Herrenschmidt
---
 mm/memblock.c | 13 ++++++++-----
 1 file changed, 8 insertions(+), 5 deletions(-)

(limited to 'mm')

diff --git a/mm/memblock.c b/mm/memblock.c
index 8a118b71cbec..13807f280ada 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -319,7 +319,6 @@ static u64 __init memblock_alloc_nid_unreserved(u64 start, u64 end,
 }
 
 static u64 __init memblock_alloc_nid_region(struct memblock_region *mp,
-				       u64 (*nid_range)(u64, u64, int *),
 				       u64 size, u64 align, int nid)
 {
 	u64 start, end;
@@ -332,7 +331,7 @@ static u64 __init memblock_alloc_nid_region(struct memblock_region *mp,
 		u64 this_end;
 		int this_nid;
 
-		this_end = nid_range(start, end, &this_nid);
+		this_end = memblock_nid_range(start, end, &this_nid);
 		if (this_nid == nid) {
 			u64 ret = memblock_alloc_nid_unreserved(start, this_end,
 								size, align);
@@ -345,8 +344,7 @@ static u64 __init memblock_alloc_nid_region(struct memblock_region *mp,
 	return ~(u64)0;
 }
 
-u64 __init memblock_alloc_nid(u64 size, u64 align, int nid,
-				u64 (*nid_range)(u64 start, u64 end, int *nid))
+u64 __init memblock_alloc_nid(u64 size, u64 align, int nid)
 {
 	struct memblock_type *mem = &memblock.memory;
 	int i;
@@ -357,7 +355,6 @@ u64 __init memblock_alloc_nid(u64 size, u64 align, int nid,
 
 	for (i = 0; i < mem->cnt; i++) {
 		u64 ret = memblock_alloc_nid_region(&mem->regions[i],
-					       nid_range,
 					       size, align, nid);
 		if (ret != ~(u64)0)
 			return ret;
@@ -531,3 +528,9 @@ int memblock_is_region_reserved(u64 base, u64 size)
 	return memblock_overlaps_region(&memblock.reserved, base, size) >= 0;
 }
 
+u64 __weak memblock_nid_range(u64 start, u64 end, int *nid)
+{
+	*nid = 0;
+
+	return end;
+}
--
cgit v1.2.3
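Because the default is declared __weak, a NUMA-capable architecture overrides it
simply by providing a strong definition with the same signature. A hedged sketch
of such an override (arch_node_boundary() is a made-up stand-in for the arch's
real NUMA lookup, not an existing kernel function):

	/* Report which node 'start' falls in and how far that node extends,
	 * clamped to 'end'. The strong definition overrides the __weak one. */
	u64 memblock_nid_range(u64 start, u64 end, int *nid)
	{
		u64 node_end = arch_node_boundary(start, nid);	/* hypothetical */

		return node_end < end ? node_end : end;
	}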
From c3f72b5706716ada7923def513486ab7bb3a5301 Mon Sep 17 00:00:00 2001
From: Benjamin Herrenschmidt
Date: Tue, 6 Jul 2010 15:38:59 -0700
Subject: memblock: Factor the lowest level alloc function

Signed-off-by: Benjamin Herrenschmidt
---
 mm/memblock.c | 59 +++++++++++++++++++++++++++--------------------------------
 1 file changed, 27 insertions(+), 32 deletions(-)

(limited to 'mm')

diff --git a/mm/memblock.c b/mm/memblock.c
index 13807f280ada..e264e8c70892 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -294,8 +294,8 @@ static u64 memblock_align_up(u64 addr, u64 size)
 	return (addr + (size - 1)) & ~(size - 1);
 }
 
-static u64 __init memblock_alloc_nid_unreserved(u64 start, u64 end,
-						u64 size, u64 align)
+static u64 __init memblock_alloc_region(u64 start, u64 end,
+					u64 size, u64 align)
 {
 	u64 base, res_base;
 	long j;
@@ -318,6 +318,13 @@ static u64 __init memblock_alloc_nid_unreserved(u64 start, u64 end,
 	return ~(u64)0;
 }
 
+u64 __weak __init memblock_nid_range(u64 start, u64 end, int *nid)
+{
+	*nid = 0;
+
+	return end;
+}
+
 static u64 __init memblock_alloc_nid_region(struct memblock_region *mp,
 				       u64 size, u64 align, int nid)
 {
@@ -333,8 +340,7 @@ static u64 __init memblock_alloc_nid_region(struct memblock_region *mp,
 
 		this_end = memblock_nid_range(start, end, &this_nid);
 		if (this_nid == nid) {
-			u64 ret = memblock_alloc_nid_unreserved(start, this_end,
-								size, align);
+			u64 ret = memblock_alloc_region(start, this_end, size, align);
 			if (ret != ~(u64)0)
 				return ret;
 		}
@@ -351,6 +357,10 @@ u64 __init memblock_alloc_nid(u64 size, u64 align, int nid)
 
 	BUG_ON(0 == size);
 
+	/* We do a bottom-up search for a region with the right
+	 * nid since that's easier considering how memblock_nid_range()
+	 * works
+	 */
 	size = memblock_align_up(size, align);
 
 	for (i = 0; i < mem->cnt; i++) {
@@ -383,7 +393,7 @@ u64 __init memblock_alloc_base(u64 size, u64 align, u64 max_addr)
 
 u64 __init __memblock_alloc_base(u64 size, u64 align, u64 max_addr)
 {
-	long i, j;
+	long i;
 	u64 base = 0;
 	u64 res_base;
 
@@ -396,33 +406,24 @@ u64 __init __memblock_alloc_base(u64 size, u64 align, u64 max_addr)
 	if (max_addr == MEMBLOCK_ALLOC_ANYWHERE)
 		max_addr = MEMBLOCK_REAL_LIMIT;
 
+	/* Pump up max_addr */
+	if (max_addr == MEMBLOCK_ALLOC_ANYWHERE)
+		max_addr = ~(u64)0;
+
+	/* We do a top-down search, this tends to limit memory
+	 * fragmentation by keeping early boot allocs near the
+	 * top of memory
+	 */
 	for (i = memblock.memory.cnt - 1; i >= 0; i--) {
 		u64 memblockbase = memblock.memory.regions[i].base;
 		u64 memblocksize = memblock.memory.regions[i].size;
 
 		if (memblocksize < size)
 			continue;
-		if (max_addr == MEMBLOCK_ALLOC_ANYWHERE)
-			base = memblock_align_down(memblockbase + memblocksize - size, align);
-		else if (memblockbase < max_addr) {
-			base = min(memblockbase + memblocksize, max_addr);
-			base = memblock_align_down(base - size, align);
-		} else
-			continue;
-
-		while (base && memblockbase <= base) {
-			j = memblock_overlaps_region(&memblock.reserved, base, size);
-			if (j < 0) {
-				/* this area isn't reserved, take it */
-				if (memblock_add_region(&memblock.reserved, base, size) < 0)
-					return 0;
-				return base;
-			}
-			res_base = memblock.reserved.regions[j].base;
-			if (res_base < size)
-				break;
-			base = memblock_align_down(res_base - size, align);
-		}
+		base = min(memblockbase + memblocksize, max_addr);
+		res_base = memblock_alloc_region(memblockbase, base, size, align);
+		if (res_base != ~(u64)0)
+			return res_base;
 	}
 	return 0;
 }
@@ -528,9 +529,3 @@ int memblock_is_region_reserved(u64 base, u64 size)
 	return memblock_overlaps_region(&memblock.reserved, base, size) >= 0;
 }
 
-u64 __weak memblock_nid_range(u64 start, u64 end, int *nid)
-{
-	*nid = 0;
-
-	return end;
-}
--
cgit v1.2.3

From 27f574c223d2c09610058b3ec7a29582d63a3e06 Mon Sep 17 00:00:00 2001
From: Benjamin Herrenschmidt
Date: Tue, 6 Jul 2010 15:39:00 -0700
Subject: memblock: Expose MEMBLOCK_ALLOC_ANYWHERE

Signed-off-by: Benjamin Herrenschmidt
---
 mm/memblock.c | 2 --
 1 file changed, 2 deletions(-)

(limited to 'mm')

diff --git a/mm/memblock.c b/mm/memblock.c
index e264e8c70892..0131684c42f8 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -15,8 +15,6 @@
 #include <linux/bitops.h>
 #include <linux/memblock.h>
 
-#define MEMBLOCK_ALLOC_ANYWHERE	0
-
 struct memblock memblock;
 
 static int memblock_debug;
--
cgit v1.2.3

From e63075a3c9377536d085bc013cd3fe6323162449 Mon Sep 17 00:00:00 2001
From: Benjamin Herrenschmidt
Date: Tue, 6 Jul 2010 15:39:01 -0700
Subject: memblock: Introduce default allocation limit and use it to replace
 explicit ones

This introduces memblock.current_limit which is used to limit
allocations from memblock_alloc() or memblock_alloc_base(...,
MEMBLOCK_ALLOC_ACCESSIBLE).

The old MEMBLOCK_ALLOC_ANYWHERE changes value from 0 to ~(u64)0 and can
still be used with memblock_alloc_base() to allocate really anywhere. It
is -no-longer- cropped to MEMBLOCK_REAL_LIMIT which disappears.

Note to archs: I'm leaving the default limit to MEMBLOCK_ALLOC_ANYWHERE. I
strongly recommend that you ensure that you set an appropriate limit
during boot in order to guarantee that a memblock_alloc() at any time
results in something that is accessible with a simple __va().

The reason is that a subsequent patch will introduce the ability for the
array to resize itself by reallocating itself. The MEMBLOCK core will
honor the current limit when performing those allocations.

Signed-off-by: Benjamin Herrenschmidt
---
 mm/memblock.c | 19 +++++++++++--------
 1 file changed, 11 insertions(+), 8 deletions(-)

(limited to 'mm')

diff --git a/mm/memblock.c b/mm/memblock.c
index 0131684c42f8..770c5bfac2cd 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -115,6 +115,8 @@ void __init memblock_init(void)
 	memblock.reserved.regions[0].base = 0;
 	memblock.reserved.regions[0].size = 0;
 	memblock.reserved.cnt = 1;
+
+	memblock.current_limit = MEMBLOCK_ALLOC_ANYWHERE;
 }
 
 void __init memblock_analyze(void)
@@ -373,7 +375,7 @@ u64 __init memblock_alloc_nid(u64 size, u64 align, int nid)
 
 u64 __init memblock_alloc(u64 size, u64 align)
 {
-	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ANYWHERE);
+	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
 }
 
 u64 __init memblock_alloc_base(u64 size, u64 align, u64 max_addr)
@@ -399,14 +401,9 @@ u64 __init __memblock_alloc_base(u64 size, u64 align, u64 max_addr)
 
 	size = memblock_align_up(size, align);
 
-	/* On some platforms, make sure we allocate lowmem */
-	/* Note that MEMBLOCK_REAL_LIMIT may be MEMBLOCK_ALLOC_ANYWHERE */
-	if (max_addr == MEMBLOCK_ALLOC_ANYWHERE)
-		max_addr = MEMBLOCK_REAL_LIMIT;
-
 	/* Pump up max_addr */
-	if (max_addr == MEMBLOCK_ALLOC_ANYWHERE)
-		max_addr = ~(u64)0;
+	if (max_addr == MEMBLOCK_ALLOC_ACCESSIBLE)
+		max_addr = memblock.current_limit;
 
 	/* We do a top-down search, this tends to limit memory
 	 * fragmentation by keeping early boot allocs near the
@@ -527,3 +524,9 @@ int memblock_is_region_reserved(u64 base, u64 size)
 	return memblock_overlaps_region(&memblock.reserved, base, size) >= 0;
 }
 
+
+void __init memblock_set_current_limit(u64 limit)
+{
+	memblock.current_limit = limit;
+}
+
--
cgit v1.2.3
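The intended arch-side pattern is to set the limit as soon as the always-mapped
range is known, so that a plain memblock_alloc() result can safely be used
through __va(). A sketch under that assumption (lowmem_top is an arbitrary
illustrative value, and the setup function is hypothetical):

	static void __init arch_memblock_setup(u64 lowmem_top)
	{
		u64 phys;

		/* From here on, memblock_alloc() stays below lowmem_top */
		memblock_set_current_limit(lowmem_top);

		phys = memblock_alloc(PAGE_SIZE, PAGE_SIZE);

		/* An explicit max_addr still bypasses the default limit */
		phys = memblock_alloc_base(PAGE_SIZE, PAGE_SIZE,
					   MEMBLOCK_ALLOC_ANYWHERE);
	}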
From cd3db0c4ca3d237e7ad20f7107216e575705d2b0 Mon Sep 17 00:00:00 2001
From: Benjamin Herrenschmidt
Date: Tue, 6 Jul 2010 15:39:02 -0700
Subject: memblock: Remove rmo_size, bury it in arch/powerpc where it belongs

The RMA (RMO is a misnomer) is a concept specific to ppc64 (in fact
server ppc64 though I hijack it on embedded ppc64 for similar purposes)
and represents the area of memory that can be accessed in real mode
(aka with MMU off), or on embedded, from the exception vectors (which
is bolted in the TLB) which pretty much boils down to the same thing.

We take that out of the generic MEMBLOCK data structure and move it into
arch/powerpc where it belongs, renaming it to "RMA" while at it.

Signed-off-by: Benjamin Herrenschmidt
---
 mm/memblock.c | 8 --------
 1 file changed, 8 deletions(-)

(limited to 'mm')

diff --git a/mm/memblock.c b/mm/memblock.c
index 770c5bfac2cd..73d903ebf3d4 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -49,7 +49,6 @@ void memblock_dump_all(void)
 		return;
 
 	pr_info("MEMBLOCK configuration:\n");
-	pr_info(" rmo_size = 0x%llx\n", (unsigned long long)memblock.rmo_size);
 	pr_info(" memory.size = 0x%llx\n", (unsigned long long)memblock.memory.size);
 
 	memblock_dump(&memblock.memory, "memory");
@@ -195,10 +194,6 @@ static long memblock_add_region(struct memblock_type *type, u64 base, u64 size)
 
 long memblock_add(u64 base, u64 size)
 {
-	/* On pSeries LPAR systems, the first MEMBLOCK is our RMO region. */
-	if (base == 0)
-		memblock.rmo_size = size;
-
 	return memblock_add_region(&memblock.memory, base, size);
 
 }
@@ -459,9 +454,6 @@ void __init memblock_enforce_memory_limit(u64 memory_limit)
 		break;
 	}
 
-	if (memblock.memory.regions[0].size < memblock.rmo_size)
-		memblock.rmo_size = memblock.memory.regions[0].size;
-
 	memory_limit = memblock_end_of_DRAM();
 
 	/* And truncate any reserves above the limit also. */
--
cgit v1.2.3
From 2898cc4cdf208f15246b7a1c6951d2b126a70fd6 Mon Sep 17 00:00:00 2001
From: Benjamin Herrenschmidt
Date: Wed, 4 Aug 2010 13:34:42 +1000
Subject: memblock: Change u64 to phys_addr_t

Let's not waste space and cycles on archs that don't support >32-bit
physical address space.

Signed-off-by: Benjamin Herrenschmidt
---
 mm/memblock.c | 118 +++++++++++++++++++++++++++++-----------------------------
 1 file changed, 60 insertions(+), 58 deletions(-)

(limited to 'mm')

diff --git a/mm/memblock.c b/mm/memblock.c
index 73d903ebf3d4..81da63592a68 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -55,13 +55,14 @@ void memblock_dump_all(void)
 	memblock_dump(&memblock.reserved, "reserved");
 }
 
-static unsigned long memblock_addrs_overlap(u64 base1, u64 size1, u64 base2,
-					u64 size2)
+static unsigned long memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
+					phys_addr_t base2, phys_addr_t size2)
 {
 	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
 }
 
-static long memblock_addrs_adjacent(u64 base1, u64 size1, u64 base2, u64 size2)
+static long memblock_addrs_adjacent(phys_addr_t base1, phys_addr_t size1,
+				phys_addr_t base2, phys_addr_t size2)
 {
 	if (base2 == base1 + size1)
 		return 1;
@@ -72,12 +73,12 @@ static long memblock_addrs_adjacent(u64 base1, u64 size1, u64 base2, u64 size2)
 }
 
 static long memblock_regions_adjacent(struct memblock_type *type,
-				unsigned long r1, unsigned long r2)
+					unsigned long r1, unsigned long r2)
 {
-	u64 base1 = type->regions[r1].base;
-	u64 size1 = type->regions[r1].size;
-	u64 base2 = type->regions[r2].base;
-	u64 size2 = type->regions[r2].size;
+	phys_addr_t base1 = type->regions[r1].base;
+	phys_addr_t size1 = type->regions[r1].size;
+	phys_addr_t base2 = type->regions[r2].base;
+	phys_addr_t size2 = type->regions[r2].size;
 
 	return memblock_addrs_adjacent(base1, size1, base2, size2);
 }
@@ -128,7 +129,7 @@ void __init memblock_analyze(void)
 		memblock.memory.size += memblock.memory.regions[i].size;
 }
 
-static long memblock_add_region(struct memblock_type *type, u64 base, u64 size)
+static long memblock_add_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size)
 {
 	unsigned long coalesced = 0;
 	long adjacent, i;
@@ -141,8 +142,8 @@ static long memblock_add_region(struct memblock_type *type, u64 base, u64 size)
 
 	/* First try and coalesce this MEMBLOCK with another. */
 	for (i = 0; i < type->cnt; i++) {
-		u64 rgnbase = type->regions[i].base;
-		u64 rgnsize = type->regions[i].size;
+		phys_addr_t rgnbase = type->regions[i].base;
+		phys_addr_t rgnsize = type->regions[i].size;
 
 		if ((rgnbase == base) && (rgnsize == size))
 			/* Already have this region, so we're done */
@@ -192,16 +193,16 @@ static long memblock_add_region(struct memblock_type *type, u64 base, u64 size)
 	return 0;
 }
 
-long memblock_add(u64 base, u64 size)
+long memblock_add(phys_addr_t base, phys_addr_t size)
 {
 	return memblock_add_region(&memblock.memory, base, size);
 
 }
 
-static long __memblock_remove(struct memblock_type *type, u64 base, u64 size)
+static long __memblock_remove(struct memblock_type *type, phys_addr_t base, phys_addr_t size)
 {
-	u64 rgnbegin, rgnend;
-	u64 end = base + size;
+	phys_addr_t rgnbegin, rgnend;
+	phys_addr_t end = base + size;
 	int i;
 
 	rgnbegin = rgnend = 0; /* supress gcc warnings */
@@ -246,17 +247,17 @@ static long __memblock_remove(struct memblock_type *type, u64 base, u64 size)
 	return memblock_add_region(type, end, rgnend - end);
 }
 
-long memblock_remove(u64 base, u64 size)
+long memblock_remove(phys_addr_t base, phys_addr_t size)
 {
 	return __memblock_remove(&memblock.memory, base, size);
 }
 
-long __init memblock_free(u64 base, u64 size)
+long __init memblock_free(phys_addr_t base, phys_addr_t size)
 {
 	return __memblock_remove(&memblock.reserved, base, size);
 }
 
-long __init memblock_reserve(u64 base, u64 size)
+long __init memblock_reserve(phys_addr_t base, phys_addr_t size)
 {
 	struct memblock_type *_rgn = &memblock.reserved;
 
@@ -265,13 +266,13 @@ long __init memblock_reserve(u64 base, u64 size)
 	return memblock_add_region(_rgn, base, size);
 }
 
-long memblock_overlaps_region(struct memblock_type *type, u64 base, u64 size)
+long memblock_overlaps_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size)
 {
 	unsigned long i;
 
 	for (i = 0; i < type->cnt; i++) {
-		u64 rgnbase = type->regions[i].base;
-		u64 rgnsize = type->regions[i].size;
+		phys_addr_t rgnbase = type->regions[i].base;
+		phys_addr_t rgnsize = type->regions[i].size;
 		if (memblock_addrs_overlap(base, size, rgnbase, rgnsize))
 			break;
 	}
@@ -279,20 +280,20 @@ long memblock_overlaps_region(struct memblock_type *type, u64 base, u64 size)
 	return (i < type->cnt) ? i : -1;
 }
 
-static u64 memblock_align_down(u64 addr, u64 size)
+static phys_addr_t memblock_align_down(phys_addr_t addr, phys_addr_t size)
 {
 	return addr & ~(size - 1);
 }
 
-static u64 memblock_align_up(u64 addr, u64 size)
+static phys_addr_t memblock_align_up(phys_addr_t addr, phys_addr_t size)
 {
 	return (addr + (size - 1)) & ~(size - 1);
 }
 
-static u64 __init memblock_alloc_region(u64 start, u64 end,
-					u64 size, u64 align)
+static phys_addr_t __init memblock_alloc_region(phys_addr_t start, phys_addr_t end,
+					phys_addr_t size, phys_addr_t align)
 {
-	u64 base, res_base;
+	phys_addr_t base, res_base;
 	long j;
 
 	base = memblock_align_down((end - size), align);
@@ -301,7 +302,7 @@ static u64 __init memblock_alloc_region(u64 start, u64 end,
 		if (j < 0) {
 			/* this area isn't reserved, take it */
 			if (memblock_add_region(&memblock.reserved, base, size) < 0)
-				base = ~(u64)0;
+				base = ~(phys_addr_t)0;
 			return base;
 		}
 		res_base = memblock.reserved.regions[j].base;
@@ -310,42 +311,43 @@ static u64 __init memblock_alloc_region(u64 start, u64 end,
 		base = memblock_align_down(res_base - size, align);
 	}
 
-	return ~(u64)0;
+	return ~(phys_addr_t)0;
 }
 
-u64 __weak __init memblock_nid_range(u64 start, u64 end, int *nid)
+phys_addr_t __weak __init memblock_nid_range(phys_addr_t start, phys_addr_t end, int *nid)
 {
 	*nid = 0;
 
 	return end;
 }
 
-static u64 __init memblock_alloc_nid_region(struct memblock_region *mp,
-					u64 size, u64 align, int nid)
+static phys_addr_t __init memblock_alloc_nid_region(struct memblock_region *mp,
+					phys_addr_t size,
+					phys_addr_t align, int nid)
 {
-	u64 start, end;
+	phys_addr_t start, end;
 
 	start = mp->base;
 	end = start + mp->size;
 
 	start = memblock_align_up(start, align);
 	while (start < end) {
-		u64 this_end;
+		phys_addr_t this_end;
 		int this_nid;
 
 		this_end = memblock_nid_range(start, end, &this_nid);
 		if (this_nid == nid) {
-			u64 ret = memblock_alloc_region(start, this_end, size, align);
-			if (ret != ~(u64)0)
+			phys_addr_t ret = memblock_alloc_region(start, this_end, size, align);
+			if (ret != ~(phys_addr_t)0)
 				return ret;
 		}
 		start = this_end;
 	}
 
-	return ~(u64)0;
+	return ~(phys_addr_t)0;
 }
 
-u64 __init memblock_alloc_nid(u64 size, u64 align, int nid)
+phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
 {
 	struct memblock_type *mem = &memblock.memory;
 	int i;
@@ -359,23 +361,23 @@ u64 __init memblock_alloc_nid(u64 size, u64 align, int nid)
 	size = memblock_align_up(size, align);
 
 	for (i = 0; i < mem->cnt; i++) {
-		u64 ret = memblock_alloc_nid_region(&mem->regions[i],
+		phys_addr_t ret = memblock_alloc_nid_region(&mem->regions[i],
 					       size, align, nid);
-		if (ret != ~(u64)0)
+		if (ret != ~(phys_addr_t)0)
 			return ret;
 	}
 
 	return memblock_alloc(size, align);
 }
 
-u64 __init memblock_alloc(u64 size, u64 align)
+phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
 {
 	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
 }
 
-u64 __init memblock_alloc_base(u64 size, u64 align, u64 max_addr)
+phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
 {
-	u64 alloc;
+	phys_addr_t alloc;
 
 	alloc = __memblock_alloc_base(size, align, max_addr);
 
@@ -386,11 +388,11 @@ u64 __init memblock_alloc_base(u64 size, u64 align, u64 max_addr)
 	return alloc;
 }
 
-u64 __init __memblock_alloc_base(u64 size, u64 align, u64 max_addr)
+phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
 {
 	long i;
-	u64 base = 0;
-	u64 res_base;
+	phys_addr_t base = 0;
+	phys_addr_t res_base;
 
 	BUG_ON(0 == size);
 
@@ -405,26 +407,26 @@ u64 __init __memblock_alloc_base(u64 size, u64 align, u64 max_addr)
 	 * top of memory
 	 */
 	for (i = memblock.memory.cnt - 1; i >= 0; i--) {
-		u64 memblockbase = memblock.memory.regions[i].base;
-		u64 memblocksize = memblock.memory.regions[i].size;
+		phys_addr_t memblockbase = memblock.memory.regions[i].base;
+		phys_addr_t memblocksize = memblock.memory.regions[i].size;
 
 		if (memblocksize < size)
 			continue;
 		base = min(memblockbase + memblocksize, max_addr);
 		res_base = memblock_alloc_region(memblockbase, base, size, align);
-		if (res_base != ~(u64)0)
+		if (res_base != ~(phys_addr_t)0)
 			return res_base;
 	}
 	return 0;
 }
 
 /* You must call memblock_analyze() before this. */
-u64 __init memblock_phys_mem_size(void)
+phys_addr_t __init memblock_phys_mem_size(void)
 {
 	return memblock.memory.size;
 }
 
-u64 memblock_end_of_DRAM(void)
+phys_addr_t memblock_end_of_DRAM(void)
 {
 	int idx = memblock.memory.cnt - 1;
 
@@ -432,10 +434,10 @@ u64 memblock_end_of_DRAM(void)
 }
 
 /* You must call memblock_analyze() after this. */
-void __init memblock_enforce_memory_limit(u64 memory_limit)
+void __init memblock_enforce_memory_limit(phys_addr_t memory_limit)
 {
 	unsigned long i;
-	u64 limit;
+	phys_addr_t limit;
 	struct memblock_region *p;
 
 	if (!memory_limit)
@@ -472,7 +474,7 @@ void __init memblock_enforce_memory_limit(u64 memory_limit)
 	}
 }
 
-static int memblock_search(struct memblock_type *type, u64 addr)
+static int memblock_search(struct memblock_type *type, phys_addr_t addr)
 {
 	unsigned int left = 0, right = type->cnt;
 
@@ -490,17 +492,17 @@ static int memblock_search(struct memblock_type *type, u64 addr)
 	return -1;
 }
 
-int __init memblock_is_reserved(u64 addr)
+int __init memblock_is_reserved(phys_addr_t addr)
 {
 	return memblock_search(&memblock.reserved, addr) != -1;
 }
 
-int memblock_is_memory(u64 addr)
+int memblock_is_memory(phys_addr_t addr)
 {
 	return memblock_search(&memblock.memory, addr) != -1;
 }
 
-int memblock_is_region_memory(u64 base, u64 size)
+int memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
 {
 	int idx = memblock_search(&memblock.reserved, base);
 
@@ -511,13 +513,13 @@ int memblock_is_region_memory(u64 base, u64 size)
 		 memblock.reserved.regions[idx].size) >= (base + size);
 }
 
-int memblock_is_region_reserved(u64 base, u64 size)
+int memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
 {
 	return memblock_overlaps_region(&memblock.reserved, base, size) >= 0;
 }
 
-void __init memblock_set_current_limit(u64 limit)
+void __init memblock_set_current_limit(phys_addr_t limit)
 {
 	memblock.current_limit = limit;
 }
--
cgit v1.2.3
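The space saving comes from phys_addr_t being configuration-dependent: it stays
32-bit unless the architecture declares a wider physical address space. From
include/linux/types.h, roughly:

	#ifdef CONFIG_PHYS_ADDR_T_64BIT
	typedef u64 phys_addr_t;
	#else
	typedef u32 phys_addr_t;
	#endif

On a 32-bit arch without extended physical addressing this halves each region
descriptor and keeps the address arithmetic in native-width registers.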
From 4734b594c6ca1be796d30c82d93fdf5160f45124 Mon Sep 17 00:00:00 2001
From: Benjamin Herrenschmidt
Date: Wed, 28 Jul 2010 14:31:29 +1000
Subject: memblock: Remove memblock_type.size and add memblock.memory_size
 instead

Right now, both the "memory" and "reserved" memblock_type structures have
a "size" member. It represents the calculated memory size in the former
case and is unused in the latter. This moves it out to the main memblock
structure instead.

Signed-off-by: Benjamin Herrenschmidt
---
 mm/memblock.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

(limited to 'mm')

diff --git a/mm/memblock.c b/mm/memblock.c
index 81da63592a68..5ae413e9afd8 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -49,7 +49,7 @@ void memblock_dump_all(void)
 		return;
 
 	pr_info("MEMBLOCK configuration:\n");
-	pr_info(" memory.size = 0x%llx\n", (unsigned long long)memblock.memory.size);
+	pr_info(" memory size = 0x%llx\n", (unsigned long long)memblock.memory_size);
 
 	memblock_dump(&memblock.memory, "memory");
 	memblock_dump(&memblock.reserved, "reserved");
@@ -123,10 +123,10 @@ void __init memblock_analyze(void)
 {
 	int i;
 
-	memblock.memory.size = 0;
+	memblock.memory_size = 0;
 
 	for (i = 0; i < memblock.memory.cnt; i++)
-		memblock.memory.size += memblock.memory.regions[i].size;
+		memblock.memory_size += memblock.memory.regions[i].size;
 }
 
 static long memblock_add_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size)
@@ -423,7 +423,7 @@ phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, ph
 /* You must call memblock_analyze() before this. */
 phys_addr_t __init memblock_phys_mem_size(void)
 {
-	return memblock.memory.size;
+	return memblock.memory_size;
 }
 
 phys_addr_t memblock_end_of_DRAM(void)
--
cgit v1.2.3

From bf23c51f1f49d3960f3cd8e3d2e7f943d9c41042 Mon Sep 17 00:00:00 2001
From: Benjamin Herrenschmidt
Date: Tue, 6 Jul 2010 15:39:06 -0700
Subject: memblock: Move memblock arrays to static storage in memblock.c and
 make their size a variable

This is in preparation for having resizable arrays.

Note that we still allocate one more than needed, this is unchanged from
the previous implementation.

Signed-off-by: Benjamin Herrenschmidt
---
 mm/memblock.c | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

(limited to 'mm')

diff --git a/mm/memblock.c b/mm/memblock.c
index 5ae413e9afd8..3c474502d92b 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -18,6 +18,8 @@
 struct memblock memblock;
 
 static int memblock_debug;
+static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS + 1];
+static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS + 1];
 
 static int __init early_memblock(char *p)
 {
@@ -104,6 +106,12 @@ static void memblock_coalesce_regions(struct memblock_type *type,
 
 void __init memblock_init(void)
 {
+	/* Hookup the initial arrays */
+	memblock.memory.regions = memblock_memory_init_regions;
+	memblock.memory.max = INIT_MEMBLOCK_REGIONS;
+	memblock.reserved.regions = memblock_reserved_init_regions;
+	memblock.reserved.max = INIT_MEMBLOCK_REGIONS;
+
 	/* Create a dummy zero size MEMBLOCK which will get coalesced away later.
 	 * This simplifies the memblock_add() code below...
 	 */
@@ -169,7 +177,7 @@ static long memblock_add_region(struct memblock_type *type, phys_addr_t base, ph
 
 	if (coalesced)
 		return coalesced;
-	if (type->cnt >= MAX_MEMBLOCK_REGIONS)
+	if (type->cnt >= type->max)
 		return -1;
 
 	/* Couldn't coalesce the MEMBLOCK, so add it to the sorted table. */
--
cgit v1.2.3
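With the arrays moved out of the header and a max field tracking capacity, the
type now describes an array it merely points at, which is exactly the hook the
later resize work needs. Sketching the header side implied by this change (not
part of the mm/ diff above):

	struct memblock_type {
		unsigned long cnt;	/* number of regions in use */
		unsigned long max;	/* capacity of the regions array */
		struct memblock_region *regions;
	};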
From 449e8df39d3f94a69deae8341f157f6ef5999015 Mon Sep 17 00:00:00 2001
From: Benjamin Herrenschmidt
Date: Tue, 6 Jul 2010 15:39:07 -0700
Subject: memblock: Add debug markers at the end of the array

Since we allocate one more than needed, why not do a bit of sanity
checking here to ensure we don't walk past the end of the array?

Signed-off-by: Benjamin Herrenschmidt
---
 mm/memblock.c | 11 +++++++++++
 1 file changed, 11 insertions(+)

(limited to 'mm')

diff --git a/mm/memblock.c b/mm/memblock.c
index 3c474502d92b..a925866e1455 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -13,6 +13,7 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/bitops.h>
+#include <linux/poison.h>
 #include <linux/memblock.h>
 
 struct memblock memblock;
@@ -112,6 +113,10 @@ void __init memblock_init(void)
 	memblock.reserved.regions = memblock_reserved_init_regions;
 	memblock.reserved.max = INIT_MEMBLOCK_REGIONS;
 
+	/* Write a marker in the unused last array entry */
+	memblock.memory.regions[INIT_MEMBLOCK_REGIONS].base = (phys_addr_t)RED_INACTIVE;
+	memblock.reserved.regions[INIT_MEMBLOCK_REGIONS].base = (phys_addr_t)RED_INACTIVE;
+
 	/* Create a dummy zero size MEMBLOCK which will get coalesced away later.
 	 * This simplifies the memblock_add() code below...
 	 */
@@ -131,6 +136,12 @@ void __init memblock_analyze(void)
 {
 	int i;
 
+	/* Check marker in the unused last array entry */
+	WARN_ON(memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS].base
+		 != (phys_addr_t)RED_INACTIVE);
+	WARN_ON(memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS].base
+		 != (phys_addr_t)RED_INACTIVE);
+
 	memblock.memory_size = 0;
 
 	for (i = 0; i < memblock.memory.cnt; i++)
--
cgit v1.2.3
From 3a9c2c81eb2024c136cc534df534f93682d516d0 Mon Sep 17 00:00:00 2001
From: Benjamin Herrenschmidt
Date: Mon, 12 Jul 2010 13:28:15 +1000
Subject: memblock: Make memblock_find_region() out of memblock_alloc_region()

This function will be used to locate a free area to put the new memblock
arrays when attempting to resize them. memblock_alloc_region() is gone,
the two callsites now call memblock_add_region().

Signed-off-by: Benjamin Herrenschmidt
---
v2. Fix membase_alloc_nid_region() conversion
---
 mm/memblock.c | 20 +++++++++-----------
 1 file changed, 9 insertions(+), 11 deletions(-)

(limited to 'mm')

diff --git a/mm/memblock.c b/mm/memblock.c
index a925866e1455..c1d2060e213e 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -309,8 +309,8 @@ static phys_addr_t memblock_align_up(phys_addr_t addr, phys_addr_t size)
 	return (addr + (size - 1)) & ~(size - 1);
 }
 
-static phys_addr_t __init memblock_alloc_region(phys_addr_t start, phys_addr_t end,
-					phys_addr_t size, phys_addr_t align)
+static phys_addr_t __init memblock_find_region(phys_addr_t start, phys_addr_t end,
+					phys_addr_t size, phys_addr_t align)
 {
 	phys_addr_t base, res_base;
 	long j;
@@ -318,12 +318,8 @@ static phys_addr_t __init memblock_alloc_region(phys_addr_t start, phys_addr_t e
 	base = memblock_align_down((end - size), align);
 	while (start <= base) {
 		j = memblock_overlaps_region(&memblock.reserved, base, size);
-		if (j < 0) {
-			/* this area isn't reserved, take it */
-			if (memblock_add_region(&memblock.reserved, base, size) < 0)
-				base = ~(phys_addr_t)0;
+		if (j < 0)
 			return base;
-		}
 		res_base = memblock.reserved.regions[j].base;
 		if (res_base < size)
 			break;
@@ -356,8 +352,9 @@ static phys_addr_t __init memblock_alloc_nid_region(struct memblock_region *mp,
 
 		this_end = memblock_nid_range(start, end, &this_nid);
 		if (this_nid == nid) {
-			phys_addr_t ret = memblock_alloc_region(start, this_end, size, align);
-			if (ret != ~(phys_addr_t)0)
+			phys_addr_t ret = memblock_find_region(start, this_end, size, align);
+			if (ret != ~(phys_addr_t)0 &&
+			    memblock_add_region(&memblock.reserved, ret, size) >= 0)
 				return ret;
 		}
 		start = this_end;
@@ -432,8 +429,9 @@ phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, ph
 		if (memblocksize < size)
 			continue;
 		base = min(memblockbase + memblocksize, max_addr);
-		res_base = memblock_alloc_region(memblockbase, base, size, align);
-		if (res_base != ~(phys_addr_t)0)
+		res_base = memblock_find_region(memblockbase, base, size, align);
+		if (res_base != ~(phys_addr_t)0 &&
+		    memblock_add_region(&memblock.reserved, res_base, size) >= 0)
 			return res_base;
 	}
 	return 0;
--
cgit v1.2.3

From 4d629f9a02e32f8fe035a11018472ea8ff9647eb Mon Sep 17 00:00:00 2001
From: Benjamin Herrenschmidt
Date: Tue, 6 Jul 2010 15:39:09 -0700
Subject: memblock: Define MEMBLOCK_ERROR internally instead of using
 ~(phys_addr_t)0

Signed-off-by: Benjamin Herrenschmidt
---
 mm/memblock.c | 12 +++++++-----
 1 file changed, 7 insertions(+), 5 deletions(-)

(limited to 'mm')

diff --git a/mm/memblock.c b/mm/memblock.c
index c1d2060e213e..fc7f97b2d994 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -22,6 +22,8 @@ static int memblock_debug;
 static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS + 1];
 static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS + 1];
 
+#define MEMBLOCK_ERROR	(~(phys_addr_t)0)
+
 static int __init early_memblock(char *p)
 {
 	if (p && strstr(p, "debug"))
@@ -326,7 +328,7 @@ static phys_addr_t __init memblock_find_region(phys_addr_t start, phys_addr_t en
 		base = memblock_align_down(res_base - size, align);
 	}
 
-	return ~(phys_addr_t)0;
+	return MEMBLOCK_ERROR;
 }
 
 phys_addr_t __weak __init memblock_nid_range(phys_addr_t start, phys_addr_t end, int *nid)
@@ -353,14 +355,14 @@ static phys_addr_t __init memblock_alloc_nid_region(struct memblock_region *mp,
 		this_end = memblock_nid_range(start, end, &this_nid);
 		if (this_nid == nid) {
 			phys_addr_t ret = memblock_find_region(start, this_end, size, align);
-			if (ret != ~(phys_addr_t)0 &&
+			if (ret != MEMBLOCK_ERROR &&
 			    memblock_add_region(&memblock.reserved, ret, size) >= 0)
 				return ret;
 		}
 		start = this_end;
 	}
 
-	return ~(phys_addr_t)0;
+	return MEMBLOCK_ERROR;
 }
 
 phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
@@ -379,7 +381,7 @@ phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int n
 	for (i = 0; i < mem->cnt; i++) {
 		phys_addr_t ret = memblock_alloc_nid_region(&mem->regions[i],
 					       size, align, nid);
-		if (ret != ~(phys_addr_t)0)
+		if (ret != MEMBLOCK_ERROR)
 			return ret;
 	}
 
@@ -430,7 +432,7 @@ phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, ph
 			continue;
 		base = min(memblockbase + memblocksize, max_addr);
 		res_base = memblock_find_region(memblockbase, base, size, align);
-		if (res_base != ~(phys_addr_t)0 &&
+		if (res_base != MEMBLOCK_ERROR &&
 		    memblock_add_region(&memblock.reserved, res_base, size) >= 0)
 			return res_base;
 	}
--
cgit v1.2.3
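Internal callers now compare against the named sentinel instead of open-coding
the all-ones value. A minimal sketch of the find-then-reserve pattern as it
stands after this patch (the helper itself is hypothetical; the two real
callers in the diff inline the same logic):

	/* Sketch: reserve 'size' bytes somewhere in [start, end), or fail. */
	static phys_addr_t __init reserve_in_range(phys_addr_t start,
						   phys_addr_t end,
						   phys_addr_t size,
						   phys_addr_t align)
	{
		phys_addr_t base = memblock_find_region(start, end, size, align);

		if (base == MEMBLOCK_ERROR ||
		    memblock_add_region(&memblock.reserved, base, size) < 0)
			return 0;	/* nothing suitable, or table full */
		return base;
	}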
From 7590abe891c85fbc65dc906516d0bf89e070c19a Mon Sep 17 00:00:00 2001
From: Benjamin Herrenschmidt
Date: Tue, 6 Jul 2010 15:39:10 -0700
Subject: memblock: Move memblock_init() to the bottom of the file

It's a real PITA to have to search for it in the middle

Signed-off-by: Benjamin Herrenschmidt
---
 mm/memblock.c | 54 +++++++++++++++++++++++++++---------------------------
 1 file changed, 27 insertions(+), 27 deletions(-)

(limited to 'mm')

diff --git a/mm/memblock.c b/mm/memblock.c
index fc7f97b2d994..ae856d4e25a3 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -107,33 +107,6 @@ static void memblock_coalesce_regions(struct memblock_type *type,
 	memblock_remove_region(type, r2);
 }
 
-void __init memblock_init(void)
-{
-	/* Hookup the initial arrays */
-	memblock.memory.regions = memblock_memory_init_regions;
-	memblock.memory.max = INIT_MEMBLOCK_REGIONS;
-	memblock.reserved.regions = memblock_reserved_init_regions;
-	memblock.reserved.max = INIT_MEMBLOCK_REGIONS;
-
-	/* Write a marker in the unused last array entry */
-	memblock.memory.regions[INIT_MEMBLOCK_REGIONS].base = (phys_addr_t)RED_INACTIVE;
-	memblock.reserved.regions[INIT_MEMBLOCK_REGIONS].base = (phys_addr_t)RED_INACTIVE;
-
-	/* Create a dummy zero size MEMBLOCK which will get coalesced away later.
-	 * This simplifies the memblock_add() code below...
-	 */
-	memblock.memory.regions[0].base = 0;
-	memblock.memory.regions[0].size = 0;
-	memblock.memory.cnt = 1;
-
-	/* Ditto. */
-	memblock.reserved.regions[0].base = 0;
-	memblock.reserved.regions[0].size = 0;
-	memblock.reserved.cnt = 1;
-
-	memblock.current_limit = MEMBLOCK_ALLOC_ANYWHERE;
-}
-
 void __init memblock_analyze(void)
 {
 	int i;
@@ -543,3 +516,30 @@ void __init memblock_set_current_limit(phys_addr_t limit)
 	memblock.current_limit = limit;
 }
 
+void __init memblock_init(void)
+{
+	/* Hookup the initial arrays */
+	memblock.memory.regions = memblock_memory_init_regions;
+	memblock.memory.max = INIT_MEMBLOCK_REGIONS;
+	memblock.reserved.regions = memblock_reserved_init_regions;
+	memblock.reserved.max = INIT_MEMBLOCK_REGIONS;
+
+	/* Write a marker in the unused last array entry */
+	memblock.memory.regions[INIT_MEMBLOCK_REGIONS].base = (phys_addr_t)RED_INACTIVE;
+	memblock.reserved.regions[INIT_MEMBLOCK_REGIONS].base = (phys_addr_t)RED_INACTIVE;
+
+	/* Create a dummy zero size MEMBLOCK which will get coalesced away later.
+	 * This simplifies the memblock_add() code below...
+	 */
+	memblock.memory.regions[0].base = 0;
+	memblock.memory.regions[0].size = 0;
+	memblock.memory.cnt = 1;
+
+	/* Ditto. */
+	memblock.reserved.regions[0].base = 0;
+	memblock.reserved.regions[0].size = 0;
+	memblock.reserved.cnt = 1;
+
+	memblock.current_limit = MEMBLOCK_ALLOC_ANYWHERE;
+}
+
--
cgit v1.2.3

From 7f219c736f9439acb1c50d264fbee93c353773ca Mon Sep 17 00:00:00 2001
From: Benjamin Herrenschmidt
Date: Mon, 12 Jul 2010 14:24:57 +1000
Subject: memblock: split memblock_find_base() out of __memblock_alloc_base()

This will be used by the array resize code and might prove useful to
some arch code as well, at which point it can be made non-static.

Also add comment as to why aligning size is important

Signed-off-by: Benjamin Herrenschmidt
---
v2. Fix loss of size alignment
v3. Fix result code
---
 mm/memblock.c | 58 ++++++++++++++++++++++++++++++++++++++--------------------
 1 file changed, 38 insertions(+), 20 deletions(-)

(limited to 'mm')

diff --git a/mm/memblock.c b/mm/memblock.c
index ae856d4e25a3..b775fca4fba5 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -345,12 +345,15 @@ phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int n
 
 	BUG_ON(0 == size);
 
+	/* We align the size to limit fragmentation. Without this, a lot of
+	 * small allocs quickly eat up the whole reserve array on sparc
+	 */
+	size = memblock_align_up(size, align);
+
 	/* We do a bottom-up search for a region with the right
 	 * nid since that's easier considering how memblock_nid_range()
 	 * works
 	 */
-	size = memblock_align_up(size, align);
-
 	for (i = 0; i < mem->cnt; i++) {
 		phys_addr_t ret = memblock_alloc_nid_region(&mem->regions[i],
 					       size, align, nid);
@@ -366,20 +369,7 @@ phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
 	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
 }
 
-phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
-{
-	phys_addr_t alloc;
-
-	alloc = __memblock_alloc_base(size, align, max_addr);
-
-	if (alloc == 0)
-		panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
-		      (unsigned long long) size, (unsigned long long) max_addr);
-
-	return alloc;
-}
-
-phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
+static phys_addr_t __init memblock_find_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
 {
 	long i;
 	phys_addr_t base = 0;
@@ -387,8 +377,6 @@ phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, ph
 
 	BUG_ON(0 == size);
 
-	size = memblock_align_up(size, align);
-
 	/* Pump up max_addr */
 	if (max_addr == MEMBLOCK_ALLOC_ACCESSIBLE)
 		max_addr = memblock.current_limit;
@@ -405,13 +393,43 @@ phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, ph
 			continue;
 		base = min(memblockbase + memblocksize, max_addr);
 		res_base = memblock_find_region(memblockbase, base, size, align);
-		if (res_base != MEMBLOCK_ERROR &&
-		    memblock_add_region(&memblock.reserved, res_base, size) >= 0)
+		if (res_base != MEMBLOCK_ERROR)
 			return res_base;
 	}
+	return MEMBLOCK_ERROR;
+}
+
+phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
+{
+	phys_addr_t found;
+
+	/* We align the size to limit fragmentation. Without this, a lot of
+	 * small allocs quickly eat up the whole reserve array on sparc
+	 */
+	size = memblock_align_up(size, align);
+
+	found = memblock_find_base(size, align, max_addr);
+	if (found != MEMBLOCK_ERROR &&
+	    memblock_add_region(&memblock.reserved, found, size) >= 0)
+		return found;
+
 	return 0;
 }
 
+phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
+{
+	phys_addr_t alloc;
+
+	alloc = __memblock_alloc_base(size, align, max_addr);
+
+	if (alloc == 0)
+		panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
+		      (unsigned long long) size, (unsigned long long) max_addr);
+
+	return alloc;
+}
+
+
 /* You must call memblock_analyze() before this. */
 phys_addr_t __init memblock_phys_mem_size(void)
 {
--
cgit v1.2.3
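The split leaves the two public entry points with clearly different failure
contracts: __memblock_alloc_base() returns 0 when nothing fits, while
memblock_alloc_base() panics. A hypothetical caller exploiting that to fall
back from a constrained placement:

	static phys_addr_t __init alloc_below_or_anywhere(phys_addr_t size,
							  phys_addr_t limit)
	{
		phys_addr_t base = __memblock_alloc_base(size, PAGE_SIZE, limit);

		if (!base)	/* nothing free below 'limit' */
			base = memblock_alloc_base(size, PAGE_SIZE,
						   MEMBLOCK_ALLOC_ANYWHERE);
		return base;	/* the second call panics rather than fail */
	}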
From 6ed311b282210d23d1a2cb2665aa899979993628 Mon Sep 17 00:00:00 2001
From: Benjamin Herrenschmidt
Date: Mon, 12 Jul 2010 14:36:48 +1000
Subject: memblock: Move functions around into a more sensible order

Some shuffling is needed for doing array resize so we may as well put
some sense into the ordering of the functions in the whole memblock.c
file. No code change. Added some comments.

Signed-off-by: Benjamin Herrenschmidt
---
 mm/memblock.c | 301 +++++++++++++++++++++++++++++-----------------------------
 1 file changed, 159 insertions(+), 142 deletions(-)

(limited to 'mm')

diff --git a/mm/memblock.c b/mm/memblock.c
index b775fca4fba5..e5f3f9bdc311 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -24,40 +24,18 @@ static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIO
 
 #define MEMBLOCK_ERROR	(~(phys_addr_t)0)
 
-static int __init early_memblock(char *p)
-{
-	if (p && strstr(p, "debug"))
-		memblock_debug = 1;
-	return 0;
-}
-early_param("memblock", early_memblock);
+/*
+ * Address comparison utilities
+ */
 
-static void memblock_dump(struct memblock_type *region, char *name)
+static phys_addr_t memblock_align_down(phys_addr_t addr, phys_addr_t size)
 {
-	unsigned long long base, size;
-	int i;
-
-	pr_info(" %s.cnt = 0x%lx\n", name, region->cnt);
-
-	for (i = 0; i < region->cnt; i++) {
-		base = region->regions[i].base;
-		size = region->regions[i].size;
-
-		pr_info(" %s[0x%x]\t0x%016llx - 0x%016llx, 0x%llx bytes\n",
-		    name, i, base, base + size - 1, size);
-	}
+	return addr & ~(size - 1);
 }
 
-void memblock_dump_all(void)
+static phys_addr_t memblock_align_up(phys_addr_t addr, phys_addr_t size)
 {
-	if (!memblock_debug)
-		return;
-
-	pr_info("MEMBLOCK configuration:\n");
-	pr_info(" memory size = 0x%llx\n", (unsigned long long)memblock.memory_size);
-
-	memblock_dump(&memblock.memory, "memory");
-	memblock_dump(&memblock.reserved, "reserved");
+	return (addr + (size - 1)) & ~(size - 1);
 }
 
 static unsigned long memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
@@ -88,6 +66,77 @@ static long memblock_regions_adjacent(struct memblock_type *type,
 	return memblock_addrs_adjacent(base1, size1, base2, size2);
 }
 
+long memblock_overlaps_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size)
+{
+	unsigned long i;
+
+	for (i = 0; i < type->cnt; i++) {
+		phys_addr_t rgnbase = type->regions[i].base;
+		phys_addr_t rgnsize = type->regions[i].size;
+		if (memblock_addrs_overlap(base, size, rgnbase, rgnsize))
+			break;
+	}
+
+	return (i < type->cnt) ? i : -1;
+}
+
+/*
+ * Find, allocate, deallocate or reserve unreserved regions. All allocations
+ * are top-down.
+ */
+
+static phys_addr_t __init memblock_find_region(phys_addr_t start, phys_addr_t end,
+					  phys_addr_t size, phys_addr_t align)
+{
+	phys_addr_t base, res_base;
+	long j;
+
+	base = memblock_align_down((end - size), align);
+	while (start <= base) {
+		j = memblock_overlaps_region(&memblock.reserved, base, size);
+		if (j < 0)
+			return base;
+		res_base = memblock.reserved.regions[j].base;
+		if (res_base < size)
+			break;
+		base = memblock_align_down(res_base - size, align);
+	}
+
+	return MEMBLOCK_ERROR;
+}
+
+static phys_addr_t __init memblock_find_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
+{
+	long i;
+	phys_addr_t base = 0;
+	phys_addr_t res_base;
+
+	BUG_ON(0 == size);
+
+	size = memblock_align_up(size, align);
+
+	/* Pump up max_addr */
+	if (max_addr == MEMBLOCK_ALLOC_ACCESSIBLE)
+		max_addr = memblock.current_limit;
+
+	/* We do a top-down search, this tends to limit memory
+	 * fragmentation by keeping early boot allocs near the
+	 * top of memory
+	 */
+	for (i = memblock.memory.cnt - 1; i >= 0; i--) {
+		phys_addr_t memblockbase = memblock.memory.regions[i].base;
+		phys_addr_t memblocksize = memblock.memory.regions[i].size;
+
+		if (memblocksize < size)
+			continue;
+		base = min(memblockbase + memblocksize, max_addr);
+		res_base = memblock_find_region(memblockbase, base, size, align);
+		if (res_base != MEMBLOCK_ERROR)
+			return res_base;
+	}
+	return MEMBLOCK_ERROR;
+}
+
 static void memblock_remove_region(struct memblock_type *type, unsigned long r)
 {
 	unsigned long i;
@@ -107,22 +156,6 @@ static void memblock_coalesce_regions(struct memblock_type *type,
 	memblock_remove_region(type, r2);
 }
 
-void __init memblock_analyze(void)
-{
-	int i;
-
-	/* Check marker in the unused last array entry */
-	WARN_ON(memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS].base
-		!= (phys_addr_t)RED_INACTIVE);
-	WARN_ON(memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS].base
-		!= (phys_addr_t)RED_INACTIVE);
-
-	memblock.memory_size = 0;
-
-	for (i = 0; i < memblock.memory.cnt; i++)
-		memblock.memory_size += memblock.memory.regions[i].size;
-}
-
 static long memblock_add_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size)
 {
 	unsigned long coalesced = 0;
@@ -260,49 +293,47 @@ long __init memblock_reserve(phys_addr_t base, phys_addr_t size)
 	return memblock_add_region(_rgn, base, size);
 }
 
-long memblock_overlaps_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size)
+phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
 {
-	unsigned long i;
+	phys_addr_t found;
 
-	for (i = 0; i < type->cnt; i++) {
-		phys_addr_t rgnbase = type->regions[i].base;
-		phys_addr_t rgnsize = type->regions[i].size;
-		if (memblock_addrs_overlap(base, size, rgnbase, rgnsize))
-			break;
-	}
+	/* We align the size to limit fragmentation. Without this, a lot of
+	 * small allocs quickly eat up the whole reserve array on sparc
	 */
+	size = memblock_align_up(size, align);
 
-	return (i < type->cnt) ? i : -1;
-}
+	found = memblock_find_base(size, align, max_addr);
+	if (found != MEMBLOCK_ERROR &&
+	    memblock_add_region(&memblock.reserved, found, size) >= 0)
+		return found;
 
-static phys_addr_t memblock_align_down(phys_addr_t addr, phys_addr_t size)
-{
-	return addr & ~(size - 1);
+	return 0;
 }
 
-static phys_addr_t memblock_align_up(phys_addr_t addr, phys_addr_t size)
+phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
 {
-	return (addr + (size - 1)) & ~(size - 1);
+	phys_addr_t alloc;
+
+	alloc = __memblock_alloc_base(size, align, max_addr);
+
+	if (alloc == 0)
+		panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
+		      (unsigned long long) size, (unsigned long long) max_addr);
+
+	return alloc;
 }
 
-static phys_addr_t __init memblock_find_region(phys_addr_t start, phys_addr_t end,
-					  phys_addr_t size, phys_addr_t align)
+phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
 {
-	phys_addr_t base, res_base;
-	long j;
+	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
+}
 
-	base = memblock_align_down((end - size), align);
-	while (start <= base) {
-		j = memblock_overlaps_region(&memblock.reserved, base, size);
-		if (j < 0)
-			return base;
-		res_base = memblock.reserved.regions[j].base;
-		if (res_base < size)
-			break;
-		base = memblock_align_down(res_base - size, align);
-	}
-	return MEMBLOCK_ERROR;
-}
+/*
+ * Additional node-local allocators. Search for node memory is bottom up
+ * and walks memblock regions within that node bottom-up as well, but allocation
+ * within an memblock region is top-down.
+ */
 
 phys_addr_t __weak __init memblock_nid_range(phys_addr_t start, phys_addr_t end, int *nid)
 {
@@ -364,72 +395,6 @@ phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int n
 	return memblock_alloc(size, align);
 }
 
-phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
-{
-	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
-}
-
-static phys_addr_t __init memblock_find_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
-{
-	long i;
-	phys_addr_t base = 0;
-	phys_addr_t res_base;
-
-	BUG_ON(0 == size);
-
-	/* Pump up max_addr */
-	if (max_addr == MEMBLOCK_ALLOC_ACCESSIBLE)
-		max_addr = memblock.current_limit;
-
-	/* We do a top-down search, this tends to limit memory
-	 * fragmentation by keeping early boot allocs near the
-	 * top of memory
-	 */
-	for (i = memblock.memory.cnt - 1; i >= 0; i--) {
-		phys_addr_t memblockbase = memblock.memory.regions[i].base;
-		phys_addr_t memblocksize = memblock.memory.regions[i].size;
-
-		if (memblocksize < size)
-			continue;
-		base = min(memblockbase + memblocksize, max_addr);
-		res_base = memblock_find_region(memblockbase, base, size, align);
-		if (res_base != MEMBLOCK_ERROR)
-			return res_base;
-	}
-	return MEMBLOCK_ERROR;
-}
-
-phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
-{
-	phys_addr_t found;
-
-	/* We align the size to limit fragmentation.
Without this, a lot of - * small allocs quickly eat up the whole reserve array on sparc - */ - size = memblock_align_up(size, align); - - found = memblock_find_base(size, align, max_addr); - if (found != MEMBLOCK_ERROR && - memblock_add_region(&memblock.reserved, found, size) >= 0) - return found; - - return 0; -} - -phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr) -{ - phys_addr_t alloc; - - alloc = __memblock_alloc_base(size, align, max_addr); - - if (alloc == 0) - panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n", - (unsigned long long) size, (unsigned long long) max_addr); - - return alloc; -} - - /* You must call memblock_analyze() before this. */ phys_addr_t __init memblock_phys_mem_size(void) { @@ -534,6 +499,50 @@ void __init memblock_set_current_limit(phys_addr_t limit) memblock.current_limit = limit; } +static void memblock_dump(struct memblock_type *region, char *name) +{ + unsigned long long base, size; + int i; + + pr_info(" %s.cnt = 0x%lx\n", name, region->cnt); + + for (i = 0; i < region->cnt; i++) { + base = region->regions[i].base; + size = region->regions[i].size; + + pr_info(" %s[0x%x]\t0x%016llx - 0x%016llx, 0x%llx bytes\n", + name, i, base, base + size - 1, size); + } +} + +void memblock_dump_all(void) +{ + if (!memblock_debug) + return; + + pr_info("MEMBLOCK configuration:\n"); + pr_info(" memory size = 0x%llx\n", (unsigned long long)memblock.memory_size); + + memblock_dump(&memblock.memory, "memory"); + memblock_dump(&memblock.reserved, "reserved"); +} + +void __init memblock_analyze(void) +{ + int i; + + /* Check marker in the unused last array entry */ + WARN_ON(memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS].base + != (phys_addr_t)RED_INACTIVE); + WARN_ON(memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS].base + != (phys_addr_t)RED_INACTIVE); + + memblock.memory_size = 0; + + for (i = 0; i < memblock.memory.cnt; i++) + memblock.memory_size += memblock.memory.regions[i].size; +} + void __init memblock_init(void) { /* Hookup the initial arrays */ @@ -561,3 +570,11 @@ void __init memblock_init(void) memblock.current_limit = MEMBLOCK_ALLOC_ANYWHERE; } +static int __init early_memblock(char *p) +{ + if (p && strstr(p, "debug")) + memblock_debug = 1; + return 0; +} +early_param("memblock", early_memblock); + -- cgit v1.2.3 From 142b45a72e221537c1bb1995497fef7cdc439e26 Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Tue, 6 Jul 2010 15:39:13 -0700 Subject: memblock: Add array resizing support When one of the arrays gets full, we resize it. After much thinking and a few iterations of that code, I went back to on-demand resizing using the (new) internal memblock_find_base() function, which is pretty much what Yinghai initially proposed, though there are some differences in the details. To work, this relies on the default alloc limit being set sensibly by the architecture.
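The delicate part of the patch below is ordering: the new array may have to be carved out of memblock itself while the reserved array is the very one that is full. A condensed sketch of that ordering (simplified from the patch; the slab path and error handling are elided):

	/* Switch to the new, doubled array BEFORE recording its backing
	 * store, because memblock_add_region() on the reserved type may
	 * touch exactly the array we are in the middle of growing. */
	memcpy(new_array, type->regions, old_size);
	memset(new_array + type->max, 0, old_size);
	type->regions = new_array;
	type->max <<= 1;
	/* only now is it safe to reserve the new buffer */
	BUG_ON(memblock_add_region(&memblock.reserved, addr, new_size) < 0);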
Signed-off-by: Benjamin Herrenschmidt --- mm/memblock.c | 104 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 102 insertions(+), 2 deletions(-) (limited to 'mm') diff --git a/mm/memblock.c b/mm/memblock.c index e5f3f9bdc311..0787790b1ce0 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -11,6 +11,7 @@ */ #include +#include #include #include #include @@ -18,12 +19,23 @@ struct memblock memblock; -static int memblock_debug; +static int memblock_debug, memblock_can_resize; static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS + 1]; static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS + 1]; #define MEMBLOCK_ERROR (~(phys_addr_t)0) +/* inline so we don't get a warning when pr_debug is compiled out */ +static inline const char *memblock_type_name(struct memblock_type *type) +{ + if (type == &memblock.memory) + return "memory"; + else if (type == &memblock.reserved) + return "reserved"; + else + return "unknown"; +} + /* * Address comparison utilities */ @@ -156,6 +168,79 @@ static void memblock_coalesce_regions(struct memblock_type *type, memblock_remove_region(type, r2); } +/* Defined below but needed now */ +static long memblock_add_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size); + +static int memblock_double_array(struct memblock_type *type) +{ + struct memblock_region *new_array, *old_array; + phys_addr_t old_size, new_size, addr; + int use_slab = slab_is_available(); + + /* We don't allow resizing until we know about the reserved regions + * of memory that aren't suitable for allocation + */ + if (!memblock_can_resize) + return -1; + + pr_debug("memblock: %s array full, doubling...", memblock_type_name(type)); + + /* Calculate new doubled size */ + old_size = type->max * sizeof(struct memblock_region); + new_size = old_size << 1; + + /* Try to find some space for it. + * + * WARNING: We assume that either slab_is_available() and we use it or + * we use MEMBLOCK for allocations. That means that this is unsafe to use + * when bootmem is currently active (unless bootmem itself is implemented + * on top of MEMBLOCK which isn't the case yet) + * + * This should however not be an issue for now, as we currently only + * call into MEMBLOCK while it's still active, or much later when slab is + * active for memory hotplug operations + */ + if (use_slab) { + new_array = kmalloc(new_size, GFP_KERNEL); + addr = new_array == NULL ? MEMBLOCK_ERROR : __pa(new_array); + } else + addr = memblock_find_base(new_size, sizeof(phys_addr_t), MEMBLOCK_ALLOC_ACCESSIBLE); + if (addr == MEMBLOCK_ERROR) { + pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n", + memblock_type_name(type), type->max, type->max * 2); + return -1; + } + new_array = __va(addr); + + /* Found space, we now need to move the array over before + * we add the reserved region since it may be our reserved + * array itself that is full. + */ + memcpy(new_array, type->regions, old_size); + memset(new_array + type->max, 0, old_size); + old_array = type->regions; + type->regions = new_array; + type->max <<= 1; + + /* If we use SLAB that's it, we are done */ + if (use_slab) + return 0; + + /* Add the new reserved region now. Should not fail ! */ + BUG_ON(memblock_add_region(&memblock.reserved, addr, new_size) < 0); + + /* If the array wasn't our static init one, then free it. We only do + * that before SLAB is available as later on, we don't know whether + * to use kfree or free_bootmem_pages(). 
Shouldn't be a big deal + * anyways + */ + if (old_array != memblock_memory_init_regions && + old_array != memblock_reserved_init_regions) + memblock_free(__pa(old_array), old_size); + + return 0; +} + static long memblock_add_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size) { unsigned long coalesced = 0; @@ -196,7 +281,11 @@ static long memblock_add_region(struct memblock_type *type, phys_addr_t base, ph if (coalesced) return coalesced; - if (type->cnt >= type->max) + + /* If we are out of space, we fail. It's too late to resize the array + * but then this shouldn't have happened in the first place. + */ + if (WARN_ON(type->cnt >= type->max)) return -1; /* Couldn't coalesce the MEMBLOCK, so add it to the sorted table. */ @@ -217,6 +306,14 @@ static long memblock_add_region(struct memblock_type *type, phys_addr_t base, ph } type->cnt++; + /* The array is full ? Try to resize it. If that fails, we undo + * our allocation and return an error + */ + if (type->cnt == type->max && memblock_double_array(type)) { + type->cnt--; + return -1; + } + return 0; } @@ -541,6 +638,9 @@ void __init memblock_analyze(void) for (i = 0; i < memblock.memory.cnt; i++) memblock.memory_size += memblock.memory.regions[i].size; + + /* We allow resizing from there */ + memblock_can_resize = 1; } void __init memblock_init(void) -- cgit v1.2.3 From d2cd563ba82c424083b78e0ce97d68bfb04d1242 Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Tue, 6 Jul 2010 15:39:14 -0700 Subject: memblock: Add arch function to control coalescing of memblock memory regions Some archs such as ARM want to avoid coalescing across things such as the lowmem/highmem boundary or similar. This provides the option to control it via an arch callback, for which a weak default is provided that always allows coalescing.
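As a concrete illustration of the callback this patch introduces, a hypothetical ARM-like port could refuse merges that straddle its lowmem/highmem boundary (arch_lowmem_limit is an invented example symbol; the weak default in the patch below simply returns 1):

	extern phys_addr_t arch_lowmem_limit;	/* hypothetical arch symbol */

	int memblock_memory_can_coalesce(phys_addr_t addr1, phys_addr_t size1,
					 phys_addr_t addr2, phys_addr_t size2)
	{
		phys_addr_t lo = min(addr1, addr2);
		phys_addr_t hi = max(addr1 + size1, addr2 + size2);

		/* refuse to merge regions straddling the boundary */
		return !(lo < arch_lowmem_limit && hi > arch_lowmem_limit);
	}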
Signed-off-by: Benjamin Herrenschmidt --- mm/memblock.c | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/memblock.c b/mm/memblock.c index 0787790b1ce0..8715f09434df 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -241,6 +241,12 @@ static int memblock_double_array(struct memblock_type *type) return 0; } +extern int __weak memblock_memory_can_coalesce(phys_addr_t addr1, phys_addr_t size1, + phys_addr_t addr2, phys_addr_t size2) +{ + return 1; +} + static long memblock_add_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size) { unsigned long coalesced = 0; @@ -262,6 +268,10 @@ static long memblock_add_region(struct memblock_type *type, phys_addr_t base, ph return 0; adjacent = memblock_addrs_adjacent(base, size, rgnbase, rgnsize); + /* Check if arch allows coalescing */ + if (adjacent != 0 && type == &memblock.memory && + !memblock_memory_can_coalesce(base, size, rgnbase, rgnsize)) + break; if (adjacent > 0) { type->regions[i].base -= size; type->regions[i].size += size; @@ -274,7 +284,14 @@ static long memblock_add_region(struct memblock_type *type, phys_addr_t base, ph } } - if ((i < type->cnt - 1) && memblock_regions_adjacent(type, i, i+1)) { + /* If we plugged a hole, we may want to also coalesce with the + * next region + */ + if ((i < type->cnt - 1) && memblock_regions_adjacent(type, i, i+1) && + ((type != &memblock.memory || memblock_memory_can_coalesce(type->regions[i].base, + type->regions[i].size, + type->regions[i+1].base, + type->regions[i+1].size)))) { memblock_coalesce_regions(type, i, i+1); coalesced++; } -- cgit v1.2.3 From fef501d49d31f997a3381b6c1efd5bca382b6b6f Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Mon, 12 Jul 2010 15:00:34 +1000 Subject: memblock: Add "start" argument to memblock_find_base() To constrain the search of a region between two boundaries; this will be used by the new NUMA-aware allocator, among others.
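A usage sketch of the widened interface (the values are arbitrary illustrations; note that memblock_find_base() is static, so outside callers go through the memblock_find_in_range() wrapper added later in this series):

	/* constrain the search to the first 4 GiB */
	phys_addr_t base = memblock_find_base(0x100000, 0x100000,
					      0, (phys_addr_t)4ULL << 30);
	if (base == MEMBLOCK_ERROR)
		pr_warn("no free 1MB-aligned megabyte below 4GiB\n");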
Signed-off-by: Benjamin Herrenschmidt --- mm/memblock.c | 27 ++++++++++++++++----------- 1 file changed, 16 insertions(+), 11 deletions(-) (limited to 'mm') diff --git a/mm/memblock.c b/mm/memblock.c index 8715f09434df..468ff43a72b4 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -117,19 +117,18 @@ static phys_addr_t __init memblock_find_region(phys_addr_t start, phys_addr_t en return MEMBLOCK_ERROR; } -static phys_addr_t __init memblock_find_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr) +static phys_addr_t __init memblock_find_base(phys_addr_t size, phys_addr_t align, + phys_addr_t start, phys_addr_t end) { long i; - phys_addr_t base = 0; - phys_addr_t res_base; BUG_ON(0 == size); size = memblock_align_up(size, align); /* Pump up max_addr */ - if (max_addr == MEMBLOCK_ALLOC_ACCESSIBLE) - max_addr = memblock.current_limit; + if (end == MEMBLOCK_ALLOC_ACCESSIBLE) + end = memblock.current_limit; /* We do a top-down search, this tends to limit memory * fragmentation by keeping early boot allocs near the @@ -138,13 +137,19 @@ static phys_addr_t __init memblock_find_base(phys_addr_t size, phys_addr_t align for (i = memblock.memory.cnt - 1; i >= 0; i--) { phys_addr_t memblockbase = memblock.memory.regions[i].base; phys_addr_t memblocksize = memblock.memory.regions[i].size; + phys_addr_t bottom, top, found; if (memblocksize < size) continue; - base = min(memblockbase + memblocksize, max_addr); - res_base = memblock_find_region(memblockbase, base, size, align); - if (res_base != MEMBLOCK_ERROR) - return res_base; + if ((memblockbase + memblocksize) <= start) + break; + bottom = max(memblockbase, start); + top = min(memblockbase + memblocksize, end); + if (bottom >= top) + continue; + found = memblock_find_region(bottom, top, size, align); + if (found != MEMBLOCK_ERROR) + return found; } return MEMBLOCK_ERROR; } @@ -204,7 +209,7 @@ static int memblock_double_array(struct memblock_type *type) new_array = kmalloc(new_size, GFP_KERNEL); addr = new_array == NULL ? MEMBLOCK_ERROR : __pa(new_array); } else - addr = memblock_find_base(new_size, sizeof(phys_addr_t), MEMBLOCK_ALLOC_ACCESSIBLE); + addr = memblock_find_base(new_size, sizeof(phys_addr_t), 0, MEMBLOCK_ALLOC_ACCESSIBLE); if (addr == MEMBLOCK_ERROR) { pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n", memblock_type_name(type), type->max, type->max * 2); @@ -416,7 +421,7 @@ phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, ph */ size = memblock_align_up(size, align); - found = memblock_find_base(size, align, max_addr); + found = memblock_find_base(size, align, 0, max_addr); if (found != MEMBLOCK_ERROR && memblock_add_region(&memblock.reserved, found, size) >= 0) return found; -- cgit v1.2.3 From c196f76fd5ece716ee3b7fa5dda3576961c0cecc Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Tue, 6 Jul 2010 15:39:16 -0700 Subject: memblock: NUMA allocate can now use early_pfn_map We now provide a default (weak) implementation of memblock_nid_range() which uses the early_pfn_map[] if CONFIG_ARCH_POPULATES_NODE_MAP is set. Sparc still needs to use its own method due to the way the pages can be scattered between nodes. This implementation is inefficient due to our main algorithm and callback construct wanting to work on an ascending-address basis while early_pfn_map[] would rather work with nids (it's unsorted at that stage). But it should work and we can look into improving it subsequently, possibly using arch compile options to choose a different algorithm altogether.
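For context, this is how the node-local allocator consumes memblock_nid_range(): it splits [start, end) into nid-homogeneous chunks, one call per chunk (region_start/region_end are assumed bounds for the sketch):

	phys_addr_t start = region_start, end = region_end;
	int nid;

	while (start < end) {
		/* [start, this_end) lies entirely on node `nid` */
		phys_addr_t this_end = memblock_nid_range(start, end, &nid);

		/* ... try a top-down allocation inside this chunk if the
		 * nid matches the node the caller asked for ... */
		start = this_end;
	}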
Signed-off-by: Benjamin Herrenschmidt --- mm/memblock.c | 28 +++++++++++++++++++++++++++- 1 file changed, 27 insertions(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/memblock.c b/mm/memblock.c index 468ff43a72b4..af7e4d9cf400 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -15,6 +15,7 @@ #include #include #include +#include #include struct memblock memblock; @@ -451,11 +452,36 @@ phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align) /* * Additional node-local allocators. Search for node memory is bottom up * and walks memblock regions within that node bottom-up as well, but allocation - * within an memblock region is top-down. + * within an memblock region is top-down. XXX I plan to fix that at some stage + * + * WARNING: Only available after early_node_map[] has been populated, + * on some architectures, that is after all the calls to add_active_range() + * have been done to populate it. */ phys_addr_t __weak __init memblock_nid_range(phys_addr_t start, phys_addr_t end, int *nid) { +#ifdef CONFIG_ARCH_POPULATES_NODE_MAP + /* + * This code originates from sparc which really wants use to walk by addresses + * and returns the nid. This is not very convenient for early_pfn_map[] users + * as the map isn't sorted yet, and it really wants to be walked by nid. + * + * For now, I implement the inefficient method below which walks the early + * map multiple times. Eventually we may want to use an ARCH config option + * to implement a completely different method for both case. + */ + unsigned long start_pfn, end_pfn; + int i; + + for (i = 0; i < MAX_NUMNODES; i++) { + get_pfn_range_for_nid(i, &start_pfn, &end_pfn); + if (start < PFN_PHYS(start_pfn) || start >= PFN_PHYS(end_pfn)) + continue; + *nid = i; + return min(end, PFN_PHYS(end_pfn)); + } +#endif *nid = 0; return end; -- cgit v1.2.3 From 9d1e24928e6a0728d1c7c76818ccbd11b93e7ac9 Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Tue, 6 Jul 2010 15:39:17 -0700 Subject: memblock: Separate memblock_alloc_nid() and memblock_alloc_try_nid() The former is now strict, it will fail if it cannot honor the allocation within the node, while the latter implements the previous semantic which falls back to allocating anywhere. Signed-off-by: Benjamin Herrenschmidt --- mm/memblock.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) (limited to 'mm') diff --git a/mm/memblock.c b/mm/memblock.c index af7e4d9cf400..1802d97c7284 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -537,9 +537,23 @@ phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int n return ret; } + return 0; +} + +phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid) +{ + phys_addr_t res = memblock_alloc_nid(size, align, nid); + + if (res) + return res; return memblock_alloc(size, align); } + +/* + * Remaining API functions + */ + /* You must call memblock_analyze() before this. */ phys_addr_t __init memblock_phys_mem_size(void) { -- cgit v1.2.3 From 918fe8d60331f679519ab8239a7232272126da9e Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Tue, 6 Jul 2010 15:39:18 -0700 Subject: memblock: Make memblock_alloc_try_nid() fallback to MEMBLOCK_ALLOC_ANYWHERE memblock_alloc_nid() used to fall back to allocating anywhere by using memblock_alloc() as a fallback. However, some of my previous patches limit memblock_alloc() to the region covered by MEMBLOCK_ALLOC_ACCESSIBLE which is not quite what we want for memblock_alloc_try_nid(). So we fix it by explicitly using MEMBLOCK_ALLOC_ANYWHERE.
Note that so far only sparc uses memblock_alloc_nid() and it hasn't been updated to clamp the accessible zone yet. Thus the temporary "breakage" should have no effect. Signed-off-by: Benjamin Herrenschmidt --- mm/memblock.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/memblock.c b/mm/memblock.c index 1802d97c7284..9de5fcdf8e28 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -546,7 +546,7 @@ phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, i if (res) return res; - return memblock_alloc(size, align); + return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ANYWHERE); } -- cgit v1.2.3 From 6d03b885f0926ab5b66e21307d505afcafa6dced Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Tue, 6 Jul 2010 15:39:19 -0700 Subject: memblock: Add debugfs files to dump the arrays content Signed-off-by: Benjamin Herrenschmidt --- mm/memblock.c | 51 +++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 51 insertions(+) (limited to 'mm') diff --git a/mm/memblock.c b/mm/memblock.c index 9de5fcdf8e28..cc15be29fd0a 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -16,6 +16,8 @@ #include #include #include +#include +#include #include struct memblock memblock; @@ -740,3 +742,52 @@ static int __init early_memblock(char *p) } early_param("memblock", early_memblock); +#ifdef CONFIG_DEBUG_FS + +static int memblock_debug_show(struct seq_file *m, void *private) +{ + struct memblock_type *type = m->private; + struct memblock_region *reg; + int i; + + for (i = 0; i < type->cnt; i++) { + reg = &type->regions[i]; + seq_printf(m, "%4d: ", i); + if (sizeof(phys_addr_t) == 4) + seq_printf(m, "0x%08lx..0x%08lx\n", + (unsigned long)reg->base, + (unsigned long)(reg->base + reg->size - 1)); + else + seq_printf(m, "0x%016llx..0x%016llx\n", + (unsigned long long)reg->base, + (unsigned long long)(reg->base + reg->size - 1)); + + } + return 0; +} + +static int memblock_debug_open(struct inode *inode, struct file *file) +{ + return single_open(file, memblock_debug_show, inode->i_private); +} + +static const struct file_operations memblock_debug_fops = { + .open = memblock_debug_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int __init memblock_init_debugfs(void) +{ + struct dentry *root = debugfs_create_dir("memblock", NULL); + if (!root) + return -ENXIO; + debugfs_create_file("memory", S_IRUGO, root, &memblock.memory, &memblock_debug_fops); + debugfs_create_file("reserved", S_IRUGO, root, &memblock.reserved, &memblock_debug_fops); + + return 0; +} +__initcall(memblock_init_debugfs); + +#endif /* CONFIG_DEBUG_FS */ -- cgit v1.2.3 From 5e63cf43af844ed30acc278b38b8c9bc51eba493 Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Wed, 28 Jul 2010 15:07:21 +1000 Subject: memblock: Expose some memblock bits for use by x86 This exposes memblock_debug and the associated memblock_dbg() macro, along with memblock_can_resize, so that x86 can use these when ported to use memblock. Signed-off-by: Yinghai Lu Signed-off-by: Benjamin Herrenschmidt --- mm/memblock.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/memblock.c b/mm/memblock.c index cc15be29fd0a..5499ab162b9d 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -22,7 +22,8 @@ struct memblock memblock; -static int memblock_debug, memblock_can_resize; +int memblock_debug; +int memblock_can_resize; static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS + 1]; static struct memblock_region
memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS + 1]; -- cgit v1.2.3 From ea9e4376bb545e400a325b3d76fecd02815303c0 Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Wed, 28 Jul 2010 15:13:22 +1000 Subject: memblock: Improve debug output when resizing the reserve array Print out the location info in addition to which array is being resized. Also use memblock_dbg() to put that under control of the memblock_debug flag. Signed-off-by: Yinghai Lu Signed-off-by: Benjamin Herrenschmidt --- mm/memblock.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'mm') diff --git a/mm/memblock.c b/mm/memblock.c index 5499ab162b9d..c3703abf057e 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -192,8 +192,6 @@ static int memblock_double_array(struct memblock_type *type) if (!memblock_can_resize) return -1; - pr_debug("memblock: %s array full, doubling...", memblock_type_name(type)); - /* Calculate new doubled size */ old_size = type->max * sizeof(struct memblock_region); new_size = old_size << 1; @@ -221,6 +219,9 @@ static int memblock_double_array(struct memblock_type *type) } new_array = __va(addr); + memblock_dbg("memblock: %s array is doubled to %ld at [%#010llx-%#010llx]", + memblock_type_name(type), type->max * 2, (u64)addr, (u64)addr + new_size - 1); + /* Found space, we now need to move the array over before * we add the reserved region since it may be our reserved * array itself that is full. @@ -672,7 +673,7 @@ static void memblock_dump(struct memblock_type *region, char *name) base = region->regions[i].base; size = region->regions[i].size; - pr_info(" %s[0x%x]\t0x%016llx - 0x%016llx, 0x%llx bytes\n", + pr_info(" %s[%#x]\t[%#016llx-%#016llx], %#llx bytes\n", name, i, base, base + size - 1, size); } } -- cgit v1.2.3 From 37d8d4bf489e39eedc9537f8616fe87879b13cb0 Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Wed, 28 Jul 2010 15:20:58 +1000 Subject: memblock: Export MEMBLOCK_ERROR, to be used by the x86 memblock_x86_find_in_range_node and nobootmem replacement Signed-off-by: Yinghai Lu Signed-off-by: Benjamin Herrenschmidt --- mm/memblock.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'mm') diff --git a/mm/memblock.c b/mm/memblock.c index c3703abf057e..85cfa1d3ab28 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -27,8 +27,6 @@ int memblock_can_resize; static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS + 1]; static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS + 1]; -#define MEMBLOCK_ERROR (~(phys_addr_t)0) - -- cgit v1.2.3 From 25818f0f288cd5333ba5a90ad6dde3def4c4ff58 Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Wed, 28 Jul 2010 15:25:10 +1000 Subject: memblock: Make MEMBLOCK_ERROR be 0 And ensure we don't hand out 0 as a valid allocation. We put the low limit at PAGE_SIZE arbitrarily.
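With MEMBLOCK_ERROR now 0, the find layer and the allocation layer finally agree that 0 means failure, so both can be tested the same way; the PAGE_SIZE floor added in the hunk below guarantees that no valid range can start at physical address 0. A caller-side sketch (sz and align are placeholders):

	if (!__memblock_alloc_base(sz, align, MEMBLOCK_ALLOC_ACCESSIBLE))
		pr_warn("early allocation failed\n");	/* 0 == MEMBLOCK_ERROR */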
Signed-off-by: Benjamin Herrenschmidt --- mm/memblock.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'mm') diff --git a/mm/memblock.c b/mm/memblock.c index 85cfa1d3ab28..cb520df2a414 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -105,6 +105,12 @@ static phys_addr_t __init memblock_find_region(phys_addr_t start, phys_addr_t en phys_addr_t base, res_base; long j; + /* Prevent allocations returning 0 as it's also used to + * indicate an allocation failure + */ + if (start == 0) + start = PAGE_SIZE; + base = memblock_align_down((end - size), align); while (start <= base) { j = memblock_overlaps_region(&memblock.reserved, base, size); -- cgit v1.2.3 From 10d0643988e976360eb3497dcafb55b393b8e480 Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Wed, 28 Jul 2010 15:43:02 +1000 Subject: memblock: Option for the architecture to put memblock into the .init section Arch code can define ARCH_DISCARD_MEMBLOCK in asm/memblock.h, which in turn causes memblock code and data to go respectively into the .init and .initdata sections. This will be used by the x86 architecture. If ARCH_DISCARD_MEMBLOCK is defined, the debugfs files to inspect the memblock arrays after boot are not created. Signed-off-by: Yinghai Lu Signed-off-by: Benjamin Herrenschmidt --- mm/memblock.c | 48 ++++++++++++++++++++++++------------------------ 1 file changed, 24 insertions(+), 24 deletions(-) (limited to 'mm') diff --git a/mm/memblock.c b/mm/memblock.c index cb520df2a414..a17faea37d47 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -20,12 +20,12 @@ #include #include -struct memblock memblock; +struct memblock memblock __initdata_memblock; -int memblock_debug; -int memblock_can_resize; -static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS + 1]; -static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS + 1]; +int memblock_debug __initdata_memblock; +int memblock_can_resize __initdata_memblock; +static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS + 1] __initdata_memblock; +static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS + 1] __initdata_memblock; /* inline so we don't get a warning when pr_debug is compiled out */ static inline const char *memblock_type_name(struct memblock_type *type) @@ -42,23 +42,23 @@ static inline const char *memblock_type_name(struct memblock_type *type) * Address comparison utilities */ -static phys_addr_t memblock_align_down(phys_addr_t addr, phys_addr_t size) +static phys_addr_t __init_memblock memblock_align_down(phys_addr_t addr, phys_addr_t size) { return addr & ~(size - 1); } -static phys_addr_t memblock_align_up(phys_addr_t addr, phys_addr_t size) +static phys_addr_t __init_memblock memblock_align_up(phys_addr_t addr, phys_addr_t size) { return (addr + (size - 1)) & ~(size - 1); } -static unsigned long memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1, +static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1, phys_addr_t base2, phys_addr_t size2) { return ((base1 < (base2 + size2)) && (base2 < (base1 + size1))); } -static long memblock_addrs_adjacent(phys_addr_t base1, phys_addr_t size1, +static long __init_memblock memblock_addrs_adjacent(phys_addr_t base1, phys_addr_t size1, phys_addr_t base2, phys_addr_t size2) { if (base2 == base1 + size1) @@ -69,7 +69,7 @@ static long memblock_addrs_adjacent(phys_addr_t base1, phys_addr_t size1, return 0; } -static long memblock_regions_adjacent(struct memblock_type *type, +static long
__init_memblock memblock_regions_adjacent(struct memblock_type *type, unsigned long r1, unsigned long r2) { phys_addr_t base1 = type->regions[r1].base; @@ -80,7 +80,7 @@ static long memblock_regions_adjacent(struct memblock_type *type, return memblock_addrs_adjacent(base1, size1, base2, size2); } -long memblock_overlaps_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size) +long __init_memblock memblock_overlaps_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size) { unsigned long i; @@ -162,7 +162,7 @@ static phys_addr_t __init memblock_find_base(phys_addr_t size, phys_addr_t align return MEMBLOCK_ERROR; } -static void memblock_remove_region(struct memblock_type *type, unsigned long r) +static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r) { unsigned long i; @@ -174,7 +174,7 @@ static void memblock_remove_region(struct memblock_type *type, unsigned long r) } /* Assumption: base addr of region 1 < base addr of region 2 */ -static void memblock_coalesce_regions(struct memblock_type *type, +static void __init_memblock memblock_coalesce_regions(struct memblock_type *type, unsigned long r1, unsigned long r2) { type->regions[r1].size += type->regions[r2].size; @@ -184,7 +184,7 @@ static void memblock_coalesce_regions(struct memblock_type *type, /* Defined below but needed now */ static long memblock_add_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size); -static int memblock_double_array(struct memblock_type *type) +static int __init_memblock memblock_double_array(struct memblock_type *type) { struct memblock_region *new_array, *old_array; phys_addr_t old_size, new_size, addr; @@ -255,13 +255,13 @@ static int memblock_double_array(struct memblock_type *type) return 0; } -extern int __weak memblock_memory_can_coalesce(phys_addr_t addr1, phys_addr_t size1, +extern int __init_memblock __weak memblock_memory_can_coalesce(phys_addr_t addr1, phys_addr_t size1, phys_addr_t addr2, phys_addr_t size2) { return 1; } -static long memblock_add_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size) +static long __init_memblock memblock_add_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size) { unsigned long coalesced = 0; long adjacent, i; @@ -348,13 +348,13 @@ static long memblock_add_region(struct memblock_type *type, phys_addr_t base, ph return 0; } -long memblock_add(phys_addr_t base, phys_addr_t size) +long __init_memblock memblock_add(phys_addr_t base, phys_addr_t size) { return memblock_add_region(&memblock.memory, base, size); } -static long __memblock_remove(struct memblock_type *type, phys_addr_t base, phys_addr_t size) +static long __init_memblock __memblock_remove(struct memblock_type *type, phys_addr_t base, phys_addr_t size) { phys_addr_t rgnbegin, rgnend; phys_addr_t end = base + size; @@ -402,7 +402,7 @@ static long __memblock_remove(struct memblock_type *type, phys_addr_t base, phys return memblock_add_region(type, end, rgnend - end); } -long memblock_remove(phys_addr_t base, phys_addr_t size) +long __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size) { return __memblock_remove(&memblock.memory, base, size); } @@ -568,7 +568,7 @@ phys_addr_t __init memblock_phys_mem_size(void) return memblock.memory_size; } -phys_addr_t memblock_end_of_DRAM(void) +phys_addr_t __init_memblock memblock_end_of_DRAM(void) { int idx = memblock.memory.cnt - 1; @@ -655,7 +655,7 @@ int memblock_is_region_memory(phys_addr_t base, phys_addr_t size) 
memblock.reserved.regions[idx].size) >= (base + size); } -int memblock_is_region_reserved(phys_addr_t base, phys_addr_t size) +int __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size) { return memblock_overlaps_region(&memblock.reserved, base, size) >= 0; } @@ -666,7 +666,7 @@ void __init memblock_set_current_limit(phys_addr_t limit) memblock.current_limit = limit; } -static void memblock_dump(struct memblock_type *region, char *name) +static void __init_memblock memblock_dump(struct memblock_type *region, char *name) { unsigned long long base, size; int i; @@ -682,7 +682,7 @@ static void memblock_dump(struct memblock_type *region, char *name) } } -void memblock_dump_all(void) +void __init_memblock memblock_dump_all(void) { if (!memblock_debug) return; @@ -748,7 +748,7 @@ static int __init early_memblock(char *p) } early_param("memblock", early_memblock); -#ifdef CONFIG_DEBUG_FS +#if defined(CONFIG_DEBUG_FS) && !defined(ARCH_DISCARD_MEMBLOCK) static int memblock_debug_show(struct seq_file *m, void *private) { -- cgit v1.2.3 From 5303b68f57c227c27193a14e57dd12be27cd670f Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Wed, 28 Jul 2010 15:38:40 +1000 Subject: memblock: Add memblock_find_in_range() This is a wrapper for memblock_find_base() using slightly different arguments (start,end instead of start,size for example) in order to make it easier to convert existing arch/x86 code. Signed-off-by: Yinghai Lu Signed-off-by: Benjamin Herrenschmidt --- mm/memblock.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'mm') diff --git a/mm/memblock.c b/mm/memblock.c index a17faea37d47..b7ab10a2ef46 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -162,6 +162,14 @@ static phys_addr_t __init memblock_find_base(phys_addr_t size, phys_addr_t align return MEMBLOCK_ERROR; } +/* + * Find a free area with specified alignment in a specific range. + */ +u64 __init_memblock memblock_find_in_range(u64 start, u64 end, u64 size, u64 align) +{ + return memblock_find_base(size, align, start, end); +} + static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r) { unsigned long i; -- cgit v1.2.3 From 7950c407c0288b223a200c1bba8198941599ca37 Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Wed, 25 Aug 2010 13:39:14 -0700 Subject: memblock: Add memblock_free/reserve_reserved_regions() So we can avoid exporting memblock_reserved_init_regions(). Suggested by Ben. -v2: use __init_memblock attribute Signed-off-by: Yinghai Lu Cc: Benjamin Herrenschmidt Signed-off-by: H.
Peter Anvin --- mm/memblock.c | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) (limited to 'mm') diff --git a/mm/memblock.c b/mm/memblock.c index b7ab10a2ef46..65e3ba8d09fb 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -170,6 +170,30 @@ u64 __init_memblock memblock_find_in_range(u64 start, u64 end, u64 size, u64 ali return memblock_find_base(size, align, start, end); } +/* + * Free memblock.reserved.regions + */ +int __init_memblock memblock_free_reserved_regions(void) +{ + if (memblock.reserved.regions == memblock_reserved_init_regions) + return 0; + + return memblock_free(__pa(memblock.reserved.regions), + sizeof(struct memblock_region) * memblock.reserved.max); +} + +/* + * Reserve memblock.reserved.regions + */ +int __init_memblock memblock_reserve_reserved_regions(void) +{ + if (memblock.reserved.regions == memblock_reserved_init_regions) + return 0; + + return memblock_reserve(__pa(memblock.reserved.regions), + sizeof(struct memblock_region) * memblock.reserved.max); +} + static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r) { unsigned long i; -- cgit v1.2.3 From f88eff74aa848e58b1ea49768c0bbb874b31357f Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Wed, 25 Aug 2010 13:39:15 -0700 Subject: bootmem, x86: Add weak version of reserve_bootmem_generic It will be used by the memblock_x86_to_bootmem conversion. It is a wrapper for reserve_bootmem, and x86 64-bit is using a special one. Also clean up that version for x86_64. We don't need to take care of the NUMA path for that; bootmem can handle it anyhow. Signed-off-by: Yinghai Lu Signed-off-by: H. Peter Anvin --- mm/bootmem.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'mm') diff --git a/mm/bootmem.c b/mm/bootmem.c index 142c84a54993..bde170dd2fde 100644 --- a/mm/bootmem.c +++ b/mm/bootmem.c @@ -526,6 +526,12 @@ int __init reserve_bootmem(unsigned long addr, unsigned long size, } #ifndef CONFIG_NO_BOOTMEM +int __weak __init reserve_bootmem_generic(unsigned long phys, unsigned long len, + int flags) +{ + return reserve_bootmem(phys, len, flags); +} + static unsigned long __init align_idx(struct bootmem_data *bdata, unsigned long idx, unsigned long step) { -- cgit v1.2.3 From edbe7d23b4482e7f33179290bcff3b1feae1c5f3 Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Wed, 25 Aug 2010 13:39:16 -0700 Subject: memblock: Add find_memory_core_early() Go over the node ranges in early_node_map[] and use __memblock_find_in_range to find a free range. Will be used by memblock_x86_find_in_range_node(), which in turn will be used to find the right buffer for NODE_DATA. Signed-off-by: Yinghai Lu Signed-off-by: H.
Peter Anvin --- mm/page_alloc.c | 36 ++++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) (limited to 'mm') diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 9bd339eb04c6..8c9b34674d83 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include @@ -3612,6 +3613,41 @@ void __init free_bootmem_with_active_regions(int nid, } } +#ifdef CONFIG_HAVE_MEMBLOCK +u64 __init find_memory_core_early(int nid, u64 size, u64 align, + u64 goal, u64 limit) +{ + int i; + + /* Need to go over early_node_map to find out good range for node */ + for_each_active_range_index_in_nid(i, nid) { + u64 addr; + u64 ei_start, ei_last; + u64 final_start, final_end; + + ei_last = early_node_map[i].end_pfn; + ei_last <<= PAGE_SHIFT; + ei_start = early_node_map[i].start_pfn; + ei_start <<= PAGE_SHIFT; + + final_start = max(ei_start, goal); + final_end = min(ei_last, limit); + + if (final_start >= final_end) + continue; + + addr = memblock_find_in_range(final_start, final_end, size, align); + + if (addr == MEMBLOCK_ERROR) + continue; + + return addr; + } + + return MEMBLOCK_ERROR; +} +#endif + int __init add_from_early_node_map(struct range *range, int az, int nr_range, int nid) { -- cgit v1.2.3 From 72d7c3b33c980843e756681fb4867dc1efd62a76 Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Wed, 25 Aug 2010 13:39:17 -0700 Subject: x86: Use memblock to replace early_res 1. Replace find_e820_area with memblock_find_in_range. 2. Replace reserve_early with memblock_x86_reserve_range. 3. Replace free_early with memblock_x86_free_range. 4. NO_BOOTMEM will switch to using memblock too. 5. Use the _e820/_early wrappers in this patch; a following patch will replace them all. 6. Because memblock_x86_free_range supports partial free, we can remove some special-case handling. 7. Make sure that memblock_find_in_range() is called after memblock_x86_fill(), so adjust some calls to happen later in setup.c::setup_arch() -- corruption_check and mptable_update. -v2: Move reserve_brk() early, before fill_memblock_area(), to avoid an overlap between brk and memblock_find_in_range() that could otherwise happen: we can have more than 128 RAM entries in the E820 tables, and memblock_x86_fill() could then use memblock_find_in_range() to find a new place for the memblock.memory.region array. We also don't need to use extend_brk() after fill_memblock_area(), so moving reserve_brk() earlier is safe. -v3: Move find_smp_config early, to make sure memblock_find_in_range does not find the wrong place if the BIOS doesn't put the mptable in the right location. -v4: Treat RESERVED_KERN as RAM in memblock.memory, as those ranges are already in memblock.reserved anyway; use __NOT_KEEP_MEMBLOCK to make sure memblock-related code can be freed later. -v5: The generic __memblock_find_in_range() goes from high to low, and for 32-bit the active_region does include high pages, so the limit needs to be replaced with memblock.default_alloc_limit, aka get_max_mapped(). -v6: Use current_limit instead. -v7: Check against MEMBLOCK_ERROR instead of -1ULL or -1L. -v8: Set memblock_can_resize early to handle EFI with more RAM entries. -v9: Update after the kmemleak changes in mainline. Suggested-by: David S. Miller Suggested-by: Benjamin Herrenschmidt Suggested-by: Thomas Gleixner Signed-off-by: Yinghai Lu Signed-off-by: H.
Peter Anvin --- mm/bootmem.c | 3 +++ mm/page_alloc.c | 50 +++++++++++++++----------------------------------- mm/sparse-vmemmap.c | 11 ----------- 3 files changed, 18 insertions(+), 46 deletions(-) (limited to 'mm') diff --git a/mm/bootmem.c b/mm/bootmem.c index bde170dd2fde..fda01a2c31af 100644 --- a/mm/bootmem.c +++ b/mm/bootmem.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include @@ -434,6 +435,7 @@ void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr, unsigned long size) { #ifdef CONFIG_NO_BOOTMEM + kmemleak_free_part(__va(physaddr), size); free_early(physaddr, physaddr + size); #else unsigned long start, end; @@ -459,6 +461,7 @@ void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr, void __init free_bootmem(unsigned long addr, unsigned long size) { #ifdef CONFIG_NO_BOOTMEM + kmemleak_free_part(__va(addr), size); free_early(addr, addr + size); #else unsigned long start, end; diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 8c9b34674d83..f2cd7450fa76 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -3667,46 +3667,26 @@ int __init add_from_early_node_map(struct range *range, int az, void * __init __alloc_memory_core_early(int nid, u64 size, u64 align, u64 goal, u64 limit) { - int i; void *ptr; + u64 addr; - if (limit > get_max_mapped()) - limit = get_max_mapped(); + if (limit > memblock.current_limit) + limit = memblock.current_limit; - /* need to go over early_node_map to find out good range for node */ - for_each_active_range_index_in_nid(i, nid) { - u64 addr; - u64 ei_start, ei_last; + addr = find_memory_core_early(nid, size, align, goal, limit); - ei_last = early_node_map[i].end_pfn; - ei_last <<= PAGE_SHIFT; - ei_start = early_node_map[i].start_pfn; - ei_start <<= PAGE_SHIFT; - addr = find_early_area(ei_start, ei_last, - goal, limit, size, align); - - if (addr == -1ULL) - continue; - -#if 0 - printk(KERN_DEBUG "alloc (nid=%d %llx - %llx) (%llx - %llx) %llx %llx => %llx\n", - nid, - ei_start, ei_last, goal, limit, size, - align, addr); -#endif - - ptr = phys_to_virt(addr); - memset(ptr, 0, size); - reserve_early_without_check(addr, addr + size, "BOOTMEM"); - /* - * The min_count is set to 0 so that bootmem allocated blocks - * are never reported as leaks. - */ - kmemleak_alloc(ptr, size, 0, 0); - return ptr; - } + if (addr == MEMBLOCK_ERROR) + return NULL; - return NULL; + ptr = phys_to_virt(addr); + memset(ptr, 0, size); + memblock_x86_reserve_range(addr, addr + size, "BOOTMEM"); + /* + * The min_count is set to 0 so that bootmem allocated blocks + * are never reported as leaks. 
+ */ + kmemleak_alloc(ptr, size, 0, 0); + return ptr; } #endif diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c index aa33fd67fa41..29d6cbffb283 100644 --- a/mm/sparse-vmemmap.c +++ b/mm/sparse-vmemmap.c @@ -220,18 +220,7 @@ void __init sparse_mem_maps_populate_node(struct page **map_map, if (vmemmap_buf_start) { /* need to free left buf */ -#ifdef CONFIG_NO_BOOTMEM - free_early(__pa(vmemmap_buf_start), __pa(vmemmap_buf_end)); - if (vmemmap_buf_start < vmemmap_buf) { - char name[15]; - - snprintf(name, sizeof(name), "MEMMAP %d", nodeid); - reserve_early_without_check(__pa(vmemmap_buf_start), - __pa(vmemmap_buf), name); - } -#else free_bootmem(__pa(vmemmap_buf), vmemmap_buf_end - vmemmap_buf); -#endif vmemmap_buf = NULL; vmemmap_buf_end = NULL; } -- cgit v1.2.3 From a9ce6bc15100023b411f8117e53a016d61889800 Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Wed, 25 Aug 2010 13:39:17 -0700 Subject: x86, memblock: Replace e820_/_early string with memblock_ 1. Include linux/memblock.h directly, so that e820.h references can be reduced later. 2. This patch was done mainly by sed scripts. -v2: use MEMBLOCK_ERROR instead of -1ULL or -1UL Signed-off-by: Yinghai Lu Signed-off-by: H. Peter Anvin --- mm/bootmem.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'mm') diff --git a/mm/bootmem.c b/mm/bootmem.c index fda01a2c31af..13b0caa9793c 100644 --- a/mm/bootmem.c +++ b/mm/bootmem.c @@ -436,7 +436,7 @@ void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr, { #ifdef CONFIG_NO_BOOTMEM kmemleak_free_part(__va(physaddr), size); - free_early(physaddr, physaddr + size); + memblock_x86_free_range(physaddr, physaddr + size); #else unsigned long start, end; @@ -462,7 +462,7 @@ void __init free_bootmem(unsigned long addr, unsigned long size) { #ifdef CONFIG_NO_BOOTMEM kmemleak_free_part(__va(addr), size); - free_early(addr, addr + size); + memblock_x86_free_range(addr, addr + size); #else unsigned long start, end; -- cgit v1.2.3 From 3661ca66a42e306aaf53246fb75aec1ea01be0f0 Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Wed, 15 Sep 2010 13:05:29 -0700 Subject: memblock: Fix section mismatch warnings Stephen found a bunch of section mismatch warnings with the new memblock changes. Use __init_memblock to replace __init in memblock.c and remove __init in memblock.h. We should not use __init in header files.
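The memblock.h side of these annotations is not shown in this log; the mechanics are roughly the following (a reconstructed sketch, not verbatim from the tree): the annotations expand to the .init attributes only when the architecture opts in to discarding memblock after boot.

	#ifdef ARCH_DISCARD_MEMBLOCK
	#define __init_memblock		__init
	#define __initdata_memblock	__initdata
	#else
	#define __init_memblock
	#define __initdata_memblock
	#endif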
Reported-by: Stephen Rothwell Tested-by: Stephen Rothwell Signed-off-by: Yinghai Lu Cc: Peter Zijlstra Cc: Benjamin Herrenschmidt LKML-Reference: <4C912709.2090201@kernel.org> Signed-off-by: Ingo Molnar --- mm/memblock.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) (limited to 'mm') diff --git a/mm/memblock.c b/mm/memblock.c index 65e3ba8d09fb..d5d63ac1fd83 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -125,8 +125,8 @@ static phys_addr_t __init memblock_find_region(phys_addr_t start, phys_addr_t en return MEMBLOCK_ERROR; } -static phys_addr_t __init memblock_find_base(phys_addr_t size, phys_addr_t align, - phys_addr_t start, phys_addr_t end) +static phys_addr_t __init_memblock memblock_find_base(phys_addr_t size, + phys_addr_t align, phys_addr_t start, phys_addr_t end) { long i; @@ -439,12 +439,12 @@ long __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size) return __memblock_remove(&memblock.memory, base, size); } -long __init memblock_free(phys_addr_t base, phys_addr_t size) +long __init_memblock memblock_free(phys_addr_t base, phys_addr_t size) { return __memblock_remove(&memblock.reserved, base, size); } -long __init memblock_reserve(phys_addr_t base, phys_addr_t size) +long __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size) { struct memblock_type *_rgn = &memblock.reserved; @@ -671,12 +671,12 @@ int __init memblock_is_reserved(phys_addr_t addr) return memblock_search(&memblock.reserved, addr) != -1; } -int memblock_is_memory(phys_addr_t addr) +int __init_memblock memblock_is_memory(phys_addr_t addr) { return memblock_search(&memblock.memory, addr) != -1; } -int memblock_is_region_memory(phys_addr_t base, phys_addr_t size) +int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size) { int idx = memblock_search(&memblock.reserved, base); @@ -693,7 +693,7 @@ int __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t si } -void __init memblock_set_current_limit(phys_addr_t limit) +void __init_memblock memblock_set_current_limit(phys_addr_t limit) { memblock.current_limit = limit; } -- cgit v1.2.3 From f1af98c7629a1b76fd7336decbc776acdeed2120 Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Mon, 4 Oct 2010 14:57:39 -0700 Subject: memblock: Fix wraparound in find_region() When trying to find huge range for crashkernel, get [ 0.000000] ------------[ cut here ]------------ [ 0.000000] WARNING: at arch/x86/mm/memblock.c:248 memblock_x86_reserve_range+0x40/0x7a() [ 0.000000] Hardware name: Sun Fire x4800 [ 0.000000] memblock_x86_reserve_range: wrong range [0xffffffff37000000, 0x137000000) [ 0.000000] Modules linked in: [ 0.000000] Pid: 0, comm: swapper Not tainted 2.6.36-rc5-tip-yh-01876-g1cac214-dirty #59 [ 0.000000] Call Trace: [ 0.000000] [] ? memblock_x86_reserve_range+0x40/0x7a [ 0.000000] [] warn_slowpath_common+0x85/0x9e [ 0.000000] [] warn_slowpath_fmt+0x6e/0x70 [ 0.000000] [] ? memblock_find_region+0x40/0x78 [ 0.000000] [] ? memblock_find_base+0x9a/0xb9 [ 0.000000] [] memblock_x86_reserve_range+0x40/0x7a [ 0.000000] [] setup_arch+0x99d/0xb2a [ 0.000000] [] ? trace_hardirqs_off+0xd/0xf [ 0.000000] [] ? 
_raw_spin_unlock_irqrestore+0x3d/0x4c [ 0.000000] [] start_kernel+0xde/0x3f1 [ 0.000000] [] x86_64_start_reservations+0xa0/0xa4 [ 0.000000] [] x86_64_start_kernel+0x106/0x10d [ 0.000000] ---[ end trace a7919e7f17c0a725 ]--- [ 0.000000] Reserving 8192MB of memory at 17592186041200MB for crashkernel (System RAM: 526336MB) This is caused by a wraparound in the test due to size > end; explicitly check for this condition and fail. Signed-off-by: Yinghai Lu LKML-Reference: <4CAA4DD3.1080401@kernel.org> Signed-off-by: H. Peter Anvin --- mm/memblock.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/memblock.c b/mm/memblock.c index d5d63ac1fd83..9ad39690a2bd 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -105,13 +105,18 @@ static phys_addr_t __init memblock_find_region(phys_addr_t start, phys_addr_t en phys_addr_t base, res_base; long j; + /* In case, huge size is requested */ + if (end < size) + return MEMBLOCK_ERROR; + + base = memblock_align_down((end - size), align); + /* Prevent allocations returning 0 as it's also used to * indicate an allocation failure */ if (start == 0) start = PAGE_SIZE; - base = memblock_align_down((end - size), align); while (start <= base) { j = memblock_overlaps_region(&memblock.reserved, base, size); if (j < 0) -- cgit v1.2.3 From 236260b90dd94516982ad67aa6f5449c4c37db7b Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Wed, 6 Oct 2010 15:52:29 -0700 Subject: memblock: Allow memblock_init to be called early The Xen setup code needs to call memblock_x86_reserve_range() very early, so allow it to initialize the memblock subsystem before doing so. The second memblock_init() is ignored. Signed-off-by: Jeremy Fitzhardinge Cc: Yinghai Lu Cc: Benjamin Herrenschmidt LKML-Reference: <4CACFDAD.3090900@goop.org> Signed-off-by: H. Peter Anvin --- mm/memblock.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'mm') diff --git a/mm/memblock.c b/mm/memblock.c index 9ad39690a2bd..ae8b06c828c7 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -752,6 +752,12 @@ void __init memblock_analyze(void) void __init memblock_init(void) { + static int init_done __initdata = 0; + + if (init_done) + return; + init_done = 1; + /* Hookup the initial arrays */ memblock.memory.regions = memblock_memory_init_regions; memblock.memory.max = INIT_MEMBLOCK_REGIONS; -- cgit v1.2.3 From cd79481d27b9f90aad80c9b972292c42c25bbf8e Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Mon, 11 Oct 2010 12:34:09 -0700 Subject: memblock: Annotate memblock functions with __init_memblock Stephen found WARNING: mm/built-in.o(.text+0x25ab8): Section mismatch in reference from the function memblock_find_base() to the function .init.text:memblock_find_region() The function memblock_find_base() references the function __init memblock_find_region(). This is often because memblock_find_base lacks a __init annotation or the annotation of memblock_find_region is wrong. So let memblock_find_region() use __init_memblock instead of __init directly. Also change one function that did not have any __init* annotation to __init_memblock. Reported-by: Stephen Rothwell Signed-off-by: Yinghai Lu LKML-Reference: <4CB366B1.40405@kernel.org> Signed-off-by: H. Peter Anvin --- mm/memblock.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'mm') diff --git a/mm/memblock.c b/mm/memblock.c index ae8b06c828c7..400dc62697d7 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -99,7 +99,7 @@ long __init_memblock memblock_overlaps_region(struct memblock_type *type, phys_a * are top-down.
*/ -static phys_addr_t __init memblock_find_region(phys_addr_t start, phys_addr_t end, +static phys_addr_t __init_memblock memblock_find_region(phys_addr_t start, phys_addr_t end, phys_addr_t size, phys_addr_t align) { phys_addr_t base, res_base; @@ -653,7 +653,7 @@ void __init memblock_enforce_memory_limit(phys_addr_t memory_limit) } } -static int memblock_search(struct memblock_type *type, phys_addr_t addr) +static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr) { unsigned int left = 0, right = type->cnt; -- cgit v1.2.3
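A closing note on the wraparound fixed a few patches back: phys_addr_t is unsigned, so when size > end the expression (end - size) wraps to a huge address instead of going negative, which is exactly what produced the bogus base in the quoted oops. A minimal illustration with values consistent with that report:

	phys_addr_t end  = 0x137000000ULL;	/* candidate top of the search */
	phys_addr_t size = 0x200000000ULL;	/* 8 GiB crashkernel request */
	phys_addr_t base = end - size;		/* wraps to 0xffffffff37000000 */

	/* hence the guard in memblock_find_region():
	 *	if (end < size)
	 *		return MEMBLOCK_ERROR;
	 */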