author		Tejun Heo <tj@kernel.org>	2011-07-12 10:46:34 +0200
committer	H. Peter Anvin <hpa@linux.intel.com>	2011-07-14 11:45:35 -0700
commit		e64980405cc6aa74ef178d8d9aa4018c867ceed1 (patch)
tree		a405d3de9c244ed2fb7899bdb66b1e1569aeae97 /mm
parent		34e1845548418e5cecee0568ba721e1f089c092c (diff)
memblock: Separate out memblock_find_in_range_node()
Node-affine memblock allocation logic is currently split across
memblock_alloc_nid() and memblock_alloc_nid_region(). Reorganize it so
that it resembles the non-NUMA allocation API: area finding is
collected into a new exported function, memblock_find_in_range_node(),
which is symmetrical to its non-NUMA counterpart - it takes @start/@end
and understands ANYWHERE and ACCESSIBLE. memblock_alloc_nid() now
simply calls memblock_find_in_range_node() and reserves the returned
area.

This makes memblock_alloc[_try]_nid() observe the ACCESSIBLE limit on
node-affine allocations too (again, this makes no difference for the
current sole user - sparc64).

Signed-off-by: Tejun Heo <tj@kernel.org>
Link: http://lkml.kernel.org/r/1310460395-30913-8-git-send-email-tj@kernel.org
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
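The patch splits node-affine allocation into a find step followed by a
reserve step. As a minimal sketch of the resulting calling convention
(alloc_node_local() is a hypothetical caller, not part of the patch; it
simply mirrors the reworked memblock_alloc_nid() body in the diff below
and assumes the memblock internals as they stand at this commit):

	/*
	 * Hypothetical caller illustrating the find-then-reserve split.
	 * Mirrors the new memblock_alloc_nid() in this patch.
	 */
	static phys_addr_t __init alloc_node_local(phys_addr_t size,
						   phys_addr_t align, int nid)
	{
		phys_addr_t found;

		/* top-down search of [0, memblock.current_limit) for @nid */
		found = memblock_find_in_range_node(0, MEMBLOCK_ALLOC_ACCESSIBLE,
						    size, align, nid);

		/* memblock_add_region() returns 0 on success */
		if (found && !memblock_add_region(&memblock.reserved, found, size))
			return found;

		return 0;	/* nothing suitable found */
	}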
Diffstat (limited to 'mm')
-rw-r--r--	mm/memblock.c	57
1 file changed, 32 insertions(+), 25 deletions(-)
diff --git a/mm/memblock.c b/mm/memblock.c
index 447cf64304ba..a8edb422795b 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -521,49 +521,56 @@ static phys_addr_t __init memblock_nid_range_rev(phys_addr_t start,
 	return start;
 }
 
-static phys_addr_t __init memblock_alloc_nid_region(struct memblock_region *mp,
+phys_addr_t __init memblock_find_in_range_node(phys_addr_t start,
+					       phys_addr_t end,
 					       phys_addr_t size,
 					       phys_addr_t align, int nid)
 {
-	phys_addr_t start, end;
+	struct memblock_type *mem = &memblock.memory;
+	int i;
 
-	start = mp->base;
-	end = start + mp->size;
+	BUG_ON(0 == size);
 
-	while (start < end) {
-		phys_addr_t this_start;
-		int this_nid;
+	/* Pump up max_addr */
+	if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
+		end = memblock.current_limit;
 
-		this_start = memblock_nid_range_rev(start, end, &this_nid);
-		if (this_nid == nid) {
-			phys_addr_t ret = memblock_find_region(this_start, end, size, align);
-			if (ret &&
-			    !memblock_add_region(&memblock.reserved, ret, size))
-				return ret;
+	for (i = mem->cnt - 1; i >= 0; i--) {
+		struct memblock_region *r = &mem->regions[i];
+		phys_addr_t base = max(start, r->base);
+		phys_addr_t top = min(end, r->base + r->size);
+
+		while (base < top) {
+			phys_addr_t tbase, ret;
+			int tnid;
+
+			tbase = memblock_nid_range_rev(base, top, &tnid);
+			if (nid == MAX_NUMNODES || tnid == nid) {
+				ret = memblock_find_region(tbase, top, size, align);
+				if (ret)
+					return ret;
+			}
+			top = tbase;
 		}
-		end = this_start;
 	}
+
 	return 0;
 }
 
 phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
 {
-	struct memblock_type *mem = &memblock.memory;
-	int i;
-
-	BUG_ON(0 == size);
+	phys_addr_t found;
 
-	/* We align the size to limit fragmentation. Without this, a lot of
+	/*
+	 * We align the size to limit fragmentation. Without this, a lot of
 	 * small allocs quickly eat up the whole reserve array on sparc
 	 */
 	size = round_up(size, align);
 
-	for (i = mem->cnt - 1; i >= 0; i--) {
-		phys_addr_t ret = memblock_alloc_nid_region(&mem->regions[i],
-							    size, align, nid);
-		if (ret)
-			return ret;
-	}
+	found = memblock_find_in_range_node(0, MEMBLOCK_ALLOC_ACCESSIBLE,
+					    size, align, nid);
+	if (found && !memblock_add_region(&memblock.reserved, found, size))
+		return found;
 
 	return 0;
 }
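One design note on the new search loop: memory regions are walked from
the last (highest) region downwards and each is clamped to the caller's
[@start, @end) window before memblock_nid_range_rev() subdivides it by
node, so allocations stay top-down just like the non-NUMA path. Passing
MAX_NUMNODES as @nid disables the node filter, letting the same
function serve as an "any node" finder. A hedged usage sketch of that
wildcard case (the size/align values are arbitrary; SZ_4K is assumed
available, e.g. via asm-generic/sizes.h):

	/* Example: find 4 KiB anywhere in accessible memory, any node. */
	phys_addr_t addr;

	addr = memblock_find_in_range_node(0, MEMBLOCK_ALLOC_ACCESSIBLE,
					   SZ_4K, SZ_4K, MAX_NUMNODES);
	if (!addr)
		panic("memblock: no 4 KiB area available");

	/*
	 * Note: the area is only found, not reserved; the caller must
	 * still record it in memblock.reserved (as memblock_alloc_nid()
	 * does via memblock_add_region()) before using it.
	 */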