path: root/lib/lmb.c
author		David S. Miller <davem@davemloft.net>	2008-03-24 20:50:48 +1100
committer	Paul Mackerras <paulus@samba.org>	2008-04-15 21:22:17 +1000
commit		c50f68c8aea421267ba7995b1c485c281b28add6 (patch)
tree		38d72f3d6c9e43a4653cc7e330af0aa0dfca3dd5 /lib/lmb.c
parent		4b1d99b37f608b8cc03550033b16212ca9362efd (diff)
[LMB] Add lmb_alloc_nid()
A variant of lmb_alloc() that tries to allocate memory on a specified
NUMA node 'nid' but falls back to a normal lmb_alloc() if that fails.

The caller provides a 'nid_range' function pointer which assists the
allocator.  It is given the args 'start', 'end', and a pointer to an
integer 'this_nid'.  It places at 'this_nid' the NUMA node id that
corresponds to 'start', and returns the end address within 'start' to
'end' at which memory associated with that node ends.

This callback allows a platform to use lmb_alloc_nid() in just about
any context, even ones in which early_pfn_to_nid() might not be
working yet.

This function will be used by the NUMA setup code on sparc64, and it
can also be used by powerpc, replacing its hand-crafted
"careful_allocation()" function in arch/powerpc/mm/numa.c.

If x86 ever converts its NUMA support over to using the LMB helpers,
it can use this too as it has something entirely similar.

Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Paul Mackerras <paulus@samba.org>
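[Editor's note: as a rough sketch of the callback contract described
above, not part of this commit, a platform-side 'nid_range'
implementation might look like the following.  The node map, its
contents, and all 'example_' names are hypothetical.]

/* Hypothetical early node map; a real platform would populate
 * something like this from firmware or device-tree data during
 * early boot.  Spans are assumed sorted and non-overlapping. */
struct example_node_span {
	u64 base;	/* first byte of the span */
	u64 size;	/* length of the span in bytes */
	int nid;	/* NUMA node owning the span */
};

static struct example_node_span example_node_map[] __initdata = {
	{ 0x00000000ULL, 0x40000000ULL, 0 },	/* 1 GB on node 0 */
	{ 0x40000000ULL, 0x40000000ULL, 1 },	/* 1 GB on node 1 */
};

/* Matches the u64 (*)(u64, u64, int *) signature lmb_alloc_nid()
 * takes: store the node id owning 'start' in '*nid' and return the
 * address at which that node's memory ends, clamped to 'end'. */
static u64 __init example_nid_range(u64 start, u64 end, int *nid)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(example_node_map); i++) {
		struct example_node_span *s = &example_node_map[i];

		if (start >= s->base && start < s->base + s->size) {
			*nid = s->nid;
			return min(end, s->base + s->size);
		}
	}

	/* Address not covered by the map: attribute the rest of the
	 * range to node 0 so the walk in lmb_alloc_nid_region()
	 * still makes forward progress. */
	*nid = 0;
	return end;
}

A caller would then request node-local memory with, e.g.,
lmb_alloc_nid(size, PAGE_SIZE, nid, example_nid_range), getting the
documented fallback to lmb_alloc() when node 'nid' has no suitable
free range.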
Diffstat (limited to 'lib/lmb.c')
-rw-r--r--	lib/lmb.c	86
1 file changed, 76 insertions(+), 10 deletions(-)
diff --git a/lib/lmb.c b/lib/lmb.c
index 3c43b95fef4a..549fbb3d70cf 100644
--- a/lib/lmb.c
+++ b/lib/lmb.c
@@ -232,6 +232,82 @@ long __init lmb_overlaps_region(struct lmb_region *rgn, u64 base,
return (i < rgn->cnt) ? i : -1;
}
+static u64 lmb_align_down(u64 addr, u64 size)
+{
+ return addr & ~(size - 1);
+}
+
+static u64 lmb_align_up(u64 addr, u64 size)
+{
+ return (addr + (size - 1)) & ~(size - 1);
+}
+
+static u64 __init lmb_alloc_nid_unreserved(u64 start, u64 end,
+ u64 size, u64 align)
+{
+ u64 base;
+ long j;
+
+ base = lmb_align_down((end - size), align);
+ while (start <= base &&
+ ((j = lmb_overlaps_region(&lmb.reserved, base, size)) >= 0))
+ base = lmb_align_down(lmb.reserved.region[j].base - size,
+ align);
+
+ if (base != 0 && start <= base) {
+ if (lmb_add_region(&lmb.reserved, base,
+ lmb_align_up(size, align)) < 0)
+ base = ~(u64)0;
+ return base;
+ }
+
+ return ~(u64)0;
+}
+
+static u64 __init lmb_alloc_nid_region(struct lmb_property *mp,
+ u64 (*nid_range)(u64, u64, int *),
+ u64 size, u64 align, int nid)
+{
+ u64 start, end;
+
+ start = mp->base;
+ end = start + mp->size;
+
+ start = lmb_align_up(start, align);
+ while (start < end) {
+ u64 this_end;
+ int this_nid;
+
+ this_end = nid_range(start, end, &this_nid);
+ if (this_nid == nid) {
+ u64 ret = lmb_alloc_nid_unreserved(start, this_end,
+ size, align);
+ if (ret != ~(u64)0)
+ return ret;
+ }
+ start = this_end;
+ }
+
+ return ~(u64)0;
+}
+
+u64 __init lmb_alloc_nid(u64 size, u64 align, int nid,
+ u64 (*nid_range)(u64 start, u64 end, int *nid))
+{
+ struct lmb_region *mem = &lmb.memory;
+ int i;
+
+ for (i = 0; i < mem->cnt; i++) {
+ u64 ret = lmb_alloc_nid_region(&mem->region[i],
+ nid_range,
+ size, align, nid);
+ if (ret != ~(u64)0)
+ return ret;
+ }
+
+ return lmb_alloc(size, align);
+}
+
u64 __init lmb_alloc(u64 size, u64 align)
{
return lmb_alloc_base(size, align, LMB_ALLOC_ANYWHERE);
@@ -250,16 +326,6 @@ u64 __init lmb_alloc_base(u64 size, u64 align, u64 max_addr)
return alloc;
}
-static u64 lmb_align_down(u64 addr, u64 size)
-{
- return addr & ~(size - 1);
-}
-
-static u64 lmb_align_up(u64 addr, u64 size)
-{
- return (addr + (size - 1)) & ~(size - 1);
-}
-
u64 __init __lmb_alloc_base(u64 size, u64 align, u64 max_addr)
{
long i, j;
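[Editor's note: a brief word on the two helpers the second hunk
relocates above the new allocator.  They are plain power-of-two mask
arithmetic, so 'align' must be a power of two for them to behave.  A
standalone userspace sketch of the same math, independent of the
kernel sources and for illustration only:]

#include <stdio.h>
#include <stdint.h>

/* Same arithmetic as lmb_align_down()/lmb_align_up() above; only
 * meaningful when 'size' is a power of two, since ~(size - 1) is
 * then a contiguous high-bit mask. */
static uint64_t align_down(uint64_t addr, uint64_t size)
{
	return addr & ~(size - 1);
}

static uint64_t align_up(uint64_t addr, uint64_t size)
{
	return (addr + (size - 1)) & ~(size - 1);
}

int main(void)
{
	/* 0x12345 against a 0x1000 (4 KB) boundary:
	 * down -> 0x12000, up -> 0x13000. */
	printf("down: 0x%llx\n",
	       (unsigned long long)align_down(0x12345, 0x1000));
	printf("up:   0x%llx\n",
	       (unsigned long long)align_up(0x12345, 0x1000));
	return 0;
}

lmb_alloc_nid_unreserved() uses lmb_align_down() the same way: it
starts from the aligned top of the candidate range and steps downward
past each overlapping reserved region, which is why the allocation
comes from the highest suitable address within the node's memory.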