author    Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>    2010-08-30 16:41:02 -0700
committer Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>    2010-10-22 12:57:27 -0700
commit    42ee1471e9b879479a15debac752314a596c738e (patch)
tree      119d3a47d8561a5447b8de882fa45362cb34b8f7
parent    35ae11fd146384d222f3bb1f17eed1970cc92c36 (diff)
xen: implement "extra" memory to reserve space for pages not present at boot
When using the e820 map to get the initial pseudo-physical address space, look for either Xen-provided memory which doesn't lie within an E820 region, or an E820 RAM region which extends beyond the Xen-provided memory range.

Count these pages, and add them to a new "extra memory" range. This range has an E820 RAM range to describe it - so the kernel will allocate page structures for it - but it is also marked reserved so that the kernel will not attempt to use it. The balloon driver can then add this range as a set of currently ballooned-out pages, which can be used to extend the domain beyond its original size.

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
-rw-r--r--  arch/x86/xen/setup.c  29
1 file changed, 27 insertions(+), 2 deletions(-)
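
The patch below adds only the producer side of this scheme: it records the extra range in xen_extra_mem_start/xen_extra_mem_size and reserves it. For context, the consumer the commit message alludes to (the balloon driver recording each page in that range as currently ballooned out) might look roughly like the following sketch. This is illustrative only and not part of this patch; the xen_balloon_add_extra_mem() and balloon_append() names are assumed helpers, not functions introduced here.

/*
 * Illustrative sketch only; not part of this patch.  A consumer such
 * as the balloon driver could walk the extra-memory range and record
 * each page as ballooned out.  balloon_append() is an assumed helper
 * that queues a page on the ballooned-out list.
 */
static void __init xen_balloon_add_extra_mem(void)
{
	unsigned long pfn;
	unsigned long start_pfn = PFN_UP(xen_extra_mem_start);
	unsigned long end_pfn = PFN_DOWN(xen_extra_mem_start + xen_extra_mem_size);

	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
		/*
		 * These pages have no machine backing yet, so they must
		 * not be touched; only note them as ballooned out so the
		 * domain can later be grown into this space.
		 */
		balloon_append(pfn_to_page(pfn));
	}
}

Because the range is described by an E820 RAM entry, the kernel allocates page structures for it, so pfn_to_page() is valid for these pfns even though the underlying machine memory is not present at boot; the early reservation is what keeps the allocator from handing the pages out.
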
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index dd2eb2a9303f..f9a99eaddcdc 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -34,6 +34,26 @@ extern void xen_sysenter_target(void);
extern void xen_syscall_target(void);
extern void xen_syscall32_target(void);
+/* Amount of extra memory space we add to the e820 ranges */
+phys_addr_t xen_extra_mem_start, xen_extra_mem_size;
+
+static __init void xen_add_extra_mem(unsigned long pages)
+{
+ u64 size = (u64)pages * PAGE_SIZE;
+
+ if (!pages)
+ return;
+
+ e820_add_region(xen_extra_mem_start + xen_extra_mem_size, size, E820_RAM);
+ sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
+
+ reserve_early(xen_extra_mem_start + xen_extra_mem_size,
+ xen_extra_mem_start + xen_extra_mem_size + size,
+ "XEN EXTRA");
+
+ xen_extra_mem_size += size;
+}
+
static unsigned long __init xen_release_chunk(phys_addr_t start_addr,
phys_addr_t end_addr)
{
@@ -105,7 +125,6 @@ static unsigned long __init xen_return_unused_memory(unsigned long max_pfn,
/**
* machine_specific_memory_setup - Hook for machine specific memory setup.
**/
-
char * __init xen_memory_setup(void)
{
static struct e820entry map[E820MAX] __initdata;
@@ -114,6 +133,7 @@ char * __init xen_memory_setup(void)
unsigned long long mem_end;
int rc;
struct xen_memory_map memmap;
+ unsigned long extra_pages = 0;
int i;
max_pfn = min(MAX_DOMAIN_PAGES, max_pfn);
@@ -135,6 +155,7 @@ char * __init xen_memory_setup(void)
BUG_ON(rc);
e820.nr_map = 0;
+ xen_extra_mem_start = mem_end;
for (i = 0; i < memmap.nr_entries; i++) {
unsigned long long end = map[i].addr + map[i].size;
if (map[i].type == E820_RAM) {
@@ -143,6 +164,8 @@ char * __init xen_memory_setup(void)
if (end > mem_end) {
/* Truncate region to max_mem. */
map[i].size -= end - mem_end;
+
+ extra_pages += PFN_DOWN(end - mem_end);
}
}
if (map[i].size > 0)
@@ -169,7 +192,9 @@ char * __init xen_memory_setup(void)
sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
- xen_return_unused_memory(xen_start_info->nr_pages, &e820);
+ extra_pages += xen_return_unused_memory(xen_start_info->nr_pages, &e820);
+
+ xen_add_extra_mem(extra_pages);
return "Xen";
}