Diffstat (limited to 'arch/x86')
-rw-r--r--   arch/x86/include/asm/xen/page.h   |   6
-rw-r--r--   arch/x86/kernel/kprobes.c         |   4
-rw-r--r--   arch/x86/kernel/rtc.c             |  23
-rw-r--r--   arch/x86/kernel/vsyscall_64.c     |   2
-rw-r--r--   arch/x86/mm/init.c                |   3
-rw-r--r--   arch/x86/pci/acpi.c               |  11
-rw-r--r--   arch/x86/platform/mrst/mrst.c     |  22
-rw-r--r--   arch/x86/platform/mrst/vrtc.c     |   9
-rw-r--r--   arch/x86/xen/Kconfig              |   8
-rw-r--r--   arch/x86/xen/mmu.c                |  52
-rw-r--r--   arch/x86/xen/p2m.c                | 119
-rw-r--r--   arch/x86/xen/setup.c              | 284
12 files changed, 277 insertions, 266 deletions
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index 7ff4669580cf..c34f96c2f7a0 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -12,6 +12,7 @@
 #include <asm/pgtable.h>
 
 #include <xen/interface/xen.h>
+#include <xen/grant_table.h>
 #include <xen/features.h>
 
 /* Xen machine address */
@@ -48,14 +49,11 @@ extern unsigned long set_phys_range_identity(unsigned long pfn_s,
 					     unsigned long pfn_e);
 
 extern int m2p_add_override(unsigned long mfn, struct page *page,
-			    bool clear_pte);
+			    struct gnttab_map_grant_ref *kmap_op);
 extern int m2p_remove_override(struct page *page, bool clear_pte);
 extern struct page *m2p_find_override(unsigned long mfn);
 extern unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn);
 
-#ifdef CONFIG_XEN_DEBUG_FS
-extern int p2m_dump_show(struct seq_file *m, void *v);
-#endif
 static inline unsigned long pfn_to_mfn(unsigned long pfn)
 {
 	unsigned long mfn;
diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
index f1a6244d7d93..794bc95134cd 100644
--- a/arch/x86/kernel/kprobes.c
+++ b/arch/x86/kernel/kprobes.c
@@ -75,8 +75,10 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
 /*
  * Undefined/reserved opcodes, conditional jump, Opcode Extension
  * Groups, and some special opcodes can not boost.
+ * This is non-const to keep gcc from statically optimizing it out, as
+ * variable_test_bit makes gcc think only *(unsigned long*) is used.
  */
-static const u32 twobyte_is_boostable[256 / 32] = {
+static u32 twobyte_is_boostable[256 / 32] = {
 	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
 	/*      ----------------------------------------------          */
 	W(0x00, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0) | /* 00 */
diff --git a/arch/x86/kernel/rtc.c b/arch/x86/kernel/rtc.c
index 3f2ad2640d85..ccdbc16b8941 100644
--- a/arch/x86/kernel/rtc.c
+++ b/arch/x86/kernel/rtc.c
@@ -42,8 +42,11 @@ int mach_set_rtc_mmss(unsigned long nowtime)
 {
 	int real_seconds, real_minutes, cmos_minutes;
 	unsigned char save_control, save_freq_select;
+	unsigned long flags;
 	int retval = 0;
 
+	spin_lock_irqsave(&rtc_lock, flags);
+
 	/* tell the clock it's being set */
 	save_control = CMOS_READ(RTC_CONTROL);
 	CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);
@@ -93,12 +96,17 @@ int mach_set_rtc_mmss(unsigned long nowtime)
 	CMOS_WRITE(save_control, RTC_CONTROL);
 	CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
 
+	spin_unlock_irqrestore(&rtc_lock, flags);
+
 	return retval;
 }
 
 unsigned long mach_get_cmos_time(void)
 {
 	unsigned int status, year, mon, day, hour, min, sec, century = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&rtc_lock, flags);
 
 	/*
 	 * If UIP is clear, then we have >= 244 microseconds before
@@ -125,6 +133,8 @@ unsigned long mach_get_cmos_time(void)
 	status = CMOS_READ(RTC_CONTROL);
 	WARN_ON_ONCE(RTC_ALWAYS_BCD && (status & RTC_DM_BINARY));
 
+	spin_unlock_irqrestore(&rtc_lock, flags);
+
 	if (RTC_ALWAYS_BCD || !(status & RTC_DM_BINARY)) {
 		sec = bcd2bin(sec);
 		min = bcd2bin(min);
@@ -169,24 +179,15 @@ EXPORT_SYMBOL(rtc_cmos_write);
 
 int update_persistent_clock(struct timespec now)
 {
-	unsigned long flags;
-	int retval;
-
-	spin_lock_irqsave(&rtc_lock, flags);
-	retval = x86_platform.set_wallclock(now.tv_sec);
-	spin_unlock_irqrestore(&rtc_lock, flags);
-
-	return retval;
+	return x86_platform.set_wallclock(now.tv_sec);
 }
 
 /* not static: needed by APM */
 void read_persistent_clock(struct timespec *ts)
 {
-	unsigned long retval, flags;
+	unsigned long retval;
 
-	spin_lock_irqsave(&rtc_lock, flags);
 	retval = x86_platform.get_wallclock();
-	spin_unlock_irqrestore(&rtc_lock, flags);
 
 	ts->tv_sec = retval;
 	ts->tv_nsec = 0;
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
index 18ae83dd1cd7..b56c65de384d 100644
--- a/arch/x86/kernel/vsyscall_64.c
+++ b/arch/x86/kernel/vsyscall_64.c
@@ -56,7 +56,7 @@ DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) =
 	.lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
 };
 
-static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
+static enum { EMULATE, NATIVE, NONE } vsyscall_mode = NATIVE;
 
 static int __init vsyscall_setup(char *str)
 {
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 30326443ab81..87488b93a65c 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -63,9 +63,8 @@ static void __init find_early_table_space(unsigned long end, int use_pse,
 #ifdef CONFIG_X86_32
 	/* for fixmap */
 	tables += roundup(__end_of_fixed_addresses * sizeof(pte_t), PAGE_SIZE);
-
-	good_end = max_pfn_mapped << PAGE_SHIFT;
 #endif
+	good_end = max_pfn_mapped << PAGE_SHIFT;
 
 	base = memblock_find_in_range(start, good_end, tables, PAGE_SIZE);
 	if (base == MEMBLOCK_ERROR)
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
index 039d91315bc5..404f21a3ff9e 100644
--- a/arch/x86/pci/acpi.c
+++ b/arch/x86/pci/acpi.c
@@ -43,6 +43,17 @@ static const struct dmi_system_id pci_use_crs_table[] __initconst = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "ALiveSATA2-GLAN"),
 		},
 	},
+	/* https://bugzilla.kernel.org/show_bug.cgi?id=30552 */
+	/* 2006 AMD HT/VIA system with two host bridges */
+	{
+		.callback = set_use_crs,
+		.ident = "ASUS M2V-MX SE",
+		.matches = {
+			DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
+			DMI_MATCH(DMI_BOARD_NAME, "M2V-MX SE"),
+			DMI_MATCH(DMI_BIOS_VENDOR, "American Megatrends Inc."),
+		},
+	},
 	{}
 };
diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
index 58425adc22c6..fe73276e026b 100644
--- a/arch/x86/platform/mrst/mrst.c
+++ b/arch/x86/platform/mrst/mrst.c
@@ -678,38 +678,40 @@ static int __init sfi_parse_devs(struct sfi_table_header *table)
 	pentry = (struct sfi_device_table_entry *)sb->pentry;
 
 	for (i = 0; i < num; i++, pentry++) {
-		if (pentry->irq != (u8)0xff) { /* native RTE case */
+		int irq = pentry->irq;
+
+		if (irq != (u8)0xff) { /* native RTE case */
 			/* these SPI2 devices are not exposed to system as PCI
 			 * devices, but they have separate RTE entry in IOAPIC
 			 * so we have to enable them one by one here
 			 */
-			ioapic = mp_find_ioapic(pentry->irq);
+			ioapic = mp_find_ioapic(irq);
 			irq_attr.ioapic = ioapic;
-			irq_attr.ioapic_pin = pentry->irq;
+			irq_attr.ioapic_pin = irq;
 			irq_attr.trigger = 1;
 			irq_attr.polarity = 1;
-			io_apic_set_pci_routing(NULL, pentry->irq, &irq_attr);
+			io_apic_set_pci_routing(NULL, irq, &irq_attr);
 		} else
-			pentry->irq = 0; /* No irq */
+			irq = 0; /* No irq */
 
 		switch (pentry->type) {
 		case SFI_DEV_TYPE_IPC:
 			/* ID as IRQ is a hack that will go away */
-			pdev = platform_device_alloc(pentry->name, pentry->irq);
+			pdev = platform_device_alloc(pentry->name, irq);
 			if (pdev == NULL) {
 				pr_err("out of memory for SFI platform device '%s'.\n",
 							pentry->name);
 				continue;
 			}
-			install_irq_resource(pdev, pentry->irq);
+			install_irq_resource(pdev, irq);
 			pr_debug("info[%2d]: IPC bus, name = %16.16s, "
-				"irq = 0x%2x\n", i, pentry->name, pentry->irq);
+				"irq = 0x%2x\n", i, pentry->name, irq);
 			sfi_handle_ipc_dev(pdev);
 			break;
 		case SFI_DEV_TYPE_SPI:
 			memset(&spi_info, 0, sizeof(spi_info));
 			strncpy(spi_info.modalias, pentry->name, SFI_NAME_LEN);
-			spi_info.irq = pentry->irq;
+			spi_info.irq = irq;
 			spi_info.bus_num = pentry->host_num;
 			spi_info.chip_select = pentry->addr;
 			spi_info.max_speed_hz = pentry->max_freq;
@@ -726,7 +728,7 @@ static int __init sfi_parse_devs(struct sfi_table_header *table)
 			memset(&i2c_info, 0, sizeof(i2c_info));
 			bus = pentry->host_num;
 			strncpy(i2c_info.type, pentry->name, SFI_NAME_LEN);
-			i2c_info.irq = pentry->irq;
+			i2c_info.irq = irq;
 			i2c_info.addr = pentry->addr;
 			pr_debug("info[%2d]: I2C bus = %d, name = %16.16s, "
 				"irq = 0x%2x, addr = 0x%x\n", i, bus,
diff --git a/arch/x86/platform/mrst/vrtc.c b/arch/x86/platform/mrst/vrtc.c
index 73d70d65e76e..6d5dbcdd444a 100644
--- a/arch/x86/platform/mrst/vrtc.c
+++ b/arch/x86/platform/mrst/vrtc.c
@@ -58,8 +58,11 @@ EXPORT_SYMBOL_GPL(vrtc_cmos_write);
 unsigned long vrtc_get_time(void)
 {
 	u8 sec, min, hour, mday, mon;
+	unsigned long flags;
 	u32 year;
 
+	spin_lock_irqsave(&rtc_lock, flags);
+
 	while ((vrtc_cmos_read(RTC_FREQ_SELECT) & RTC_UIP))
 		cpu_relax();
 
@@ -70,6 +73,8 @@ unsigned long vrtc_get_time(void)
 	mon = vrtc_cmos_read(RTC_MONTH);
 	year = vrtc_cmos_read(RTC_YEAR);
 
+	spin_unlock_irqrestore(&rtc_lock, flags);
+
 	/* vRTC YEAR reg contains the offset to 1960 */
 	year += 1960;
 
@@ -83,8 +88,10 @@ unsigned long vrtc_get_time(void)
 int vrtc_set_mmss(unsigned long nowtime)
 {
 	int real_sec, real_min;
+	unsigned long flags;
 	int vrtc_min;
 
+	spin_lock_irqsave(&rtc_lock, flags);
 	vrtc_min = vrtc_cmos_read(RTC_MINUTES);
 
 	real_sec = nowtime % 60;
@@ -95,6 +102,8 @@ int vrtc_set_mmss(unsigned long nowtime)
 	vrtc_cmos_write(real_sec, RTC_SECONDS);
 	vrtc_cmos_write(real_min, RTC_MINUTES);
 
+	spin_unlock_irqrestore(&rtc_lock, flags);
+
 	return 0;
 }
diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig
index 5cc821cb2e09..ae559fe91c25 100644
--- a/arch/x86/xen/Kconfig
+++ b/arch/x86/xen/Kconfig
@@ -49,11 +49,3 @@ config XEN_DEBUG_FS
 	help
 	  Enable statistics output and various tuning options in debugfs.
 	  Enabling this option may incur a significant performance overhead.
-
-config XEN_DEBUG
-	bool "Enable Xen debug checks"
-	depends on XEN
-	default n
-	help
-	  Enable various WARN_ON checks in the Xen MMU code.
-	  Enabling this option WILL incur a significant performance overhead.
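Taken together, the rtc.c and vrtc.c hunks above move rtc_lock out of the generic update_persistent_clock()/read_persistent_clock() wrappers and into the platform wallclock hooks themselves, so each implementation serializes only its own register accesses. A condensed sketch of the resulting pattern, assuming the usual <linux/mc146818rtc.h> and x86_platform definitions (illustrative only, not the full kernel functions):

	#include <linux/spinlock.h>
	#include <linux/time.h>
	#include <linux/mc146818rtc.h>	/* declares rtc_lock and CMOS_READ() */
	#include <asm/x86_init.h>

	unsigned long mach_get_cmos_time(void)
	{
		unsigned long flags;
		unsigned int sec;

		spin_lock_irqsave(&rtc_lock, flags);	/* the hook takes the lock itself... */
		sec = CMOS_READ(RTC_SECONDS);		/* ...around its CMOS accesses */
		spin_unlock_irqrestore(&rtc_lock, flags);

		return sec;
	}

	/* The generic wrapper no longer touches rtc_lock at all. */
	void read_persistent_clock(struct timespec *ts)
	{
		ts->tv_sec  = x86_platform.get_wallclock();
		ts->tv_nsec = 0;
	}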
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 3dd53f997b11..87f6673b1207 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -495,41 +495,6 @@ static pte_t xen_make_pte(pteval_t pte)
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte);
 
-#ifdef CONFIG_XEN_DEBUG
-pte_t xen_make_pte_debug(pteval_t pte)
-{
-	phys_addr_t addr = (pte & PTE_PFN_MASK);
-	phys_addr_t other_addr;
-	bool io_page = false;
-	pte_t _pte;
-
-	if (pte & _PAGE_IOMAP)
-		io_page = true;
-
-	_pte = xen_make_pte(pte);
-
-	if (!addr)
-		return _pte;
-
-	if (io_page &&
-	    (xen_initial_domain() || addr >= ISA_END_ADDRESS)) {
-		other_addr = pfn_to_mfn(addr >> PAGE_SHIFT) << PAGE_SHIFT;
-		WARN_ONCE(addr != other_addr,
-			"0x%lx is using VM_IO, but it is 0x%lx!\n",
-			(unsigned long)addr, (unsigned long)other_addr);
-	} else {
-		pteval_t iomap_set = (_pte.pte & PTE_FLAGS_MASK) & _PAGE_IOMAP;
-		other_addr = (_pte.pte & PTE_PFN_MASK);
-		WARN_ONCE((addr == other_addr) && (!io_page) && (!iomap_set),
-			"0x%lx is missing VM_IO (and wasn't fixed)!\n",
-			(unsigned long)addr);
-	}
-
-	return _pte;
-}
-PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte_debug);
-#endif
-
 static pgd_t xen_make_pgd(pgdval_t pgd)
 {
 	pgd = pte_pfn_to_mfn(pgd);
@@ -1992,9 +1957,6 @@ void __init xen_ident_map_ISA(void)
 
 static void __init xen_post_allocator_init(void)
 {
-#ifdef CONFIG_XEN_DEBUG
-	pv_mmu_ops.make_pte = PV_CALLEE_SAVE(xen_make_pte_debug);
-#endif
 	pv_mmu_ops.set_pte = xen_set_pte;
 	pv_mmu_ops.set_pmd = xen_set_pmd;
 	pv_mmu_ops.set_pud = xen_set_pud;
@@ -2404,17 +2366,3 @@ out:
 	return err;
 }
 EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range);
-
-#ifdef CONFIG_XEN_DEBUG_FS
-static int p2m_dump_open(struct inode *inode, struct file *filp)
-{
-	return single_open(filp, p2m_dump_show, NULL);
-}
-
-static const struct file_operations p2m_dump_fops = {
-	.open		= p2m_dump_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-};
-#endif /* CONFIG_XEN_DEBUG_FS */
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index 795e003517e1..1b267e75158d 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -161,7 +161,9 @@
 #include <asm/xen/page.h>
 #include <asm/xen/hypercall.h>
 #include <asm/xen/hypervisor.h>
+#include <xen/grant_table.h>
 
+#include "multicalls.h"
 #include "xen-ops.h"
 
 static void __init m2p_override_init(void);
@@ -676,7 +678,8 @@ static unsigned long mfn_hash(unsigned long mfn)
 }
 
 /* Add an MFN override for a particular page */
-int m2p_add_override(unsigned long mfn, struct page *page, bool clear_pte)
+int m2p_add_override(unsigned long mfn, struct page *page,
+		struct gnttab_map_grant_ref *kmap_op)
 {
 	unsigned long flags;
 	unsigned long pfn;
@@ -692,16 +695,28 @@ int m2p_add_override(unsigned long mfn, struct page *page, bool clear_pte)
 			"m2p_add_override: pfn %lx not mapped", pfn))
 		return -EINVAL;
 	}
-
-	page->private = mfn;
+	WARN_ON(PagePrivate(page));
+	SetPagePrivate(page);
+	set_page_private(page, mfn);
 	page->index = pfn_to_mfn(pfn);
 
 	if (unlikely(!set_phys_to_machine(pfn, FOREIGN_FRAME(mfn))))
 		return -ENOMEM;
 
-	if (clear_pte && !PageHighMem(page))
-		/* Just zap old mapping for now */
-		pte_clear(&init_mm, address, ptep);
+	if (kmap_op != NULL) {
+		if (!PageHighMem(page)) {
+			struct multicall_space mcs =
+				xen_mc_entry(sizeof(*kmap_op));
+
+			MULTI_grant_table_op(mcs.mc,
+					GNTTABOP_map_grant_ref, kmap_op, 1);
+
+			xen_mc_issue(PARAVIRT_LAZY_MMU);
+		}
+		/* let's use dev_bus_addr to record the old mfn instead */
+		kmap_op->dev_bus_addr = page->index;
+		page->index = (unsigned long) kmap_op;
+	}
 	spin_lock_irqsave(&m2p_override_lock, flags);
 	list_add(&page->lru,  &m2p_overrides[mfn_hash(mfn)]);
 	spin_unlock_irqrestore(&m2p_override_lock, flags);
 
@@ -735,13 +750,56 @@ int m2p_remove_override(struct page *page, bool clear_pte)
 	spin_lock_irqsave(&m2p_override_lock, flags);
 	list_del(&page->lru);
 	spin_unlock_irqrestore(&m2p_override_lock, flags);
-	set_phys_to_machine(pfn, page->index);
+	WARN_ON(!PagePrivate(page));
+	ClearPagePrivate(page);
 
-	if (clear_pte && !PageHighMem(page))
-		set_pte_at(&init_mm, address, ptep,
-				pfn_pte(pfn, PAGE_KERNEL));
-	/* No tlb flush necessary because the caller already
-	 * left the pte unmapped. */
+	if (clear_pte) {
+		struct gnttab_map_grant_ref *map_op =
+			(struct gnttab_map_grant_ref *) page->index;
+		set_phys_to_machine(pfn, map_op->dev_bus_addr);
+		if (!PageHighMem(page)) {
+			struct multicall_space mcs;
+			struct gnttab_unmap_grant_ref *unmap_op;
+
+			/*
+			 * It might be that we queued all the m2p grant table
+			 * hypercalls in a multicall, then m2p_remove_override
+			 * get called before the multicall has actually been
+			 * issued. In this case handle is going to -1 because
+			 * it hasn't been modified yet.
+			 */
+			if (map_op->handle == -1)
+				xen_mc_flush();
+			/*
+			 * Now if map_op->handle is negative it means that the
+			 * hypercall actually returned an error.
+			 */
+			if (map_op->handle == GNTST_general_error) {
+				printk(KERN_WARNING "m2p_remove_override: "
+						"pfn %lx mfn %lx, failed to modify kernel mappings",
+						pfn, mfn);
+				return -1;
+			}
+
+			mcs = xen_mc_entry(
+					sizeof(struct gnttab_unmap_grant_ref));
+			unmap_op = mcs.args;
+			unmap_op->host_addr = map_op->host_addr;
+			unmap_op->handle = map_op->handle;
+			unmap_op->dev_bus_addr = 0;
+
+			MULTI_grant_table_op(mcs.mc,
+					GNTTABOP_unmap_grant_ref, unmap_op, 1);
+
+			xen_mc_issue(PARAVIRT_LAZY_MMU);
+
+			set_pte_at(&init_mm, address, ptep,
+					pfn_pte(pfn, PAGE_KERNEL));
+			__flush_tlb_single(address);
+			map_op->host_addr = 0;
+		}
+	} else
+		set_phys_to_machine(pfn, page->index);
 
 	return 0;
 }
@@ -758,7 +816,7 @@ struct page *m2p_find_override(unsigned long mfn)
 	spin_lock_irqsave(&m2p_override_lock, flags);
 
 	list_for_each_entry(p, bucket, lru) {
-		if (p->private == mfn) {
+		if (page_private(p) == mfn) {
 			ret = p;
 			break;
 		}
@@ -782,8 +840,9 @@ unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn)
 EXPORT_SYMBOL_GPL(m2p_find_override_pfn);
 
 #ifdef CONFIG_XEN_DEBUG_FS
-
-int p2m_dump_show(struct seq_file *m, void *v)
+#include <linux/debugfs.h>
+#include "debugfs.h"
+static int p2m_dump_show(struct seq_file *m, void *v)
 {
 	static const char * const level_name[] = { "top", "middle",
 						"entry", "abnormal", "error"};
@@ -859,4 +918,32 @@ int p2m_dump_show(struct seq_file *m, void *v)
 #undef TYPE_PFN
 #undef TYPE_UNKNOWN
 }
-#endif
+
+static int p2m_dump_open(struct inode *inode, struct file *filp)
+{
+	return single_open(filp, p2m_dump_show, NULL);
+}
+
+static const struct file_operations p2m_dump_fops = {
+	.open		= p2m_dump_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static struct dentry *d_mmu_debug;
+
+static int __init xen_p2m_debugfs(void)
+{
+	struct dentry *d_xen = xen_init_debugfs();
+
+	if (d_xen == NULL)
+		return -ENOMEM;
+
+	d_mmu_debug = debugfs_create_dir("mmu", d_xen);
+
+	debugfs_create_file("p2m", 0600, d_mmu_debug, NULL, &p2m_dump_fops);
+	return 0;
+}
+fs_initcall(xen_p2m_debugfs);
+#endif /* CONFIG_XEN_DEBUG_FS */
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 46d6d21dbdbe..38d0af4fefec 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -37,7 +37,10 @@ extern void xen_syscall_target(void);
 extern void xen_syscall32_target(void);
 
 /* Amount of extra memory space we add to the e820 ranges */
-phys_addr_t xen_extra_mem_start, xen_extra_mem_size;
+struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;
+
+/* Number of pages released from the initial allocation. */
+unsigned long xen_released_pages;
 
 /*
  * The maximum amount of extra memory compared to the base size.  The
@@ -51,48 +54,47 @@ phys_addr_t xen_extra_mem_start, xen_extra_mem_size;
  */
 #define EXTRA_MEM_RATIO		(10)
 
-static void __init xen_add_extra_mem(unsigned long pages)
+static void __init xen_add_extra_mem(u64 start, u64 size)
 {
 	unsigned long pfn;
+	int i;
 
-	u64 size = (u64)pages * PAGE_SIZE;
-	u64 extra_start = xen_extra_mem_start + xen_extra_mem_size;
-
-	if (!pages)
-		return;
-
-	e820_add_region(extra_start, size, E820_RAM);
-	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
-
-	memblock_x86_reserve_range(extra_start, extra_start + size, "XEN EXTRA");
+	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
+		/* Add new region. */
+		if (xen_extra_mem[i].size == 0) {
+			xen_extra_mem[i].start = start;
+			xen_extra_mem[i].size = size;
+			break;
+		}
+		/* Append to existing region. */
+		if (xen_extra_mem[i].start + xen_extra_mem[i].size == start) {
+			xen_extra_mem[i].size += size;
+			break;
+		}
+	}
+	if (i == XEN_EXTRA_MEM_MAX_REGIONS)
+		printk(KERN_WARNING "Warning: not enough extra memory regions\n");
 
-	xen_extra_mem_size += size;
+	memblock_x86_reserve_range(start, start + size, "XEN EXTRA");
 
-	xen_max_p2m_pfn = PFN_DOWN(extra_start + size);
+	xen_max_p2m_pfn = PFN_DOWN(start + size);
 
-	for (pfn = PFN_DOWN(extra_start); pfn <= xen_max_p2m_pfn; pfn++)
+	for (pfn = PFN_DOWN(start); pfn <= xen_max_p2m_pfn; pfn++)
 		__set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
 }
 
-static unsigned long __init xen_release_chunk(phys_addr_t start_addr,
-					      phys_addr_t end_addr)
+static unsigned long __init xen_release_chunk(unsigned long start,
+					      unsigned long end)
 {
 	struct xen_memory_reservation reservation = {
 		.address_bits = 0,
 		.extent_order = 0,
 		.domid        = DOMID_SELF
 	};
-	unsigned long start, end;
 	unsigned long len = 0;
 	unsigned long pfn;
 	int ret;
 
-	start = PFN_UP(start_addr);
-	end = PFN_DOWN(end_addr);
-
-	if (end <= start)
-		return 0;
-
 	for(pfn = start; pfn < end; pfn++) {
 		unsigned long mfn = pfn_to_mfn(pfn);
 
@@ -117,72 +119,52 @@ static unsigned long __init xen_release_chunk(phys_addr_t start_addr,
 	return len;
 }
 
-static unsigned long __init xen_return_unused_memory(unsigned long max_pfn,
-						     const struct e820map *e820)
+static unsigned long __init xen_set_identity_and_release(
+	const struct e820entry *list, size_t map_size, unsigned long nr_pages)
 {
-	phys_addr_t max_addr = PFN_PHYS(max_pfn);
-	phys_addr_t last_end = ISA_END_ADDRESS;
+	phys_addr_t start = 0;
 	unsigned long released = 0;
-	int i;
-
-	/* Free any unused memory above the low 1Mbyte. */
-	for (i = 0; i < e820->nr_map && last_end < max_addr; i++) {
-		phys_addr_t end = e820->map[i].addr;
-		end = min(max_addr, end);
-
-		if (last_end < end)
-			released += xen_release_chunk(last_end, end);
-		last_end = max(last_end, e820->map[i].addr + e820->map[i].size);
-	}
-
-	if (last_end < max_addr)
-		released += xen_release_chunk(last_end, max_addr);
-
-	printk(KERN_INFO "released %lu pages of unused memory\n", released);
-	return released;
-}
-
-static unsigned long __init xen_set_identity(const struct e820entry *list,
-					     ssize_t map_size)
-{
-	phys_addr_t last = xen_initial_domain() ? 0 : ISA_END_ADDRESS;
-	phys_addr_t start_pci = last;
-	const struct e820entry *entry;
 	unsigned long identity = 0;
+	const struct e820entry *entry;
 	int i;
 
+	/*
+	 * Combine non-RAM regions and gaps until a RAM region (or the
+	 * end of the map) is reached, then set the 1:1 map and
+	 * release the pages (if available) in those non-RAM regions.
+	 *
+	 * The combined non-RAM regions are rounded to a whole number
+	 * of pages so any partial pages are accessible via the 1:1
+	 * mapping.  This is needed for some BIOSes that put (for
+	 * example) the DMI tables in a reserved region that begins on
+	 * a non-page boundary.
+	 */
 	for (i = 0, entry = list; i < map_size; i++, entry++) {
-		phys_addr_t start = entry->addr;
-		phys_addr_t end = start + entry->size;
+		phys_addr_t end = entry->addr + entry->size;
 
-		if (start < last)
-			start = last;
+		if (entry->type == E820_RAM || i == map_size - 1) {
+			unsigned long start_pfn = PFN_DOWN(start);
+			unsigned long end_pfn = PFN_UP(end);
 
-		if (end <= start)
-			continue;
+			if (entry->type == E820_RAM)
+				end_pfn = PFN_UP(entry->addr);
 
-		/* Skip over the 1MB region. */
-		if (last > end)
-			continue;
+			if (start_pfn < end_pfn) {
+				if (start_pfn < nr_pages)
+					released += xen_release_chunk(
+						start_pfn, min(end_pfn, nr_pages));
 
-		if ((entry->type == E820_RAM) || (entry->type == E820_UNUSABLE)) {
-			if (start > start_pci)
 				identity += set_phys_range_identity(
-					PFN_UP(start_pci), PFN_DOWN(start));
-
-			/* Without saving 'last' we would gooble RAM too
-			 * at the end of the loop. */
-			last = end;
-			start_pci = end;
-			continue;
+					start_pfn, end_pfn);
+			}
+			start = end;
 		}
-		start_pci = min(start, start_pci);
-		last = end;
 	}
-	if (last > start_pci)
-		identity += set_phys_range_identity(
-			PFN_UP(start_pci), PFN_DOWN(last));
-	return identity;
+
+	printk(KERN_INFO "Released %lu pages of unused memory\n", released);
+	printk(KERN_INFO "Set %ld page(s) to 1-1 mapping\n", identity);
+
+	return released;
 }
 
 static unsigned long __init xen_get_max_pages(void)
@@ -197,21 +179,32 @@ static unsigned long __init xen_get_max_pages(void)
 	return min(max_pages, MAX_DOMAIN_PAGES);
 }
 
+static void xen_align_and_add_e820_region(u64 start, u64 size, int type)
+{
+	u64 end = start + size;
+
+	/* Align RAM regions to page boundaries. */
+	if (type == E820_RAM) {
+		start = PAGE_ALIGN(start);
+		end &= ~((u64)PAGE_SIZE - 1);
+	}
+
+	e820_add_region(start, end - start, type);
+}
+
 /**
  * machine_specific_memory_setup - Hook for machine specific memory setup.
  **/
 char * __init xen_memory_setup(void)
 {
 	static struct e820entry map[E820MAX] __initdata;
-	static struct e820entry map_raw[E820MAX] __initdata;
 
 	unsigned long max_pfn = xen_start_info->nr_pages;
 	unsigned long long mem_end;
 	int rc;
 	struct xen_memory_map memmap;
+	unsigned long max_pages;
 	unsigned long extra_pages = 0;
-	unsigned long extra_limit;
-	unsigned long identity_pages = 0;
 	int i;
 	int op;
@@ -237,58 +230,65 @@ char * __init xen_memory_setup(void)
 	}
 	BUG_ON(rc);
 
-	memcpy(map_raw, map, sizeof(map));
-	e820.nr_map = 0;
-	xen_extra_mem_start = mem_end;
-	for (i = 0; i < memmap.nr_entries; i++) {
-		unsigned long long end;
-
-		/* Guard against non-page aligned E820 entries. */
-		if (map[i].type == E820_RAM)
-			map[i].size -= (map[i].size + map[i].addr) % PAGE_SIZE;
-
-		end = map[i].addr + map[i].size;
-		if (map[i].type == E820_RAM && end > mem_end) {
-			/* RAM off the end - may be partially included */
-			u64 delta = min(map[i].size, end - mem_end);
-
-			map[i].size -= delta;
-			end -= delta;
-
-			extra_pages += PFN_DOWN(delta);
-			/*
-			 * Set RAM below 4GB that is not for us to be unusable.
-			 * This prevents "System RAM" address space from being
-			 * used as potential resource for I/O address (happens
-			 * when 'allocate_resource' is called).
-			 */
-			if (delta &&
-				(xen_initial_domain() && end < 0x100000000ULL))
-				e820_add_region(end, delta, E820_UNUSABLE);
+	/* Make sure the Xen-supplied memory map is well-ordered. */
+	sanitize_e820_map(map, memmap.nr_entries, &memmap.nr_entries);
+
+	max_pages = xen_get_max_pages();
+	if (max_pages > max_pfn)
+		extra_pages += max_pages - max_pfn;
+
+	/*
+	 * Set P2M for all non-RAM pages and E820 gaps to be identity
+	 * type PFNs.  Any RAM pages that would be made inaccesible by
+	 * this are first released.
+	 */
+	xen_released_pages = xen_set_identity_and_release(
+		map, memmap.nr_entries, max_pfn);
+	extra_pages += xen_released_pages;
+
+	/*
+	 * Clamp the amount of extra memory to a EXTRA_MEM_RATIO
+	 * factor the base size.  On non-highmem systems, the base
+	 * size is the full initial memory allocation; on highmem it
+	 * is limited to the max size of lowmem, so that it doesn't
+	 * get completely filled.
+	 *
+	 * In principle there could be a problem in lowmem systems if
+	 * the initial memory is also very large with respect to
+	 * lowmem, but we won't try to deal with that here.
+	 */
+	extra_pages = min(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
+			  extra_pages);
+
+	i = 0;
+	while (i < memmap.nr_entries) {
+		u64 addr = map[i].addr;
+		u64 size = map[i].size;
+		u32 type = map[i].type;
+
+		if (type == E820_RAM) {
+			if (addr < mem_end) {
+				size = min(size, mem_end - addr);
+			} else if (extra_pages) {
+				size = min(size, (u64)extra_pages * PAGE_SIZE);
+				extra_pages -= size / PAGE_SIZE;
+				xen_add_extra_mem(addr, size);
+			} else
+				type = E820_UNUSABLE;
 		}
 
-		if (map[i].size > 0 && end > xen_extra_mem_start)
-			xen_extra_mem_start = end;
+		xen_align_and_add_e820_region(addr, size, type);
 
-		/* Add region if any remains */
-		if (map[i].size > 0)
-			e820_add_region(map[i].addr, map[i].size, map[i].type);
+		map[i].addr += size;
+		map[i].size -= size;
+		if (map[i].size == 0)
+			i++;
 	}
 
-	/* Align the balloon area so that max_low_pfn does not get set
-	 * to be at the _end_ of the PCI gap at the far end (fee01000).
-	 * Note that xen_extra_mem_start gets set in the loop above to be
-	 * past the last E820 region. */
-	if (xen_initial_domain() && (xen_extra_mem_start < (1ULL<<32)))
-		xen_extra_mem_start = (1ULL<<32);
 
 	/*
 	 * In domU, the ISA region is normal, usable memory, but we
 	 * reserve ISA memory anyway because too many things poke
 	 * about in there.
-	 *
-	 * In Dom0, the host E820 information can leave gaps in the
-	 * ISA range, which would cause us to release those pages.  To
-	 * avoid this, we unconditionally reserve them here.
 	 */
 	e820_add_region(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS,
 			E820_RESERVED);
@@ -305,44 +305,6 @@ char * __init xen_memory_setup(void)
 
 	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
 
-	extra_limit = xen_get_max_pages();
-	if (max_pfn + extra_pages > extra_limit) {
-		if (extra_limit > max_pfn)
-			extra_pages = extra_limit - max_pfn;
-		else
-			extra_pages = 0;
-	}
-
-	extra_pages += xen_return_unused_memory(xen_start_info->nr_pages, &e820);
-
-	/*
-	 * Clamp the amount of extra memory to a EXTRA_MEM_RATIO
-	 * factor the base size.  On non-highmem systems, the base
-	 * size is the full initial memory allocation; on highmem it
-	 * is limited to the max size of lowmem, so that it doesn't
-	 * get completely filled.
-	 *
-	 * In principle there could be a problem in lowmem systems if
-	 * the initial memory is also very large with respect to
-	 * lowmem, but we won't try to deal with that here.
-	 */
-	extra_limit = min(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
-			  max_pfn + extra_pages);
-
-	if (extra_limit >= max_pfn)
-		extra_pages = extra_limit - max_pfn;
-	else
-		extra_pages = 0;
-
-	xen_add_extra_mem(extra_pages);
-
-	/*
-	 * Set P2M for all non-RAM pages and E820 gaps to be identity
-	 * type PFNs. We supply it with the non-sanitized version
-	 * of the E820.
-	 */
-	identity_pages = xen_set_identity(map_raw, memmap.nr_entries);
-	printk(KERN_INFO "Set %ld page(s) to 1-1 mapping.\n", identity_pages);
-
 	return "Xen";
 }
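One detail of the reworked xen_memory_setup() path that is easy to miss is the rounding rule in xen_align_and_add_e820_region(): RAM regions have their start rounded up and their end rounded down to page boundaries before being passed to e820_add_region(), while non-RAM regions are added unchanged. A standalone sketch of that arithmetic on a made-up entry (the PAGE_SIZE/PAGE_ALIGN definitions below are local so the snippet compiles outside the kernel):

	/* Illustration only: mimics the RAM-alignment rule from the hunk above. */
	#include <stdio.h>
	#include <stdint.h>

	#define PAGE_SIZE	4096ULL
	#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

	int main(void)
	{
		uint64_t start = 0x1234;		/* hypothetical unaligned E820_RAM entry */
		uint64_t end   = start + 0x9000;	/* entry size 0x9000 */

		uint64_t astart = PAGE_ALIGN(start);		/* RAM start rounded up   -> 0x2000 */
		uint64_t aend   = end & ~(PAGE_SIZE - 1);	/* RAM end rounded down   -> 0xa000 */

		printf("E820_RAM [%#llx-%#llx) becomes [%#llx-%#llx)\n",
		       (unsigned long long)start, (unsigned long long)end,
		       (unsigned long long)astart, (unsigned long long)aend);
		return 0;
	}

The partial pages trimmed off at either end are exactly the ones the comment in xen_set_identity_and_release() says remain reachable through the 1:1 mapping, which is why data such as DMI tables that straddle a page boundary stay accessible.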