author     Tejun Heo <tj@kernel.org>    2011-11-28 09:46:22 -0800
committer  Tejun Heo <tj@kernel.org>    2011-11-28 09:46:22 -0800
commit     d4bbf7e7759afc172e2bfbc5c416324590049cdd
tree       7eab5ee5481cd3dcf1162329fec827177640018a /arch/x86/xen/mmu.c
parent     a150439c4a97db379f0ed6faa46fbbb6e7bf3cb2
parent     401d0069cb344f401bc9d264c31db55876ff78c0
Merge branch 'master' into x86/memblock
Conflicts & resolutions:
* arch/x86/xen/setup.c
dc91c728fd "xen: allow extra memory to be in multiple regions"
24aa07882b "memblock, x86: Replace memblock_x86_reserve/free..."
conflicted on xen_add_extra_mem() updates. The resolution is
trivial, as the latter just wants to replace
memblock_x86_reserve_range() with memblock_reserve(); a sketch of
that conversion follows this list.
* drivers/pci/intel-iommu.c
166e9278a3f "x86/ia64: intel-iommu: move to drivers/iommu/"
5dfe8660a3d "bootmem: Replace work_with_active_regions() with..."
conflicted as the former moved the file under drivers/iommu/.
Resolved by applying the changes from the latter to the moved
file.
* mm/Kconfig
6661672053a "memblock: add NO_BOOTMEM config symbol"
c378ddd53f9 "memblock, x86: Make ARCH_DISCARD_MEMBLOCK a config option"
conflicted trivially. Both commits added config options, and
keeping both additions resolves the conflict.
* mm/memblock.c
d1f0ece6cdc "mm/memblock.c: small function definition fixes"
ed7b56a799c "memblock: Remove memblock_memory_can_coalesce()"
conflicted. The former updates a function that the latter removes;
the resolution is trivial.
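
For the xen/setup.c conflict above, here is a minimal sketch of the
conversion the memblock branch wants. It is illustrative only and is
not the resolved hunk from xen_add_extra_mem(); the helper name, the
start/end variables, and the old region label are placeholders.

#include <linux/init.h>
#include <linux/memblock.h>

/* Hypothetical example, not code from arch/x86/xen/setup.c. */
static void __init example_reserve(phys_addr_t start, phys_addr_t end)
{
	/*
	 * Old x86-specific call, removed by the memblock branch; it
	 * took a (start, end, name) triple:
	 *
	 *	memblock_x86_reserve_range(start, end, "EXAMPLE");
	 */

	/*
	 * Generic replacement takes (base, size), so the end address
	 * becomes a length.
	 */
	memblock_reserve(start, end - start);
}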
Signed-off-by: Tejun Heo <tj@kernel.org>
Diffstat (limited to 'arch/x86/xen/mmu.c')
-rw-r--r--   arch/x86/xen/mmu.c   205
1 file changed, 122 insertions, 83 deletions
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index ad54fa10f8a2..f4bf8aa574f4 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -48,6 +48,8 @@
 #include <linux/memblock.h>
 #include <linux/seq_file.h>
 
+#include <trace/events/xen.h>
+
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 #include <asm/fixmap.h>
@@ -194,6 +196,8 @@ void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid)
 	struct multicall_space mcs;
 	struct mmu_update *u;
 
+	trace_xen_mmu_set_domain_pte(ptep, pteval, domid);
+
 	mcs = xen_mc_entry(sizeof(*u));
 	u = mcs.args;
@@ -225,6 +229,24 @@ static void xen_extend_mmu_update(const struct mmu_update *update)
 	*u = *update;
 }
 
+static void xen_extend_mmuext_op(const struct mmuext_op *op)
+{
+	struct multicall_space mcs;
+	struct mmuext_op *u;
+
+	mcs = xen_mc_extend_args(__HYPERVISOR_mmuext_op, sizeof(*u));
+
+	if (mcs.mc != NULL) {
+		mcs.mc->args[1]++;
+	} else {
+		mcs = __xen_mc_entry(sizeof(*u));
+		MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
+	}
+
+	u = mcs.args;
+	*u = *op;
+}
+
 static void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
 {
 	struct mmu_update u;
@@ -245,6 +267,8 @@ static void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
 
 static void xen_set_pmd(pmd_t *ptr, pmd_t val)
 {
+	trace_xen_mmu_set_pmd(ptr, val);
+
 	/* If page is not pinned, we can just update the entry
 	   directly */
 	if (!xen_page_pinned(ptr)) {
@@ -282,22 +306,30 @@ static bool xen_batched_set_pte(pte_t *ptep, pte_t pteval)
 	return true;
 }
 
-static void xen_set_pte(pte_t *ptep, pte_t pteval)
+static inline void __xen_set_pte(pte_t *ptep, pte_t pteval)
 {
 	if (!xen_batched_set_pte(ptep, pteval))
 		native_set_pte(ptep, pteval);
 }
 
+static void xen_set_pte(pte_t *ptep, pte_t pteval)
+{
+	trace_xen_mmu_set_pte(ptep, pteval);
+	__xen_set_pte(ptep, pteval);
+}
+
 static void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
 		    pte_t *ptep, pte_t pteval)
 {
-	xen_set_pte(ptep, pteval);
+	trace_xen_mmu_set_pte_at(mm, addr, ptep, pteval);
+	__xen_set_pte(ptep, pteval);
 }
 
 pte_t xen_ptep_modify_prot_start(struct mm_struct *mm,
				 unsigned long addr, pte_t *ptep)
 {
 	/* Just return the pte as-is.  We preserve the bits on commit */
+	trace_xen_mmu_ptep_modify_prot_start(mm, addr, ptep, *ptep);
 	return *ptep;
 }
@@ -306,6 +338,7 @@ void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
 {
 	struct mmu_update u;
 
+	trace_xen_mmu_ptep_modify_prot_commit(mm, addr, ptep, pte);
 	xen_mc_batch();
 
 	u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
@@ -462,41 +495,6 @@ static pte_t xen_make_pte(pteval_t pte)
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte);
 
-#ifdef CONFIG_XEN_DEBUG
-pte_t xen_make_pte_debug(pteval_t pte)
-{
-	phys_addr_t addr = (pte & PTE_PFN_MASK);
-	phys_addr_t other_addr;
-	bool io_page = false;
-	pte_t _pte;
-
-	if (pte & _PAGE_IOMAP)
-		io_page = true;
-
-	_pte = xen_make_pte(pte);
-
-	if (!addr)
-		return _pte;
-
-	if (io_page &&
-	    (xen_initial_domain() || addr >= ISA_END_ADDRESS)) {
-		other_addr = pfn_to_mfn(addr >> PAGE_SHIFT) << PAGE_SHIFT;
-		WARN_ONCE(addr != other_addr,
-			"0x%lx is using VM_IO, but it is 0x%lx!\n",
-			(unsigned long)addr, (unsigned long)other_addr);
-	} else {
-		pteval_t iomap_set = (_pte.pte & PTE_FLAGS_MASK) & _PAGE_IOMAP;
-		other_addr = (_pte.pte & PTE_PFN_MASK);
-		WARN_ONCE((addr == other_addr) && (!io_page) && (!iomap_set),
-			"0x%lx is missing VM_IO (and wasn't fixed)!\n",
-			(unsigned long)addr);
-	}
-
-	return _pte;
-}
-PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte_debug);
-#endif
-
 static pgd_t xen_make_pgd(pgdval_t pgd)
 {
 	pgd = pte_pfn_to_mfn(pgd);
@@ -530,6 +528,8 @@ static void xen_set_pud_hyper(pud_t *ptr, pud_t val)
 
 static void xen_set_pud(pud_t *ptr, pud_t val)
 {
+	trace_xen_mmu_set_pud(ptr, val);
+
 	/* If page is not pinned, we can just update the entry
 	   directly */
 	if (!xen_page_pinned(ptr)) {
@@ -543,17 +543,20 @@ static void xen_set_pud(pud_t *ptr, pud_t val)
 #ifdef CONFIG_X86_PAE
 static void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
 {
+	trace_xen_mmu_set_pte_atomic(ptep, pte);
 	set_64bit((u64 *)ptep, native_pte_val(pte));
 }
 
 static void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
+	trace_xen_mmu_pte_clear(mm, addr, ptep);
 	if (!xen_batched_set_pte(ptep, native_make_pte(0)))
 		native_pte_clear(mm, addr, ptep);
 }
 
 static void xen_pmd_clear(pmd_t *pmdp)
 {
+	trace_xen_mmu_pmd_clear(pmdp);
 	set_pmd(pmdp, __pmd(0));
 }
 #endif	/* CONFIG_X86_PAE */
@@ -629,6 +632,8 @@ static void xen_set_pgd(pgd_t *ptr, pgd_t val)
 {
 	pgd_t *user_ptr = xen_get_user_pgd(ptr);
 
+	trace_xen_mmu_set_pgd(ptr, user_ptr, val);
+
 	/* If page is not pinned, we can just update the entry
 	   directly */
 	if (!xen_page_pinned(ptr)) {
@@ -788,14 +793,12 @@ static void xen_pte_unlock(void *v)
 
 static void xen_do_pin(unsigned level, unsigned long pfn)
 {
-	struct mmuext_op *op;
-	struct multicall_space mcs;
+	struct mmuext_op op;
 
-	mcs = __xen_mc_entry(sizeof(*op));
-	op = mcs.args;
-	op->cmd = level;
-	op->arg1.mfn = pfn_to_mfn(pfn);
-	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
+	op.cmd = level;
+	op.arg1.mfn = pfn_to_mfn(pfn);
+
+	xen_extend_mmuext_op(&op);
 }
 
 static int xen_pin_page(struct mm_struct *mm, struct page *page,
@@ -863,6 +866,8 @@ static int xen_pin_page(struct mm_struct *mm, struct page *page,
    read-only, and can be pinned. */
 static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
 {
+	trace_xen_mmu_pgd_pin(mm, pgd);
+
 	xen_mc_batch();
 
 	if (__xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT)) {
@@ -988,6 +993,8 @@ static int xen_unpin_page(struct mm_struct *mm, struct page *page,
 /* Release a pagetables pages back as normal RW */
 static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd)
 {
+	trace_xen_mmu_pgd_unpin(mm, pgd);
+
 	xen_mc_batch();
 
 	xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
@@ -1196,6 +1203,8 @@ static void xen_flush_tlb(void)
 	struct mmuext_op *op;
 	struct multicall_space mcs;
 
+	trace_xen_mmu_flush_tlb(0);
+
 	preempt_disable();
 
 	mcs = xen_mc_entry(sizeof(*op));
@@ -1214,6 +1223,8 @@ static void xen_flush_tlb_single(unsigned long addr)
 	struct mmuext_op *op;
 	struct multicall_space mcs;
 
+	trace_xen_mmu_flush_tlb_single(addr);
+
 	preempt_disable();
 
 	mcs = xen_mc_entry(sizeof(*op));
@@ -1240,6 +1251,8 @@ static void xen_flush_tlb_others(const struct cpumask *cpus,
 	} *args;
 	struct multicall_space mcs;
 
+	trace_xen_mmu_flush_tlb_others(cpus, mm, va);
+
 	if (cpumask_empty(cpus))
 		return;		/* nothing to do */
@@ -1275,10 +1288,11 @@ static void set_current_cr3(void *v)
 
 static void __xen_write_cr3(bool kernel, unsigned long cr3)
 {
-	struct mmuext_op *op;
-	struct multicall_space mcs;
+	struct mmuext_op op;
 	unsigned long mfn;
 
+	trace_xen_mmu_write_cr3(kernel, cr3);
+
 	if (cr3)
 		mfn = pfn_to_mfn(PFN_DOWN(cr3));
 	else
@@ -1286,13 +1300,10 @@ static void __xen_write_cr3(bool kernel, unsigned long cr3)
 
 	WARN_ON(mfn == 0 && kernel);
 
-	mcs = __xen_mc_entry(sizeof(*op));
-
-	op = mcs.args;
-	op->cmd = kernel ? MMUEXT_NEW_BASEPTR : MMUEXT_NEW_USER_BASEPTR;
-	op->arg1.mfn = mfn;
+	op.cmd = kernel ? MMUEXT_NEW_BASEPTR : MMUEXT_NEW_USER_BASEPTR;
+	op.arg1.mfn = mfn;
 
-	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
+	xen_extend_mmuext_op(&op);
 
 	if (kernel) {
 		percpu_write(xen_cr3, cr3);
@@ -1451,19 +1462,52 @@ static void __init xen_release_pmd_init(unsigned long pfn)
 	make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
 }
 
+static inline void __pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
+{
+	struct multicall_space mcs;
+	struct mmuext_op *op;
+
+	mcs = __xen_mc_entry(sizeof(*op));
+	op = mcs.args;
+	op->cmd = cmd;
+	op->arg1.mfn = pfn_to_mfn(pfn);
+
+	MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
+}
+
+static inline void __set_pfn_prot(unsigned long pfn, pgprot_t prot)
+{
+	struct multicall_space mcs;
+	unsigned long addr = (unsigned long)__va(pfn << PAGE_SHIFT);
+
+	mcs = __xen_mc_entry(0);
+	MULTI_update_va_mapping(mcs.mc, (unsigned long)addr,
				pfn_pte(pfn, prot), 0);
+}
+
 /* This needs to make sure the new pte page is pinned iff its being
    attached to a pinned pagetable. */
-static void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn, unsigned level)
+static inline void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn,
+				    unsigned level)
 {
-	struct page *page = pfn_to_page(pfn);
+	bool pinned = PagePinned(virt_to_page(mm->pgd));
+
+	trace_xen_mmu_alloc_ptpage(mm, pfn, level, pinned);
+
+	if (pinned) {
+		struct page *page = pfn_to_page(pfn);
 
-	if (PagePinned(virt_to_page(mm->pgd))) {
 		SetPagePinned(page);
 
 		if (!PageHighMem(page)) {
-			make_lowmem_page_readonly(__va(PFN_PHYS((unsigned long)pfn)));
+			xen_mc_batch();
+
+			__set_pfn_prot(pfn, PAGE_KERNEL_RO);
+
 			if (level == PT_PTE && USE_SPLIT_PTLOCKS)
-				pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
+				__pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
+
+			xen_mc_issue(PARAVIRT_LAZY_MMU);
 		} else {
 			/* make sure there are no stray mappings of
 			   this page */
@@ -1483,15 +1527,23 @@ static void xen_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
 }
 
 /* This should never happen until we're OK to use struct page */
-static void xen_release_ptpage(unsigned long pfn, unsigned level)
+static inline void xen_release_ptpage(unsigned long pfn, unsigned level)
 {
 	struct page *page = pfn_to_page(pfn);
+	bool pinned = PagePinned(page);
+
+	trace_xen_mmu_release_ptpage(pfn, level, pinned);
 
-	if (PagePinned(page)) {
+	if (pinned) {
 		if (!PageHighMem(page)) {
+			xen_mc_batch();
+
 			if (level == PT_PTE && USE_SPLIT_PTLOCKS)
-				pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
-			make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
+				__pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
+
+			__set_pfn_prot(pfn, PAGE_KERNEL);
+
+			xen_mc_issue(PARAVIRT_LAZY_MMU);
 		}
 		ClearPagePinned(page);
 	}
@@ -1626,15 +1678,17 @@ static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
 void __init xen_setup_machphys_mapping(void)
 {
 	struct xen_machphys_mapping mapping;
-	unsigned long machine_to_phys_nr_ents;
 
 	if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) {
 		machine_to_phys_mapping = (unsigned long *)mapping.v_start;
-		machine_to_phys_nr_ents = mapping.max_mfn + 1;
+		machine_to_phys_nr = mapping.max_mfn + 1;
 	} else {
-		machine_to_phys_nr_ents = MACH2PHYS_NR_ENTRIES;
+		machine_to_phys_nr = MACH2PHYS_NR_ENTRIES;
 	}
-	machine_to_phys_order = fls(machine_to_phys_nr_ents - 1);
+#ifdef CONFIG_X86_32
+	WARN_ON((machine_to_phys_mapping + (machine_to_phys_nr - 1))
+		< machine_to_phys_mapping);
+#endif
 }
 
 #ifdef CONFIG_X86_64
@@ -1825,6 +1879,7 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
 # endif
 #else
 	case VSYSCALL_LAST_PAGE ... VSYSCALL_FIRST_PAGE:
+	case VVAR_PAGE:
 #endif
 	case FIX_TEXT_POKE0:
 	case FIX_TEXT_POKE1:
@@ -1865,7 +1920,8 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
 #ifdef CONFIG_X86_64
 	/* Replicate changes to map the vsyscall page into the user
 	   pagetable vsyscall mapping. */
-	if (idx >= VSYSCALL_LAST_PAGE && idx <= VSYSCALL_FIRST_PAGE) {
+	if ((idx >= VSYSCALL_LAST_PAGE && idx <= VSYSCALL_FIRST_PAGE) ||
+	    idx == VVAR_PAGE) {
 		unsigned long vaddr = __fix_to_virt(idx);
 		set_pte_vaddr_pud(level3_user_vsyscall, vaddr, pte);
 	}
@@ -1897,9 +1953,6 @@ void __init xen_ident_map_ISA(void)
 
 static void __init xen_post_allocator_init(void)
 {
-#ifdef CONFIG_XEN_DEBUG
-	pv_mmu_ops.make_pte = PV_CALLEE_SAVE(xen_make_pte_debug);
-#endif
 	pv_mmu_ops.set_pte = xen_set_pte;
 	pv_mmu_ops.set_pmd = xen_set_pmd;
 	pv_mmu_ops.set_pud = xen_set_pud;
@@ -2309,17 +2362,3 @@ out:
 	return err;
 }
 EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range);
-
-#ifdef CONFIG_XEN_DEBUG_FS
-static int p2m_dump_open(struct inode *inode, struct file *filp)
-{
-	return single_open(filp, p2m_dump_show, NULL);
-}
-
-static const struct file_operations p2m_dump_fops = {
-	.open = p2m_dump_open,
-	.read = seq_read,
-	.llseek = seq_lseek,
-	.release = single_release,
-};
-#endif /* CONFIG_XEN_DEBUG_FS */