author		Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>	2010-12-17 14:58:43 -0800
committer	Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>	2011-07-18 15:43:28 -0700
commit		bc7fe1d977734efe6059d6d537b439135ad4a34c (patch)
tree		06a437e5cabe7355e0824935b0a8d24d801b6289 /arch/x86/xen
parent		dcf7435cfe617c9d9f3dd501d3f5ed4d0b30c218 (diff)
download	linux-bc7fe1d977734efe6059d6d537b439135ad4a34c.tar.gz
		linux-bc7fe1d977734efe6059d6d537b439135ad4a34c.tar.bz2
		linux-bc7fe1d977734efe6059d6d537b439135ad4a34c.zip
xen/mmu: tune pgtable alloc/release
Make sure the fastpath code is inlined. Batch the page permission change
and the pin/unpin, and make sure that it can be batched with any
adjacent set_pte/pmd/etc operations.
Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
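
As a reading aid before the diff, here is a minimal sketch of the batched fastpath this patch introduces. It is illustration only, not part of the patch: the helper names (__set_pfn_prot, __pin_pagetable_pfn, xen_mc_batch, xen_mc_issue) are the ones used in the hunks below, while example_alloc_fastpath is a hypothetical wrapper standing in for the body of xen_alloc_ptpage().

/* Illustration only: the shape of the batched alloc fastpath added below. */
static void example_alloc_fastpath(unsigned long pfn, unsigned level)
{
	xen_mc_batch();				/* open a multicall batch */

	/* queue the read-only remap of the new pagetable page */
	__set_pfn_prot(pfn, PAGE_KERNEL_RO);

	/* queue the pin in the same batch, when this level needs one */
	if (level == PT_PTE && USE_SPLIT_PTLOCKS)
		__pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);

	/*
	 * Flush now unless we are already in a lazy-MMU section, in which
	 * case the queued ops coalesce with neighbouring set_pte()/set_pmd()
	 * updates and go out in a single hypercall.
	 */
	xen_mc_issue(PARAVIRT_LAZY_MMU);
}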
Diffstat (limited to 'arch/x86/xen')
-rw-r--r--	arch/x86/xen/mmu.c	53
1 file changed, 44 insertions(+), 9 deletions(-)
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index eb9112687cee..f987bde77c49 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1497,22 +1497,52 @@ static void __init xen_release_pmd_init(unsigned long pfn)
 	make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
 }
 
+static inline void __pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
+{
+	struct multicall_space mcs;
+	struct mmuext_op *op;
+
+	mcs = __xen_mc_entry(sizeof(*op));
+	op = mcs.args;
+	op->cmd = cmd;
+	op->arg1.mfn = pfn_to_mfn(pfn);
+
+	MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
+}
+
+static inline void __set_pfn_prot(unsigned long pfn, pgprot_t prot)
+{
+	struct multicall_space mcs;
+	unsigned long addr = (unsigned long)__va(pfn << PAGE_SHIFT);
+
+	mcs = __xen_mc_entry(0);
+	MULTI_update_va_mapping(mcs.mc, (unsigned long)addr,
+				pfn_pte(pfn, prot), 0);
+}
+
 /* This needs to make sure the new pte page is pinned iff its being
    attached to a pinned pagetable. */
-static void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn, unsigned level)
+static inline void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn,
+				    unsigned level)
 {
-	struct page *page = pfn_to_page(pfn);
-	int pinned = PagePinned(virt_to_page(mm->pgd));
-
+	bool pinned = PagePinned(virt_to_page(mm->pgd));
+
 	trace_xen_mmu_alloc_ptpage(mm, pfn, level, pinned);
 
 	if (pinned) {
+		struct page *page = pfn_to_page(pfn);
+
 		SetPagePinned(page);
 
 		if (!PageHighMem(page)) {
-			make_lowmem_page_readonly(__va(PFN_PHYS((unsigned long)pfn)));
+			xen_mc_batch();
+
+			__set_pfn_prot(pfn, PAGE_KERNEL_RO);
+
 			if (level == PT_PTE && USE_SPLIT_PTLOCKS)
-				pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
+				__pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
+
+			xen_mc_issue(PARAVIRT_LAZY_MMU);
 		} else {
 			/* make sure there are no stray mappings of
 			   this page */
@@ -1532,7 +1562,7 @@ static void xen_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
 }
 
 /* This should never happen until we're OK to use struct page */
-static void xen_release_ptpage(unsigned long pfn, unsigned level)
+static inline void xen_release_ptpage(unsigned long pfn, unsigned level)
 {
 	struct page *page = pfn_to_page(pfn);
 	bool pinned = PagePinned(page);
@@ -1541,9 +1571,14 @@ static void xen_release_ptpage(unsigned long pfn, unsigned level)
 
 	if (pinned) {
 		if (!PageHighMem(page)) {
+			xen_mc_batch();
+
 			if (level == PT_PTE && USE_SPLIT_PTLOCKS)
-				pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
-			make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
+				__pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
+
+			__set_pfn_prot(pfn, PAGE_KERNEL);
+
+			xen_mc_issue(PARAVIRT_LAZY_MMU);
 		}
 		ClearPagePinned(page);
 	}
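
A note on why issuing with PARAVIRT_LAZY_MMU is what lets these operations merge with adjacent set_pte/pmd updates: xen_mc_issue() (arch/x86/xen/multicalls.h) only flushes the multicall queue when the CPU is not already in the matching lazy mode. Below is a simplified sketch from memory, with the irq-flag restore omitted, so details may differ from the tree.

/* Simplified sketch of xen_mc_issue(); see arch/x86/xen/multicalls.h. */
static inline void xen_mc_issue(unsigned mode)
{
	/* Not in the matching lazy mode: issue the queued hypercalls now. */
	if ((paravirt_get_lazy_mode() & mode) == 0)
		xen_mc_flush();

	/*
	 * Otherwise the permission change and pin/unpin stay queued and are
	 * flushed together with any set_pte()/set_pmd() multicalls when the
	 * lazy-MMU section ends.
	 */
}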