| field | value | date |
|---|---|---|
| author | Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp> | 2012-03-01 19:31:22 +0900 |
| committer | Avi Kivity <avi@redhat.com> | 2012-04-08 12:49:56 +0300 |
| commit | a0ed46073c14f66dbf0707aaa7588b78da83d7c6 (patch) | |
| tree | 3dd18f222e5700f7e3251dcbdb4bc325b93ae14b /arch/x86 | |
| parent | 248997095d652576f1213028a95ca5fff85d089f (diff) | |
KVM: MMU: Split the main body of rmap_write_protect() off from others
We will use this in the following patch to implement another function
which needs to write protect pages using the rmap information.
Note that there is a small change in debug printing for large pages:
we do not differentiate them from others to avoid duplicating code.
Signed-off-by: Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/x86')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | arch/x86/kvm/mmu.c | 53 |
1 file changed, 27 insertions, 26 deletions
```diff
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 4cb164268846..c8b5694d1a48 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1010,42 +1010,43 @@ static void drop_spte(struct kvm *kvm, u64 *sptep)
 		rmap_remove(kvm, sptep);
 }
 
-int kvm_mmu_rmap_write_protect(struct kvm *kvm, u64 gfn,
-			       struct kvm_memory_slot *slot)
+static int __rmap_write_protect(struct kvm *kvm, unsigned long *rmapp, int level)
 {
-	unsigned long *rmapp;
-	u64 *spte;
-	int i, write_protected = 0;
+	u64 *spte = NULL;
+	int write_protected = 0;
 
-	rmapp = __gfn_to_rmap(gfn, PT_PAGE_TABLE_LEVEL, slot);
-	spte = rmap_next(rmapp, NULL);
-	while (spte) {
+	while ((spte = rmap_next(rmapp, spte))) {
 		BUG_ON(!(*spte & PT_PRESENT_MASK));
 		rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
-		if (is_writable_pte(*spte)) {
+
+		if (!is_writable_pte(*spte))
+			continue;
+
+		if (level == PT_PAGE_TABLE_LEVEL) {
 			mmu_spte_update(spte, *spte & ~PT_WRITABLE_MASK);
-			write_protected = 1;
+		} else {
+			BUG_ON(!is_large_pte(*spte));
+			drop_spte(kvm, spte);
+			--kvm->stat.lpages;
+			spte = NULL;
 		}
-		spte = rmap_next(rmapp, spte);
+
+		write_protected = 1;
 	}
 
-	/* check for huge page mappings */
-	for (i = PT_DIRECTORY_LEVEL;
+	return write_protected;
+}
+
+int kvm_mmu_rmap_write_protect(struct kvm *kvm, u64 gfn,
+			       struct kvm_memory_slot *slot)
+{
+	unsigned long *rmapp;
+	int i, write_protected = 0;
+
+	for (i = PT_PAGE_TABLE_LEVEL;
 	     i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
 		rmapp = __gfn_to_rmap(gfn, i, slot);
-		spte = rmap_next(rmapp, NULL);
-		while (spte) {
-			BUG_ON(!(*spte & PT_PRESENT_MASK));
-			BUG_ON(!is_large_pte(*spte));
-			pgprintk("rmap_write_protect(large): spte %p %llx %lld\n", spte, *spte, gfn);
-			if (is_writable_pte(*spte)) {
-				drop_spte(kvm, spte);
-				--kvm->stat.lpages;
-				spte = NULL;
-				write_protected = 1;
-			}
-			spte = rmap_next(rmapp, spte);
-		}
+		write_protected |= __rmap_write_protect(kvm, rmapp, i);
 	}
 
 	return write_protected;
```
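The "following patch" referenced in the commit message is not part of this diff. As a rough illustration of how a new caller could reuse the split-out __rmap_write_protect() helper, here is a minimal sketch; the function name slot_write_protect_gfn_range(), its signature, and the absence of locking are assumptions made for this example, not code from the actual follow-up.

```c
/*
 * Hypothetical illustration only: a caller that write-protects a range of
 * guest frames through the rmap by reusing __rmap_write_protect() from the
 * patch above. Name and signature are assumed for this sketch.
 */
static int slot_write_protect_gfn_range(struct kvm *kvm,
					struct kvm_memory_slot *slot,
					gfn_t start, gfn_t end)
{
	unsigned long *rmapp;
	gfn_t gfn;
	int i, write_protected = 0;

	for (gfn = start; gfn < end; ++gfn) {
		/* Walk every mapping level, as kvm_mmu_rmap_write_protect() does. */
		for (i = PT_PAGE_TABLE_LEVEL;
		     i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
			rmapp = __gfn_to_rmap(gfn, i, slot);
			write_protected |= __rmap_write_protect(kvm, rmapp, i);
		}
	}

	return write_protected;
}
```

In real KVM code such a walk would run with kvm->mmu_lock held; only the rmap traversal pattern enabled by the refactoring is shown here.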