author     Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>   2011-11-14 18:22:28 +0900
committer  Avi Kivity <avi@redhat.com>                         2011-12-27 11:17:17 +0200
commit     9b9b1492364758de82c19c36f07baa9ae162c7e5 (patch)
tree       2d600f1bdcc36831597cdcb779f434d668d9d392
parent     d6eebf8b80316ea61718dc115cd6a20c16195327 (diff)
download   linux-9b9b1492364758de82c19c36f07baa9ae162c7e5.tar.gz
           linux-9b9b1492364758de82c19c36f07baa9ae162c7e5.tar.bz2
           linux-9b9b1492364758de82c19c36f07baa9ae162c7e5.zip
KVM: MMU: Split gfn_to_rmap() into two functions
rmap_write_protect() calls gfn_to_rmap() for each level with gfn fixed.
This results in calling gfn_to_memslot() repeatedly with that gfn.

This patch introduces __gfn_to_rmap() which takes the slot as an
argument to avoid this.

This is also needed for the following dirty logging optimization.

Signed-off-by: Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>
Signed-off-by: Avi Kivity <avi@redhat.com>
-rw-r--r--   arch/x86/kvm/mmu.c   26
1 file changed, 17 insertions(+), 9 deletions(-)
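To make the change easier to follow before reading the diff, here is a minimal, self-contained C sketch of the call pattern the patch enables. The types and helpers used here (struct memslot, lookup_slot(), slot_rmap()) are stand-ins invented for illustration, not the real KVM definitions; the point is simply that the caller resolves the slot once and passes it to the per-level helper, instead of every per-level lookup repeating the gfn-to-slot search the way gfn_to_rmap() did via gfn_to_memslot().

/* Toy model of "resolve the slot once, reuse it per level" (C99). */
#include <stdio.h>

typedef unsigned long long gfn_t;

struct memslot {
	gfn_t base_gfn;
	unsigned long *rmap;	/* one reverse-map entry per gfn in the slot */
};

/* Stand-in for the comparatively costly gfn -> slot search. */
static struct memslot *lookup_slot(struct memslot *slots, int nslots, gfn_t gfn)
{
	for (int i = 0; i < nslots; i++)
		if (gfn >= slots[i].base_gfn)
			return &slots[i];
	return NULL;
}

/* Stand-in for the helper that takes an already-resolved slot,
 * analogous in spirit to __gfn_to_rmap(). */
static unsigned long *slot_rmap(struct memslot *slot, gfn_t gfn, int level)
{
	(void)level;	/* the real code picks a per-level structure here */
	return &slot->rmap[gfn - slot->base_gfn];
}

int main(void)
{
	unsigned long rmap_storage[16] = { 0 };
	struct memslot slots[1] = { { .base_gfn = 0x100, .rmap = rmap_storage } };
	gfn_t gfn = 0x104;

	/* Resolve the slot once ... */
	struct memslot *slot = lookup_slot(slots, 1, gfn);

	/* ... then reuse it for every mapping level. */
	for (int level = 1; level <= 3; level++)
		printf("level %d: rmap entry at %p\n", level,
		       (void *)slot_rmap(slot, gfn, level));
	return 0;
}

In the patch itself, rmap_write_protect() plays the role of the caller: it calls gfn_to_memslot() once and then uses __gfn_to_rmap() both for the 4K level and for each huge-page level.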
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 973f25480afa..fa71085f75a3 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -958,23 +958,29 @@ static void pte_list_walk(unsigned long *pte_list, pte_list_walk_fn fn)
 	}
 }
 
-/*
- * Take gfn and return the reverse mapping to it.
- */
-static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int level)
+static unsigned long *__gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int level,
+				    struct kvm_memory_slot *slot)
 {
-	struct kvm_memory_slot *slot;
 	struct kvm_lpage_info *linfo;
 
-	slot = gfn_to_memslot(kvm, gfn);
 	if (likely(level == PT_PAGE_TABLE_LEVEL))
 		return &slot->rmap[gfn - slot->base_gfn];
 
 	linfo = lpage_info_slot(gfn, slot, level);
-
 	return &linfo->rmap_pde;
 }
 
+/*
+ * Take gfn and return the reverse mapping to it.
+ */
+static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int level)
+{
+	struct kvm_memory_slot *slot;
+
+	slot = gfn_to_memslot(kvm, gfn);
+	return __gfn_to_rmap(kvm, gfn, level, slot);
+}
+
 static bool rmap_can_add(struct kvm_vcpu *vcpu)
 {
 	struct kvm_mmu_memory_cache *cache;
@@ -1019,12 +1025,14 @@ static void drop_spte(struct kvm *kvm, u64 *sptep)
 
 static int rmap_write_protect(struct kvm *kvm, u64 gfn)
 {
+	struct kvm_memory_slot *slot;
 	unsigned long *rmapp;
 	u64 *spte;
 	int i, write_protected = 0;
 
-	rmapp = gfn_to_rmap(kvm, gfn, PT_PAGE_TABLE_LEVEL);
+	slot = gfn_to_memslot(kvm, gfn);
+	rmapp = __gfn_to_rmap(kvm, gfn, PT_PAGE_TABLE_LEVEL, slot);
 	spte = rmap_next(kvm, rmapp, NULL);
 	while (spte) {
 		BUG_ON(!(*spte & PT_PRESENT_MASK));
@@ -1039,7 +1047,7 @@ static int rmap_write_protect(struct kvm *kvm, u64 gfn)
 	/* check for huge page mappings */
 	for (i = PT_DIRECTORY_LEVEL;
 	     i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
-		rmapp = gfn_to_rmap(kvm, gfn, i);
+		rmapp = __gfn_to_rmap(kvm, gfn, i, slot);
 		spte = rmap_next(kvm, rmapp, NULL);
 		while (spte) {
 			BUG_ON(!(*spte & PT_PRESENT_MASK));