author		Avi Kivity <avi@qumranet.com>	2008-08-26 16:16:08 +0300
committer	Avi Kivity <avi@qumranet.com>	2008-10-15 10:15:22 +0200
commit		6c41f428b72afe5a581b967590c12538db31d399 (patch)
tree		8b1cd1520469b23d033d7046db2d03ef727d54a4 /arch/x86/kvm
parent		0be9e929e398d6da6406183a8732dbfd0937fafe (diff)
KVM: MMU: Infer shadow root level in direct_map()
In all cases the shadow root level is available in mmu.shadow_root_level, so there is no need to pass it as a parameter.

Signed-off-by: Avi Kivity <avi@qumranet.com>
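The reasoning is easier to see with a toy example. The sketch below is not kernel code; all names except __direct_map and shadow_root_level are invented for illustration. It shows the same "derive it from the vcpu state instead of passing it" refactoring the patch applies: once the walk depth is read from the per-vcpu MMU state inside the mapping function, callers such as nonpaging_map() and tdp_page_fault() can no longer pass a level that disagrees with mmu.shadow_root_level.

/* Toy model of the refactoring (hypothetical names, standalone C). */
#include <stdio.h>

struct kvm_mmu { int shadow_root_level; };	/* stand-in for vcpu->arch.mmu */
struct vcpu    { struct kvm_mmu mmu; };

/* After the patch: the root level is inferred from vcpu state,
 * mirroring the line added to __direct_map() in the diff below. */
static int direct_map(struct vcpu *vcpu)
{
	int level = vcpu->mmu.shadow_root_level;

	for (; level > 0; level--)
		printf("mapping at level %d\n", level);
	return 0;
}

int main(void)
{
	struct vcpu v = { .mmu = { .shadow_root_level = 3 } };	/* e.g. PAE paging */

	return direct_map(&v);	/* callers no longer supply the level */
}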
Diffstat (limited to 'arch/x86/kvm')
-rw-r--r--	arch/x86/kvm/mmu.c	9
1 files changed, 4 insertions, 5 deletions
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 3ee856f6812d..72f739aa8623 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1227,11 +1227,11 @@ static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
 }
 
 static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
-			int largepage, gfn_t gfn, pfn_t pfn,
-			int level)
+			int largepage, gfn_t gfn, pfn_t pfn)
 {
 	hpa_t table_addr = vcpu->arch.mmu.root_hpa;
 	int pt_write = 0;
+	int level = vcpu->arch.mmu.shadow_root_level;
 
 	for (; ; level--) {
 		u32 index = PT64_INDEX(v, level);
@@ -1299,8 +1299,7 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
 	if (mmu_notifier_retry(vcpu, mmu_seq))
 		goto out_unlock;
 	kvm_mmu_free_some_pages(vcpu);
-	r = __direct_map(vcpu, v, write, largepage, gfn, pfn,
-			 PT32E_ROOT_LEVEL);
+	r = __direct_map(vcpu, v, write, largepage, gfn, pfn);
 	spin_unlock(&vcpu->kvm->mmu_lock);
@@ -1455,7 +1454,7 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
 		goto out_unlock;
 	kvm_mmu_free_some_pages(vcpu);
 	r = __direct_map(vcpu, gpa, error_code & PFERR_WRITE_MASK,
-			 largepage, gfn, pfn, kvm_x86_ops->get_tdp_level());
+			 largepage, gfn, pfn);
 	spin_unlock(&vcpu->kvm->mmu_lock);
 
 	return r;