author    Sean Christopherson <sean.j.christopherson@intel.com>  2020-07-15 20:41:15 -0700
committer Paolo Bonzini <pbonzini@redhat.com>                    2020-07-30 18:14:34 -0400
commit    59505b55aa0957bcad84e74bb80153d5c77916f6
tree      1bbd2018c14ecc836ab23c71131bdbdf24040447 /arch/x86/kvm
parent    f291a358e0d88e3b20431266d8f78fc5eda1aec7
KVM: x86/mmu: Add separate helper for shadow NPT root page role calc
Refactor the shadow NPT role calculation into a separate helper to
better differentiate it from the non-nested shadow MMU; e.g. the NPT
variant is never direct and derives its root level from the TDP level
rather than from the guest's paging mode.
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20200716034122.5998-3-sean.j.christopherson@intel.com>
Reviewed-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
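
[Editorial note: to make the split concrete before the diff, here is a
standalone C sketch, not kernel code, of the distinction being factored
out: the generic shadow-MMU helper derives its root level from the
guest's paging mode, while the shadow-NPT helper is never direct and
pins the level to the host's TDP depth. struct toy_vcpu and the two
helper functions are invented for illustration; the enum values borrow
kernel constant names but are redefined locally.]

/*
 * Toy model of the role-level derivation this commit separates.
 * Not kernel code; simplified stand-ins for kvm_vcpu state.
 */
#include <stdbool.h>
#include <stdio.h>

enum { PT32E_ROOT_LEVEL = 3, PT64_ROOT_4LEVEL = 4, PT64_ROOT_5LEVEL = 5 };

struct toy_vcpu {
	bool paging;    /* CR0.PG set in the guest */
	bool long_mode; /* EFER.LMA: guest in 64-bit mode */
	bool la57;      /* CR4.LA57: 5-level guest paging */
	int tdp_level;  /* depth of the host's TDP (NPT/EPT) tables */
};

/* Shadow MMU: root level tracks whatever paging mode the guest runs. */
static int shadow_mmu_root_level(const struct toy_vcpu *v)
{
	if (!v->long_mode)
		return PT32E_ROOT_LEVEL;
	return v->la57 ? PT64_ROOT_5LEVEL : PT64_ROOT_4LEVEL;
}

/* Shadow NPT: root level is pinned to the TDP level, never the guest's. */
static int shadow_npt_root_level(const struct toy_vcpu *v)
{
	return v->tdp_level;
}

int main(void)
{
	/* Example: a 32-bit PAE guest running on a host with 4-level NPT. */
	struct toy_vcpu v = {
		.paging = true, .long_mode = false,
		.la57 = false, .tdp_level = 4,
	};

	printf("shadow MMU root level: %d (direct=%d)\n",
	       shadow_mmu_root_level(&v), !v.paging);
	printf("shadow NPT root level: %d (direct=0, always)\n",
	       shadow_npt_root_level(&v));
	return 0;
}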
Diffstat (limited to 'arch/x86/kvm')
 arch/x86/kvm/mmu/mmu.c | 30 +++++++++++++++++++++++++-----
 1 file changed, 25 insertions(+), 5 deletions(-)
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 678b6209dad5..0fb033ce6cc5 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -4908,7 +4908,7 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
 }
 
 static union kvm_mmu_role
-kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only)
+kvm_calc_shadow_root_page_role_common(struct kvm_vcpu *vcpu, bool base_only)
 {
 	union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, base_only);
 
@@ -4916,9 +4916,19 @@ kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only)
 		!is_write_protection(vcpu);
 	role.base.smap_andnot_wp = role.ext.cr4_smap &&
 		!is_write_protection(vcpu);
-	role.base.direct = !is_paging(vcpu);
 	role.base.gpte_is_8_bytes = !!is_pae(vcpu);
 
+	return role;
+}
+
+static union kvm_mmu_role
+kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only)
+{
+	union kvm_mmu_role role =
+		kvm_calc_shadow_root_page_role_common(vcpu, base_only);
+
+	role.base.direct = !is_paging(vcpu);
+
 	if (!is_long_mode(vcpu))
 		role.base.level = PT32E_ROOT_LEVEL;
 	else if (is_la57_mode(vcpu))
@@ -4956,14 +4966,24 @@ static void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, u32 cr0, u32 cr4, u32 efe
 	shadow_mmu_init_context(vcpu, context, cr0, cr4, efer, new_role);
 }
 
+static union kvm_mmu_role
+kvm_calc_shadow_npt_root_page_role(struct kvm_vcpu *vcpu)
+{
+	union kvm_mmu_role role =
+		kvm_calc_shadow_root_page_role_common(vcpu, false);
+
+	role.base.direct = false;
+	role.base.level = vcpu->arch.tdp_level;
+
+	return role;
+}
+
 void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, u32 cr0, u32 cr4, u32 efer,
 			     gpa_t nested_cr3)
 {
 	struct kvm_mmu *context = &vcpu->arch.guest_mmu;
-	union kvm_mmu_role new_role =
-		kvm_calc_shadow_mmu_root_page_role(vcpu, false);
+	union kvm_mmu_role new_role = kvm_calc_shadow_npt_root_page_role(vcpu);
 
-	new_role.base.level = vcpu->arch.tdp_level;
 	context->shadow_root_level = new_role.base.level;
 
 	__kvm_mmu_new_pgd(vcpu, nested_cr3, new_role.base, false, false);
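
[Editorial note: the diff also explains why the level belongs inside the
role helper rather than being patched in afterwards, as the deleted
"new_role.base.level = vcpu->arch.tdp_level" line did. Broadly, KVM
decides whether an existing shadow root can be reused by comparing
packed role values, so every distinguishing bit should be set before the
role is consumed. The sketch below models that union-with-word pattern;
the field names echo kvm_mmu_page_role, but the widths and layout are
invented for illustration.]

/*
 * Toy model of the packed-role comparison pattern. Not kernel code:
 * all properties that distinguish a root are overlaid on one integer,
 * so "same role?" is a single word compare.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

union toy_role {
	uint32_t word;
	struct {
		uint32_t level : 4;           /* root page-table depth */
		uint32_t direct : 1;          /* no guest page tables? */
		uint32_t gpte_is_8_bytes : 1; /* 64-bit guest PTEs? */
	};
};

/* A cached root is only reusable if every role bit matches. */
static bool root_reusable(union toy_role cached, union toy_role wanted)
{
	return cached.word == wanted.word;
}

int main(void)
{
	union toy_role cached = { .word = 0 };
	union toy_role wanted = { .word = 0 };

	cached.level = 4;
	wanted.level = 4;
	printf("identical roles -> reusable: %d\n",
	       root_reusable(cached, wanted));

	/*
	 * If the level were left out of the role calculation, two roots
	 * of different depths would wrongly compare as interchangeable.
	 */
	wanted.level = 5;
	printf("different level -> reusable: %d\n",
	       root_reusable(cached, wanted));
	return 0;
}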