author     Xiao Guangrong <guangrong.xiao@linux.intel.com>  2015-08-05 12:04:24 +0800
committer  Paolo Bonzini <pbonzini@redhat.com>              2015-08-05 12:47:24 +0200
commit     c258b62b264fdc469b6d3610a907708068145e3b (patch)
tree       57c870353bbb93da102e8e26deb7128357523280 /arch/x86/kvm
parent     81b8eebbc3511bfa21520314b07988a875a4d066 (diff)
KVM: MMU: introduce the framework to check zero bits on sptes
We have abstracted the data structures and functions which are used to
check reserved bits on guest page tables; now we extend the logic to
check zero bits on shadow page tables.

The zero bits on sptes include not only the bits reserved by the
hardware, but also bits that SPTEs will never use. For example, shadow
pages will never use GB pages unless the guest uses them too.

Signed-off-by: Xiao Guangrong <guangrong.xiao@linux.intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
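[Editor's note] The masks this framework consumes are built with rsvd_bits() from
arch/x86/kvm/mmu.h (its declaration is visible in the mmu.h hunk below), which sets
all bits from s to e inclusive. The following is a minimal, self-contained sketch of
that idea, not kernel code; check_zero_bits() is a hypothetical helper added here
only for illustration:

    /* Sketch: mirrors the semantics of rsvd_bits() from arch/x86/kvm/mmu.h. */
    #include <stdint.h>
    #include <stdio.h>

    /* Build a mask with bits s..e (inclusive) set, e.g. rsvd_bits(36, 51). */
    static inline uint64_t rsvd_bits(int s, int e)
    {
            return ((1ULL << (e - s + 1)) - 1) << s;
    }

    /* Hypothetical helper: an spte is invalid if any must-be-zero bit is set. */
    static int check_zero_bits(uint64_t spte, uint64_t zero_mask)
    {
            return (spte & zero_mask) != 0;
    }

    int main(void)
    {
            /* With MAXPHYADDR == 36, physical-address bits 36..51 must be zero. */
            uint64_t mask = rsvd_bits(36, 51);

            printf("%d\n", check_zero_bits(1ULL << 30, mask)); /* 0: valid   */
            printf("%d\n", check_zero_bits(1ULL << 40, mask)); /* 1: flagged */
            return 0;
    }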
Diffstat (limited to 'arch/x86/kvm')
-rw-r--r--  arch/x86/kvm/mmu.c  50
-rw-r--r--  arch/x86/kvm/mmu.h   3
-rw-r--r--  arch/x86/kvm/svm.c   1
3 files changed, 54 insertions(+), 0 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index a965f3f58586..2e3e6454d977 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3699,6 +3699,53 @@ static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu,
cpuid_maxphyaddr(vcpu), execonly);
}
+/*
+ * The page table on the host is the shadow page table for the page
+ * table in the guest or for the AMD nested guest; its MMU features
+ * completely follow the features of the guest.
+ */
+void
+reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
+{
+ __reset_rsvds_bits_mask(vcpu, &context->shadow_zero_check,
+ boot_cpu_data.x86_phys_bits,
+ context->shadow_root_level, context->nx,
+ guest_cpuid_has_gbpages(vcpu), is_pse(vcpu));
+}
+EXPORT_SYMBOL_GPL(reset_shadow_zero_bits_mask);
+
+/*
+ * This is the direct page table on the host; use as many MMU features
+ * as possible. However, KVM currently does not do execution-protection.
+ */
+static void
+reset_tdp_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
+ struct kvm_mmu *context)
+{
+ if (guest_cpuid_is_amd(vcpu))
+ __reset_rsvds_bits_mask(vcpu, &context->shadow_zero_check,
+ boot_cpu_data.x86_phys_bits,
+ context->shadow_root_level, false,
+ cpu_has_gbpages, true);
+ else
+ __reset_rsvds_bits_mask_ept(&context->shadow_zero_check,
+ boot_cpu_data.x86_phys_bits,
+ false);
+}
+
+/*
+ * As the comment in reset_shadow_zero_bits_mask(), except that
+ * this is the shadow page table for an Intel nested guest.
+ */
+static void
+reset_ept_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
+ struct kvm_mmu *context, bool execonly)
+{
+ __reset_rsvds_bits_mask_ept(&context->shadow_zero_check,
+ boot_cpu_data.x86_phys_bits, execonly);
+}
+
static void update_permission_bitmask(struct kvm_vcpu *vcpu,
struct kvm_mmu *mmu, bool ept)
{
@@ -3877,6 +3924,7 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
update_permission_bitmask(vcpu, context, false);
update_last_pte_bitmap(vcpu, context);
+ reset_tdp_shadow_zero_bits_mask(vcpu, context);
}
void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
@@ -3904,6 +3952,7 @@ void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
context->base_role.smap_andnot_wp
= smap && !is_write_protection(vcpu);
context->base_role.smm = is_smm(vcpu);
+ reset_shadow_zero_bits_mask(vcpu, context);
}
EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu);
@@ -3927,6 +3976,7 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly)
update_permission_bitmask(vcpu, context, true);
reset_rsvds_bits_mask_ept(vcpu, context, execonly);
+ reset_ept_shadow_zero_bits_mask(vcpu, context, execonly);
}
EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu);
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 398d21c0f6dd..2299d15c91dc 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -53,6 +53,9 @@ static inline u64 rsvd_bits(int s, int e)
int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4]);
void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask);
+void
+reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context);
+
/*
* Return values of handle_mmio_page_fault_common:
* RET_MMIO_PF_EMULATE: it is a real mmio page fault, emulate the instruction
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index d1a114d8d22b..74d825716f4f 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -2107,6 +2107,7 @@ static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
vcpu->arch.mmu.get_pdptr = nested_svm_get_tdp_pdptr;
vcpu->arch.mmu.inject_page_fault = nested_svm_inject_npf_exit;
vcpu->arch.mmu.shadow_root_level = get_npt_level();
+ reset_shadow_zero_bits_mask(vcpu, &vcpu->arch.mmu);
vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
}
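
[Editor's note] On how the precomputed shadow_zero_check is meant to be consumed:
each spte can be masked against the per-level zero-bits mask, with bit 7 (the
large-page bit) selecting the mask row, in the same way the existing reserved-bits
walker checks guest ptes. The sketch below is illustrative only; the struct is
simplified and the helper name is hypothetical (the actual consumer is introduced
by a follow-up patch in this series):

    /* Simplified stand-in for the kernel's rsvd_bits_validate bookkeeping. */
    #include <stdint.h>

    #define PT64_ROOT_LEVEL 4

    struct rsvd_bits_validate {
            /* Row 0: bit 7 of the spte clear; row 1: bit 7 (large page) set. */
            uint64_t rsvd_bits_mask[2][PT64_ROOT_LEVEL];
    };

    /* Returns nonzero if @spte sets any bit that must be zero at @level. */
    static int is_zero_bits_set(const struct rsvd_bits_validate *rsvd_check,
                                uint64_t spte, int level)
    {
            int large = (spte >> 7) & 1; /* large-page bit selects the row */

            return (spte & rsvd_check->rsvd_bits_mask[large][level - 1]) != 0;
    }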