author     Jim Mattson <jmattson@google.com>        2020-07-07 15:36:30 -0700
committer  Paolo Bonzini <pbonzini@redhat.com>      2020-07-08 16:21:58 -0400
commit     d42e3fae6faedacb2a7b4c984417ed0d9f540401
tree       4570c63b593111376cbb3ea9e7e2b0cd48521c9f /arch/x86/kvm
parent     9a3c05e658d4d31b38ef03fe5c17bc2039402ff7
kvm: x86: Read PDPTEs on CR0.CD and CR0.NW changes
According to the SDM, when PAE paging would be in use following a
MOV-to-CR0 that modifies any of CR0.CD, CR0.NW, or CR0.PG, then the
PDPTEs are loaded from the address in CR3. Previously, KVM loaded the
PDPTEs only when PAE paging would be in use following a MOV-to-CR0 that
modified CR0.PG.
Signed-off-by: Jim Mattson <jmattson@google.com>
Reviewed-by: Oliver Upton <oupton@google.com>
Reviewed-by: Peter Shier <pshier@google.com>
Message-Id: <20200707223630.336700-1-jmattson@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Diffstat (limited to 'arch/x86/kvm')
-rw-r--r--   arch/x86/kvm/x86.c | 9 +++++----
1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index bd8690ca7b6b..1153ce7d118b 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -798,6 +798,7 @@ EXPORT_SYMBOL_GPL(pdptrs_changed);
 int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 {
         unsigned long old_cr0 = kvm_read_cr0(vcpu);
+        unsigned long pdptr_bits = X86_CR0_CD | X86_CR0_NW | X86_CR0_PG;
         unsigned long update_bits = X86_CR0_PG | X86_CR0_WP;
 
         cr0 |= X86_CR0_ET;
@@ -815,9 +816,9 @@ int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
         if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE))
                 return 1;
 
-        if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
+        if (cr0 & X86_CR0_PG) {
 #ifdef CONFIG_X86_64
-                if ((vcpu->arch.efer & EFER_LME)) {
+                if (!is_paging(vcpu) && (vcpu->arch.efer & EFER_LME)) {
                         int cs_db, cs_l;
 
                         if (!is_pae(vcpu))
@@ -827,8 +828,8 @@ int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
                                 return 1;
                 } else
 #endif
-                if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
-                                                 kvm_read_cr3(vcpu)))
+                if (is_pae(vcpu) && ((cr0 ^ old_cr0) & pdptr_bits) &&
+                    !load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu)))
                         return 1;
         }
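
For readers who want to experiment with the condition the patch introduces outside of KVM, the following is a minimal user-space sketch: with paging enabled after the write, the PDPTEs only need to be re-read from CR3 when CR0.CD, CR0.NW, or CR0.PG actually changes. The helper name pdptes_need_reload, the standalone bit definitions, and main() are hypothetical stand-ins for illustration, not kernel APIs; the real logic lives in kvm_set_cr0() as shown above.

/*
 * Minimal user-space sketch (not kernel code) of the PDPTE-reload check:
 * with PAE paging in use after a MOV-to-CR0, a change to CR0.CD, CR0.NW,
 * or CR0.PG should trigger a reload of the PDPTEs from CR3.
 */
#include <stdbool.h>
#include <stdio.h>

#define X86_CR0_PE (1UL << 0)
#define X86_CR0_NW (1UL << 29)
#define X86_CR0_CD (1UL << 30)
#define X86_CR0_PG (1UL << 31)

/* Returns true when the PDPTEs must be re-read from CR3 (PAE case). */
static bool pdptes_need_reload(unsigned long old_cr0, unsigned long new_cr0)
{
        const unsigned long pdptr_bits = X86_CR0_CD | X86_CR0_NW | X86_CR0_PG;

        /* Only relevant when paging is enabled after the write. */
        if (!(new_cr0 & X86_CR0_PG))
                return false;

        /* Reload only if one of CD, NW, or PG actually changed. */
        return (new_cr0 ^ old_cr0) & pdptr_bits;
}

int main(void)
{
        unsigned long cr0 = X86_CR0_PE | X86_CR0_PG;

        /* Toggling CR0.CD with paging on: PDPTEs should be reloaded. */
        printf("CD toggle: %d\n", pdptes_need_reload(cr0, cr0 | X86_CR0_CD));

        /* Writing the same value back: no PDPTE reload needed. */
        printf("no change: %d\n", pdptes_need_reload(cr0, cr0));
        return 0;
}

The sketch mirrors the (cr0 ^ old_cr0) & pdptr_bits idiom from the patch: the XOR isolates the bits that changed, and the mask restricts the test to the three CR0 bits the SDM calls out.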