author     Sean Christopherson <seanjc@google.com>  2023-08-15 13:36:48 -0700
committer  Sean Christopherson <seanjc@google.com>  2023-08-17 11:43:29 -0700
commit     4d2a1560ffc29525493829ee31dc069c00c52c69 (patch)
tree       503e449a49e5ab21490a415c4cdb2f7d7b09b96c /arch
parent     4365a45571c791a2fbeb81cf27738960c5456f57 (diff)
KVM: nSVM: Use KVM-governed feature framework to track "vVM{SAVE,LOAD} enabled"
Track "virtual VMSAVE/VMLOAD exposed to L1" via a governed feature flag
instead of using a dedicated bit/flag in vcpu_svm.
Opportunistically add a comment explaining why KVM disallows virtual
VMLOAD/VMSAVE when the vCPU model is Intel.
No functional change intended.
Link: https://lore.kernel.org/r/20230815203653.519297-11-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
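
For readers unfamiliar with the governed feature framework, the sketch below illustrates the idea behind the two helpers this patch relies on, guest_can_use() and kvm_governed_feature_check_and_set(): a per-vCPU bitmap records which optional features the guest is actually allowed to use, set once when CPUID is updated and queried cheaply on hot paths such as nested VMRUN. The types, bit index, and helper bodies here are simplified stand-ins for illustration only; the real definitions live in arch/x86/kvm/cpuid.h and governed_features.h.

/* Simplified stand-ins; not the kernel's definitions. */
#include <stdbool.h>

#define DEMO_FEATURE_V_VMSAVE_VMLOAD	0	/* hypothetical bit index */

struct demo_vcpu {
	unsigned long governed_features;	/* one bit per governed feature */
};

/*
 * Record that the guest may use a feature, but only if both the host
 * (kvm_cpu_cap_has() in the real code) and the guest's CPUID advertise it.
 */
static void demo_governed_feature_check_and_set(struct demo_vcpu *vcpu,
						unsigned int feature,
						bool host_has, bool guest_has)
{
	if (host_has && guest_has)
		vcpu->governed_features |= 1UL << feature;
}

/*
 * Cheap query used on hot paths, e.g. when deciding whether L2 needs
 * VMLOAD/VMSAVE intercepts.
 */
static bool demo_guest_can_use(struct demo_vcpu *vcpu, unsigned int feature)
{
	return vcpu->governed_features & (1UL << feature);
}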
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/kvm/governed_features.h |  1 +
-rw-r--r--  arch/x86/kvm/svm/nested.c        |  2 +-
-rw-r--r--  arch/x86/kvm/svm/svm.c           | 10 +++++++---
-rw-r--r--  arch/x86/kvm/svm/svm.h           |  1 -
4 files changed, 9 insertions, 5 deletions
diff --git a/arch/x86/kvm/governed_features.h b/arch/x86/kvm/governed_features.h
index 32c0469cf952..f01a95fd0071 100644
--- a/arch/x86/kvm/governed_features.h
+++ b/arch/x86/kvm/governed_features.h
@@ -10,6 +10,7 @@ KVM_GOVERNED_X86_FEATURE(XSAVES)
 KVM_GOVERNED_X86_FEATURE(VMX)
 KVM_GOVERNED_X86_FEATURE(NRIPS)
 KVM_GOVERNED_X86_FEATURE(TSCRATEMSR)
+KVM_GOVERNED_X86_FEATURE(V_VMSAVE_VMLOAD)
 
 #undef KVM_GOVERNED_X86_FEATURE
 #undef KVM_GOVERNED_FEATURE
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index da65948064dc..24d47ebeb0e0 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -107,7 +107,7 @@ static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
 
 static bool nested_vmcb_needs_vls_intercept(struct vcpu_svm *svm)
 {
-	if (!svm->v_vmload_vmsave_enabled)
+	if (!guest_can_use(&svm->vcpu, X86_FEATURE_V_VMSAVE_VMLOAD))
 		return true;
 
 	if (!nested_npt_enabled(svm))
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 0d5cca0bb5fb..78d53ea513f4 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -1194,8 +1194,6 @@ static inline void init_vmcb_after_set_cpuid(struct kvm_vcpu *vcpu)
 
 		set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_EIP, 0, 0);
 		set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_ESP, 0, 0);
-
-		svm->v_vmload_vmsave_enabled = false;
 	} else {
 		/*
 		 * If hardware supports Virtual VMLOAD VMSAVE then enable it
@@ -4266,7 +4264,13 @@ static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
 
 	svm->lbrv_enabled = lbrv && guest_cpuid_has(vcpu, X86_FEATURE_LBRV);
 
-	svm->v_vmload_vmsave_enabled = vls && guest_cpuid_has(vcpu, X86_FEATURE_V_VMSAVE_VMLOAD);
+	/*
+	 * Intercept VMLOAD if the vCPU mode is Intel in order to emulate that
+	 * VMLOAD drops bits 63:32 of SYSENTER (ignoring the fact that exposing
+	 * SVM on Intel is bonkers and extremely unlikely to work).
+	 */
+	if (!guest_cpuid_is_intel(vcpu))
+		kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_V_VMSAVE_VMLOAD);
 
 	svm->pause_filter_enabled = kvm_cpu_cap_has(X86_FEATURE_PAUSEFILTER) &&
 				    guest_cpuid_has(vcpu, X86_FEATURE_PAUSEFILTER);
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 4e7332f77702..b475241df6dc 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -259,7 +259,6 @@ struct vcpu_svm {
 	bool soft_int_injected;
 
 	/* optional nested SVM features that are enabled for this guest */
-	bool v_vmload_vmsave_enabled : 1;
 	bool lbrv_enabled : 1;
 	bool pause_filter_enabled : 1;
 	bool pause_threshold_enabled : 1;
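
A side note on the one-line governed_features.h hunk above: that header is an x-macro list, so a new governed feature needs only this single entry; the enum index and lookup plumbing are generated when cpuid.h re-includes the header with its own definition of the macro. The snippet below is a self-contained approximation of that pattern (using a list macro instead of header re-inclusion, demo names rather than the kernel's, and listing only the entries visible in the hunk), just to show why no other bookkeeping is required.

/* Stand-alone approximation of the x-macro pattern; demo names only. */
#define DEMO_GOVERNED_FEATURES(F)	\
	F(XSAVES)			\
	F(VMX)				\
	F(NRIPS)			\
	F(TSCRATEMSR)			\
	F(V_VMSAVE_VMLOAD)		/* the entry added by this patch */

enum demo_governed_feature {
#define DEMO_ENUM_ENTRY(x) DEMO_GOVERNED_##x,
	DEMO_GOVERNED_FEATURES(DEMO_ENUM_ENTRY)
#undef DEMO_ENUM_ENTRY
	DEMO_NR_GOVERNED_FEATURES	/* count of the demo entries above */
};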