Diffstat (limited to 'arch/x86/kvm/cpuid.h')
-rw-r--r-- | arch/x86/kvm/cpuid.h | 157
1 file changed, 65 insertions, 92 deletions
diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
index 23dbb9eb277c..d2884162a46a 100644
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -2,7 +2,6 @@
 #ifndef ARCH_X86_KVM_CPUID_H
 #define ARCH_X86_KVM_CPUID_H
 
-#include "x86.h"
 #include "reverse_cpuid.h"
 #include <asm/cpu.h>
 #include <asm/processor.h>
@@ -11,8 +10,7 @@
 extern u32 kvm_cpu_caps[NR_KVM_CPU_CAPS] __read_mostly;
 void kvm_set_cpu_caps(void);
 
-void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu);
-void kvm_update_pv_runtime(struct kvm_vcpu *vcpu);
+void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu);
 struct kvm_cpuid_entry2 *kvm_find_cpuid_entry_index(struct kvm_vcpu *vcpu,
						     u32 function, u32 index);
 struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
@@ -32,6 +30,7 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
 bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx,
	       u32 *ecx, u32 *edx, bool exact_only);
 
+void __init kvm_init_xstate_sizes(void);
 u32 xstate_required_size(u64 xstate_bv, bool compacted);
 
 int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu);
@@ -67,59 +66,40 @@ static __always_inline void cpuid_entry_override(struct kvm_cpuid_entry2 *entry,
 	*reg = kvm_cpu_caps[leaf];
 }
 
-static __always_inline u32 *guest_cpuid_get_register(struct kvm_vcpu *vcpu,
-						     unsigned int x86_feature)
+static __always_inline bool guest_cpuid_has(struct kvm_vcpu *vcpu,
+					    unsigned int x86_feature)
 {
 	const struct cpuid_reg cpuid = x86_feature_cpuid(x86_feature);
 	struct kvm_cpuid_entry2 *entry;
+	u32 *reg;
+
+	/*
+	 * XSAVES is a special snowflake.  Due to lack of a dedicated intercept
+	 * on SVM, KVM must assume that XSAVES (and thus XRSTORS) is usable by
+	 * the guest if the host supports XSAVES and *XSAVE* is exposed to the
+	 * guest.  Because the guest can execute XSAVES and XRSTORS, i.e. can
+	 * indirectly consume XSS, KVM must ensure XSS is zeroed when running
+	 * the guest, i.e. must set XSAVES in vCPU capabilities.  But to reject
+	 * direct XSS reads and writes (to minimize the virtualization hole and
+	 * honor userspace's CPUID), KVM needs to check the raw guest CPUID,
+	 * not KVM's view of guest capabilities.
+	 *
+	 * For all other features, guest capabilities are accurate.  Expand
+	 * this allowlist with extreme vigilance.
+	 */
+	BUILD_BUG_ON(x86_feature != X86_FEATURE_XSAVES);
 
 	entry = kvm_find_cpuid_entry_index(vcpu, cpuid.function, cpuid.index);
 	if (!entry)
 		return NULL;
 
-	return __cpuid_entry_get_reg(entry, cpuid.reg);
-}
-
-static __always_inline bool guest_cpuid_has(struct kvm_vcpu *vcpu,
-					    unsigned int x86_feature)
-{
-	u32 *reg;
-
-	reg = guest_cpuid_get_register(vcpu, x86_feature);
+	reg = __cpuid_entry_get_reg(entry, cpuid.reg);
 	if (!reg)
 		return false;
 
 	return *reg & __feature_bit(x86_feature);
 }
 
-static __always_inline void guest_cpuid_clear(struct kvm_vcpu *vcpu,
-					      unsigned int x86_feature)
-{
-	u32 *reg;
-
-	reg = guest_cpuid_get_register(vcpu, x86_feature);
-	if (reg)
-		*reg &= ~__feature_bit(x86_feature);
-}
-
-static inline bool guest_cpuid_is_amd_or_hygon(struct kvm_vcpu *vcpu)
-{
-	struct kvm_cpuid_entry2 *best;
-
-	best = kvm_find_cpuid_entry(vcpu, 0);
-	return best &&
-	       (is_guest_vendor_amd(best->ebx, best->ecx, best->edx) ||
-		is_guest_vendor_hygon(best->ebx, best->ecx, best->edx));
-}
-
-static inline bool guest_cpuid_is_intel(struct kvm_vcpu *vcpu)
-{
-	struct kvm_cpuid_entry2 *best;
-
-	best = kvm_find_cpuid_entry(vcpu, 0);
-	return best && is_guest_vendor_intel(best->ebx, best->ecx, best->edx);
-}
-
 static inline bool guest_cpuid_is_amd_compatible(struct kvm_vcpu *vcpu)
 {
 	return vcpu->arch.is_amd_compatible;
@@ -168,21 +148,6 @@ static inline int guest_cpuid_stepping(struct kvm_vcpu *vcpu)
 	return x86_stepping(best->eax);
 }
 
-static inline bool guest_has_spec_ctrl_msr(struct kvm_vcpu *vcpu)
-{
-	return (guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) ||
-		guest_cpuid_has(vcpu, X86_FEATURE_AMD_STIBP) ||
-		guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS) ||
-		guest_cpuid_has(vcpu, X86_FEATURE_AMD_SSBD));
-}
-
-static inline bool guest_has_pred_cmd_msr(struct kvm_vcpu *vcpu)
-{
-	return (guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) ||
-		guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBPB) ||
-		guest_cpuid_has(vcpu, X86_FEATURE_SBPB));
-}
-
 static inline bool supports_cpuid_fault(struct kvm_vcpu *vcpu)
 {
 	return vcpu->arch.msr_platform_info & MSR_PLATFORM_INFO_CPUID_FAULT;
@@ -198,7 +163,6 @@ static __always_inline void kvm_cpu_cap_clear(unsigned int x86_feature)
 {
 	unsigned int x86_leaf = __feature_leaf(x86_feature);
 
-	reverse_cpuid_check(x86_leaf);
 	kvm_cpu_caps[x86_leaf] &= ~__feature_bit(x86_feature);
 }
 
@@ -206,7 +170,6 @@ static __always_inline void kvm_cpu_cap_set(unsigned int x86_feature)
 {
 	unsigned int x86_leaf = __feature_leaf(x86_feature);
 
-	reverse_cpuid_check(x86_leaf);
 	kvm_cpu_caps[x86_leaf] |= __feature_bit(x86_feature);
 }
 
@@ -214,7 +177,6 @@ static __always_inline u32 kvm_cpu_cap_get(unsigned int x86_feature)
 {
 	unsigned int x86_leaf = __feature_leaf(x86_feature);
 
-	reverse_cpuid_check(x86_leaf);
 	return kvm_cpu_caps[x86_leaf] & __feature_bit(x86_feature);
 }
 
@@ -238,58 +200,69 @@ static __always_inline bool guest_pv_has(struct kvm_vcpu *vcpu,
 	return vcpu->arch.pv_cpuid.features & (1u << kvm_feature);
 }
 
-enum kvm_governed_features {
-#define KVM_GOVERNED_FEATURE(x) KVM_GOVERNED_##x,
-#include "governed_features.h"
-	KVM_NR_GOVERNED_FEATURES
-};
-
-static __always_inline int kvm_governed_feature_index(unsigned int x86_feature)
+static __always_inline void guest_cpu_cap_set(struct kvm_vcpu *vcpu,
+					      unsigned int x86_feature)
 {
-	switch (x86_feature) {
-#define KVM_GOVERNED_FEATURE(x) case x: return KVM_GOVERNED_##x;
-#include "governed_features.h"
-	default:
-		return -1;
-	}
-}
+	unsigned int x86_leaf = __feature_leaf(x86_feature);
 
-static __always_inline bool kvm_is_governed_feature(unsigned int x86_feature)
-{
-	return kvm_governed_feature_index(x86_feature) >= 0;
+	vcpu->arch.cpu_caps[x86_leaf] |= __feature_bit(x86_feature);
 }
 
-static __always_inline void kvm_governed_feature_set(struct kvm_vcpu *vcpu,
-						     unsigned int x86_feature)
+static __always_inline void guest_cpu_cap_clear(struct kvm_vcpu *vcpu,
+						unsigned int x86_feature)
 {
-	BUILD_BUG_ON(!kvm_is_governed_feature(x86_feature));
+	unsigned int x86_leaf = __feature_leaf(x86_feature);
 
-	__set_bit(kvm_governed_feature_index(x86_feature),
-		  vcpu->arch.governed_features.enabled);
+	vcpu->arch.cpu_caps[x86_leaf] &= ~__feature_bit(x86_feature);
 }
 
-static __always_inline void kvm_governed_feature_check_and_set(struct kvm_vcpu *vcpu,
-							       unsigned int x86_feature)
+static __always_inline void guest_cpu_cap_change(struct kvm_vcpu *vcpu,
+						 unsigned int x86_feature,
+						 bool guest_has_cap)
 {
-	if (kvm_cpu_cap_has(x86_feature) && guest_cpuid_has(vcpu, x86_feature))
-		kvm_governed_feature_set(vcpu, x86_feature);
+	if (guest_has_cap)
+		guest_cpu_cap_set(vcpu, x86_feature);
+	else
+		guest_cpu_cap_clear(vcpu, x86_feature);
 }
 
-static __always_inline bool guest_can_use(struct kvm_vcpu *vcpu,
-					  unsigned int x86_feature)
+static __always_inline bool guest_cpu_cap_has(struct kvm_vcpu *vcpu,
					      unsigned int x86_feature)
 {
-	BUILD_BUG_ON(!kvm_is_governed_feature(x86_feature));
+	unsigned int x86_leaf = __feature_leaf(x86_feature);
 
-	return test_bit(kvm_governed_feature_index(x86_feature),
-			vcpu->arch.governed_features.enabled);
+	/*
+	 * Except for MWAIT, querying dynamic feature bits is disallowed, so
+	 * that KVM can defer runtime updates until the next CPUID emulation.
+	 */
+	BUILD_BUG_ON(x86_feature == X86_FEATURE_APIC ||
+		     x86_feature == X86_FEATURE_OSXSAVE ||
+		     x86_feature == X86_FEATURE_OSPKE);
+
+	return vcpu->arch.cpu_caps[x86_leaf] & __feature_bit(x86_feature);
 }
 
 static inline bool kvm_vcpu_is_legal_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 {
-	if (guest_can_use(vcpu, X86_FEATURE_LAM))
+	if (guest_cpu_cap_has(vcpu, X86_FEATURE_LAM))
 		cr3 &= ~(X86_CR3_LAM_U48 | X86_CR3_LAM_U57);
 
 	return kvm_vcpu_is_legal_gpa(vcpu, cr3);
 }
 
+static inline bool guest_has_spec_ctrl_msr(struct kvm_vcpu *vcpu)
+{
+	return (guest_cpu_cap_has(vcpu, X86_FEATURE_SPEC_CTRL) ||
+		guest_cpu_cap_has(vcpu, X86_FEATURE_AMD_STIBP) ||
+		guest_cpu_cap_has(vcpu, X86_FEATURE_AMD_IBRS) ||
+		guest_cpu_cap_has(vcpu, X86_FEATURE_AMD_SSBD));
+}
+
+static inline bool guest_has_pred_cmd_msr(struct kvm_vcpu *vcpu)
+{
+	return (guest_cpu_cap_has(vcpu, X86_FEATURE_SPEC_CTRL) ||
+		guest_cpu_cap_has(vcpu, X86_FEATURE_AMD_IBPB) ||
+		guest_cpu_cap_has(vcpu, X86_FEATURE_SBPB));
+}
+
 #endif
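
Taken together, the hunks above replace the governed-features bitmap (enum kvm_governed_features indexed via governed_features.h) with a per-vCPU capability array, vcpu->arch.cpu_caps, indexed the same way as kvm_cpu_caps: one u32 word per reverse-CPUID leaf, one bit per feature. That is what lets guest_has_spec_ctrl_msr() and guest_has_pred_cmd_msr() switch from raw CPUID lookups to guest_cpu_cap_has(). The standalone C sketch below models only that indexing technique; struct vcpu, NR_CPU_CAPS, and the FEATURE_* placements are invented stand-ins for illustration, not the kernel's actual reverse-CPUID definitions.

	#include <assert.h>
	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Stand-in encoding: a feature is a (leaf, bit) pair packed into one int. */
	#define NR_CPU_CAPS		2
	#define FEATURE(leaf, bit)	((leaf) * 32 + (bit))
	#define feature_leaf(f)		((f) / 32)
	#define feature_bit(f)		(1u << ((f) % 32))

	#define FEATURE_LAM		FEATURE(0, 26)	/* hypothetical placement */
	#define FEATURE_SPEC_CTRL	FEATURE(1, 26)	/* hypothetical placement */

	struct vcpu {
		uint32_t cpu_caps[NR_CPU_CAPS];	/* models vcpu->arch.cpu_caps */
	};

	static void guest_cpu_cap_set(struct vcpu *vcpu, unsigned int f)
	{
		vcpu->cpu_caps[feature_leaf(f)] |= feature_bit(f);
	}

	static void guest_cpu_cap_clear(struct vcpu *vcpu, unsigned int f)
	{
		vcpu->cpu_caps[feature_leaf(f)] &= ~feature_bit(f);
	}

	/* Mirrors guest_cpu_cap_change(): one call site handles both polarities. */
	static void guest_cpu_cap_change(struct vcpu *vcpu, unsigned int f, bool has)
	{
		if (has)
			guest_cpu_cap_set(vcpu, f);
		else
			guest_cpu_cap_clear(vcpu, f);
	}

	static bool guest_cpu_cap_has(struct vcpu *vcpu, unsigned int f)
	{
		return vcpu->cpu_caps[feature_leaf(f)] & feature_bit(f);
	}

	int main(void)
	{
		struct vcpu vcpu = {0};

		/* e.g. vendor code enabling a feature iff host and guest allow it */
		guest_cpu_cap_change(&vcpu, FEATURE_LAM, true);
		assert(guest_cpu_cap_has(&vcpu, FEATURE_LAM));

		guest_cpu_cap_clear(&vcpu, FEATURE_LAM);
		assert(!guest_cpu_cap_has(&vcpu, FEATURE_LAM));
		printf("cpu_caps model OK\n");
		return 0;
	}

Compared with the governed enum, which required registering each feature in governed_features.h before it could be tracked, any X86_FEATURE_* word/bit can now be set, cleared, or queried directly; the cost is the BUILD_BUG_ON allowlists visible in the diff, which keep the raw-CPUID special case (XSAVES) and the dynamic bits (APIC, OSXSAVE, OSPKE) from being queried through the wrong path.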