author     Sean Christopherson <seanjc@google.com>  2023-07-28 18:15:50 -0700
committer  Sean Christopherson <seanjc@google.com>  2023-08-03 17:16:28 -0700
commit     c0dc39bd2c584879748dfd1321f8541d1cbeb9b8 (patch)
tree       32aa9d714bb09aca4fb61f595409a7a809aff077 /arch
parent     0c94e2468491cbf0754f49a5136ab51294a96b69 (diff)
KVM: nSVM: Use the "outer" helper for writing multiplier to MSR_AMD64_TSC_RATIO
When emulating nested SVM transitions, use the outer helper for writing the TSC
multiplier for L2. Using the inner helper only for one-off cases, i.e. for paths
where KVM is NOT emulating or modifying vCPU state, will allow for multiple
cleanups:

 - Explicitly disabling preemption only in the outer helper
 - Getting the multiplier from the vCPU field in the outer helper
 - Skipping the WRMSR in the outer helper if guest state isn't loaded

Opportunistically delete an extra newline.

No functional change intended.

Link: https://lore.kernel.org/r/20230729011608.1065019-4-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
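For reference, a minimal sketch of the outer/inner split this patch establishes: nested code (nested_svm_vmexit(), nested_svm_update_tsc_ratio_msr()) now calls the vCPU-aware outer helper, while the inner helper becomes a static, one-off path in svm.c that touches MSR_AMD64_TSC_RATIO directly. The body of the inner helper beyond preempt_disable() is assumed here for illustration, not copied from the tree.

/* Inner helper: writes the hardware MSR with preemption disabled (body assumed). */
static void __svm_write_tsc_multiplier(u64 multiplier)
{
	preempt_disable();
	wrmsrl(MSR_AMD64_TSC_RATIO, multiplier);
	preempt_enable();
}

/* Outer helper: the entry point nested SVM code uses after this patch. */
void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 multiplier)
{
	__svm_write_tsc_multiplier(multiplier);
}

The follow-up cleanups listed above would then move the preemption handling and the vcpu->arch.tsc_scaling_ratio lookup into the outer helper.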
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/kvm/svm/nested.c  4
-rw-r--r--  arch/x86/kvm/svm/svm.c     5
-rw-r--r--  arch/x86/kvm/svm/svm.h     2
3 files changed, 5 insertions(+), 6 deletions(-)
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index c66c823ae222..5d5a1d7832fb 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -1103,7 +1103,7 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
if (kvm_caps.has_tsc_control &&
vcpu->arch.tsc_scaling_ratio != vcpu->arch.l1_tsc_scaling_ratio) {
vcpu->arch.tsc_scaling_ratio = vcpu->arch.l1_tsc_scaling_ratio;
- __svm_write_tsc_multiplier(vcpu->arch.tsc_scaling_ratio);
+ svm_write_tsc_multiplier(vcpu, vcpu->arch.tsc_scaling_ratio);
}
svm->nested.ctl.nested_cr3 = 0;
@@ -1536,7 +1536,7 @@ void nested_svm_update_tsc_ratio_msr(struct kvm_vcpu *vcpu)
vcpu->arch.tsc_scaling_ratio =
kvm_calc_nested_tsc_multiplier(vcpu->arch.l1_tsc_scaling_ratio,
svm->tsc_ratio_msr);
- __svm_write_tsc_multiplier(vcpu->arch.tsc_scaling_ratio);
+ svm_write_tsc_multiplier(vcpu, vcpu->arch.tsc_scaling_ratio);
}
/* Inverse operation of nested_copy_vmcb_control_to_cache(). asid is copied too. */
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 4785d780cce3..7217f7532df9 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -567,7 +567,7 @@ static int svm_check_processor_compat(void)
return 0;
}
-void __svm_write_tsc_multiplier(u64 multiplier)
+static void __svm_write_tsc_multiplier(u64 multiplier)
{
preempt_disable();
@@ -1150,12 +1150,11 @@ static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
}
-static void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 multiplier)
+void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 multiplier)
{
__svm_write_tsc_multiplier(multiplier);
}
-
/* Evaluate instruction intercepts that depend on guest CPUID features. */
static void svm_recalc_instruction_intercepts(struct kvm_vcpu *vcpu,
struct vcpu_svm *svm)
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 18af7e712a5a..7132c0a04817 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -658,7 +658,7 @@ int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
bool has_error_code, u32 error_code);
int nested_svm_exit_special(struct vcpu_svm *svm);
void nested_svm_update_tsc_ratio_msr(struct kvm_vcpu *vcpu);
-void __svm_write_tsc_multiplier(u64 multiplier);
+void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 multiplier);
void nested_copy_vmcb_control_to_cache(struct vcpu_svm *svm,
struct vmcb_control_area *control);
void nested_copy_vmcb_save_to_cache(struct vcpu_svm *svm,