author     Wei Wang <wei.w.wang@intel.com>        2024-05-07 21:31:03 +0800
committer  Paolo Bonzini <pbonzini@redhat.com>    2024-07-16 12:14:12 -0400
commit     5d766508fd15d7e8c90574ef270f0c163b750b45 (patch)
tree       362b5772908a9382edd59286e990c25287aefad3 /arch/x86
parent     896046474f8d2ea711f63576b3ff89f88e273aef (diff)
KVM: x86/pmu: Add kvm_pmu_call() to simplify static calls of kvm_pmu_ops
Similar to kvm_x86_call(), kvm_pmu_call() is added to streamline the use
of static calls to kvm_pmu_ops hooks, which improves code readability.
Suggested-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Wei Wang <wei.w.wang@intel.com>
Link: https://lore.kernel.org/r/20240507133103.15052-4-wei.w.wang@intel.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Diffstat (limited to 'arch/x86')
 arch/x86/include/asm/kvm_host.h |  1 +
 arch/x86/kvm/pmu.c              | 24 ++++++++++++------------
 2 files changed, 13 insertions(+), 12 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 2f56b1961e69..950a03e0181e 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1875,6 +1875,7 @@ extern bool __read_mostly enable_apicv;
 extern struct kvm_x86_ops kvm_x86_ops;
 
 #define kvm_x86_call(func) static_call(kvm_x86_##func)
+#define kvm_pmu_call(func) static_call(kvm_x86_pmu_##func)
 
 #define KVM_X86_OP(func) \
 	DECLARE_STATIC_CALL(kvm_x86_##func, *(((struct kvm_x86_ops *)0)->func));
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index feea936476fd..47a46283c866 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -542,7 +542,7 @@ int kvm_pmu_check_rdpmc_early(struct kvm_vcpu *vcpu, unsigned int idx)
 	if (!kvm_pmu_ops.check_rdpmc_early)
 		return 0;
 
-	return static_call(kvm_x86_pmu_check_rdpmc_early)(vcpu, idx);
+	return kvm_pmu_call(check_rdpmc_early)(vcpu, idx);
 }
 
 bool is_vmware_backdoor_pmc(u32 pmc_idx)
@@ -591,7 +591,7 @@ int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
 	if (is_vmware_backdoor_pmc(idx))
 		return kvm_pmu_rdpmc_vmware(vcpu, idx, data);
 
-	pmc = static_call(kvm_x86_pmu_rdpmc_ecx_to_pmc)(vcpu, idx, &mask);
+	pmc = kvm_pmu_call(rdpmc_ecx_to_pmc)(vcpu, idx, &mask);
 	if (!pmc)
 		return 1;
 
@@ -607,7 +607,7 @@ int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
 void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
 {
 	if (lapic_in_kernel(vcpu)) {
-		static_call(kvm_x86_pmu_deliver_pmi)(vcpu);
+		kvm_pmu_call(deliver_pmi)(vcpu);
 		kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTPC);
 	}
 }
@@ -622,14 +622,14 @@ bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
 	default:
 		break;
 	}
-	return static_call(kvm_x86_pmu_msr_idx_to_pmc)(vcpu, msr) ||
-	       static_call(kvm_x86_pmu_is_valid_msr)(vcpu, msr);
+	return kvm_pmu_call(msr_idx_to_pmc)(vcpu, msr) ||
+	       kvm_pmu_call(is_valid_msr)(vcpu, msr);
 }
 
 static void kvm_pmu_mark_pmc_in_use(struct kvm_vcpu *vcpu, u32 msr)
 {
 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
-	struct kvm_pmc *pmc = static_call(kvm_x86_pmu_msr_idx_to_pmc)(vcpu, msr);
+	struct kvm_pmc *pmc = kvm_pmu_call(msr_idx_to_pmc)(vcpu, msr);
 
 	if (pmc)
 		__set_bit(pmc->idx, pmu->pmc_in_use);
@@ -654,7 +654,7 @@ int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		msr_info->data = 0;
 		break;
 	default:
-		return static_call(kvm_x86_pmu_get_msr)(vcpu, msr_info);
+		return kvm_pmu_call(get_msr)(vcpu, msr_info);
 	}
 
 	return 0;
@@ -713,7 +713,7 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		break;
 	default:
 		kvm_pmu_mark_pmc_in_use(vcpu, msr_info->index);
-		return static_call(kvm_x86_pmu_set_msr)(vcpu, msr_info);
+		return kvm_pmu_call(set_msr)(vcpu, msr_info);
 	}
 
 	return 0;
@@ -740,7 +740,7 @@ static void kvm_pmu_reset(struct kvm_vcpu *vcpu)
 
 	pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status = 0;
 
-	static_call(kvm_x86_pmu_reset)(vcpu);
+	kvm_pmu_call(reset)(vcpu);
 }
 
@@ -778,7 +778,7 @@ void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
 	if (!vcpu->kvm->arch.enable_pmu)
 		return;
 
-	static_call(kvm_x86_pmu_refresh)(vcpu);
+	kvm_pmu_call(refresh)(vcpu);
 
 	/*
 	 * At RESET, both Intel and AMD CPUs set all enable bits for general
@@ -796,7 +796,7 @@ void kvm_pmu_init(struct kvm_vcpu *vcpu)
 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
 
 	memset(pmu, 0, sizeof(*pmu));
-	static_call(kvm_x86_pmu_init)(vcpu);
+	kvm_pmu_call(init)(vcpu);
 	kvm_pmu_refresh(vcpu);
 }
 
@@ -818,7 +818,7 @@ void kvm_pmu_cleanup(struct kvm_vcpu *vcpu)
 			pmc_stop_counter(pmc);
 	}
 
-	static_call(kvm_x86_pmu_cleanup)(vcpu);
+	kvm_pmu_call(cleanup)(vcpu);
 
 	bitmap_zero(pmu->pmc_in_use, X86_PMC_IDX_MAX);
 }
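
For readers new to the pattern this diff relies on, below is a minimal userspace C sketch of what a wrapper macro like kvm_pmu_call() buys: a vendor ops table dispatched through one macro, so every call site names only the hook. This is an illustrative analogy, not the kernel's machinery; static_call() patches each call site into a direct call at runtime, while this stand-in uses a plain function-pointer table. The names struct sample_pmu_ops, pmu_call(), and the intel_* stubs are hypothetical, invented for the example.

	#include <stdio.h>

	/*
	 * Userspace analogy of kvm_pmu_ops dispatch (hypothetical names).
	 * The kernel's static_call() rewrites call sites into direct calls;
	 * here a plain function-pointer table stands in so only the macro
	 * shape is demonstrated.
	 */
	struct sample_pmu_ops {
		void (*reset)(int vcpu_id);
		int  (*is_valid_msr)(int vcpu_id, unsigned int msr);
	};

	static void intel_reset(int vcpu_id)
	{
		printf("intel: reset vcpu %d\n", vcpu_id);
	}

	static int intel_is_valid_msr(int vcpu_id, unsigned int msr)
	{
		return msr == 0x38f;	/* e.g. MSR_CORE_PERF_GLOBAL_CTRL */
	}

	/* Filled in once, at "module init" time, with the vendor hooks. */
	static struct sample_pmu_ops pmu_ops = {
		.reset		= intel_reset,
		.is_valid_msr	= intel_is_valid_msr,
	};

	/*
	 * Analogue of kvm_pmu_call(): the dispatch mechanism and the name
	 * mangling live in one macro, and callers name only the hook.
	 */
	#define pmu_call(func) pmu_ops.func

	int main(void)
	{
		pmu_call(reset)(0);
		printf("msr valid: %d\n", pmu_call(is_valid_msr)(0, 0x38f));
		return 0;
	}

The ergonomic win in the commit is the same as in the sketch: call sites write kvm_pmu_call(refresh)(vcpu) instead of spelling out static_call(kvm_x86_pmu_refresh)(vcpu), so the kvm_x86_pmu_ prefix is applied in exactly one place.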