author		Sean Christopherson <seanjc@google.com>	2023-11-09 18:28:50 -0800
committer	Sean Christopherson <seanjc@google.com>	2024-02-01 09:35:47 -0800
commit		b31880ca2f41dc2196e31d97e498b0fa884c2b2a (patch)
tree		f10b1c293f170a21e1ab3f4cc21a79f5f41bad66 /arch/x86/kvm/vmx
parent		be6b067dae1573cf4d53c8b08175d8872d82f030 (diff)
download	linux-stable-b31880ca2f41dc2196e31d97e498b0fa884c2b2a.tar.gz
		linux-stable-b31880ca2f41dc2196e31d97e498b0fa884c2b2a.tar.bz2
		linux-stable-b31880ca2f41dc2196e31d97e498b0fa884c2b2a.zip
KVM: x86/pmu: Move pmc_idx => pmc translation helper to common code
Add a common helper for *internal* PMC lookups, and delete the ops hook
and Intel's implementation.  Keep AMD's implementation, but rename it to
amd_pmu_get_pmc() to make it somewhat more obvious that it's suited for
both KVM-internal and guest-initiated lookups.

Because KVM tracks all counters in a single bitmap, getting a counter
when iterating over a bitmap, e.g. of all valid PMCs, requires a small
amount of math that, while simple, isn't super obvious and doesn't use
the same semantics as PMC lookups from RDPMC!  Although AMD doesn't
support fixed counters, the common PMU code still behaves as if there
were a split, the high half of which just happens to always be empty.

Opportunistically add a comment to explain both what is going on and
why KVM uses a single bitmap, e.g. the boilerplate for iterating over
separate bitmaps could be done via macros, so it's not (just) about
deduplicating code.

Link: https://lore.kernel.org/r/20231110022857.1273836-4-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
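For context, a minimal sketch of what the common translation described
above could look like, reconstructed from the semantics of the Intel
helper deleted below.  The struct kvm_pmu fields (gp_counters,
fixed_counters, nr_arch_gp_counters, nr_arch_fixed_counters) and
KVM_FIXED_PMC_BASE_IDX are real KVM names; the exact body of the common
kvm_pmc_idx_to_pmc() added by this series may differ:

/*
 * Illustrative sketch only, not the verbatim kernel helper.  GP counters
 * occupy pmc_idx [0, KVM_FIXED_PMC_BASE_IDX); fixed counters are mapped
 * into the same index space starting at KVM_FIXED_PMC_BASE_IDX, so one
 * bitmap can describe both halves (AMD's fixed half is simply empty).
 */
static inline struct kvm_pmc *kvm_pmc_idx_to_pmc(struct kvm_pmu *pmu,
						 int pmc_idx)
{
	if (pmc_idx < KVM_FIXED_PMC_BASE_IDX) {
		if (pmc_idx >= pmu->nr_arch_gp_counters)
			return NULL;
		return &pmu->gp_counters[pmc_idx];
	}

	pmc_idx -= KVM_FIXED_PMC_BASE_IDX;
	if (pmc_idx >= pmu->nr_arch_fixed_counters)
		return NULL;
	return &pmu->fixed_counters[pmc_idx];
}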
Diffstat (limited to 'arch/x86/kvm/vmx')
-rw-r--r--	arch/x86/kvm/vmx/pmu_intel.c	15
1 file changed, 1 insertion(+), 14 deletions(-)
diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index fe7a2ba51e1b..845a964f22a6 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -55,18 +55,6 @@ static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
 	}
 }
 
-static struct kvm_pmc *intel_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
-{
-	if (pmc_idx < KVM_FIXED_PMC_BASE_IDX) {
-		return get_gp_pmc(pmu, MSR_P6_EVNTSEL0 + pmc_idx,
-				  MSR_P6_EVNTSEL0);
-	} else {
-		u32 idx = pmc_idx - KVM_FIXED_PMC_BASE_IDX;
-
-		return get_fixed_pmc(pmu, idx + MSR_CORE_PERF_FIXED_CTR0);
-	}
-}
-
 static struct kvm_pmc *intel_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu,
 					      unsigned int idx, u64 *mask)
 {
@@ -718,7 +706,7 @@ void intel_pmu_cross_mapped_check(struct kvm_pmu *pmu)
 
 	for_each_set_bit(bit, (unsigned long *)&pmu->global_ctrl,
 			 X86_PMC_IDX_MAX) {
-		pmc = intel_pmc_idx_to_pmc(pmu, bit);
+		pmc = kvm_pmc_idx_to_pmc(pmu, bit);
 
 		if (!pmc || !pmc_speculative_in_use(pmc) ||
 		    !pmc_is_globally_enabled(pmc) || !pmc->perf_event)
@@ -735,7 +723,6 @@ void intel_pmu_cross_mapped_check(struct kvm_pmu *pmu)
 }
 
 struct kvm_pmu_ops intel_pmu_ops __initdata = {
-	.pmc_idx_to_pmc = intel_pmc_idx_to_pmc,
 	.rdpmc_ecx_to_pmc = intel_rdpmc_ecx_to_pmc,
 	.msr_idx_to_pmc = intel_msr_idx_to_pmc,
 	.is_valid_msr = intel_is_valid_msr,
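To illustrate the caller-side pattern the common helper enables, here is
a hedged sketch of iterating one bitmap that spans both counter types.
pmu->all_valid_pmc_idx is a real struct kvm_pmu field; the loop itself
is an illustrative example, not code from this patch:

	int bit;
	struct kvm_pmc *pmc;

	/*
	 * Illustrative example: with a single bitmap covering both GP
	 * and fixed counters, one loop visits every valid PMC, and
	 * kvm_pmc_idx_to_pmc() hides the GP-vs-fixed index math (GP
	 * counters sit in bits [0, KVM_FIXED_PMC_BASE_IDX), fixed
	 * counters at KVM_FIXED_PMC_BASE_IDX and up).
	 */
	for_each_set_bit(bit, pmu->all_valid_pmc_idx, X86_PMC_IDX_MAX) {
		pmc = kvm_pmc_idx_to_pmc(pmu, bit);
		if (!pmc)
			continue;
		/* ... operate on pmc ... */
	}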