author	Sean Christopherson <seanjc@google.com>	2024-06-07 17:08:19 -0700
committer	Sean Christopherson <seanjc@google.com>	2024-06-11 09:35:58 -0700
commit	3b65a692a5c78d597d23a2838e91d7bf8ef49291 (patch)
tree	d38d44a4b99b977aadd858a08997a03d3bb4d5bc
parent	75430c412a3139c29404459ab1216a07d1280428 (diff)
KVM: x86/pmu: Add a helper to enable bits in FIXED_CTR_CTRL
Add a helper, intel_pmu_enable_fixed_counter_bits(), to dedup code that
enables fixed counter bits, i.e. when KVM clears bits in the reserved
mask used to detect invalid MSR_CORE_PERF_FIXED_CTR_CTRL values.

No functional change intended.

Cc: Dapeng Mi <dapeng1.mi@linux.intel.com>
Reviewed-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
Link: https://lore.kernel.org/r/20240608000819.3296176-1-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
-rw-r--r--	arch/x86/kvm/vmx/pmu_intel.c	22
1 file changed, 12 insertions(+), 10 deletions(-)
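For readers skimming the diff below: KVM tracks which MSR_CORE_PERF_FIXED_CTR_CTRL bits a guest may set via an inverted "reserved" mask, where a set bit means reserved and *clearing* a bit enables it. The standalone sketch that follows illustrates that convention and the shape of the new helper; the macros mirror the kernel's definitions in arch/x86/include/asm/perf_event.h, while the pared-down struct kvm_pmu and main() are hypothetical scaffolding added here for illustration only.

	#include <stdint.h>
	#include <stdio.h>

	/* Mirrors arch/x86/include/asm/perf_event.h: each fixed counter owns a
	 * 4-bit control field in MSR_CORE_PERF_FIXED_CTR_CTRL. */
	#define INTEL_FIXED_BITS_STRIDE		4
	#define INTEL_FIXED_0_KERNEL		(1ULL << 0)
	#define INTEL_FIXED_0_USER		(1ULL << 1)
	#define INTEL_FIXED_0_ENABLE_PMI	(1ULL << 3)

	#define intel_fixed_bits_by_idx(_idx, _bits) \
		((_bits) << ((_idx) * INTEL_FIXED_BITS_STRIDE))

	/* Hypothetical, pared-down stand-in for KVM's struct kvm_pmu. */
	struct kvm_pmu {
		int nr_arch_fixed_counters;
		uint64_t fixed_ctr_ctrl_rsvd;	/* 1 = reserved, 0 = guest-writable */
	};

	/* Same shape as the new helper: clearing a reserved bit enables it
	 * in every fixed counter's control field. */
	static void intel_pmu_enable_fixed_counter_bits(struct kvm_pmu *pmu,
							uint64_t bits)
	{
		int i;

		for (i = 0; i < pmu->nr_arch_fixed_counters; i++)
			pmu->fixed_ctr_ctrl_rsvd &= ~intel_fixed_bits_by_idx(i, bits);
	}

	int main(void)
	{
		struct kvm_pmu pmu = {
			.nr_arch_fixed_counters = 3,
			.fixed_ctr_ctrl_rsvd = ~0ULL,	/* everything reserved by default */
		};

		intel_pmu_enable_fixed_counter_bits(&pmu, INTEL_FIXED_0_KERNEL |
							  INTEL_FIXED_0_USER |
							  INTEL_FIXED_0_ENABLE_PMI);

		/* Bits 0, 1, and 3 of each counter's 4-bit field are now writable. */
		printf("fixed_ctr_ctrl_rsvd = 0x%016llx\n",
		       (unsigned long long)pmu.fixed_ctr_ctrl_rsvd);
		return 0;
	}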
diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index e01c87981927..fb5cbd6cbeff 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -448,6 +448,14 @@ static __always_inline u64 intel_get_fixed_pmc_eventsel(unsigned int index)
 	return eventsel;
 }
 
+static void intel_pmu_enable_fixed_counter_bits(struct kvm_pmu *pmu, u64 bits)
+{
+	int i;
+
+	for (i = 0; i < pmu->nr_arch_fixed_counters; i++)
+		pmu->fixed_ctr_ctrl_rsvd &= ~intel_fixed_bits_by_idx(i, bits);
+}
+
 static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
 {
 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
@@ -457,7 +465,6 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
 	union cpuid10_edx edx;
 	u64 perf_capabilities;
 	u64 counter_rsvd;
-	int i;
 
 	memset(&lbr_desc->records, 0, sizeof(lbr_desc->records));
@@ -501,12 +508,9 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
 			((u64)1 << edx.split.bit_width_fixed) - 1;
 	}
 
-	for (i = 0; i < pmu->nr_arch_fixed_counters; i++)
-		pmu->fixed_ctr_ctrl_rsvd &=
-			~intel_fixed_bits_by_idx(i,
-						 INTEL_FIXED_0_KERNEL |
-						 INTEL_FIXED_0_USER |
-						 INTEL_FIXED_0_ENABLE_PMI);
+	intel_pmu_enable_fixed_counter_bits(pmu, INTEL_FIXED_0_KERNEL |
+						 INTEL_FIXED_0_USER |
+						 INTEL_FIXED_0_ENABLE_PMI);
 
 	counter_rsvd = ~(((1ull << pmu->nr_arch_gp_counters) - 1) |
 			 (((1ull << pmu->nr_arch_fixed_counters) - 1) << KVM_FIXED_PMC_BASE_IDX));
@@ -551,10 +555,8 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
 	if (perf_capabilities & PERF_CAP_PEBS_BASELINE) {
 		pmu->pebs_enable_rsvd = counter_rsvd;
 		pmu->reserved_bits &= ~ICL_EVENTSEL_ADAPTIVE;
-		for (i = 0; i < pmu->nr_arch_fixed_counters; i++)
-			pmu->fixed_ctr_ctrl_rsvd &=
-				~intel_fixed_bits_by_idx(i, ICL_FIXED_0_ADAPTIVE);
 		pmu->pebs_data_cfg_rsvd = ~0xff00000full;
+		intel_pmu_enable_fixed_counter_bits(pmu, ICL_FIXED_0_ADAPTIVE);
 	} else {
 		pmu->pebs_enable_rsvd =
 			~((1ull << pmu->nr_arch_gp_counters) - 1);
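
The reserved mask built up in intel_pmu_refresh() is ultimately what rejects invalid guest writes to MSR_CORE_PERF_FIXED_CTR_CTRL. Below is a minimal sketch of that consumer, modeled on the shape of the check in intel_pmu_set_msr(); the hypothetical fixed_ctr_ctrl_write() and the fixed_ctr_ctrl field extend the illustrative struct from the earlier sketch, and the real handler additionally reprograms the fixed counters when the value changes.

	/*
	 * Hypothetical consumer of fixed_ctr_ctrl_rsvd, modeled on the check
	 * in intel_pmu_set_msr(); returns 0 on success, non-zero to signal
	 * an invalid MSR value (which KVM turns into a #GP for the guest).
	 */
	static int fixed_ctr_ctrl_write(struct kvm_pmu *pmu, uint64_t data)
	{
		/* Any set bit still marked reserved makes the value invalid. */
		if (data & pmu->fixed_ctr_ctrl_rsvd)
			return 1;

		pmu->fixed_ctr_ctrl = data;	/* real code also reprograms counters */
		return 0;
	}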