path: root/arch/arm64
author    Marc Zyngier <maz@kernel.org>    2022-11-13 16:38:25 +0000
committer Marc Zyngier <maz@kernel.org>    2022-11-19 12:43:47 +0000
commit    26d2d0594d7016dbcbce4038aa202c2858d5a944 (patch)
tree      9905b537a3d3a093922754f773613c1ddc6bbc3f /arch/arm64
parent    9917264d74d9063341968a8e071266358496777b (diff)
KVM: arm64: PMU: Do not let AArch32 change the counters' top 32 bits
Even when using PMUv3p5 (which implies 64bit counters), there is no way
for AArch32 to write to the top 32 bits of the counters. The only way to
influence these bits (other than by counting events) is by writing
PMCR.P==1.

Make sure we obey the architecture and preserve the top 32 bits on a
counter update.

Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20221113163832.3154370-10-maz@kernel.org
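For illustration, the standalone sketch below (not part of this commit; the function name, variable names, and values are made up) models the behaviour the message describes: an AArch32 write can only replace the low 32 bits of a 64-bit counter, while the PMCR.P reset path (the "force" case) is allowed to clear the whole counter.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative only: preserve the top 32 bits of a 64-bit counter on an
 * AArch32 write, unless the update comes from the PMCR.P reset path
 * ("force"), which may clear the whole counter.
 */
static uint64_t counter_update(uint64_t old, uint64_t written,
			       bool aarch32, bool force)
{
	if (aarch32 && !force)
		return (old & 0xffffffff00000000ULL) | (uint32_t)written;

	return written;
}

int main(void)
{
	uint64_t counter = 0x00000001deadbeefULL;

	/* AArch32 MSR write: only the low 32 bits change. */
	counter = counter_update(counter, 0xcafe, true, false);
	printf("after AArch32 write: %016llx\n", (unsigned long long)counter);

	/* PMCR.P == 1: the counter is reset, top 32 bits included. */
	counter = counter_update(counter, 0, true, true);
	printf("after PMCR.P reset:  %016llx\n", (unsigned long long)counter);

	return 0;
}

The patch below applies the same idea inside kvm_pmu_set_counter(), where the 'force' parameter is only used by the PMCR.P handling in kvm_pmu_handle_pmcr().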
Diffstat (limited to 'arch/arm64')
-rw-r--r--  arch/arm64/kvm/pmu-emul.c | 35
1 file changed, 27 insertions(+), 8 deletions(-)
diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
index ea0c8411641f..7a945fa6dd03 100644
--- a/arch/arm64/kvm/pmu-emul.c
+++ b/arch/arm64/kvm/pmu-emul.c
@@ -119,13 +119,8 @@ u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
 	return counter;
 }
 
-/**
- * kvm_pmu_set_counter_value - set PMU counter value
- * @vcpu: The vcpu pointer
- * @select_idx: The counter index
- * @val: The counter value
- */
-void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
+static void kvm_pmu_set_counter(struct kvm_vcpu *vcpu, u64 select_idx, u64 val,
+				bool force)
 {
 	u64 reg;
 
@@ -135,6 +130,19 @@ void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
 	kvm_pmu_release_perf_event(&vcpu->arch.pmu.pmc[select_idx]);
 
 	reg = counter_index_to_reg(select_idx);
+
+	if (vcpu_mode_is_32bit(vcpu) && select_idx != ARMV8_PMU_CYCLE_IDX &&
+	    !force) {
+		/*
+		 * Even with PMUv3p5, AArch32 cannot write to the top
+		 * 32bit of the counters. The only possible course of
+		 * action is to use PMCR.P, which will reset them to
+		 * 0 (the only use of the 'force' parameter).
+		 */
+		val  = __vcpu_sys_reg(vcpu, reg) & GENMASK(63, 32);
+		val |= lower_32_bits(val);
+	}
+
 	__vcpu_sys_reg(vcpu, reg) = val;
 
 	/* Recreate the perf event to reflect the updated sample_period */
@@ -142,6 +150,17 @@ void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
 }
 
 /**
+ * kvm_pmu_set_counter_value - set PMU counter value
+ * @vcpu: The vcpu pointer
+ * @select_idx: The counter index
+ * @val: The counter value
+ */
+void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
+{
+	kvm_pmu_set_counter(vcpu, select_idx, val, false);
+}
+
+/**
  * kvm_pmu_release_perf_event - remove the perf event
  * @pmc: The PMU counter pointer
  */
@@ -533,7 +552,7 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
 		unsigned long mask = kvm_pmu_valid_counter_mask(vcpu);
 
 		mask &= ~BIT(ARMV8_PMU_CYCLE_IDX);
 		for_each_set_bit(i, &mask, 32)
-			kvm_pmu_set_counter_value(vcpu, i, 0);
+			kvm_pmu_set_counter(vcpu, i, 0, true);
 	}
 }