author	Quentin Perret <qperret@google.com>	2021-03-19 10:01:45 +0000
committer	Marc Zyngier <maz@kernel.org>	2021-03-19 12:02:19 +0000
commit	9589a38cdfeba0889590e6ef4627b439034d456c (patch)
tree	25174a67c8df3b592cee65891edd6502a042d691
parent	b83042f0f143a5e9e899924987b542b2ac766e53 (diff)
KVM: arm64: Disable PMU support in protected mode
The host currently writes directly in EL2 per-CPU data sections from the PMU code when running in nVHE. In preparation for unmapping the EL2 sections from the host stage 2, disable PMU support in protected mode as we currently do not have a use-case for it.

Acked-by: Will Deacon <will@kernel.org>
Signed-off-by: Quentin Perret <qperret@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20210319100146.1149909-38-qperret@google.com
-rw-r--r--	arch/arm64/kvm/perf.c	3
-rw-r--r--	arch/arm64/kvm/pmu.c	8
2 files changed, 6 insertions, 5 deletions
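For context on why gating the enable site is sufficient: kvm_arm_support_pmu_v3(), which the pmu.c hunks below test first, is simply a read of the kvm_arm_pmu_available static key that kvm_perf_init() would otherwise enable, so leaving the key disabled in protected mode makes every guarded helper a no-op. Below is a minimal sketch of the two helpers involved, assuming the definitions found around include/kvm/arm_pmu.h and arch/arm64/include/asm/virt.h at the time of this series; the exact bodies and file locations are approximations, not quoted from this patch.

#include <linux/jump_label.h>

/* Sketch only: key enabled by kvm_perf_init() when a usable PMU is present. */
DECLARE_STATIC_KEY_FALSE(kvm_arm_pmu_available);

static __always_inline bool kvm_arm_support_pmu_v3(void)
{
	return static_branch_likely(&kvm_arm_pmu_available);
}

/* Sketch only: key set when the kernel boots in nVHE with kvm-arm.mode=protected. */
DECLARE_STATIC_KEY_FALSE(kvm_protected_mode_initialized);

static __always_inline bool is_protected_kvm_enabled(void)
{
	return static_branch_unlikely(&kvm_protected_mode_initialized);
}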
diff --git a/arch/arm64/kvm/perf.c b/arch/arm64/kvm/perf.c
index 739164324afe..8f860ae56bb7 100644
--- a/arch/arm64/kvm/perf.c
+++ b/arch/arm64/kvm/perf.c
@@ -55,7 +55,8 @@ int kvm_perf_init(void)
 	 * hardware performance counters. This could ensure the presence of
 	 * a physical PMU and CONFIG_PERF_EVENT is selected.
 	 */
-	if (IS_ENABLED(CONFIG_ARM_PMU) && perf_num_counters() > 0)
+	if (IS_ENABLED(CONFIG_ARM_PMU) && perf_num_counters() > 0
+	    && !is_protected_kvm_enabled())
 		static_branch_enable(&kvm_arm_pmu_available);
 
 	return perf_register_guest_info_callbacks(&kvm_guest_cbs);
diff --git a/arch/arm64/kvm/pmu.c b/arch/arm64/kvm/pmu.c
index faf32a44ba04..03a6c1f4a09a 100644
--- a/arch/arm64/kvm/pmu.c
+++ b/arch/arm64/kvm/pmu.c
@@ -33,7 +33,7 @@ void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr)
 {
 	struct kvm_host_data *ctx = this_cpu_ptr_hyp_sym(kvm_host_data);
 
-	if (!ctx || !kvm_pmu_switch_needed(attr))
+	if (!kvm_arm_support_pmu_v3() || !ctx || !kvm_pmu_switch_needed(attr))
 		return;
 
 	if (!attr->exclude_host)
@@ -49,7 +49,7 @@ void kvm_clr_pmu_events(u32 clr)
 {
 	struct kvm_host_data *ctx = this_cpu_ptr_hyp_sym(kvm_host_data);
 
-	if (!ctx)
+	if (!kvm_arm_support_pmu_v3() || !ctx)
 		return;
 
 	ctx->pmu_events.events_host &= ~clr;
@@ -172,7 +172,7 @@ void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu)
 	struct kvm_host_data *host;
 	u32 events_guest, events_host;
 
-	if (!has_vhe())
+	if (!kvm_arm_support_pmu_v3() || !has_vhe())
 		return;
 
 	preempt_disable();
@@ -193,7 +193,7 @@ void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu)
 	struct kvm_host_data *host;
 	u32 events_guest, events_host;
 
-	if (!has_vhe())
+	if (!kvm_arm_support_pmu_v3() || !has_vhe())
 		return;
 
 	host = this_cpu_ptr_hyp_sym(kvm_host_data);
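With this gating in place, none of the host-side helpers above dereference the pointer returned by this_cpu_ptr_hyp_sym(kvm_host_data) in protected mode: the static key is never enabled, kvm_arm_support_pmu_v3() stays false, and each function returns before touching the EL2 per-CPU data that the rest of this series unmaps from the host stage 2.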