author    Atish Patra <atishp@rivosinc.com>  2024-08-16 00:08:08 -0700
committer Anup Patel <anup@brainfault.org>   2024-08-19 08:58:19 +0530
commit    7d1ffc8b087e97dbe1985912c7a2d00e53cea169 (patch)
tree      3c159d26fe929dd223687b45dbfac2613f334380
parent    47d40d93292d9cff8dabb735bed83d930fa03950 (diff)
RISC-V: KVM: Allow legacy PMU access from guest
Currently, KVM traps and emulates PMU counter access only if SBI PMU is available, because the guest can configure and read PMU counters only via SBI. However, if SBI PMU is not enabled in the host, the guest falls back to the legacy PMU driver, which tries to access the cycle/instret CSRs directly and takes an illegal instruction trap, which is undesirable. KVM can instead provide dummy emulation of cycle/instret for the guest when SBI PMU is not enabled in the host. The dummy emulation still returns zero, as we don't want to expose host counter values to a guest using the legacy PMU.

Fixes: a9ac6c37521f ("RISC-V: KVM: Implement trap & emulate for hpmcounters")
Signed-off-by: Atish Patra <atishp@rivosinc.com>
Link: https://lore.kernel.org/r/20240816-kvm_pmu_fixes-v1-1-cdfce386dd93@rivosinc.com
Signed-off-by: Anup Patel <anup@brainfault.org>
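[Editor's note] For context, a minimal sketch of the guest-side behavior this patch addresses; the helper names below are illustrative, not from the kernel tree. A legacy PMU driver samples counters by reading the cycle/instret CSRs directly. Before this patch, such reads from a guest on a host without SBI PMU raised an illegal instruction trap; with the dummy emulation added here, they return zero.

/*
 * Hypothetical guest-side helpers illustrating the legacy PMU fallback.
 * rdcycle/rdinstret read CSR_CYCLE (0xC00) and CSR_INSTRET (0xC02).
 */
static inline unsigned long legacy_read_cycle(void)
{
	unsigned long val;

	/* Previously trapped as an illegal instruction in the guest;
	 * now emulated by KVM to return 0 when host SBI PMU is absent. */
	__asm__ volatile("rdcycle %0" : "=r" (val));
	return val;
}

static inline unsigned long legacy_read_instret(void)
{
	unsigned long val;

	__asm__ volatile("rdinstret %0" : "=r" (val));
	return val;
}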
-rw-r--r--  arch/riscv/include/asm/kvm_vcpu_pmu.h | 15
1 file changed, 14 insertions(+), 1 deletion(-)
diff --git a/arch/riscv/include/asm/kvm_vcpu_pmu.h b/arch/riscv/include/asm/kvm_vcpu_pmu.h
index fa0f535bbbf0..c309daa2d75a 100644
--- a/arch/riscv/include/asm/kvm_vcpu_pmu.h
+++ b/arch/riscv/include/asm/kvm_vcpu_pmu.h
@@ -10,6 +10,7 @@
#define __KVM_VCPU_RISCV_PMU_H
#include <linux/perf/riscv_pmu.h>
+#include <asm/kvm_vcpu_insn.h>
#include <asm/sbi.h>
#ifdef CONFIG_RISCV_PMU_SBI
@@ -104,8 +105,20 @@ void kvm_riscv_vcpu_pmu_reset(struct kvm_vcpu *vcpu);
struct kvm_pmu {
};
+static inline int kvm_riscv_vcpu_pmu_read_legacy(struct kvm_vcpu *vcpu, unsigned int csr_num,
+ unsigned long *val, unsigned long new_val,
+ unsigned long wr_mask)
+{
+ if (csr_num == CSR_CYCLE || csr_num == CSR_INSTRET) {
+ *val = 0;
+ return KVM_INSN_CONTINUE_NEXT_SEPC;
+ } else {
+ return KVM_INSN_ILLEGAL_TRAP;
+ }
+}
+
#define KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS \
-{.base = 0, .count = 0, .func = NULL },
+{.base = CSR_CYCLE, .count = 3, .func = kvm_riscv_vcpu_pmu_read_legacy },
static inline void kvm_riscv_vcpu_pmu_init(struct kvm_vcpu *vcpu) {}
static inline int kvm_riscv_vcpu_pmu_incr_fw(struct kvm_vcpu *vcpu, unsigned long fid)