path: root/arch/riscv/include/asm/kvm_vcpu_pmu.h
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2023 Rivos Inc
 *
 * Authors:
 *     Atish Patra <atishp@rivosinc.com>
 */

#ifndef __KVM_VCPU_RISCV_PMU_H
#define __KVM_VCPU_RISCV_PMU_H

#include <linux/perf/riscv_pmu.h>
#include <asm/sbi.h>

#ifdef CONFIG_RISCV_PMU_SBI
#define RISCV_KVM_MAX_FW_CTRS	32
#define RISCV_KVM_MAX_HW_CTRS	32
#define RISCV_KVM_MAX_COUNTERS	(RISCV_KVM_MAX_HW_CTRS + RISCV_KVM_MAX_FW_CTRS)
static_assert(RISCV_KVM_MAX_COUNTERS <= 64);

struct kvm_fw_event {
	/* Current value of the event */
	unsigned long value;

	/* Event monitoring status */
	bool started;
};
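
/*
 * Firmware events are bumped from KVM's SBI call paths via
 * kvm_riscv_vcpu_pmu_incr_fw() (declared below). A minimal caller sketch,
 * assuming the SBI_PMU_FW_SET_TIMER event ID from <asm/sbi.h>:
 *
 *	// in the path that services a guest SBI set_timer request
 *	kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_SET_TIMER);
 *
 * The increment is expected to take effect only while the guest has the
 * corresponding firmware counter started (the 'started' flag above).
 */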

/* Per virtual PMU counter data */
struct kvm_pmc {
	u8 idx;
	struct perf_event *perf_event;
	u64 counter_val;
	union sbi_pmu_ctr_info cinfo;
	/* Event monitoring status */
	bool started;
	/* Monitoring event ID */
	unsigned long event_idx;
};
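
/*
 * Illustrative sketch (not part of this header): when a counter is backed
 * by a perf_event, a guest-visible read is typically the saved counter_val
 * plus the delta accumulated by perf since the counter was started, e.g.:
 *
 *	u64 enabled, running;
 *
 *	if (pmc->perf_event)
 *		pmc->counter_val += perf_event_read_value(pmc->perf_event,
 *							  &enabled, &running);
 *	return pmc->counter_val;
 *
 * The actual accounting lives in arch/riscv/kvm/vcpu_pmu.c.
 */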

/* PMU data structure per vcpu */
struct kvm_pmu {
	struct kvm_pmc pmc[RISCV_KVM_MAX_COUNTERS];
	struct kvm_fw_event fw_event[RISCV_KVM_MAX_FW_CTRS];
	/* Number of virtual firmware counters available */
	int num_fw_ctrs;
	/* Number of virtual hardware counters available */
	int num_hw_ctrs;
	/* A flag to indicate that PMU initialization is done */
	bool init_done;
	/* Bitmap of all the virtual counters in use */
	DECLARE_BITMAP(pmc_in_use, RISCV_KVM_MAX_COUNTERS);
};
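
/*
 * Layout note (an assumption about how the structure is populated elsewhere
 * in KVM, not a guarantee made by this header): hardware counters are
 * expected to occupy the low indices [0, num_hw_ctrs) of pmc[], with
 * firmware counters following at [num_hw_ctrs, num_hw_ctrs + num_fw_ctrs).
 * The static_assert above bounds the total counter index space to 64, so
 * every valid index fits in the pmc_in_use bitmap.
 */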

#define vcpu_to_pmu(vcpu) (&(vcpu)->arch.pmu_context)
#define pmu_to_vcpu(pmu)  (container_of((pmu), struct kvm_vcpu, arch.pmu_context))
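
/*
 * A usage sketch of the converters above (assumes a struct kvm_vcpu with an
 * arch.pmu_context member, as the macros imply):
 *
 *	struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu);
 *	struct kvm_vcpu *owner = pmu_to_vcpu(kvpmu);	// owner == vcpu
 *
 * Both expand to plain pointer arithmetic (address-of / container_of), so
 * they are usable in any context.
 */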

#if defined(CONFIG_32BIT)
#define KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS \
{.base = CSR_CYCLEH,	.count = 31,	.func = kvm_riscv_vcpu_pmu_read_hpm }, \
{.base = CSR_CYCLE,	.count = 31,	.func = kvm_riscv_vcpu_pmu_read_hpm },
#else
#define KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS \
{.base = CSR_CYCLE,	.count = 31,	.func = kvm_riscv_vcpu_pmu_read_hpm },
#endif
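
/*
 * The entries above are meant to be spliced into KVM's CSR emulation table
 * so that guest reads of the cycle/time/instret/hpmcounter CSRs are routed
 * to kvm_riscv_vcpu_pmu_read_hpm(). A sketch of such a consumer (the struct
 * name is illustrative; the real table lives in KVM's CSR emulation code):
 *
 *	static const struct csr_func csr_funcs[] = {
 *		KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS
 *		...
 *	};
 */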

int kvm_riscv_vcpu_pmu_incr_fw(struct kvm_vcpu *vcpu, unsigned long fid);
int kvm_riscv_vcpu_pmu_read_hpm(struct kvm_vcpu *vcpu, unsigned int csr_num,
				unsigned long *val, unsigned long new_val,
				unsigned long wr_mask);

int kvm_riscv_vcpu_pmu_num_ctrs(struct kvm_vcpu *vcpu, struct kvm_vcpu_sbi_return *retdata);
int kvm_riscv_vcpu_pmu_ctr_info(struct kvm_vcpu *vcpu, unsigned long cidx,
				struct kvm_vcpu_sbi_return *retdata);
int kvm_riscv_vcpu_pmu_ctr_start(struct kvm_vcpu *vcpu, unsigned long ctr_base,
				 unsigned long ctr_mask, unsigned long flags, u64 ival,
				 struct kvm_vcpu_sbi_return *retdata);
int kvm_riscv_vcpu_pmu_ctr_stop(struct kvm_vcpu *vcpu, unsigned long ctr_base,
				unsigned long ctr_mask, unsigned long flags,
				struct kvm_vcpu_sbi_return *retdata);
int kvm_riscv_vcpu_pmu_ctr_cfg_match(struct kvm_vcpu *vcpu, unsigned long ctr_base,
				     unsigned long ctr_mask, unsigned long flags,
				     unsigned long eidx, u64 evtdata,
				     struct kvm_vcpu_sbi_return *retdata);
int kvm_riscv_vcpu_pmu_ctr_read(struct kvm_vcpu *vcpu, unsigned long cidx,
				struct kvm_vcpu_sbi_return *retdata);
void kvm_riscv_vcpu_pmu_init(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_pmu_deinit(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_pmu_reset(struct kvm_vcpu *vcpu);
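
/*
 * A minimal dispatch sketch for the prototypes above (assumptions: the SBI
 * PMU function IDs and the kvm_vcpu_sbi_return layout come from other KVM
 * headers; this is not the actual SBI extension handler, which lives in
 * arch/riscv/kvm/vcpu_sbi_pmu.c):
 *
 *	struct kvm_vcpu_sbi_return retdata = { 0 };
 *
 *	switch (funcid) {
 *	case SBI_EXT_PMU_NUM_COUNTERS:
 *		ret = kvm_riscv_vcpu_pmu_num_ctrs(vcpu, &retdata);
 *		break;
 *	case SBI_EXT_PMU_COUNTER_START:
 *		ret = kvm_riscv_vcpu_pmu_ctr_start(vcpu, cp->a0, cp->a1,
 *						   cp->a2, cp->a3, &retdata);
 *		break;
 *	}
 */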

#else
struct kvm_pmu {
};

#define KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS \
{.base = 0,	.count = 0,	.func = NULL },

static inline void kvm_riscv_vcpu_pmu_init(struct kvm_vcpu *vcpu) {}
static inline int kvm_riscv_vcpu_pmu_incr_fw(struct kvm_vcpu *vcpu, unsigned long fid)
{
	return 0;
}

static inline void kvm_riscv_vcpu_pmu_deinit(struct kvm_vcpu *vcpu) {}
static inline void kvm_riscv_vcpu_pmu_reset(struct kvm_vcpu *vcpu) {}
#endif /* CONFIG_RISCV_PMU_SBI */
#endif /* !__KVM_VCPU_RISCV_PMU_H */