arch/arm64/kvm/pmu.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2019 Arm Limited
 * Author: Andrew Murray <Andrew.Murray@arm.com>
 */
#include <linux/kvm_host.h>
#include <linux/perf_event.h>

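/*
 * Per-CPU bitmaps of the events that the host and the guest want
 * counted; updated by kvm_set_pmu_events()/kvm_clr_pmu_events() below
 * and consumed when switching PMU state at guest entry/exit.
 */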
static DEFINE_PER_CPU(struct kvm_pmu_events, kvm_pmu_events);

/*
 * Given the perf event attributes and system type, determine
 * whether we need to switch counters at guest entry/exit.
 */
static bool kvm_pmu_switch_needed(struct perf_event_attr *attr)
{
	/*
	 * With VHE the guest kernel runs at EL1 and the host at EL2.
	 * If user (EL0) events are excluded, there is no reason to
	 * switch counters.
	 */
	if (has_vhe() && attr->exclude_user)
		return false;

	/* Only switch if attributes are different */
	return (attr->exclude_host != attr->exclude_guest);
}

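/*
 * Return this CPU's host/guest event bitmaps. Callers should not be
 * preemptible (or must otherwise stay on this CPU) while the result
 * is in use.
 */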
struct kvm_pmu_events *kvm_get_pmu_events(void)
{
	return this_cpu_ptr(&kvm_pmu_events);
}

/*
 * Add events to track that we may want to switch at guest entry/exit
 * time.
 */
void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr)
{
	struct kvm_pmu_events *pmu = kvm_get_pmu_events();

	if (!kvm_arm_support_pmu_v3() || !kvm_pmu_switch_needed(attr))
		return;

	if (!attr->exclude_host)
		pmu->events_host |= set;
	if (!attr->exclude_guest)
		pmu->events_guest |= set;
}
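
/*
 * Example (sketch, not a verbatim call site): the host PMU driver is
 * expected to pair these two helpers around a counter's lifetime,
 * along the lines of:
 *
 *	kvm_set_pmu_events(BIT(idx), &event->attr);	// counter programmed
 *	...
 *	kvm_clr_pmu_events(BIT(idx));			// counter released
 *
 * where 'idx' is the hardware counter index backing the perf event.
 */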

/*
 * Stop tracking events
 */
void kvm_clr_pmu_events(u32 clr)
{
	struct kvm_pmu_events *pmu = kvm_get_pmu_events();

	if (!kvm_arm_support_pmu_v3())
		return;

	pmu->events_host &= ~clr;
	pmu->events_guest &= ~clr;
}

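/*
 * read_sysreg()/write_sysreg() encode the system register name into
 * the MRS/MSR instruction at compile time, so a run-time counter
 * index can only be handled by expanding one switch case per
 * PMEVTYPER<n>_EL0 register, as below.
 */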
#define PMEVTYPER_READ_CASE(idx)				\
	case idx:						\
		return read_sysreg(pmevtyper##idx##_el0)

#define PMEVTYPER_WRITE_CASE(idx)				\
	case idx:						\
		write_sysreg(val, pmevtyper##idx##_el0);	\
		break

#define PMEVTYPER_CASES(readwrite)				\
	PMEVTYPER_##readwrite##_CASE(0);			\
	PMEVTYPER_##readwrite##_CASE(1);			\
	PMEVTYPER_##readwrite##_CASE(2);			\
	PMEVTYPER_##readwrite##_CASE(3);			\
	PMEVTYPER_##readwrite##_CASE(4);			\
	PMEVTYPER_##readwrite##_CASE(5);			\
	PMEVTYPER_##readwrite##_CASE(6);			\
	PMEVTYPER_##readwrite##_CASE(7);			\
	PMEVTYPER_##readwrite##_CASE(8);			\
	PMEVTYPER_##readwrite##_CASE(9);			\
	PMEVTYPER_##readwrite##_CASE(10);			\
	PMEVTYPER_##readwrite##_CASE(11);			\
	PMEVTYPER_##readwrite##_CASE(12);			\
	PMEVTYPER_##readwrite##_CASE(13);			\
	PMEVTYPER_##readwrite##_CASE(14);			\
	PMEVTYPER_##readwrite##_CASE(15);			\
	PMEVTYPER_##readwrite##_CASE(16);			\
	PMEVTYPER_##readwrite##_CASE(17);			\
	PMEVTYPER_##readwrite##_CASE(18);			\
	PMEVTYPER_##readwrite##_CASE(19);			\
	PMEVTYPER_##readwrite##_CASE(20);			\
	PMEVTYPER_##readwrite##_CASE(21);			\
	PMEVTYPER_##readwrite##_CASE(22);			\
	PMEVTYPER_##readwrite##_CASE(23);			\
	PMEVTYPER_##readwrite##_CASE(24);			\
	PMEVTYPER_##readwrite##_CASE(25);			\
	PMEVTYPER_##readwrite##_CASE(26);			\
	PMEVTYPER_##readwrite##_CASE(27);			\
	PMEVTYPER_##readwrite##_CASE(28);			\
	PMEVTYPER_##readwrite##_CASE(29);			\
	PMEVTYPER_##readwrite##_CASE(30)

/*
 * Read a value directly from PMEVTYPER<idx>_EL0, where idx is 0-30,
 * or from PMCCFILTR_EL0 when idx is ARMV8_PMU_CYCLE_IDX (31).
 */
static u64 kvm_vcpu_pmu_read_evtype_direct(int idx)
{
	switch (idx) {
	PMEVTYPER_CASES(READ);
	case ARMV8_PMU_CYCLE_IDX:
		return read_sysreg(pmccfiltr_el0);
	default:
		WARN_ON(1);
	}

	return 0;
}

/*
 * Write a value directly to PMEVTYPER<idx>_EL0, where idx is 0-30,
 * or to PMCCFILTR_EL0 when idx is ARMV8_PMU_CYCLE_IDX (31).
 */
static void kvm_vcpu_pmu_write_evtype_direct(int idx, u64 val)
{
	switch (idx) {
	PMEVTYPER_CASES(WRITE);
	case ARMV8_PMU_CYCLE_IDX:
		write_sysreg(val, pmccfiltr_el0);
		break;
	default:
		WARN_ON(1);
	}
}

/*
 * Modify ARMv8 PMU events to include EL0 counting
 */
static void kvm_vcpu_pmu_enable_el0(unsigned long events)
{
	u64 typer;
	u32 counter;

	for_each_set_bit(counter, &events, 32) {
		typer = kvm_vcpu_pmu_read_evtype_direct(counter);
		typer &= ~ARMV8_PMU_EXCLUDE_EL0;
		kvm_vcpu_pmu_write_evtype_direct(counter, typer);
	}
}

/*
 * Modify ARMv8 PMU events to exclude EL0 counting
 */
static void kvm_vcpu_pmu_disable_el0(unsigned long events)
{
	u64 typer;
	u32 counter;

	for_each_set_bit(counter, &events, 32) {
		typer = kvm_vcpu_pmu_read_evtype_direct(counter);
		typer |= ARMV8_PMU_EXCLUDE_EL0;
		kvm_vcpu_pmu_write_evtype_direct(counter, typer);
	}
}

/*
 * On VHE ensure that only guest events have EL0 counting enabled.
 * This is called from both vcpu_{load,put} and the sysreg handling.
 * Since the latter is preemptible, special care must be taken to
 * disable preemption.
 */
void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu_events *pmu;
	u32 events_guest, events_host;

	if (!kvm_arm_support_pmu_v3() || !has_vhe())
		return;

	preempt_disable();
	pmu = kvm_get_pmu_events();
	events_guest = pmu->events_guest;
	events_host = pmu->events_host;

	kvm_vcpu_pmu_enable_el0(events_guest);
	kvm_vcpu_pmu_disable_el0(events_host);
	preempt_enable();
}

/*
 * On VHE ensure that only host events have EL0 counting enabled.
 * Unlike kvm_vcpu_pmu_restore_guest(), this runs from vcpu_put(),
 * where preemption is already disabled, so no preempt_disable() is
 * needed here.
 */
void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu_events *pmu;
	u32 events_guest, events_host;

	if (!kvm_arm_support_pmu_v3() || !has_vhe())
		return;

	pmu = kvm_get_pmu_events();
	events_guest = pmu->events_guest;
	events_host = pmu->events_host;

	kvm_vcpu_pmu_enable_el0(events_host);
	kvm_vcpu_pmu_disable_el0(events_guest);
}

/*
 * With VHE, keep track of the PMUSERENR_EL0 value for the host EL0 on the
 * pCPU where the guest's PMUSERENR_EL0 is loaded, since PMUSERENR_EL0 is
 * switched to the guest value on vcpu_load(). The host EL0 value will be
 * restored on vcpu_put(), before returning to userspace.
 * This isn't necessary for nVHE, as the register is context switched for
 * every guest entry/exit.
 *
 * Return true if KVM takes care of the register. Otherwise return false.
 */
bool kvm_set_pmuserenr(u64 val)
{
	struct kvm_cpu_context *hctxt;
	struct kvm_vcpu *vcpu;

	if (!kvm_arm_support_pmu_v3() || !has_vhe())
		return false;

	vcpu = kvm_get_running_vcpu();
	if (!vcpu || !vcpu_get_flag(vcpu, PMUSERENR_ON_CPU))
		return false;

	hctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
	ctxt_sys_reg(hctxt, PMUSERENR_EL0) = val;
	return true;
}
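
/*
 * Example (sketch): given the contract above, a host-side update of
 * PMUSERENR_EL0 is expected to defer to KVM first and only write the
 * register itself when KVM declines:
 *
 *	if (!kvm_set_pmuserenr(val))
 *		write_sysreg(val, pmuserenr_el0);
 */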

/*
 * If we interrupted the guest to update the host PMU context, make
 * sure we re-apply the guest EL0 state.
 */
void kvm_vcpu_pmu_resync_el0(void)
{
	struct kvm_vcpu *vcpu;

	if (!has_vhe() || !in_interrupt())
		return;

	vcpu = kvm_get_running_vcpu();
	if (!vcpu)
		return;

	kvm_make_request(KVM_REQ_RESYNC_PMU_EL0, vcpu);
}