path: root/arch/arm64/kvm/pauth.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2024 - Google LLC
 * Author: Marc Zyngier <maz@kernel.org>
 *
 * Primitive PAuth emulation for ERETAA/ERETAB.
 *
 * This code assumes that it is run from EL2, and that it is part of
 * the emulation of ERETAx for a guest hypervisor. That's a lot of
 * baked-in assumptions and shortcuts.
 *
 * Do not reuse for anything else!
 */

#include <linux/kvm_host.h>

#include <asm/gpr-num.h>
#include <asm/kvm_emulate.h>
#include <asm/pointer_auth.h>

/*
 * PACGA Xd, Xn, Xm
 *
 * Hand-encoded so that no assembler support for PAuth is required:
 * the Rd/Rn/Rm register numbers are OR'd into the base opcode at
 * assembly time via the .L__gpr_num_* symbols.
 */
#define PACGA(d,n,m)					\
	asm volatile(__DEFINE_ASM_GPR_NUMS		\
		     ".inst 0x9AC03000          |"	\
		     "(.L__gpr_num_%[Rd] << 0)  |"	\
		     "(.L__gpr_num_%[Rn] << 5)  |"	\
		     "(.L__gpr_num_%[Rm] << 16)\n"	\
		     : [Rd] "=r" ((d))			\
		     : [Rn] "r" ((n)), [Rm] "r" ((m)))

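/*
 * Compute the PAC for @ptr with the instruction key @ikey, using the
 * guest hypervisor's SP_EL2 as the modifier: @ikey is temporarily
 * installed in the APGA key registers (preemption disabled so that we
 * cannot be switched out with a transient key in place), PACGA does
 * the actual computation, and the previous generic key is restored.
 */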
static u64 compute_pac(struct kvm_vcpu *vcpu, u64 ptr,
		       struct ptrauth_key ikey)
{
	struct ptrauth_key gkey;
	u64 mod, pac = 0;

	preempt_disable();

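	/*
	 * The modifier is the guest hypervisor's SP_EL2: either in the
	 * in-memory sysreg file, or in SP_EL1 if the guest's sysregs
	 * are currently loaded on the CPU.
	 */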
	if (!vcpu_get_flag(vcpu, SYSREGS_ON_CPU))
		mod = __vcpu_sys_reg(vcpu, SP_EL2);
	else
		mod = read_sysreg(sp_el1);

	gkey.lo = read_sysreg_s(SYS_APGAKEYLO_EL1);
	gkey.hi = read_sysreg_s(SYS_APGAKEYHI_EL1);

	__ptrauth_key_install_nosync(APGA, ikey);
	isb();

	PACGA(pac, ptr, mod);
	isb();

	__ptrauth_key_install_nosync(APGA, gkey);

	preempt_enable();

	/* PAC in the top 32 bits */
	return pac;
}

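/*
 * Work out whether Top Byte Ignore applies to this instruction
 * address, i.e. whether bits [63:56] are excluded from the PAC field.
 * TBI is only effective here if TBID doesn't exclude instruction
 * addresses from it.
 */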
static bool effective_tbi(struct kvm_vcpu *vcpu, bool bit55)
{
	u64 tcr = vcpu_read_sys_reg(vcpu, TCR_EL2);
	bool tbi, tbid;

	/*
	 * Since we are authenticating an instruction address, we have
	 * to take TBID into account. If E2H==0, ignore VA[55], as
	 * TCR_EL2 only has a single TBI/TBID. If VA[55] was set in
	 * this case, this is likely a guest bug...
	 */
	if (!vcpu_el2_e2h_is_set(vcpu)) {
		tbi = tcr & BIT(20);
		tbid = tcr & BIT(29);
	} else if (bit55) {
		tbi = tcr & TCR_TBI1;
		tbid = tcr & TCR_TBID1;
	} else {
		tbi = tcr & TCR_TBI0;
		tbid = tcr & TCR_TBID0;
	}

	return tbi && !tbid;
}

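/*
 * Find the lowest bit of the PAC field, i.e. 64 - TxSZ for the
 * translation regime selected by VA[55] (or unconditionally T0SZ
 * when E2H==0, as TCR_EL2 then describes a single VA range).
 */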
static int compute_bottom_pac(struct kvm_vcpu *vcpu, bool bit55)
{
	static const int maxtxsz = 39; // Revisit these two values once
	static const int mintxsz = 16; // (if) we support TTST/LVA/LVA2
	u64 tcr = vcpu_read_sys_reg(vcpu, TCR_EL2);
	int txsz;

	if (!vcpu_el2_e2h_is_set(vcpu) || !bit55)
		txsz = FIELD_GET(TCR_T0SZ_MASK, tcr);
	else
		txsz = FIELD_GET(TCR_T1SZ_MASK, tcr);

	return 64 - clamp(txsz, mintxsz, maxtxsz);
}

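/*
 * The PAC occupies bits [54:bottom_pac], plus bits [63:56] when the
 * top byte isn't ignored. Bit 55 is never part of the PAC, as it
 * selects the VA range.
 */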
static u64 compute_pac_mask(struct kvm_vcpu *vcpu, bool bit55)
{
	int bottom_pac;
	u64 mask;

	bottom_pac = compute_bottom_pac(vcpu, bit55);

	mask = GENMASK(54, bottom_pac);
	if (!effective_tbi(vcpu, bit55))
		mask |= GENMASK(63, 56);

	return mask;
}

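/*
 * Strip the PAC by extending bit 55 across the whole PAC field,
 * reconstructing the canonical (unauthenticated) address.
 */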
static u64 to_canonical_addr(struct kvm_vcpu *vcpu, u64 ptr, u64 mask)
{
	bool bit55 = !!(ptr & BIT(55));

	if (bit55)
		return ptr | mask;

	return ptr & ~mask;
}

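/*
 * Without FEAT_PAuth2, a failed authentication replaces the top PAC
 * bits with an error code (1 for the A key, 2 for the B key), making
 * the resulting address non-canonical.
 */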
static u64 corrupt_addr(struct kvm_vcpu *vcpu, u64 ptr)
{
	bool bit55 = !!(ptr & BIT(55));
	u64 mask, error_code;
	int shift;

	if (effective_tbi(vcpu, bit55)) {
		mask = GENMASK(54, 53);
		shift = 53;
	} else {
		mask = GENMASK(62, 61);
		shift = 61;
	}

	if (esr_iss_is_eretab(kvm_vcpu_get_esr(vcpu)))
		error_code = 2ULL << shift;
	else
		error_code = 1ULL << shift;

	ptr &= ~mask;
	ptr |= error_code;

	return ptr;
}

/*
 * Authenticate an ERETAA/ERETAB instruction, returning true if the
 * authentication succeeded and false otherwise. In all cases, *elr
 * contains the VA to ERET to. Potential exception injection is left
 * to the caller.
 */
bool kvm_auth_eretax(struct kvm_vcpu *vcpu, u64 *elr)
{
	u64 sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL2);
	u64 esr = kvm_vcpu_get_esr(vcpu);
	u64 ptr, cptr, pac, mask;
	struct ptrauth_key ikey;

	*elr = ptr = vcpu_read_sys_reg(vcpu, ELR_EL2);

	/*
	 * We assume we're already in the context of an ERETAx. If the
	 * relevant key is disabled, the instruction degrades to a
	 * plain ERET and the authentication trivially succeeds.
	 */
	if (esr_iss_is_eretab(esr)) {
		if (!(sctlr & SCTLR_EL1_EnIB))
			return true;

		ikey.lo = __vcpu_sys_reg(vcpu, APIBKEYLO_EL1);
		ikey.hi = __vcpu_sys_reg(vcpu, APIBKEYHI_EL1);
	} else {
		if (!(sctlr & SCTLR_EL1_EnIA))
			return true;

		ikey.lo = __vcpu_sys_reg(vcpu, APIAKEYLO_EL1);
		ikey.hi = __vcpu_sys_reg(vcpu, APIAKEYHI_EL1);
	}

	mask = compute_pac_mask(vcpu, !!(ptr & BIT(55)));
	cptr = to_canonical_addr(vcpu, ptr, mask);

	pac = compute_pac(vcpu, cptr, ikey);

	/*
	 * Slightly deviate from the pseudocode: if we have a PAC
	 * match with the signed pointer, then it must be good.
	 * Anything after this point is pure error handling.
	 */
	if ((pac & mask) == (ptr & mask)) {
		*elr = cptr;
		return true;
	}

	/*
	 * Authentication failed: corrupt the canonical address if
	 * PAuth2 isn't implemented, or XOR the computed PAC into the
	 * pointer if it is.
	 */
	if (!kvm_has_pauth(vcpu->kvm, PAuth2))
		cptr = corrupt_addr(vcpu, cptr);
	else
		cptr = ptr ^ (pac & mask);

	*elr = cptr;
	return false;
}