path: root/arch/arm64/kvm/hyp/hyp.h
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ARM64_KVM_HYP_H__
#define __ARM64_KVM_HYP_H__

#include <linux/compiler.h>
#include <linux/kvm_host.h>
#include <asm/kvm_mmu.h>
#include <asm/sysreg.h>

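/*
 * Code marked __hyp_text is placed in the .hyp.text section so that it
 * is mapped, and therefore executable, at EL2. notrace keeps ftrace
 * from instrumenting these functions, as the tracing code is not
 * mapped at HYP.
 */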
#define __hyp_text __section(.hyp.text) notrace

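/*
 * Convert a kernel VA into a HYP VA: on non-VHE, HYP runs in a
 * separate VA range and the kernel address is masked with
 * HYP_PAGE_OFFSET_MASK; on VHE the kernel itself runs at EL2, so the
 * masking is patched down to a nop.
 */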
static inline unsigned long __kern_hyp_va(unsigned long v)
{
	asm volatile(ALTERNATIVE("and %0, %0, %1",
				 "nop",
				 ARM64_HAS_VIRT_HOST_EXTN)
		     : "+r" (v) : "i" (HYP_PAGE_OFFSET_MASK));
	return v;
}

#define kern_hyp_va(v) (typeof(v))(__kern_hyp_va((unsigned long)(v)))

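/*
 * Convert a HYP VA back into a kernel VA by adding the offset between
 * the two mappings; as above, VHE patches the add down to a nop.
 */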
static inline unsigned long __hyp_kern_va(unsigned long v)
{
	u64 offset = PAGE_OFFSET - HYP_PAGE_OFFSET;

	asm volatile(ALTERNATIVE("add %0, %0, %1",
				 "nop",
				 ARM64_HAS_VIRT_HOST_EXTN)
		     : "+r" (v) : "r" (offset));
	return v;
}

#define hyp_kern_va(v) (typeof(v))(__hyp_kern_va((unsigned long)(v)))

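/*
 * Generate accessors that select, at runtime, between the non-VHE
 * encoding of a register (the nvh suffix) and its VHE counterpart
 * (the vh suffix). The mrs_s/msr_s forms emit the raw encoding of
 * registers the assembler may not know by name.
 */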
#define read_sysreg_elx(r,nvh,vh)					\
	({								\
		u64 reg;						\
		asm volatile(ALTERNATIVE("mrs %0, " __stringify(r##nvh),\
					 "mrs_s %0, " __stringify(r##vh),\
					 ARM64_HAS_VIRT_HOST_EXTN)	\
			     : "=r" (reg));				\
		reg;							\
	})

#define write_sysreg_elx(v,r,nvh,vh)					\
	do {								\
		u64 __val = (u64)(v);					\
		asm volatile(ALTERNATIVE("msr " __stringify(r##nvh) ", %x0",\
					 "msr_s " __stringify(r##vh) ", %x0",\
					 ARM64_HAS_VIRT_HOST_EXTN)	\
					 : : "rZ" (__val));		\
	} while (0)

/*
 * Unified accessors for registers that have a different encoding
 * between VHE and non-VHE. They must be specified without their "ELx"
 * suffix.
 */
#define read_sysreg_el2(r)						\
	({								\
		u64 reg;						\
		asm volatile(ALTERNATIVE("mrs %0, " __stringify(r##_EL2),\
					 "mrs %0, " __stringify(r##_EL1),\
					 ARM64_HAS_VIRT_HOST_EXTN)	\
			     : "=r" (reg));				\
		reg;							\
	})

#define write_sysreg_el2(v,r)						\
	do {								\
		u64 __val = (u64)(v);					\
		asm volatile(ALTERNATIVE("msr " __stringify(r##_EL2) ", %x0",\
					 "msr " __stringify(r##_EL1) ", %x0",\
					 ARM64_HAS_VIRT_HOST_EXTN)	\
					 : : "rZ" (__val));		\
	} while (0)

#define read_sysreg_el0(r)	read_sysreg_elx(r, _EL0, _EL02)
#define write_sysreg_el0(v,r)	write_sysreg_elx(v, r, _EL0, _EL02)
#define read_sysreg_el1(r)	read_sysreg_elx(r, _EL1, _EL12)
#define write_sysreg_el1(v,r)	write_sysreg_elx(v, r, _EL1, _EL12)
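
/*
 * For example, read_sysreg_el1(sctlr) expands to "mrs %0, sctlr_EL1"
 * on non-VHE. On VHE, where the plain _EL1 encodings are redirected
 * to the EL2 registers, it instead becomes an mrs_s access through
 * the sctlr_EL12 encoding defined below.
 */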

/* The VHE specific system registers and their encoding */
#define sctlr_EL12              sys_reg(3, 5, 1, 0, 0)
#define cpacr_EL12              sys_reg(3, 5, 1, 0, 2)
#define ttbr0_EL12              sys_reg(3, 5, 2, 0, 0)
#define ttbr1_EL12              sys_reg(3, 5, 2, 0, 1)
#define tcr_EL12                sys_reg(3, 5, 2, 0, 2)
#define spsr_EL12               sys_reg(3, 5, 4, 0, 0)
#define elr_EL12                sys_reg(3, 5, 4, 0, 1)
#define afsr0_EL12              sys_reg(3, 5, 5, 1, 0)
#define afsr1_EL12              sys_reg(3, 5, 5, 1, 1)
#define esr_EL12                sys_reg(3, 5, 5, 2, 0)
#define far_EL12                sys_reg(3, 5, 6, 0, 0)
#define mair_EL12               sys_reg(3, 5, 10, 2, 0)
#define amair_EL12              sys_reg(3, 5, 10, 3, 0)
#define vbar_EL12               sys_reg(3, 5, 12, 0, 0)
#define contextidr_EL12         sys_reg(3, 5, 13, 0, 1)
#define cntkctl_EL12            sys_reg(3, 5, 14, 1, 0)
#define cntp_tval_EL02          sys_reg(3, 5, 14, 2, 0)
#define cntp_ctl_EL02           sys_reg(3, 5, 14, 2, 1)
#define cntp_cval_EL02          sys_reg(3, 5, 14, 2, 2)
#define cntv_tval_EL02          sys_reg(3, 5, 14, 3, 0)
#define cntv_ctl_EL02           sys_reg(3, 5, 14, 3, 1)
#define cntv_cval_EL02          sys_reg(3, 5, 14, 3, 2)

/**
 * hyp_alternate_select - Generates patchable code sequences that are
 * used to switch between two implementations of a function, depending
 * on the availability of a feature.
 *
 * @fname: a symbol name that will be defined as a function returning a
 * function pointer whose type matches @orig and @alt
 * @orig: a pointer to the default function, returned by @fname when
 * @cond doesn't hold
 * @alt: a pointer to the alternate function, returned by @fname when
 * @cond holds
 * @cond: a CPU feature (as described in asm/cpufeature.h)
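 *
 * Example (a usage sketch mirroring the world-switch code; the vGIC
 * save functions are declared below):
 *
 *	static hyp_alternate_select(__vgic_call_save_state,
 *				    __vgic_v2_save_state, __vgic_v3_save_state,
 *				    ARM64_HAS_SYSREG_GIC_CPUIF);
 *
 *	__vgic_call_save_state()(vcpu);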
 */
#define hyp_alternate_select(fname, orig, alt, cond)			\
typeof(orig) * __hyp_text fname(void)					\
{									\
	typeof(alt) *val = orig;					\
	asm volatile(ALTERNATIVE("nop		\n",			\
				 "mov	%0, %1	\n",			\
				 cond)					\
		     : "+r" (val) : "r" (alt));				\
	return val;							\
}

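/* vGIC world-switch backends, in GICv2 and GICv3 flavours. */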
void __vgic_v2_save_state(struct kvm_vcpu *vcpu);
void __vgic_v2_restore_state(struct kvm_vcpu *vcpu);

void __vgic_v3_save_state(struct kvm_vcpu *vcpu);
void __vgic_v3_restore_state(struct kvm_vcpu *vcpu);

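/* Architected timer state. */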
void __timer_save_state(struct kvm_vcpu *vcpu);
void __timer_restore_state(struct kvm_vcpu *vcpu);

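/* Host/guest system register context, plus the AArch32 guest state. */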
void __sysreg_save_host_state(struct kvm_cpu_context *ctxt);
void __sysreg_restore_host_state(struct kvm_cpu_context *ctxt);
void __sysreg_save_guest_state(struct kvm_cpu_context *ctxt);
void __sysreg_restore_guest_state(struct kvm_cpu_context *ctxt);
void __sysreg32_save_state(struct kvm_vcpu *vcpu);
void __sysreg32_restore_state(struct kvm_vcpu *vcpu);

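/* Debug register state, saved/restored conditionally around the switch. */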
void __debug_save_state(struct kvm_vcpu *vcpu,
			struct kvm_guest_debug_arch *dbg,
			struct kvm_cpu_context *ctxt);
void __debug_restore_state(struct kvm_vcpu *vcpu,
			   struct kvm_guest_debug_arch *dbg,
			   struct kvm_cpu_context *ctxt);
void __debug_cond_save_host_state(struct kvm_vcpu *vcpu);
void __debug_cond_restore_host_state(struct kvm_vcpu *vcpu);

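/* FP/SIMD register state. */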
void __fpsimd_save_state(struct user_fpsimd_state *fp_regs);
void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs);
bool __fpsimd_enabled(void);

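/* Low-level guest entry, and the HYP half of the panic path. */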
u64 __guest_enter(struct kvm_vcpu *vcpu, struct kvm_cpu_context *host_ctxt);
void __noreturn __hyp_do_panic(unsigned long, ...);

#endif /* __ARM64_KVM_HYP_H__ */