/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_SVM_OPS_H
#define __KVM_X86_SVM_OPS_H

#include <linux/compiler_types.h>

#include "x86.h"
#define svm_asm(insn, clobber...)				\
do {								\
	asm_volatile_goto("1: " __stringify(insn) "\n\t"	\
			  _ASM_EXTABLE(1b, %l[fault])		\
			  ::: clobber : fault);			\
	return;							\
fault:								\
	kvm_spurious_fault();					\
} while (0)

#define svm_asm1(insn, op1, clobber...)				\
do {								\
	asm_volatile_goto("1: " __stringify(insn) " %0\n\t"	\
			  _ASM_EXTABLE(1b, %l[fault])		\
			  :: op1 : clobber : fault);		\
	return;							\
fault:								\
	kvm_spurious_fault();					\
} while (0)

#define svm_asm2(insn, op1, op2, clobber...)				\
do {									\
	asm_volatile_goto("1: " __stringify(insn) " %1, %0\n\t"	\
			  _ASM_EXTABLE(1b, %l[fault])			\
			  :: op1, op2 : clobber : fault);		\
	return;								\
fault:									\
	kvm_spurious_fault();						\
} while (0)
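
/*
 * CLGI/STGI clear/set the Global Interrupt Flag (GIF).  While GIF=0 the
 * CPU holds off interrupts, NMIs, SMIs, etc., which is why KVM brackets
 * VMRUN with clgi() ... stgi() to keep the world switch atomic with
 * respect to host events.
 */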
static inline void clgi(void)
{
	svm_asm(clgi);
}

static inline void stgi(void)
{
	svm_asm(stgi);
}
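
/*
 * INVLPGA invalidates the TLB entry for the virtual address in rAX,
 * qualified by the ASID in ECX.  Note the operand order: svm_asm2() emits
 * "%1, %0", so the address constraint comes second in the macro arguments
 * but first in the emitted instruction.
 */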
static inline void invlpga(unsigned long addr, u32 asid)
{
	svm_asm2(invlpga, "c"(asid), "a"(addr));
}
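
/*
 * VMSAVE/VMLOAD copy the pieces of state that VMRUN and #VMEXIT don't
 * switch (FS/GS/TR/LDTR including hidden segment state, KernelGsBase, and
 * the STAR/SYSENTER MSR families) to/from the VMCB at the given physical
 * address; the "memory" clobber below tells the compiler that memory is
 * read/written behind its back.
 */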
/*
 * Despite being a physical address, the portion of rAX that is consumed by
 * VMSAVE, VMLOAD, etc... is still controlled by the effective address size,
 * hence 'unsigned long' instead of 'hpa_t'.
 */
static inline void vmsave(unsigned long pa)
{
	svm_asm1(vmsave, "a" (pa), "memory");
}

static inline void vmload(unsigned long pa)
{
	svm_asm1(vmload, "a" (pa), "memory");
}
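
/*
 * Illustrative usage (not from this file): KVM saves the host save area
 * before VMRUN with something like
 *
 *	vmsave(__sme_page_pa(sd->save_area));
 *
 * where sd is the per-CPU svm_cpu_data and __sme_page_pa() tags the
 * physical address with the SME C-bit so hardware accesses the page
 * through the correct (encrypted) mapping.
 */
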
#endif /* __KVM_X86_SVM_OPS_H */