/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
 */

#ifndef __ASM_LOONGARCH_KVM_VCPU_H__
#define __ASM_LOONGARCH_KVM_VCPU_H__

#include <linux/kvm_host.h>
#include <asm/loongarch.h>

/* Controlled by guest CSR 0x5 (ESTAT) */
#define CPU_SIP0			(_ULCAST_(1))
#define CPU_SIP1			(_ULCAST_(1) << 1)
#define CPU_PMU				(_ULCAST_(1) << 10)
#define CPU_TIMER			(_ULCAST_(1) << 11)
#define CPU_IPI				(_ULCAST_(1) << 12)

/* Controlled by guest CSR 0x52 (exception VIP); aligned to ESTAT bits 5~12 */
#define CPU_IP0				(_ULCAST_(1))
#define CPU_IP1				(_ULCAST_(1) << 1)
#define CPU_IP2				(_ULCAST_(1) << 2)
#define CPU_IP3				(_ULCAST_(1) << 3)
#define CPU_IP4				(_ULCAST_(1) << 4)
#define CPU_IP5				(_ULCAST_(1) << 5)
#define CPU_IP6				(_ULCAST_(1) << 6)
#define CPU_IP7				(_ULCAST_(1) << 7)

/*
 * NSEC_PER_SEC scaled down by 2^20; assumed to be paired with a timer
 * frequency scaled down by the same power of two so that tick<->ns
 * conversions stay within 64 bits.
 */
#define MNSEC_PER_SEC			(NSEC_PER_SEC >> 20)

/* KVM_IRQ_LINE irq field index values */
#define KVM_LOONGSON_IRQ_TYPE_SHIFT	24
#define KVM_LOONGSON_IRQ_TYPE_MASK	0xff
#define KVM_LOONGSON_IRQ_VCPU_SHIFT	16
#define KVM_LOONGSON_IRQ_VCPU_MASK	0xff
#define KVM_LOONGSON_IRQ_NUM_SHIFT	0
#define KVM_LOONGSON_IRQ_NUM_MASK	0xffff
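
/*
 * Illustrative sketch (not part of the ABI headers): userspace packs the
 * KVM_IRQ_LINE irq argument from the fields above, e.g.
 *
 *	irq = ((type & KVM_LOONGSON_IRQ_TYPE_MASK) << KVM_LOONGSON_IRQ_TYPE_SHIFT) |
 *	      ((vcpu & KVM_LOONGSON_IRQ_VCPU_MASK) << KVM_LOONGSON_IRQ_VCPU_SHIFT) |
 *	      ((num  & KVM_LOONGSON_IRQ_NUM_MASK)  << KVM_LOONGSON_IRQ_NUM_SHIFT);
 */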

typedef union loongarch_instruction  larch_inst;
typedef int (*exit_handle_fn)(struct kvm_vcpu *);

int  kvm_emu_mmio_read(struct kvm_vcpu *vcpu, larch_inst inst);
int  kvm_emu_mmio_write(struct kvm_vcpu *vcpu, larch_inst inst);
int  kvm_complete_mmio_read(struct kvm_vcpu *vcpu, struct kvm_run *run);
int  kvm_complete_iocsr_read(struct kvm_vcpu *vcpu, struct kvm_run *run);
int  kvm_complete_user_service(struct kvm_vcpu *vcpu, struct kvm_run *run);
int  kvm_emu_idle(struct kvm_vcpu *vcpu);
int  kvm_pending_timer(struct kvm_vcpu *vcpu);
int  kvm_handle_fault(struct kvm_vcpu *vcpu, int fault);
void kvm_deliver_intr(struct kvm_vcpu *vcpu);
void kvm_deliver_exception(struct kvm_vcpu *vcpu);

void kvm_own_fpu(struct kvm_vcpu *vcpu);
void kvm_lose_fpu(struct kvm_vcpu *vcpu);
void kvm_save_fpu(struct loongarch_fpu *fpu);
void kvm_restore_fpu(struct loongarch_fpu *fpu);
void kvm_restore_fcsr(struct loongarch_fpu *fpu);
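
/*
 * A sketch of the expected contract (an assumption from the names, in
 * line with other KVM ports): kvm_own_fpu() hands the FPU to the vCPU
 * and restores guest FPU state, while kvm_lose_fpu() saves guest state
 * back and returns the FPU to the host:
 *
 *	kvm_own_fpu(vcpu);	// on guest FPU use (lazy restore)
 *	...
 *	kvm_lose_fpu(vcpu);	// before the host reclaims the FPU
 */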

#ifdef CONFIG_CPU_HAS_LSX
int kvm_own_lsx(struct kvm_vcpu *vcpu);
void kvm_save_lsx(struct loongarch_fpu *fpu);
void kvm_restore_lsx(struct loongarch_fpu *fpu);
#else
static inline int kvm_own_lsx(struct kvm_vcpu *vcpu) { return -EINVAL; }
static inline void kvm_save_lsx(struct loongarch_fpu *fpu) { }
static inline void kvm_restore_lsx(struct loongarch_fpu *fpu) { }
#endif

#ifdef CONFIG_CPU_HAS_LASX
int kvm_own_lasx(struct kvm_vcpu *vcpu);
void kvm_save_lasx(struct loongarch_fpu *fpu);
void kvm_restore_lasx(struct loongarch_fpu *fpu);
#else
static inline int kvm_own_lasx(struct kvm_vcpu *vcpu) { return -EINVAL; }
static inline void kvm_save_lasx(struct loongarch_fpu *fpu) { }
static inline void kvm_restore_lasx(struct loongarch_fpu *fpu) { }
#endif

#ifdef CONFIG_CPU_HAS_LBT
int kvm_own_lbt(struct kvm_vcpu *vcpu);
#else
static inline int kvm_own_lbt(struct kvm_vcpu *vcpu) { return -EINVAL; }
#endif

void kvm_init_timer(struct kvm_vcpu *vcpu, unsigned long hz);
void kvm_save_timer(struct kvm_vcpu *vcpu);
void kvm_restore_timer(struct kvm_vcpu *vcpu);

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq);
struct kvm_vcpu *kvm_get_vcpu_by_cpuid(struct kvm *kvm, int cpuid);

/*
 * LoongArch KVM guest interrupt handling
 */
static inline void kvm_queue_irq(struct kvm_vcpu *vcpu, unsigned int irq)
{
	set_bit(irq, &vcpu->arch.irq_pending);
	clear_bit(irq, &vcpu->arch.irq_clear);
}

static inline void kvm_dequeue_irq(struct kvm_vcpu *vcpu, unsigned int irq)
{
	clear_bit(irq, &vcpu->arch.irq_pending);
	set_bit(irq, &vcpu->arch.irq_clear);
}
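
/*
 * Illustrative sketch, assuming the INT_* interrupt numbers from
 * <asm/loongarch.h> (e.g. INT_TI for the guest timer):
 *
 *	kvm_queue_irq(vcpu, INT_TI);	// mark pending, undo any prior clear
 *	...
 *	kvm_dequeue_irq(vcpu, INT_TI);	// mark for clearing instead
 *
 * kvm_deliver_intr(), declared above, is then expected to fold
 * irq_pending/irq_clear into the guest's interrupt state on vCPU entry.
 */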

static inline int kvm_queue_exception(struct kvm_vcpu *vcpu,
			unsigned int code, unsigned int subcode)
{
	/* Only one exception can be pending at a time */
	if (vcpu->arch.exception_pending)
		return -1;

	set_bit(code, &vcpu->arch.exception_pending);
	vcpu->arch.esubcode = subcode;

	return 0;
}
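
/*
 * Illustrative sketch, assuming the EXCCODE_* values from
 * <asm/loongarch.h> (e.g. EXCCODE_INE, "instruction non-existent"):
 *
 *	if (kvm_queue_exception(vcpu, EXCCODE_INE, 0))
 *		;	// an earlier exception is still pending; not replaced
 *
 * kvm_deliver_exception(), declared above, then injects the queued
 * exception into the guest.
 */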

static inline unsigned long kvm_read_reg(struct kvm_vcpu *vcpu, int num)
{
	return vcpu->arch.gprs[num];
}

static inline void kvm_write_reg(struct kvm_vcpu *vcpu, int num, unsigned long val)
{
	vcpu->arch.gprs[num] = val;
}
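
/*
 * Illustrative sketch: completing an emulated MMIO read by writing the
 * value supplied by userspace into the destination GPR (`rd` and `val`
 * are hypothetical locals here):
 *
 *	kvm_write_reg(vcpu, rd, val);
 *
 * kvm_read_reg(vcpu, rs) fetches a source operand the same way.
 */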

static inline bool kvm_pvtime_supported(void)
{
	return !!sched_info_on();
}

static inline bool kvm_guest_has_pv_feature(struct kvm_vcpu *vcpu, unsigned int feature)
{
	return vcpu->kvm->arch.pv_features & BIT(feature);
}
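
/*
 * Illustrative sketch, assuming the KVM_FEATURE_STEAL_TIME bit from
 * <asm/kvm_para.h>: steal time is only worth exposing when the host can
 * account for it and the VM has the feature enabled:
 *
 *	if (kvm_pvtime_supported() &&
 *	    kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME))
 *		;	// update the guest's steal-time area
 */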

#endif /* __ASM_LOONGARCH_KVM_VCPU_H__ */