path: root/arch/loongarch/kernel/paravirt.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/export.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/jump_label.h>
#include <linux/kvm_para.h>
#include <linux/static_call.h>
#include <asm/paravirt.h>

/* Steal-time static keys, checked by the scheduler's steal-time accounting */
struct static_key paravirt_steal_enabled;
struct static_key paravirt_steal_rq_enabled;

static u64 native_steal_clock(int cpu)
{
	return 0;
}

DEFINE_STATIC_CALL(pv_steal_clock, native_steal_clock);
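/*
 * native_steal_clock() is only a placeholder: a hypervisor back end may
 * rebind this call at boot, e.g. with
 *	static_call_update(pv_steal_clock, kvm_steal_clock);
 * (kvm_steal_clock is a hypothetical u64 (*)(int cpu) implementation, not
 * defined in this file). Callers invoke the current target through
 * static_call(pv_steal_clock)(cpu).
 */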

#ifdef CONFIG_SMP
static void pv_send_ipi_single(int cpu, unsigned int action)
{
	int min, old;
	irq_cpustat_t *info = &per_cpu(irq_stat, cpu);

	old = atomic_fetch_or(BIT(action), &info->message);
	if (old)
		return;

	min = cpu_logical_map(cpu);
	/* Single-destination bitmap: bit 0 set, high word 0, base = min */
	kvm_hypercall3(KVM_HCALL_FUNC_IPI, 1, 0, min);
}

#define KVM_IPI_CLUSTER_SIZE	(2 * BITS_PER_LONG)
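/*
 * The IPI hypercall takes a 128-bit destination bitmap (passed as two
 * longs) plus a base physical cpu id ("min"), so one call can target a
 * cluster of up to KVM_IPI_CLUSTER_SIZE (128) consecutive CPUs. Worked
 * example: for physical cpus {3, 5, 131}, cpus 3 and 5 share a cluster
 * (min = 3, bits 0 and 2 set); cpu 131 no longer fits (131 >= 3 + 128),
 * so the first cluster is flushed and a new one starts at min = 131.
 */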

static void pv_send_ipi_mask(const struct cpumask *mask, unsigned int action)
{
	int i, cpu, min = 0, max = 0, old;
	__uint128_t bitmap = 0;
	irq_cpustat_t *info;

	if (cpumask_empty(mask))
		return;

	action = BIT(action);
	for_each_cpu(i, mask) {
		info = &per_cpu(irq_stat, i);
		old = atomic_fetch_or(action, &info->message);
		if (old)
			continue;

		cpu = cpu_logical_map(i);
		if (!bitmap) {
			min = max = cpu;
		} else if (cpu < min && cpu > (max - KVM_IPI_CLUSTER_SIZE)) {
			/* cpu below min, still in the cluster: shift so cpu is the new min */
			bitmap <<= min - cpu;
			min = cpu;
		} else if (cpu > min && cpu < (min + KVM_IPI_CLUSTER_SIZE)) {
			/* cpu above min, still in the cluster: just extend max */
			max = cpu > max ? cpu : max;
		} else {
			/*
			 * Adding this cpu would overflow the current cluster
			 * (KVM_IPI_CLUSTER_SIZE), so flush the accumulated
			 * bitmap with a hypercall and start a new cluster.
			 */
			kvm_hypercall3(KVM_HCALL_FUNC_IPI, (unsigned long)bitmap,
				      (unsigned long)(bitmap >> BITS_PER_LONG), min);
			min = max = cpu;
			bitmap = 0;
		}
		__set_bit(cpu - min, (unsigned long *)&bitmap);	/* record cpu relative to min */
	}

	if (bitmap)
		kvm_hypercall3(KVM_HCALL_FUNC_IPI, (unsigned long)bitmap,
			      (unsigned long)(bitmap >> BITS_PER_LONG), min);
}

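/*
 * Guest-side IPI receiver: the hypervisor raises SWI0 on the target vCPU;
 * the pending actions are read and cleared atomically from the per-cpu
 * message word, then dispatched.
 */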
static irqreturn_t pv_ipi_interrupt(int irq, void *dev)
{
	u32 action;
	irq_cpustat_t *info;

	/* Clear SWI interrupt */
	clear_csr_estat(1 << INT_SWI0);
	info = this_cpu_ptr(&irq_stat);
	action = atomic_xchg(&info->message, 0);

	if (action & SMP_RESCHEDULE) {
		scheduler_ipi();
		info->ipi_irqs[IPI_RESCHEDULE]++;
	}

	if (action & SMP_CALL_FUNCTION) {
		generic_smp_call_function_interrupt();
		info->ipi_irqs[IPI_CALL_FUNCTION]++;
	}

	return IRQ_HANDLED;
}

static void pv_init_ipi(void)
{
	int r, swi;

	swi = get_percpu_irq(INT_SWI0);
	if (swi < 0)
		panic("SWI0 IRQ mapping failed\n");
	irq_set_percpu_devid(swi);
	r = request_percpu_irq(swi, pv_ipi_interrupt, "SWI0-IPI", &irq_stat);
	if (r < 0)
		panic("SWI0 IRQ request failed\n");
}
#endif

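/*
 * Detect KVM by reading the hypervisor signature from CPUCFG. The result
 * is cached in hypervisor_type so the CPUCFG read happens only once.
 */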
static bool kvm_para_available(void)
{
	int config;
	static int hypervisor_type;

	if (!hypervisor_type) {
		config = read_cpucfg(CPUCFG_KVM_SIG);
		if (!memcmp(&config, KVM_SIGNATURE, 4))
			hypervisor_type = HYPERVISOR_KVM;
	}

	return hypervisor_type == HYPERVISOR_KVM;
}

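/*
 * Probe order: bail out unless we run under a hypervisor, the hypervisor
 * is KVM, and the host advertises the paravirt IPI feature; only then
 * replace the native IPI mp_ops with the hypercall-based ones.
 */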
int __init pv_ipi_init(void)
{
	int feature;

	if (!cpu_has_hypervisor)
		return 0;
	if (!kvm_para_available())
		return 0;

	feature = read_cpucfg(CPUCFG_KVM_FEATURE);
	if (!(feature & KVM_FEATURE_IPI))
		return 0;

#ifdef CONFIG_SMP
	mp_ops.init_ipi		= pv_init_ipi;
	mp_ops.send_ipi_single	= pv_send_ipi_single;
	mp_ops.send_ipi_mask	= pv_send_ipi_mask;
#endif

	return 0;
}