path: root/arch/loongarch/net/bpf_jit.h
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * BPF JIT compiler for LoongArch
 *
 * Copyright (C) 2022 Loongson Technology Corporation Limited
 */
#include <linux/bitfield.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <asm/cacheflush.h>
#include <asm/inst.h>

struct jit_ctx {
	const struct bpf_prog *prog;
	unsigned int idx;
	unsigned int flags;
	unsigned int epilogue_offset;
	u32 *offset;
	int num_exentries;
	union loongarch_instruction *image;
	u32 stack_size;
};

struct jit_data {
	struct bpf_binary_header *header;
	u8 *image;
	struct jit_ctx ctx;
};

#define emit_insn(ctx, func, ...)						\
do {										\
	if (ctx->image != NULL) {						\
		union loongarch_instruction *insn = &ctx->image[ctx->idx];	\
		emit_##func(insn, ##__VA_ARGS__);				\
	}									\
	ctx->idx++;								\
} while (0)
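
/*
 * The JIT first runs with ctx->image == NULL, so emit_insn() only
 * advances ctx->idx and thereby measures the image size; a later pass
 * with a real buffer actually writes the encoded instructions.
 * Illustrative expansion (the emit_* encoders come from asm/inst.h):
 * emit_insn(ctx, ori, rd, rj, imm) becomes
 * emit_ori(&ctx->image[ctx->idx], rd, rj, imm).
 */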

#define is_signed_imm12(val)	signed_imm_check(val, 12)
#define is_signed_imm14(val)	signed_imm_check(val, 14)
#define is_signed_imm16(val)	signed_imm_check(val, 16)
#define is_signed_imm26(val)	signed_imm_check(val, 26)
#define is_signed_imm32(val)	signed_imm_check(val, 32)
#define is_signed_imm52(val)	signed_imm_check(val, 52)
#define is_unsigned_imm12(val)	unsigned_imm_check(val, 12)
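
/*
 * signed_imm_check(val, n) tests -2^(n-1) <= val < 2^(n-1) and
 * unsigned_imm_check(val, n) tests val < 2^n (both in asm/inst.h),
 * so e.g. is_signed_imm12() accepts the si12 range -2048..2047.
 */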

static inline int bpf2la_offset(int bpf_insn, int off, const struct jit_ctx *ctx)
{
	/* BPF JMP offset is relative to the next instruction */
	bpf_insn++;
	/*
	 * LoongArch branch instructions, by contrast, encode the offset
	 * relative to the branch itself, so we subtract 1 to get the
	 * index of the branch (the last native instruction emitted for
	 * the current BPF instruction).
	 */
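	/*
	 * Illustrative numbers: if ctx->offset[bpf_insn] = 10, the branch
	 * sits at native index 9; with ctx->offset[bpf_insn + off] = 14,
	 * the result is 14 - 9 = 5 native instructions from the branch to
	 * its target.
	 */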
	return (ctx->offset[bpf_insn + off] - (ctx->offset[bpf_insn] - 1));
}

static inline int epilogue_offset(const struct jit_ctx *ctx)
{
	int from = ctx->idx;
	int to = ctx->epilogue_offset;

	return (to - from);
}

/* Zero-extend 32 bits into 64 bits */
static inline void emit_zext_32(struct jit_ctx *ctx, enum loongarch_gpr reg, bool is32)
{
	if (!is32)
		return;

	emit_insn(ctx, lu32id, reg, 0);
}
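
/*
 * emit_zext_32() relies on lu32i.d semantics: a zero immediate writes
 * zeroes into bits [51:32] and sign-extends that zero through bits
 * [63:52], clearing the upper 32 bits in a single instruction.
 */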

/* Signed-extend 32 bits into 64 bits */
static inline void emit_sext_32(struct jit_ctx *ctx, enum loongarch_gpr reg, bool is32)
{
	if (!is32)
		return;

	emit_insn(ctx, addiw, reg, reg, 0);
}

static inline void move_addr(struct jit_ctx *ctx, enum loongarch_gpr rd, u64 addr)
{
	u64 imm_11_0, imm_31_12, imm_51_32, imm_63_52;

	/* lu12iw rd, imm_31_12 */
	imm_31_12 = (addr >> 12) & 0xfffff;
	emit_insn(ctx, lu12iw, rd, imm_31_12);

	/* ori rd, rd, imm_11_0 */
	imm_11_0 = addr & 0xfff;
	emit_insn(ctx, ori, rd, rd, imm_11_0);

	/* lu32id rd, imm_51_32 */
	imm_51_32 = (addr >> 32) & 0xfffff;
	emit_insn(ctx, lu32id, rd, imm_51_32);

	/* lu52id rd, rd, imm_63_52 */
	imm_63_52 = (addr >> 52) & 0xfff;
	emit_insn(ctx, lu52id, rd, rd, imm_63_52);
}
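
/*
 * Unlike move_imm() below, move_addr() always emits exactly four
 * instructions regardless of the value, so the sequence length is
 * stable across JIT passes (useful when the final address is only
 * resolved, and patched in, on a later pass). Illustrative
 * decomposition for addr = 0x123456789abcdef0:
 *
 *   lu12i.w rd, 0x9abcd      bits [31:12], sign-extended to 64 bits
 *   ori     rd, rd, 0xef0    bits [11:0]
 *   lu32i.d rd, 0x45678      bits [51:32]
 *   lu52i.d rd, rd, 0x123    bits [63:52]
 */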

static inline void move_imm(struct jit_ctx *ctx, enum loongarch_gpr rd, long imm, bool is32)
{
	long imm_11_0, imm_31_12, imm_51_32, imm_63_52, imm_51_0, imm_51_31;

	/* or rd, $zero, $zero */
	if (imm == 0) {
		emit_insn(ctx, or, rd, LOONGARCH_GPR_ZERO, LOONGARCH_GPR_ZERO);
		return;
	}

	/* addiw rd, $zero, imm_11_0 */
	if (is_signed_imm12(imm)) {
		emit_insn(ctx, addiw, rd, LOONGARCH_GPR_ZERO, imm);
		goto zext;
	}

	/* ori rd, $zero, imm_11_0 */
	if (is_unsigned_imm12(imm)) {
		emit_insn(ctx, ori, rd, LOONGARCH_GPR_ZERO, imm);
		goto zext;
	}

	/* lu52id rd, $zero, imm_63_52 */
	imm_63_52 = (imm >> 52) & 0xfff;
	imm_51_0 = imm & 0xfffffffffffff;
	if (imm_63_52 != 0 && imm_51_0 == 0) {
		emit_insn(ctx, lu52id, rd, LOONGARCH_GPR_ZERO, imm_63_52);
		return;
	}

	/* lu12iw rd, imm_31_12 */
	imm_31_12 = (imm >> 12) & 0xfffff;
	emit_insn(ctx, lu12iw, rd, imm_31_12);

	/* ori rd, rd, imm_11_0 */
	imm_11_0 = imm & 0xfff;
	if (imm_11_0 != 0)
		emit_insn(ctx, ori, rd, rd, imm_11_0);

	if (!is_signed_imm32(imm)) {
		if (imm_51_0 != 0) {
			/*
			 * If bits [51:31] are all 0s or all 1s, then
			 * bits [51:32] already hold the sign extension
			 * produced by lu12iw, so there is no need to
			 * emit lu32id to fill them again.
			 */
			imm_51_31 = (imm >> 31) & 0x1fffff;
			if (imm_51_31 != 0 && imm_51_31 != 0x1fffff) {
				/* lu32id rd, imm_51_32 */
				imm_51_32 = (imm >> 32) & 0xfffff;
				emit_insn(ctx, lu32id, rd, imm_51_32);
			}
		}

		/* lu52id rd, rd, imm_63_52 */
		if (!is_signed_imm52(imm))
			emit_insn(ctx, lu52id, rd, rd, imm_63_52);
	}

zext:
	emit_zext_32(ctx, rd, is32);
}
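
/*
 * Example (values for illustration): imm = 0x80000000 fits neither a
 * 12-bit immediate nor a signed 32-bit value, so the emitted sequence is:
 *
 *   lu12i.w rd, 0x80000      rd = 0xffffffff80000000 (sign-extended)
 *   lu32i.d rd, 0x00000      rd = 0x0000000080000000
 *
 * ori is skipped since imm[11:0] == 0, and lu52i.d is skipped since
 * the value already passes is_signed_imm52().
 */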

static inline void move_reg(struct jit_ctx *ctx, enum loongarch_gpr rd,
			    enum loongarch_gpr rj)
{
	emit_insn(ctx, or, rd, rj, LOONGARCH_GPR_ZERO);
}

static inline int invert_jmp_cond(u8 cond)
{
	switch (cond) {
	case BPF_JEQ:
		return BPF_JNE;
	case BPF_JNE:
	case BPF_JSET:
		return BPF_JEQ;
	case BPF_JGT:
		return BPF_JLE;
	case BPF_JGE:
		return BPF_JLT;
	case BPF_JLT:
		return BPF_JGE;
	case BPF_JLE:
		return BPF_JGT;
	case BPF_JSGT:
		return BPF_JSLE;
	case BPF_JSGE:
		return BPF_JSLT;
	case BPF_JSLT:
		return BPF_JSGE;
	case BPF_JSLE:
		return BPF_JSGT;
	}
	return -1;
}
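
/*
 * Note: BPF_JSET shares BPF_JNE's mapping (and inverts to BPF_JEQ)
 * because the JIT body is expected to AND the two operands into a
 * scratch register first, so "PC += off if dst & src" reduces to a
 * bne/beq against $zero.
 */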

static inline void cond_jmp_offset(struct jit_ctx *ctx, u8 cond, enum loongarch_gpr rj,
				   enum loongarch_gpr rd, int jmp_offset)
{
	switch (cond) {
	case BPF_JEQ:
		/* PC += jmp_offset if rj == rd */
		emit_insn(ctx, beq, rj, rd, jmp_offset);
		return;
	case BPF_JNE:
	case BPF_JSET:
		/* PC += jmp_offset if rj != rd */
		emit_insn(ctx, bne, rj, rd, jmp_offset);
		return;
	case BPF_JGT:
		/* PC += jmp_offset if rj > rd (unsigned) */
		emit_insn(ctx, bltu, rd, rj, jmp_offset);
		return;
	case BPF_JLT:
		/* PC += jmp_offset if rj < rd (unsigned) */
		emit_insn(ctx, bltu, rj, rd, jmp_offset);
		return;
	case BPF_JGE:
		/* PC += jmp_offset if rj >= rd (unsigned) */
		emit_insn(ctx, bgeu, rj, rd, jmp_offset);
		return;
	case BPF_JLE:
		/* PC += jmp_offset if rj <= rd (unsigned) */
		emit_insn(ctx, bgeu, rd, rj, jmp_offset);
		return;
	case BPF_JSGT:
		/* PC += jmp_offset if rj > rd (signed) */
		emit_insn(ctx, blt, rd, rj, jmp_offset);
		return;
	case BPF_JSLT:
		/* PC += jmp_offset if rj < rd (signed) */
		emit_insn(ctx, blt, rj, rd, jmp_offset);
		return;
	case BPF_JSGE:
		/* PC += jmp_offset if rj >= rd (signed) */
		emit_insn(ctx, bge, rj, rd, jmp_offset);
		return;
	case BPF_JSLE:
		/* PC += jmp_offset if rj <= rd (signed) */
		emit_insn(ctx, bge, rd, rj, jmp_offset);
		return;
	}
}

static inline void cond_jmp_offs26(struct jit_ctx *ctx, u8 cond, enum loongarch_gpr rj,
				   enum loongarch_gpr rd, int jmp_offset)
{
	cond = invert_jmp_cond(cond);
	cond_jmp_offset(ctx, cond, rj, rd, 2);
	emit_insn(ctx, b, jmp_offset);
}
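
/*
 * Illustrative sequence for cond = BPF_JEQ with a far target: the
 * condition is inverted so that a short conditional branch skips the
 * 26-bit unconditional jump when the original condition is false.
 *
 *   bne rj, rd, 2     skip the next instruction if rj != rd
 *   b   jmp_offset    taken path, roughly +/-128MB of range
 */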

static inline void uncond_jmp_offs26(struct jit_ctx *ctx, int jmp_offset)
{
	emit_insn(ctx, b, jmp_offset);
}

static inline int emit_cond_jmp(struct jit_ctx *ctx, u8 cond, enum loongarch_gpr rj,
				enum loongarch_gpr rd, int jmp_offset)
{
	/*
	 * A large PC-relative jump offset may overflow the immediate field
	 * of the native conditional branch instruction, which would force a
	 * conversion to an absolute jump; that jump sequence is particularly
	 * nasty. For now, use cond_jmp_offs26() directly to keep it simple.
	 * Far-branch support could be added later, but branch relaxation
	 * needs more than two passes to converge and the resulting code is
	 * hard to follow; it is not clear it is necessary or worth the
	 * extra pain. So leave it as it is for readability.
	 */
	if (is_signed_imm26(jmp_offset)) {
		cond_jmp_offs26(ctx, cond, rj, rd, jmp_offset);
		return 0;
	}

	return -EINVAL;
}

static inline int emit_uncond_jmp(struct jit_ctx *ctx, int jmp_offset)
{
	if (is_signed_imm26(jmp_offset)) {
		uncond_jmp_offs26(ctx, jmp_offset);
		return 0;
	}

	return -EINVAL;
}

static inline int emit_tailcall_jmp(struct jit_ctx *ctx, u8 cond, enum loongarch_gpr rj,
				    enum loongarch_gpr rd, int jmp_offset)
{
	if (is_signed_imm16(jmp_offset)) {
		cond_jmp_offset(ctx, cond, rj, rd, jmp_offset);
		return 0;
	}

	return -EINVAL;
}
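
/*
 * The tail call helper only branches a handful of instructions ahead
 * (to the bounds/limit-exceeded exit of the JIT-generated tail call
 * sequence), so a 16-bit conditional-branch offset is assumed to be
 * sufficient here; anything larger is rejected with -EINVAL.
 */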