// SPDX-License-Identifier: GPL-2.0-only
/*
 * Just-In-Time compiler for eBPF bytecode on MIPS.
 * Implementation of JIT functions for 64-bit CPUs.
 *
 * Copyright (c) 2021 Anyfi Networks AB.
 * Author: Johan Almbladh <johan.almbladh@gmail.com>
 *
 * Based on code and ideas from
 * Copyright (c) 2017 Cavium, Inc.
 * Copyright (c) 2017 Shubham Bansal <illusionist.neo@gmail.com>
 * Copyright (c) 2011 Mircea Gherzan <mgherzan@gmail.com>
 */

#include <linux/errno.h>
#include <linux/filter.h>
#include <linux/bpf.h>
#include <asm/cpu-features.h>
#include <asm/isa-rev.h>
#include <asm/uasm.h>

#include "bpf_jit_comp.h"

/* MIPS t0-t3 are not available in the n64 ABI */
#undef MIPS_R_T0
#undef MIPS_R_T1
#undef MIPS_R_T2
#undef MIPS_R_T3

/* Stack is 16-byte aligned in n64 ABI */
#define MIPS_STACK_ALIGNMENT 16

/* Extra 64-bit eBPF registers used by JIT */
#define JIT_REG_TC (MAX_BPF_JIT_REG + 0)
#define JIT_REG_ZX (MAX_BPF_JIT_REG + 1)

/* Number of prologue bytes to skip when doing a tail call */
#define JIT_TCALL_SKIP 4

/* Callee-saved CPU registers that the JIT must preserve */
#define JIT_CALLEE_REGS	\
	(BIT(MIPS_R_S0) |	\
	 BIT(MIPS_R_S1) |	\
	 BIT(MIPS_R_S2) |	\
	 BIT(MIPS_R_S3) |	\
	 BIT(MIPS_R_S4) |	\
	 BIT(MIPS_R_S5) |	\
	 BIT(MIPS_R_S6) |	\
	 BIT(MIPS_R_S7) |	\
	 BIT(MIPS_R_GP) |	\
	 BIT(MIPS_R_FP) |	\
	 BIT(MIPS_R_RA))

/* Caller-saved CPU registers available for JIT use */
#define JIT_CALLER_REGS	\
	(BIT(MIPS_R_A5) |	\
	 BIT(MIPS_R_A6) |	\
	 BIT(MIPS_R_A7))

/*
 * Mapping of 64-bit eBPF registers to 64-bit native MIPS registers.
 * MIPS registers t4 - t7 may be used by the JIT as temporary registers.
 * MIPS registers t8 - t9 are reserved for single-register common functions.
 */
static const u8 bpf2mips64[] = {
	/* Return value from in-kernel function, and exit value from eBPF */
	[BPF_REG_0] = MIPS_R_V0,
	/* Arguments from eBPF program to in-kernel function */
	[BPF_REG_1] = MIPS_R_A0,
	[BPF_REG_2] = MIPS_R_A1,
	[BPF_REG_3] = MIPS_R_A2,
	[BPF_REG_4] = MIPS_R_A3,
	[BPF_REG_5] = MIPS_R_A4,
	/* Callee-saved registers that in-kernel function will preserve */
	[BPF_REG_6] = MIPS_R_S0,
	[BPF_REG_7] = MIPS_R_S1,
	[BPF_REG_8] = MIPS_R_S2,
	[BPF_REG_9] = MIPS_R_S3,
	/* Read-only frame pointer to access the eBPF stack */
	[BPF_REG_FP] = MIPS_R_FP,
	/* Temporary register for blinding constants */
	[BPF_REG_AX] = MIPS_R_AT,
	/* Tail call count register, caller-saved */
	[JIT_REG_TC] = MIPS_R_A5,
	/* Constant for register zero-extension */
	[JIT_REG_ZX] = MIPS_R_V1,
};

/*
 * MIPS 32-bit operations on 64-bit registers generate a sign-extended
 * result. However, the eBPF ISA mandates zero-extension, so we rely on the
 * verifier to add that for us (emit_zext_ver). In addition, ALU arithmetic
 * operations, right shift and byte swap require properly sign-extended
 * operands or the result is unpredictable. We emit explicit sign-extensions
 * in those cases.
 */
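
/*
 * For example, a 32-bit BPF_ADD of 0x7fffffff + 1 leaves the value
 * 0xffffffff80000000 in the CPU register, since MIPS addu sign-extends
 * its 32-bit result. The subsequent zero-extension masks off the upper
 * word, yielding 0x0000000080000000 as the eBPF ISA requires.
 */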

/* Sign extension */
static void emit_sext(struct jit_context *ctx, u8 dst, u8 src)
{
	emit(ctx, sll, dst, src, 0);
	clobber_reg(ctx, dst);
}

/* Zero extension */
static void emit_zext(struct jit_context *ctx, u8 dst)
{
	if (cpu_has_mips64r2 || cpu_has_mips64r6) {
		emit(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
	} else {
		emit(ctx, and, dst, dst, bpf2mips64[JIT_REG_ZX]);
		access_reg(ctx, JIT_REG_ZX); /* We need the ZX register */
	}
	clobber_reg(ctx, dst);
}

/* Zero extension, if verifier does not do it for us */
static void emit_zext_ver(struct jit_context *ctx, u8 dst)
{
	if (!ctx->program->aux->verifier_zext)
		emit_zext(ctx, dst);
}

/* dst = imm (64-bit) */
static void emit_mov_i64(struct jit_context *ctx, u8 dst, u64 imm64)
{
	if (imm64 >= 0xffffffffffff8000ULL || imm64 < 0x8000ULL) {
		emit(ctx, daddiu, dst, MIPS_R_ZERO, (s16)imm64);
	} else if (imm64 >= 0xffffffff80000000ULL ||
		   (imm64 < 0x80000000 && imm64 > 0xffff)) {
		emit(ctx, lui, dst, (s16)(imm64 >> 16));
		emit(ctx, ori, dst, dst, (u16)imm64 & 0xffff);
	} else {
		u8 acc = MIPS_R_ZERO;
		int shift = 0;
		int k;

		for (k = 0; k < 4; k++) {
			u16 half = imm64 >> (48 - 16 * k);

			if (acc == dst)
				shift += 16;

			if (half) {
				if (shift)
					emit(ctx, dsll_safe, dst, dst, shift);
				emit(ctx, ori, dst, acc, half);
				acc = dst;
				shift = 0;
			}
		}
		if (shift)
			emit(ctx, dsll_safe, dst, dst, shift);
	}
	clobber_reg(ctx, dst);
}
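
/*
 * As a worked example, emit_mov_i64(ctx, dst, 0x0123456700000000) takes
 * the generic path above and emits
 *
 *	ori	dst, zero, 0x0123
 *	dsll	dst, dst, 16
 *	ori	dst, dst, 0x4567
 *	dsll	dst, dst, 32	(dsll_safe picks the wide-shift encoding)
 *
 * Half words that are zero only accumulate shift amounts, so they cost
 * no instructions of their own.
 */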

/* ALU immediate operation (64-bit) */
static void emit_alu_i64(struct jit_context *ctx, u8 dst, s32 imm, u8 op)
{
	switch (BPF_OP(op)) {
	/* dst = dst | imm */
	case BPF_OR:
		emit(ctx, ori, dst, dst, (u16)imm);
		break;
	/* dst = dst ^ imm */
	case BPF_XOR:
		emit(ctx, xori, dst, dst, (u16)imm);
		break;
	/* dst = -dst */
	case BPF_NEG:
		emit(ctx, dsubu, dst, MIPS_R_ZERO, dst);
		break;
	/* dst = dst << imm */
	case BPF_LSH:
		emit(ctx, dsll_safe, dst, dst, imm);
		break;
	/* dst = dst >> imm */
	case BPF_RSH:
		emit(ctx, dsrl_safe, dst, dst, imm);
		break;
	/* dst = dst >> imm (arithmetic) */
	case BPF_ARSH:
		emit(ctx, dsra_safe, dst, dst, imm);
		break;
	/* dst = dst + imm */
	case BPF_ADD:
		emit(ctx, daddiu, dst, dst, imm);
		break;
	/* dst = dst - imm */
	case BPF_SUB:
		emit(ctx, daddiu, dst, dst, -imm);
		break;
	default:
		/* Width-generic operations */
		emit_alu_i(ctx, dst, imm, op);
	}
	clobber_reg(ctx, dst);
}

/* ALU register operation (64-bit) */
static void emit_alu_r64(struct jit_context *ctx, u8 dst, u8 src, u8 op)
{
	switch (BPF_OP(op)) {
	/* dst = dst << src */
	case BPF_LSH:
		emit(ctx, dsllv, dst, dst, src);
		break;
	/* dst = dst >> src */
	case BPF_RSH:
		emit(ctx, dsrlv, dst, dst, src);
		break;
	/* dst = dst >> src (arithmetic) */
	case BPF_ARSH:
		emit(ctx, dsrav, dst, dst, src);
		break;
	/* dst = dst + src */
	case BPF_ADD:
		emit(ctx, daddu, dst, dst, src);
		break;
	/* dst = dst - src */
	case BPF_SUB:
		emit(ctx, dsubu, dst, dst, src);
		break;
	/* dst = dst * src */
	case BPF_MUL:
		if (cpu_has_mips64r6) {
			emit(ctx, dmulu, dst, dst, src);
		} else {
			emit(ctx, dmultu, dst, src);
			emit(ctx, mflo, dst);
			/* Ensure multiplication is completed */
			if (IS_ENABLED(CONFIG_CPU_R4000_WORKAROUNDS))
				emit(ctx, mfhi, MIPS_R_ZERO);
		}
		break;
	/* dst = dst / src */
	case BPF_DIV:
		if (cpu_has_mips64r6) {
			emit(ctx, ddivu_r6, dst, dst, src);
		} else {
			emit(ctx, ddivu, dst, src);
			emit(ctx, mflo, dst);
		}
		break;
	/* dst = dst % src */
	case BPF_MOD:
		if (cpu_has_mips64r6) {
			emit(ctx, dmodu, dst, dst, src);
		} else {
			emit(ctx, ddivu, dst, src);
			emit(ctx, mfhi, dst);
		}
		break;
	default:
		/* Width-generic operations */
		emit_alu_r(ctx, dst, src, op);
	}
	clobber_reg(ctx, dst);
}

/* Swap sub words in a register double word */
static void emit_swap_r64(struct jit_context *ctx, u8 dst, u8 mask, u32 bits)
{
	u8 tmp = MIPS_R_T9;

	emit(ctx, and, tmp, dst, mask);  /* tmp = dst & mask  */
	emit(ctx, dsll, tmp, tmp, bits); /* tmp = tmp << bits */
	emit(ctx, dsrl, dst, dst, bits); /* dst = dst >> bits */
	emit(ctx, and, dst, dst, mask);  /* dst = dst & mask  */
	emit(ctx, or, dst, dst, tmp);    /* dst = dst | tmp   */
}
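
/*
 * For example, with mask = 0x0000ffff0000ffff and bits = 16, the
 * sequence above swaps the two half words within each word of dst:
 * 0xaaaabbbbccccdddd becomes 0xbbbbaaaaddddcccc.
 */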

/* Swap bytes and truncate a register double word, word or half word */
static void emit_bswap_r64(struct jit_context *ctx, u8 dst, u32 width)
{
	switch (width) {
	/* Swap bytes in a double word */
	case 64:
		if (cpu_has_mips64r2 || cpu_has_mips64r6) {
			emit(ctx, dsbh, dst, dst);
			emit(ctx, dshd, dst, dst);
		} else {
			u8 t1 = MIPS_R_T6;
			u8 t2 = MIPS_R_T7;

			emit(ctx, dsll32, t2, dst, 0);  /* t2 = dst << 32  */
			emit(ctx, dsrl32, dst, dst, 0); /* dst = dst >> 32 */
			emit(ctx, or, dst, dst, t2);    /* dst = dst | t2  */

			emit(ctx, ori, t2, MIPS_R_ZERO, 0xffff);
			emit(ctx, dsll32, t1, t2, 0);   /* t1 = t2 << 32   */
			emit(ctx, or, t1, t1, t2);      /* t1 = t1 | t2    */
			emit_swap_r64(ctx, dst, t1, 16);/* dst = swap16(dst) */

			emit(ctx, lui, t2, 0xff);       /* t2 = 0x00ff0000 */
			emit(ctx, ori, t2, t2, 0xff);   /* t2 = t2 | 0x00ff */
			emit(ctx, dsll32, t1, t2, 0);   /* t1 = t2 << 32   */
			emit(ctx, or, t1, t1, t2);      /* t1 = t1 | t2    */
			emit_swap_r64(ctx, dst, t1, 8); /* dst = swap8(dst) */
		}
		break;
	/* Swap bytes in a word */
	/* Swap bytes in a half word */
	case 32:
	case 16:
		emit_sext(ctx, dst, dst);
		emit_bswap_r(ctx, dst, width);
		if (cpu_has_mips64r2 || cpu_has_mips64r6)
			emit_zext(ctx, dst);
		break;
	}
	clobber_reg(ctx, dst);
}

/* Truncate a register double word, word or half word */
static void emit_trunc_r64(struct jit_context *ctx, u8 dst, u32 width)
{
	switch (width) {
	case 64:
		break;
	/* Zero-extend a word */
	case 32:
		emit_zext(ctx, dst);
		break;
	/* Zero-extend a half word */
	case 16:
		emit(ctx, andi, dst, dst, 0xffff);
		break;
	}
	clobber_reg(ctx, dst);
}

/* Load operation: dst = *(size *)(src + off) */
static void emit_ldx(struct jit_context *ctx, u8 dst, u8 src, s16 off, u8 size)
{
	switch (size) {
	/* Load a byte */
	case BPF_B:
		emit(ctx, lbu, dst, off, src);
		break;
	/* Load a half word */
	case BPF_H:
		emit(ctx, lhu, dst, off, src);
		break;
	/* Load a word */
	case BPF_W:
		emit(ctx, lwu, dst, off, src);
		break;
	/* Load a double word */
	case BPF_DW:
		emit(ctx, ld, dst, off, src);
		break;
	}
	clobber_reg(ctx, dst);
}

/* Store operation: *(size *)(dst + off) = src */
static void emit_stx(struct jit_context *ctx, u8 dst, u8 src, s16 off, u8 size)
{
	switch (size) {
	/* Store a byte */
	case BPF_B:
		emit(ctx, sb, src, off, dst);
		break;
	/* Store a half word */
	case BPF_H:
		emit(ctx, sh, src, off, dst);
		break;
	/* Store a word */
	case BPF_W:
		emit(ctx, sw, src, off, dst);
		break;
	/* Store a double word */
	case BPF_DW:
		emit(ctx, sd, src, off, dst);
		break;
	}
}

/* Atomic read-modify-write */
static void emit_atomic_r64(struct jit_context *ctx,
			    u8 dst, u8 src, s16 off, u8 code)
{
	u8 t1 = MIPS_R_T6;
	u8 t2 = MIPS_R_T7;

	LLSC_sync(ctx);
	emit(ctx, lld, t1, off, dst);
	switch (code) {
	case BPF_ADD:
	case BPF_ADD | BPF_FETCH:
		emit(ctx, daddu, t2, t1, src);
		break;
	case BPF_AND:
	case BPF_AND | BPF_FETCH:
		emit(ctx, and, t2, t1, src);
		break;
	case BPF_OR:
	case BPF_OR | BPF_FETCH:
		emit(ctx, or, t2, t1, src);
		break;
	case BPF_XOR:
	case BPF_XOR | BPF_FETCH:
		emit(ctx, xor, t2, t1, src);
		break;
	case BPF_XCHG:
		emit(ctx, move, t2, src);
		break;
	}
	emit(ctx, scd, t2, off, dst);
	emit(ctx, LLSC_beqz, t2, -16 - LLSC_offset);
	emit(ctx, nop); /* Delay slot */

	if (code & BPF_FETCH) {
		emit(ctx, move, src, t1);
		clobber_reg(ctx, src);
	}
}
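
/*
 * As an illustration, a 64-bit BPF_ADD | BPF_FETCH emits an LL/SC loop
 * along the lines of
 *
 *	1:	lld	t1, off(dst)
 *		daddu	t2, t1, src
 *		scd	t2, off(dst)
 *		beqz	t2, 1b		# retry if the store failed
 *		nop			# delay slot
 *		move	src, t1		# fetch: return the old value
 *
 * preceded by a memory barrier when LLSC_sync() emits one.
 */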

/* Atomic compare-and-exchange */
static void emit_cmpxchg_r64(struct jit_context *ctx, u8 dst, u8 src, s16 off)
{
	u8 r0 = bpf2mips64[BPF_REG_0];
	u8 t1 = MIPS_R_T6;
	u8 t2 = MIPS_R_T7;

	LLSC_sync(ctx);
	emit(ctx, lld, t1, off, dst);
	emit(ctx, bne, t1, r0, 12);
	emit(ctx, move, t2, src); /* Delay slot */
	emit(ctx, scd, t2, off, dst);
	emit(ctx, LLSC_beqz, t2, -20 - LLSC_offset);
	emit(ctx, move, r0, t1); /* Delay slot */

	clobber_reg(ctx, r0);
}
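
/*
 * The sequence above is equivalent to an atomic
 *
 *	old = *(u64 *)(dst + off);
 *	if (old == r0)
 *		*(u64 *)(dst + off) = src;
 *	r0 = old;
 *
 * with the final move to r0 sitting in a branch delay slot, so the old
 * value is returned on both the hit and the miss path.
 */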

/* Function call */
static int emit_call(struct jit_context *ctx, const struct bpf_insn *insn)
{
	u8 zx = bpf2mips64[JIT_REG_ZX];
	u8 tmp = MIPS_R_T6;
	bool fixed;
	u64 addr;

	/* Decode the call address */
	if (bpf_jit_get_func_addr(ctx->program, insn, false,
				  &addr, &fixed) < 0)
		return -1;
	if (!fixed)
		return -1;

	/* Push caller-saved registers on stack */
	push_regs(ctx, ctx->clobbered & JIT_CALLER_REGS, 0, 0);

	/* Emit function call */
	emit_mov_i64(ctx, tmp, addr & JALR_MASK);
	emit(ctx, jalr, MIPS_R_RA, tmp);
	emit(ctx, nop); /* Delay slot */

	/* Restore caller-saved registers */
	pop_regs(ctx, ctx->clobbered & JIT_CALLER_REGS, 0, 0);

	/* Re-initialize the JIT zero-extension register if accessed */
	if (ctx->accessed & BIT(JIT_REG_ZX)) {
		emit(ctx, daddiu, zx, MIPS_R_ZERO, -1);
		emit(ctx, dsrl32, zx, zx, 0);
	}

	clobber_reg(ctx, MIPS_R_RA);
	clobber_reg(ctx, MIPS_R_V0);
	clobber_reg(ctx, MIPS_R_V1);
	return 0;
}

/* Function tail call */
static int emit_tail_call(struct jit_context *ctx)
{
	u8 ary = bpf2mips64[BPF_REG_2];
	u8 ind = bpf2mips64[BPF_REG_3];
	u8 tcc = bpf2mips64[JIT_REG_TC];
	u8 tmp = MIPS_R_T6;
	int off;

	/*
	 * Tail call:
	 * eBPF R1 - function argument (context ptr), passed in a0-a1
	 * eBPF R2 - ptr to object with array of function entry points
	 * eBPF R3 - array index of function to be called
	 */

	/* if (ind >= ary->map.max_entries) goto out */
	off = offsetof(struct bpf_array, map.max_entries);
	if (off > 0x7fff)
		return -1;
	emit(ctx, lwu, tmp, off, ary);            /* tmp = ary->map.max_entries */
	emit(ctx, sltu, tmp, ind, tmp);           /* tmp = ind < tmp            */
	emit(ctx, beqz, tmp, get_offset(ctx, 1)); /* PC += off(1) if tmp == 0   */

	/* if (--TCC < 0) goto out */
	emit(ctx, daddiu, tcc, tcc, -1);          /* tcc-- (delay slot)      */
	emit(ctx, bltz, tcc, get_offset(ctx, 1)); /* PC += off(1) if tcc < 0 */
						  /* (next insn delay slot)  */
	/* prog = ary->ptrs[ind] */
	off = offsetof(struct bpf_array, ptrs);
	if (off > 0x7fff)
		return -1;
	emit(ctx, dsll, tmp, ind, 3);             /* tmp = ind << 3     */
	emit(ctx, daddu, tmp, tmp, ary);          /* tmp += ary         */
	emit(ctx, ld, tmp, off, tmp);             /* tmp = *(tmp + off) */

	/* if (prog == 0) goto out */
	emit(ctx, beqz, tmp, get_offset(ctx, 1)); /* PC += off(1) if tmp == 0 */
	emit(ctx, nop);                           /* Delay slot               */

	/* func = prog->bpf_func + 4 (prologue skip offset) */
	off = offsetof(struct bpf_prog, bpf_func);
	if (off > 0x7fff)
		return -1;
	emit(ctx, ld, tmp, off, tmp);                /* tmp = *(tmp + off) */
	emit(ctx, daddiu, tmp, tmp, JIT_TCALL_SKIP); /* tmp += skip (4)    */

	/* goto func */
	build_epilogue(ctx, tmp);
	access_reg(ctx, JIT_REG_TC);
	return 0;
}

/*
 * Stack frame layout for a JITed program (stack grows down).
 *
 * Higher address  : Previous stack frame      :
 *                 +===========================+  <--- MIPS sp before call
 *                 | Callee-saved registers,   |
 *                 | including RA and FP       |
 *                 +---------------------------+  <--- eBPF FP (MIPS fp)
 *                 | Local eBPF variables      |
 *                 | allocated by program      |
 *                 +---------------------------+
 *                 | Reserved for caller-saved |
 *                 | registers                 |
 * Lower address   +===========================+  <--- MIPS sp
 */

/* Build program prologue to set up the stack and registers */
void build_prologue(struct jit_context *ctx)
{
	u8 fp = bpf2mips64[BPF_REG_FP];
	u8 tc = bpf2mips64[JIT_REG_TC];
	u8 zx = bpf2mips64[JIT_REG_ZX];
	int stack, saved, locals, reserved;

	/*
	 * In the unlikely event that the TCC limit is raised to more
	 * than 16 bits, it no longer fits the immediate field of the
	 * ori instruction emitted below (max 0xffff). It is better to
	 * fail to compile than to degrade gracefully.
	 */
	BUILD_BUG_ON(MAX_TAIL_CALL_CNT > 0xffff);

	/*
	 * The first instruction initializes the tail call count register.
	 * On a tail call, the calling function jumps into the prologue
	 * after this instruction.
	 */
	emit(ctx, ori, tc, MIPS_R_ZERO, MAX_TAIL_CALL_CNT);

	/* === Entry-point for tail calls === */

	/*
	 * If the eBPF frame pointer and tail call count registers were
	 * accessed they must be preserved. Mark them as clobbered here
	 * to save and restore them on the stack as needed.
	 */
	if (ctx->accessed & BIT(BPF_REG_FP))
		clobber_reg(ctx, fp);
	if (ctx->accessed & BIT(JIT_REG_TC))
		clobber_reg(ctx, tc);
	if (ctx->accessed & BIT(JIT_REG_ZX))
		clobber_reg(ctx, zx);

	/* Compute the stack space needed for callee-saved registers */
	saved = hweight32(ctx->clobbered & JIT_CALLEE_REGS) * sizeof(u64);
	saved = ALIGN(saved, MIPS_STACK_ALIGNMENT);

	/* Stack space used by eBPF program local data */
	locals = ALIGN(ctx->program->aux->stack_depth, MIPS_STACK_ALIGNMENT);

	/*
	 * If we are emitting function calls, reserve extra stack space for
	 * caller-saved registers needed by the JIT. The required space is
	 * computed automatically during resource usage discovery (pass 1).
	 */
	reserved = ctx->stack_used;

	/* Allocate the stack frame */
	stack = ALIGN(saved + locals + reserved, MIPS_STACK_ALIGNMENT);
	if (stack)
		emit(ctx, daddiu, MIPS_R_SP, MIPS_R_SP, -stack);

	/* Store callee-saved registers on stack */
	push_regs(ctx, ctx->clobbered & JIT_CALLEE_REGS, 0, stack - saved);

	/* Initialize the eBPF frame pointer if accessed */
	if (ctx->accessed & BIT(BPF_REG_FP))
		emit(ctx, daddiu, fp, MIPS_R_SP, stack - saved);

	/* Initialize the eBPF JIT zero-extension register if accessed */
	if (ctx->accessed & BIT(JIT_REG_ZX)) {
		emit(ctx, daddiu, zx, MIPS_R_ZERO, -1);
		emit(ctx, dsrl32, zx, zx, 0);
	}

	ctx->saved_size = saved;
	ctx->stack_size = stack;
}
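
/*
 * For example, with three clobbered callee-saved registers (24 bytes,
 * aligned up to 32), a program stack_depth of 40 (aligned up to 48)
 * and 16 bytes reserved for caller-saved registers, the prologue
 * allocates ALIGN(32 + 48 + 16, 16) = 96 bytes of stack.
 */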

/* Build the program epilogue to restore the stack and registers */
void build_epilogue(struct jit_context *ctx, int dest_reg)
{
	/* Restore callee-saved registers from stack */
	pop_regs(ctx, ctx->clobbered & JIT_CALLEE_REGS, 0,
		 ctx->stack_size - ctx->saved_size);

	/* Release the stack frame */
	if (ctx->stack_size)
		emit(ctx, daddiu, MIPS_R_SP, MIPS_R_SP, ctx->stack_size);

	/* Jump to return address and sign-extend the 32-bit return value */
	emit(ctx, jr, dest_reg);
	emit(ctx, sll, MIPS_R_V0, MIPS_R_V0, 0); /* Delay slot */
}

/* Build one eBPF instruction */
int build_insn(const struct bpf_insn *insn, struct jit_context *ctx)
{
	u8 dst = bpf2mips64[insn->dst_reg];
	u8 src = bpf2mips64[insn->src_reg];
	u8 res = bpf2mips64[BPF_REG_0];
	u8 code = insn->code;
	s16 off = insn->off;
	s32 imm = insn->imm;
	s32 val, rel;
	u8 alu, jmp;

	switch (code) {
	/* ALU operations */
	/* dst = imm */
	case BPF_ALU | BPF_MOV | BPF_K:
		emit_mov_i(ctx, dst, imm);
		emit_zext_ver(ctx, dst);
		break;
	/* dst = src */
	case BPF_ALU | BPF_MOV | BPF_X:
		if (imm == 1) {
			/* Special mov32 for zext */
			emit_zext(ctx, dst);
		} else {
			emit_mov_r(ctx, dst, src);
			emit_zext_ver(ctx, dst);
		}
		break;
	/* dst = -dst */
	case BPF_ALU | BPF_NEG:
		emit_sext(ctx, dst, dst);
		emit_alu_i(ctx, dst, 0, BPF_NEG);
		emit_zext_ver(ctx, dst);
		break;
	/* dst = dst & imm */
	/* dst = dst | imm */
	/* dst = dst ^ imm */
	/* dst = dst << imm */
	case BPF_ALU | BPF_OR | BPF_K:
	case BPF_ALU | BPF_AND | BPF_K:
	case BPF_ALU | BPF_XOR | BPF_K:
	case BPF_ALU | BPF_LSH | BPF_K:
		if (!valid_alu_i(BPF_OP(code), imm)) {
			emit_mov_i(ctx, MIPS_R_T4, imm);
			emit_alu_r(ctx, dst, MIPS_R_T4, BPF_OP(code));
		} else if (rewrite_alu_i(BPF_OP(code), imm, &alu, &val)) {
			emit_alu_i(ctx, dst, val, alu);
		}
		emit_zext_ver(ctx, dst);
		break;
	/* dst = dst >> imm */
	/* dst = dst >> imm (arithmetic) */
	/* dst = dst + imm */
	/* dst = dst - imm */
	/* dst = dst * imm */
	/* dst = dst / imm */
	/* dst = dst % imm */
	case BPF_ALU | BPF_RSH | BPF_K:
	case BPF_ALU | BPF_ARSH | BPF_K:
	case BPF_ALU | BPF_ADD | BPF_K:
	case BPF_ALU | BPF_SUB | BPF_K:
	case BPF_ALU | BPF_MUL | BPF_K:
	case BPF_ALU | BPF_DIV | BPF_K:
	case BPF_ALU | BPF_MOD | BPF_K:
		if (!valid_alu_i(BPF_OP(code), imm)) {
			emit_sext(ctx, dst, dst);
			emit_mov_i(ctx, MIPS_R_T4, imm);
			emit_alu_r(ctx, dst, MIPS_R_T4, BPF_OP(code));
		} else if (rewrite_alu_i(BPF_OP(code), imm, &alu, &val)) {
			emit_sext(ctx, dst, dst);
			emit_alu_i(ctx, dst, val, alu);
		}
		emit_zext_ver(ctx, dst);
		break;
	/* dst = dst & src */
	/* dst = dst | src */
	/* dst = dst ^ src */
	/* dst = dst << src */
	case BPF_ALU | BPF_AND | BPF_X:
	case BPF_ALU | BPF_OR | BPF_X:
	case BPF_ALU | BPF_XOR | BPF_X:
	case BPF_ALU | BPF_LSH | BPF_X:
		emit_alu_r(ctx, dst, src, BPF_OP(code));
		emit_zext_ver(ctx, dst);
		break;
	/* dst = dst >> src */
	/* dst = dst >> src (arithmetic) */
	/* dst = dst + src */
	/* dst = dst - src */
	/* dst = dst * src */
	/* dst = dst / src */
	/* dst = dst % src */
	case BPF_ALU | BPF_RSH | BPF_X:
	case BPF_ALU | BPF_ARSH | BPF_X:
	case BPF_ALU | BPF_ADD | BPF_X:
	case BPF_ALU | BPF_SUB | BPF_X:
	case BPF_ALU | BPF_MUL | BPF_X:
	case BPF_ALU | BPF_DIV | BPF_X:
	case BPF_ALU | BPF_MOD | BPF_X:
		emit_sext(ctx, dst, dst);
		emit_sext(ctx, MIPS_R_T4, src);
		emit_alu_r(ctx, dst, MIPS_R_T4, BPF_OP(code));
		emit_zext_ver(ctx, dst);
		break;
	/* dst = imm (64-bit) */
	case BPF_ALU64 | BPF_MOV | BPF_K:
		emit_mov_i(ctx, dst, imm);
		break;
	/* dst = src (64-bit) */
	case BPF_ALU64 | BPF_MOV | BPF_X:
		emit_mov_r(ctx, dst, src);
		break;
	/* dst = -dst (64-bit) */
	case BPF_ALU64 | BPF_NEG:
		emit_alu_i64(ctx, dst, 0, BPF_NEG);
		break;
	/* dst = dst & imm (64-bit) */
	/* dst = dst | imm (64-bit) */
	/* dst = dst ^ imm (64-bit) */
	/* dst = dst << imm (64-bit) */
	/* dst = dst >> imm (64-bit) */
	/* dst = dst >> imm (64-bit, arithmetic) */
	/* dst = dst + imm (64-bit) */
	/* dst = dst - imm (64-bit) */
	/* dst = dst * imm (64-bit) */
	/* dst = dst / imm (64-bit) */
	/* dst = dst % imm (64-bit) */
	case BPF_ALU64 | BPF_AND | BPF_K:
	case BPF_ALU64 | BPF_OR | BPF_K:
	case BPF_ALU64 | BPF_XOR | BPF_K:
	case BPF_ALU64 | BPF_LSH | BPF_K:
	case BPF_ALU64 | BPF_RSH | BPF_K:
	case BPF_ALU64 | BPF_ARSH | BPF_K:
	case BPF_ALU64 | BPF_ADD | BPF_K:
	case BPF_ALU64 | BPF_SUB | BPF_K:
	case BPF_ALU64 | BPF_MUL | BPF_K:
	case BPF_ALU64 | BPF_DIV | BPF_K:
	case BPF_ALU64 | BPF_MOD | BPF_K:
		if (!valid_alu_i(BPF_OP(code), imm)) {
			emit_mov_i(ctx, MIPS_R_T4, imm);
			emit_alu_r64(ctx, dst, MIPS_R_T4, BPF_OP(code));
		} else if (rewrite_alu_i(BPF_OP(code), imm, &alu, &val)) {
			emit_alu_i64(ctx, dst, val, alu);
		}
		break;
	/* dst = dst & src (64-bit) */
	/* dst = dst | src (64-bit) */
	/* dst = dst ^ src (64-bit) */
	/* dst = dst << src (64-bit) */
	/* dst = dst >> src (64-bit) */
	/* dst = dst >> src (64-bit, arithmetic) */
	/* dst = dst + src (64-bit) */
	/* dst = dst - src (64-bit) */
	/* dst = dst * src (64-bit) */
	/* dst = dst / src (64-bit) */
	/* dst = dst % src (64-bit) */
	case BPF_ALU64 | BPF_AND | BPF_X:
	case BPF_ALU64 | BPF_OR | BPF_X:
	case BPF_ALU64 | BPF_XOR | BPF_X:
	case BPF_ALU64 | BPF_LSH | BPF_X:
	case BPF_ALU64 | BPF_RSH | BPF_X:
	case BPF_ALU64 | BPF_ARSH | BPF_X:
	case BPF_ALU64 | BPF_ADD | BPF_X:
	case BPF_ALU64 | BPF_SUB | BPF_X:
	case BPF_ALU64 | BPF_MUL | BPF_X:
	case BPF_ALU64 | BPF_DIV | BPF_X:
	case BPF_ALU64 | BPF_MOD | BPF_X:
		emit_alu_r64(ctx, dst, src, BPF_OP(code));
		break;
	/* dst = htole(dst) */
	/* dst = htobe(dst) */
	case BPF_ALU | BPF_END | BPF_FROM_LE:
	case BPF_ALU | BPF_END | BPF_FROM_BE:
		if (BPF_SRC(code) ==
#ifdef __BIG_ENDIAN
		    BPF_FROM_LE
#else
		    BPF_FROM_BE
#endif
		    )
			emit_bswap_r64(ctx, dst, imm);
		else
			emit_trunc_r64(ctx, dst, imm);
		break;
	/* dst = imm64 */
	case BPF_LD | BPF_IMM | BPF_DW:
		emit_mov_i64(ctx, dst, (u32)imm | ((u64)insn[1].imm << 32));
		return 1;
	/* LDX: dst = *(size *)(src + off) */
	case BPF_LDX | BPF_MEM | BPF_W:
	case BPF_LDX | BPF_MEM | BPF_H:
	case BPF_LDX | BPF_MEM | BPF_B:
	case BPF_LDX | BPF_MEM | BPF_DW:
		emit_ldx(ctx, dst, src, off, BPF_SIZE(code));
		break;
	/* ST: *(size *)(dst + off) = imm */
	case BPF_ST | BPF_MEM | BPF_W:
	case BPF_ST | BPF_MEM | BPF_H:
	case BPF_ST | BPF_MEM | BPF_B:
	case BPF_ST | BPF_MEM | BPF_DW:
		emit_mov_i(ctx, MIPS_R_T4, imm);
		emit_stx(ctx, dst, MIPS_R_T4, off, BPF_SIZE(code));
		break;
	/* STX: *(size *)(dst + off) = src */
	case BPF_STX | BPF_MEM | BPF_W:
	case BPF_STX | BPF_MEM | BPF_H:
	case BPF_STX | BPF_MEM | BPF_B:
	case BPF_STX | BPF_MEM | BPF_DW:
		emit_stx(ctx, dst, src, off, BPF_SIZE(code));
		break;
	/* Speculation barrier */
	case BPF_ST | BPF_NOSPEC:
		break;
	/* Atomics */
	case BPF_STX | BPF_ATOMIC | BPF_W:
	case BPF_STX | BPF_ATOMIC | BPF_DW:
		switch (imm) {
		case BPF_ADD:
		case BPF_ADD | BPF_FETCH:
		case BPF_AND:
		case BPF_AND | BPF_FETCH:
		case BPF_OR:
		case BPF_OR | BPF_FETCH:
		case BPF_XOR:
		case BPF_XOR | BPF_FETCH:
		case BPF_XCHG:
			if (BPF_SIZE(code) == BPF_DW) {
				emit_atomic_r64(ctx, dst, src, off, imm);
			} else if (imm & BPF_FETCH) {
				u8 tmp = dst;

				if (src == dst) { /* Don't overwrite dst */
					emit_mov_r(ctx, MIPS_R_T4, dst);
					tmp = MIPS_R_T4;
				}
				emit_sext(ctx, src, src);
				emit_atomic_r(ctx, tmp, src, off, imm);
				emit_zext_ver(ctx, src);
			} else { /* 32-bit, no fetch */
				emit_sext(ctx, MIPS_R_T4, src);
				emit_atomic_r(ctx, dst, MIPS_R_T4, off, imm);
			}
			break;
		case BPF_CMPXCHG:
			if (BPF_SIZE(code) == BPF_DW) {
				emit_cmpxchg_r64(ctx, dst, src, off);
			} else {
				u8 tmp = res;

				if (res == dst) /* Don't overwrite dst */
					tmp = MIPS_R_T4;
				emit_sext(ctx, tmp, res);
				emit_sext(ctx, MIPS_R_T5, src);
				emit_cmpxchg_r(ctx, dst, MIPS_R_T5, tmp, off);
				if (res == dst) /* Restore result */
					emit_mov_r(ctx, res, MIPS_R_T4);
				/* Result zero-extension inserted by verifier */
			}
			break;
		default:
			goto notyet;
		}
		break;
	/* PC += off if dst == src */
	/* PC += off if dst != src */
	/* PC += off if dst & src */
	/* PC += off if dst > src */
	/* PC += off if dst >= src */
	/* PC += off if dst < src */
	/* PC += off if dst <= src */
	/* PC += off if dst > src (signed) */
	/* PC += off if dst >= src (signed) */
	/* PC += off if dst < src (signed) */
	/* PC += off if dst <= src (signed) */
	case BPF_JMP32 | BPF_JEQ | BPF_X:
	case BPF_JMP32 | BPF_JNE | BPF_X:
	case BPF_JMP32 | BPF_JSET | BPF_X:
	case BPF_JMP32 | BPF_JGT | BPF_X:
	case BPF_JMP32 | BPF_JGE | BPF_X:
	case BPF_JMP32 | BPF_JLT | BPF_X:
	case BPF_JMP32 | BPF_JLE | BPF_X:
	case BPF_JMP32 | BPF_JSGT | BPF_X:
	case BPF_JMP32 | BPF_JSGE | BPF_X:
	case BPF_JMP32 | BPF_JSLT | BPF_X:
	case BPF_JMP32 | BPF_JSLE | BPF_X:
		if (off == 0)
			break;
		setup_jmp_r(ctx, dst == src, BPF_OP(code), off, &jmp, &rel);
		emit_sext(ctx, MIPS_R_T4, dst); /* Sign-extended dst */
		emit_sext(ctx, MIPS_R_T5, src); /* Sign-extended src */
		emit_jmp_r(ctx, MIPS_R_T4, MIPS_R_T5, rel, jmp);
		if (finish_jmp(ctx, jmp, off) < 0)
			goto toofar;
		break;
	/* PC += off if dst == imm */
	/* PC += off if dst != imm */
	/* PC += off if dst & imm */
	/* PC += off if dst > imm */
	/* PC += off if dst >= imm */
	/* PC += off if dst < imm */
	/* PC += off if dst <= imm */
	/* PC += off if dst > imm (signed) */
	/* PC += off if dst >= imm (signed) */
	/* PC += off if dst < imm (signed) */
	/* PC += off if dst <= imm (signed) */
	case BPF_JMP32 | BPF_JEQ | BPF_K:
	case BPF_JMP32 | BPF_JNE | BPF_K:
	case BPF_JMP32 | BPF_JSET | BPF_K:
	case BPF_JMP32 | BPF_JGT | BPF_K:
	case BPF_JMP32 | BPF_JGE | BPF_K:
	case BPF_JMP32 | BPF_JLT | BPF_K:
	case BPF_JMP32 | BPF_JLE | BPF_K:
	case BPF_JMP32 | BPF_JSGT | BPF_K:
	case BPF_JMP32 | BPF_JSGE | BPF_K:
	case BPF_JMP32 | BPF_JSLT | BPF_K:
	case BPF_JMP32 | BPF_JSLE | BPF_K:
		if (off == 0)
			break;
		setup_jmp_i(ctx, imm, 32, BPF_OP(code), off, &jmp, &rel);
		emit_sext(ctx, MIPS_R_T4, dst); /* Sign-extended dst */
		if (valid_jmp_i(jmp, imm)) {
			emit_jmp_i(ctx, MIPS_R_T4, imm, rel, jmp);
		} else {
			/* Move large immediate to register, sign-extended */
			emit_mov_i(ctx, MIPS_R_T5, imm);
			emit_jmp_r(ctx, MIPS_R_T4, MIPS_R_T5, rel, jmp);
		}
		if (finish_jmp(ctx, jmp, off) < 0)
			goto toofar;
		break;
	/* PC += off if dst == src */
	/* PC += off if dst != src */
	/* PC += off if dst & src */
	/* PC += off if dst > src */
	/* PC += off if dst >= src */
	/* PC += off if dst < src */
	/* PC += off if dst <= src */
	/* PC += off if dst > src (signed) */
	/* PC += off if dst >= src (signed) */
	/* PC += off if dst < src (signed) */
	/* PC += off if dst <= src (signed) */
	case BPF_JMP | BPF_JEQ | BPF_X:
	case BPF_JMP | BPF_JNE | BPF_X:
	case BPF_JMP | BPF_JSET | BPF_X:
	case BPF_JMP | BPF_JGT | BPF_X:
	case BPF_JMP | BPF_JGE | BPF_X:
	case BPF_JMP | BPF_JLT | BPF_X:
	case BPF_JMP | BPF_JLE | BPF_X:
	case BPF_JMP | BPF_JSGT | BPF_X:
	case BPF_JMP | BPF_JSGE | BPF_X:
	case BPF_JMP | BPF_JSLT | BPF_X:
	case BPF_JMP | BPF_JSLE | BPF_X:
		if (off == 0)
			break;
		setup_jmp_r(ctx, dst == src, BPF_OP(code), off, &jmp, &rel);
		emit_jmp_r(ctx, dst, src, rel, jmp);
		if (finish_jmp(ctx, jmp, off) < 0)
			goto toofar;
		break;
	/* PC += off if dst == imm */
	/* PC += off if dst != imm */
	/* PC += off if dst & imm */
	/* PC += off if dst > imm */
	/* PC += off if dst >= imm */
	/* PC += off if dst < imm */
	/* PC += off if dst <= imm */
	/* PC += off if dst > imm (signed) */
	/* PC += off if dst >= imm (signed) */
	/* PC += off if dst < imm (signed) */
	/* PC += off if dst <= imm (signed) */
	case BPF_JMP | BPF_JEQ | BPF_K:
	case BPF_JMP | BPF_JNE | BPF_K:
	case BPF_JMP | BPF_JSET | BPF_K:
	case BPF_JMP | BPF_JGT | BPF_K:
	case BPF_JMP | BPF_JGE | BPF_K:
	case BPF_JMP | BPF_JLT | BPF_K:
	case BPF_JMP | BPF_JLE | BPF_K:
	case BPF_JMP | BPF_JSGT | BPF_K:
	case BPF_JMP | BPF_JSGE | BPF_K:
	case BPF_JMP | BPF_JSLT | BPF_K:
	case BPF_JMP | BPF_JSLE | BPF_K:
		if (off == 0)
			break;
		setup_jmp_i(ctx, imm, 64, BPF_OP(code), off, &jmp, &rel);
		if (valid_jmp_i(jmp, imm)) {
			emit_jmp_i(ctx, dst, imm, rel, jmp);
		} else {
			/* Move large immediate to register */
			emit_mov_i(ctx, MIPS_R_T4, imm);
			emit_jmp_r(ctx, dst, MIPS_R_T4, rel, jmp);
		}
		if (finish_jmp(ctx, jmp, off) < 0)
			goto toofar;
		break;
	/* PC += off */
	case BPF_JMP | BPF_JA:
		if (off == 0)
			break;
		if (emit_ja(ctx, off) < 0)
			goto toofar;
		break;
	/* Tail call */
	case BPF_JMP | BPF_TAIL_CALL:
		if (emit_tail_call(ctx) < 0)
			goto invalid;
		break;
	/* Function call */
	case BPF_JMP | BPF_CALL:
		if (emit_call(ctx, insn) < 0)
			goto invalid;
		break;
	/* Function return */
	case BPF_JMP | BPF_EXIT:
		/*
		 * Optimization: when the last instruction is EXIT, simply
		 * continue to the epilogue.
		 */
		if (ctx->bpf_index == ctx->program->len - 1)
			break;
		if (emit_exit(ctx) < 0)
			goto toofar;
		break;

	default:
invalid:
		pr_err_once("unknown opcode %02x\n", code);
		return -EINVAL;
notyet:
		pr_info_once("*** NOT YET: opcode %02x ***\n", code);
		return -EFAULT;
toofar:
		pr_info_once("*** TOO FAR: jump at %u opcode %02x ***\n",
			     ctx->bpf_index, code);
		return -E2BIG;
	}
	return 0;
}