1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * BPF JIT compiler |
4 | * |
5 | * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com) |
6 | * Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com |
7 | */ |
8 | #include <linux/netdevice.h> |
9 | #include <linux/filter.h> |
10 | #include <linux/if_vlan.h> |
11 | #include <linux/bpf.h> |
12 | #include <linux/memory.h> |
13 | #include <linux/sort.h> |
14 | #include <asm/extable.h> |
15 | #include <asm/ftrace.h> |
16 | #include <asm/set_memory.h> |
17 | #include <asm/nospec-branch.h> |
18 | #include <asm/text-patching.h> |
19 | #include <asm/unwind.h> |
20 | |
21 | static bool all_callee_regs_used[4] = {true, true, true, true}; |
22 | |
23 | static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len) |
24 | { |
25 | if (len == 1) |
26 | *ptr = bytes; |
27 | else if (len == 2) |
28 | *(u16 *)ptr = bytes; |
29 | else { |
30 | *(u32 *)ptr = bytes; |
31 | barrier(); |
32 | } |
33 | return ptr + len; |
34 | } |
35 | |
36 | #define EMIT(bytes, len) \ |
37 | do { prog = emit_code(prog, bytes, len); } while (0) |
38 | |
39 | #define EMIT1(b1) EMIT(b1, 1) |
40 | #define EMIT2(b1, b2) EMIT((b1) + ((b2) << 8), 2) |
41 | #define EMIT3(b1, b2, b3) EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3) |
42 | #define EMIT4(b1, b2, b3, b4) EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4) |
43 | |
44 | #define EMIT1_off32(b1, off) \ |
45 | do { EMIT1(b1); EMIT(off, 4); } while (0) |
46 | #define EMIT2_off32(b1, b2, off) \ |
47 | do { EMIT2(b1, b2); EMIT(off, 4); } while (0) |
48 | #define EMIT3_off32(b1, b2, b3, off) \ |
49 | do { EMIT3(b1, b2, b3); EMIT(off, 4); } while (0) |
50 | #define EMIT4_off32(b1, b2, b3, b4, off) \ |
51 | do { EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0) |
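
/*
 * Worked example of the packing above (illustrative): EMIT2(0x31, 0xC0)
 * hands bytes = 0x31 + (0xC0 << 8) = 0xC031 to emit_code(), which stores
 * it as a little-endian u16, so the instruction stream receives
 * 0x31 0xC0, i.e. "xor eax, eax". The first macro argument is always the
 * first opcode byte emitted.
 */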
52 | |
53 | #ifdef CONFIG_X86_KERNEL_IBT |
54 | #define EMIT_ENDBR() EMIT(gen_endbr(), 4) |
55 | #else |
56 | #define EMIT_ENDBR() |
57 | #endif |
58 | |
59 | static bool is_imm8(int value) |
60 | { |
61 | return value <= 127 && value >= -128; |
62 | } |
63 | |
64 | static bool is_simm32(s64 value) |
65 | { |
66 | return value == (s64)(s32)value; |
67 | } |
68 | |
69 | static bool is_uimm32(u64 value) |
70 | { |
71 | return value == (u64)(u32)value; |
72 | } |
73 | |
74 | /* mov dst, src */ |
75 | #define EMIT_mov(DST, SRC) \ |
76 | do { \ |
77 | if (DST != SRC) \ |
78 | EMIT3(add_2mod(0x48, DST, SRC), 0x89, add_2reg(0xC0, DST, SRC)); \ |
79 | } while (0) |
80 | |
81 | static int bpf_size_to_x86_bytes(int bpf_size) |
82 | { |
83 | if (bpf_size == BPF_W) |
84 | return 4; |
85 | else if (bpf_size == BPF_H) |
86 | return 2; |
87 | else if (bpf_size == BPF_B) |
88 | return 1; |
89 | else if (bpf_size == BPF_DW) |
90 | return 4; /* imm32 */ |
91 | else |
92 | return 0; |
93 | } |
94 | |
95 | /* |
96 | * List of x86 cond jumps opcodes (. + s8) |
97 | * Add 0x10 (and an extra 0x0f) to generate far jumps (. + s32) |
98 | */ |
99 | #define X86_JB 0x72 |
100 | #define X86_JAE 0x73 |
101 | #define X86_JE 0x74 |
102 | #define X86_JNE 0x75 |
103 | #define X86_JBE 0x76 |
104 | #define X86_JA 0x77 |
105 | #define X86_JL 0x7C |
106 | #define X86_JGE 0x7D |
107 | #define X86_JLE 0x7E |
108 | #define X86_JG 0x7F |
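
/*
 * Example (illustrative): X86_JE is the one-byte 0x74 "je rel8" form; per the
 * rule above, the far form is 0x0F 0x84 "je rel32", i.e. 0x74 + 0x10 = 0x84
 * behind the extra 0x0F escape byte.
 */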
109 | |
110 | /* Pick a register outside of BPF range for JIT internal work */ |
111 | #define AUX_REG (MAX_BPF_JIT_REG + 1) |
112 | #define X86_REG_R9 (MAX_BPF_JIT_REG + 2) |
113 | |
114 | /* |
115 | * The following table maps BPF registers to x86-64 registers. |
116 | * |
117 | * x86-64 register R12 is unused, since if used as base address |
118 | * register in load/store instructions, it always needs an |
119 | * extra byte of encoding and is callee saved. |
120 | * |
121 | * x86-64 register R9 is not used by BPF programs, but can be used by BPF |
122 | * trampoline. x86-64 register R10 is used for blinding (if enabled). |
123 | */ |
124 | static const int reg2hex[] = { |
125 | [BPF_REG_0] = 0, /* RAX */ |
126 | [BPF_REG_1] = 7, /* RDI */ |
127 | [BPF_REG_2] = 6, /* RSI */ |
128 | [BPF_REG_3] = 2, /* RDX */ |
129 | [BPF_REG_4] = 1, /* RCX */ |
130 | [BPF_REG_5] = 0, /* R8 */ |
131 | [BPF_REG_6] = 3, /* RBX callee saved */ |
132 | [BPF_REG_7] = 5, /* R13 callee saved */ |
133 | [BPF_REG_8] = 6, /* R14 callee saved */ |
134 | [BPF_REG_9] = 7, /* R15 callee saved */ |
135 | [BPF_REG_FP] = 5, /* RBP readonly */ |
136 | [BPF_REG_AX] = 2, /* R10 temp register */ |
137 | [AUX_REG] = 3, /* R11 temp register */ |
138 | [X86_REG_R9] = 1, /* R9 register, 6th function argument */ |
139 | }; |
140 | |
141 | static const int reg2pt_regs[] = { |
142 | [BPF_REG_0] = offsetof(struct pt_regs, ax), |
143 | [BPF_REG_1] = offsetof(struct pt_regs, di), |
144 | [BPF_REG_2] = offsetof(struct pt_regs, si), |
145 | [BPF_REG_3] = offsetof(struct pt_regs, dx), |
146 | [BPF_REG_4] = offsetof(struct pt_regs, cx), |
147 | [BPF_REG_5] = offsetof(struct pt_regs, r8), |
148 | [BPF_REG_6] = offsetof(struct pt_regs, bx), |
149 | [BPF_REG_7] = offsetof(struct pt_regs, r13), |
150 | [BPF_REG_8] = offsetof(struct pt_regs, r14), |
151 | [BPF_REG_9] = offsetof(struct pt_regs, r15), |
152 | }; |
153 | |
154 | /* |
155 | * is_ereg() == true if BPF register 'reg' maps to x86-64 r8..r15 |
156 | * which need extra byte of encoding. |
157 | * rax,rcx,...,rbp have simpler encoding |
158 | */ |
159 | static bool is_ereg(u32 reg) |
160 | { |
161 | return (1 << reg) & (BIT(BPF_REG_5) | |
162 | BIT(AUX_REG) | |
163 | BIT(BPF_REG_7) | |
164 | BIT(BPF_REG_8) | |
165 | BIT(BPF_REG_9) | |
166 | BIT(X86_REG_R9) | |
167 | BIT(BPF_REG_AX)); |
168 | } |
169 | |
170 | /* |
171 | * is_ereg_8l() == true if BPF register 'reg' is mapped to access x86-64 |
172 | * lower 8-bit registers dil,sil,bpl,spl,r8b..r15b, which need extra byte |
173 | * of encoding. al,cl,dl,bl have simpler encoding. |
174 | */ |
175 | static bool is_ereg_8l(u32 reg) |
176 | { |
177 | return is_ereg(reg) || |
178 | (1 << reg) & (BIT(BPF_REG_1) | |
179 | BIT(BPF_REG_2) | |
180 | BIT(BPF_REG_FP)); |
181 | } |
182 | |
183 | static bool is_axreg(u32 reg) |
184 | { |
185 | return reg == BPF_REG_0; |
186 | } |
187 | |
188 | /* Add modifiers if 'reg' maps to x86-64 registers R8..R15 */ |
189 | static u8 add_1mod(u8 byte, u32 reg) |
190 | { |
191 | if (is_ereg(reg)) |
192 | byte |= 1; |
193 | return byte; |
194 | } |
195 | |
196 | static u8 add_2mod(u8 byte, u32 r1, u32 r2) |
197 | { |
198 | 	if (is_ereg(r1)) |
199 | 		byte |= 1; |
200 | 	if (is_ereg(r2)) |
201 | byte |= 4; |
202 | return byte; |
203 | } |
204 | |
205 | /* Encode 'dst_reg' register into x86-64 opcode 'byte' */ |
206 | static u8 add_1reg(u8 byte, u32 dst_reg) |
207 | { |
208 | return byte + reg2hex[dst_reg]; |
209 | } |
210 | |
211 | /* Encode 'dst_reg' and 'src_reg' registers into x86-64 opcode 'byte' */ |
212 | static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg) |
213 | { |
214 | return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3); |
215 | } |
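
/*
 * Worked example (illustrative): EMIT_mov(BPF_REG_7, BPF_REG_6), i.e.
 * "mov r13, rbx", assembles as
 *   add_2mod(0x48, R7, R6) = 0x49   REX.W + REX.B (r13 sits in the r/m field)
 *   0x89                            mov r/m64, r64
 *   add_2reg(0xC0, R7, R6) = 0xdd   ModRM: mod=11, reg=rbx (3), r/m=r13 (5)
 * giving the byte sequence 49 89 dd.
 */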
216 | |
217 | /* Some 1-byte opcodes for binary ALU operations */ |
218 | static u8 simple_alu_opcodes[] = { |
219 | [BPF_ADD] = 0x01, |
220 | [BPF_SUB] = 0x29, |
221 | [BPF_AND] = 0x21, |
222 | [BPF_OR] = 0x09, |
223 | [BPF_XOR] = 0x31, |
224 | [BPF_LSH] = 0xE0, |
225 | [BPF_RSH] = 0xE8, |
226 | [BPF_ARSH] = 0xF8, |
227 | }; |
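
/*
 * Note (illustrative): the ADD..XOR entries are the one-byte "op r/m, reg"
 * opcodes and are paired with add_2reg(0xC0, dst, src); the LSH/RSH/ARSH
 * entries are not opcodes but ModRM bases (0xC0 | /4, /5, /7 << 3) paired
 * with the 0xC1/0xD1/0xD3 shift opcodes and add_1reg(). For example, a
 * BPF_ALU64 | BPF_LSH | BPF_K by 3 on R1 comes out as 48 c1 e7 03,
 * i.e. "shl rdi, 3".
 */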
228 | |
229 | static void jit_fill_hole(void *area, unsigned int size) |
230 | { |
231 | /* Fill whole space with INT3 instructions */ |
232 | memset(area, 0xcc, size); |
233 | } |
234 | |
235 | int bpf_arch_text_invalidate(void *dst, size_t len) |
236 | { |
237 | 	return IS_ERR_OR_NULL(text_poke_set(dst, 0xcc, len)); |
238 | } |
239 | |
240 | struct jit_context { |
241 | int cleanup_addr; /* Epilogue code offset */ |
242 | |
243 | /* |
244 | * Program specific offsets of labels in the code; these rely on the |
245 | * JIT doing at least 2 passes, recording the position on the first |
246 | * pass, only to generate the correct offset on the second pass. |
247 | */ |
248 | int tail_call_direct_label; |
249 | int tail_call_indirect_label; |
250 | }; |
251 | |
252 | /* Maximum number of bytes emitted while JITing one eBPF insn */ |
253 | #define BPF_MAX_INSN_SIZE 128 |
254 | #define BPF_INSN_SAFETY 64 |
255 | |
256 | /* Number of bytes emit_patch() needs to generate instructions */ |
257 | #define X86_PATCH_SIZE 5 |
258 | /* Number of bytes that will be skipped on tailcall */ |
259 | #define X86_TAIL_CALL_OFFSET (11 + ENDBR_INSN_SIZE) |
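
/*
 * Rough breakdown of the 11 bytes skipped above (illustrative, for the common
 * non-exception-cb prologue emitted by emit_prologue() below): 5-byte
 * patchable nop + 2-byte "xor eax, eax"/nop2 + "push rbp" (1 byte) +
 * "mov rbp, rsp" (3 bytes), with the leading ENDBR accounted for separately
 * via ENDBR_INSN_SIZE.
 */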
260 | |
261 | static void push_r12(u8 **pprog) |
262 | { |
263 | u8 *prog = *pprog; |
264 | |
265 | EMIT2(0x41, 0x54); /* push r12 */ |
266 | *pprog = prog; |
267 | } |
268 | |
269 | static void push_callee_regs(u8 **pprog, bool *callee_regs_used) |
270 | { |
271 | u8 *prog = *pprog; |
272 | |
273 | if (callee_regs_used[0]) |
274 | EMIT1(0x53); /* push rbx */ |
275 | if (callee_regs_used[1]) |
276 | EMIT2(0x41, 0x55); /* push r13 */ |
277 | if (callee_regs_used[2]) |
278 | EMIT2(0x41, 0x56); /* push r14 */ |
279 | if (callee_regs_used[3]) |
280 | EMIT2(0x41, 0x57); /* push r15 */ |
281 | *pprog = prog; |
282 | } |
283 | |
284 | static void pop_r12(u8 **pprog) |
285 | { |
286 | u8 *prog = *pprog; |
287 | |
288 | EMIT2(0x41, 0x5C); /* pop r12 */ |
289 | *pprog = prog; |
290 | } |
291 | |
292 | static void pop_callee_regs(u8 **pprog, bool *callee_regs_used) |
293 | { |
294 | u8 *prog = *pprog; |
295 | |
296 | if (callee_regs_used[3]) |
297 | EMIT2(0x41, 0x5F); /* pop r15 */ |
298 | if (callee_regs_used[2]) |
299 | EMIT2(0x41, 0x5E); /* pop r14 */ |
300 | if (callee_regs_used[1]) |
301 | EMIT2(0x41, 0x5D); /* pop r13 */ |
302 | if (callee_regs_used[0]) |
303 | EMIT1(0x5B); /* pop rbx */ |
304 | *pprog = prog; |
305 | } |
306 | |
307 | /* |
308 | * Emit x86-64 prologue code for BPF program. |
309 | * bpf_tail_call helper will skip the first X86_TAIL_CALL_OFFSET bytes |
310 | * while jumping to another program |
311 | */ |
312 | static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf, |
313 | bool tail_call_reachable, bool is_subprog, |
314 | bool is_exception_cb) |
315 | { |
316 | u8 *prog = *pprog; |
317 | |
318 | /* BPF trampoline can be made to work without these nops, |
319 | * but let's waste 5 bytes for now and optimize later |
320 | */ |
321 | EMIT_ENDBR(); |
322 | memcpy(prog, x86_nops[5], X86_PATCH_SIZE); |
323 | prog += X86_PATCH_SIZE; |
324 | if (!ebpf_from_cbpf) { |
325 | if (tail_call_reachable && !is_subprog) |
326 | /* When it's the entry of the whole tailcall context, |
327 | * zeroing rax means initialising tail_call_cnt. |
328 | */ |
329 | EMIT2(0x31, 0xC0); /* xor eax, eax */ |
330 | else |
331 | /* Keep the same instruction layout. */ |
332 | EMIT2(0x66, 0x90); /* nop2 */ |
333 | } |
334 | /* Exception callback receives FP as third parameter */ |
335 | if (is_exception_cb) { |
336 | EMIT3(0x48, 0x89, 0xF4); /* mov rsp, rsi */ |
337 | EMIT3(0x48, 0x89, 0xD5); /* mov rbp, rdx */ |
338 | /* The main frame must have exception_boundary as true, so we |
339 | * first restore those callee-saved regs from stack, before |
340 | * reusing the stack frame. |
341 | */ |
342 | 		pop_callee_regs(&prog, all_callee_regs_used); |
343 | 		pop_r12(&prog); |
344 | /* Reset the stack frame. */ |
345 | EMIT3(0x48, 0x89, 0xEC); /* mov rsp, rbp */ |
346 | } else { |
347 | EMIT1(0x55); /* push rbp */ |
348 | EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */ |
349 | } |
350 | |
351 | /* X86_TAIL_CALL_OFFSET is here */ |
352 | EMIT_ENDBR(); |
353 | |
354 | /* sub rsp, rounded_stack_depth */ |
355 | if (stack_depth) |
356 | EMIT3_off32(0x48, 0x81, 0xEC, round_up(stack_depth, 8)); |
357 | if (tail_call_reachable) |
358 | EMIT1(0x50); /* push rax */ |
359 | *pprog = prog; |
360 | } |
361 | |
362 | static int emit_patch(u8 **pprog, void *func, void *ip, u8 opcode) |
363 | { |
364 | u8 *prog = *pprog; |
365 | s64 offset; |
366 | |
367 | offset = func - (ip + X86_PATCH_SIZE); |
368 | 	if (!is_simm32(offset)) { |
369 | 		pr_err("Target call %p is out of range\n", func); |
370 | return -ERANGE; |
371 | } |
372 | EMIT1_off32(opcode, offset); |
373 | *pprog = prog; |
374 | return 0; |
375 | } |
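
/*
 * Illustrative example: a call/jmp rel32 is the opcode byte followed by a
 * 32-bit displacement relative to the *next* instruction, hence
 * "func - (ip + X86_PATCH_SIZE)" above. With ip == 0x1000 and
 * func == 0x1020, emit_call() produces e8 1b 00 00 00.
 */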
376 | |
377 | static int emit_call(u8 **pprog, void *func, void *ip) |
378 | { |
379 | 	return emit_patch(pprog, func, ip, 0xE8); |
380 | } |
381 | |
382 | static int emit_rsb_call(u8 **pprog, void *func, void *ip) |
383 | { |
384 | OPTIMIZER_HIDE_VAR(func); |
385 | x86_call_depth_emit_accounting(pprog, func); |
386 | 	return emit_patch(pprog, func, ip, 0xE8); |
387 | } |
388 | |
389 | static int emit_jump(u8 **pprog, void *func, void *ip) |
390 | { |
391 | 	return emit_patch(pprog, func, ip, 0xE9); |
392 | } |
393 | |
394 | static int __bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t, |
395 | void *old_addr, void *new_addr) |
396 | { |
397 | const u8 *nop_insn = x86_nops[5]; |
398 | u8 old_insn[X86_PATCH_SIZE]; |
399 | u8 new_insn[X86_PATCH_SIZE]; |
400 | u8 *prog; |
401 | int ret; |
402 | |
403 | memcpy(old_insn, nop_insn, X86_PATCH_SIZE); |
404 | if (old_addr) { |
405 | prog = old_insn; |
406 | ret = t == BPF_MOD_CALL ? |
407 | 		      emit_call(&prog, old_addr, ip) : |
408 | 		      emit_jump(&prog, old_addr, ip); |
409 | if (ret) |
410 | return ret; |
411 | } |
412 | |
413 | memcpy(new_insn, nop_insn, X86_PATCH_SIZE); |
414 | if (new_addr) { |
415 | prog = new_insn; |
416 | ret = t == BPF_MOD_CALL ? |
417 | 		      emit_call(&prog, new_addr, ip) : |
418 | 		      emit_jump(&prog, new_addr, ip); |
419 | if (ret) |
420 | return ret; |
421 | } |
422 | |
423 | ret = -EBUSY; |
424 | mutex_lock(&text_mutex); |
425 | 	if (memcmp(ip, old_insn, X86_PATCH_SIZE)) |
426 | goto out; |
427 | ret = 1; |
428 | 	if (memcmp(ip, new_insn, X86_PATCH_SIZE)) { |
429 | 		text_poke_bp(ip, new_insn, X86_PATCH_SIZE, NULL); |
430 | ret = 0; |
431 | } |
432 | out: |
433 | 	mutex_unlock(&text_mutex); |
434 | return ret; |
435 | } |
436 | |
437 | int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t, |
438 | void *old_addr, void *new_addr) |
439 | { |
440 | 	if (!is_kernel_text((long)ip) && |
441 | 	    !is_bpf_text_address((long)ip)) |
442 | /* BPF poking in modules is not supported */ |
443 | return -EINVAL; |
444 | |
445 | /* |
446 | * See emit_prologue(), for IBT builds the trampoline hook is preceded |
447 | * with an ENDBR instruction. |
448 | */ |
449 | 	if (is_endbr(*(u32 *)ip)) |
450 | ip += ENDBR_INSN_SIZE; |
451 | |
452 | return __bpf_arch_text_poke(ip, t, old_addr, new_addr); |
453 | } |
454 | |
455 | #define EMIT_LFENCE() EMIT3(0x0F, 0xAE, 0xE8) |
456 | |
457 | static void emit_indirect_jump(u8 **pprog, int reg, u8 *ip) |
458 | { |
459 | u8 *prog = *pprog; |
460 | |
461 | if (cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) { |
462 | EMIT_LFENCE(); |
463 | EMIT2(0xFF, 0xE0 + reg); |
464 | } else if (cpu_feature_enabled(X86_FEATURE_RETPOLINE)) { |
465 | OPTIMIZER_HIDE_VAR(reg); |
466 | if (cpu_feature_enabled(X86_FEATURE_CALL_DEPTH)) |
467 | 			emit_jump(&prog, &__x86_indirect_jump_thunk_array[reg], ip); |
468 | 		else |
469 | 			emit_jump(&prog, &__x86_indirect_thunk_array[reg], ip); |
470 | } else { |
471 | EMIT2(0xFF, 0xE0 + reg); /* jmp *%\reg */ |
472 | if (IS_ENABLED(CONFIG_RETPOLINE) || IS_ENABLED(CONFIG_SLS)) |
473 | EMIT1(0xCC); /* int3 */ |
474 | } |
475 | |
476 | *pprog = prog; |
477 | } |
478 | |
479 | static void emit_return(u8 **pprog, u8 *ip) |
480 | { |
481 | u8 *prog = *pprog; |
482 | |
483 | if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) { |
484 | 		emit_jump(&prog, x86_return_thunk, ip); |
485 | } else { |
486 | EMIT1(0xC3); /* ret */ |
487 | if (IS_ENABLED(CONFIG_SLS)) |
488 | EMIT1(0xCC); /* int3 */ |
489 | } |
490 | |
491 | *pprog = prog; |
492 | } |
493 | |
494 | /* |
495 | * Generate the following code: |
496 | * |
497 | * ... bpf_tail_call(void *ctx, struct bpf_array *array, u64 index) ... |
498 | * if (index >= array->map.max_entries) |
499 | * goto out; |
500 | * if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT) |
501 | * goto out; |
502 | * prog = array->ptrs[index]; |
503 | * if (prog == NULL) |
504 | * goto out; |
505 | * goto *(prog->bpf_func + prologue_size); |
506 | * out: |
507 | */ |
508 | static void emit_bpf_tail_call_indirect(struct bpf_prog *bpf_prog, |
509 | u8 **pprog, bool *callee_regs_used, |
510 | u32 stack_depth, u8 *ip, |
511 | struct jit_context *ctx) |
512 | { |
513 | int tcc_off = -4 - round_up(stack_depth, 8); |
514 | u8 *prog = *pprog, *start = *pprog; |
515 | int offset; |
516 | |
517 | /* |
518 | * rdi - pointer to ctx |
519 | * rsi - pointer to bpf_array |
520 | * rdx - index in bpf_array |
521 | */ |
522 | |
523 | /* |
524 | * if (index >= array->map.max_entries) |
525 | * goto out; |
526 | */ |
527 | EMIT2(0x89, 0xD2); /* mov edx, edx */ |
528 | EMIT3(0x39, 0x56, /* cmp dword ptr [rsi + 16], edx */ |
529 | offsetof(struct bpf_array, map.max_entries)); |
530 | |
531 | offset = ctx->tail_call_indirect_label - (prog + 2 - start); |
532 | EMIT2(X86_JBE, offset); /* jbe out */ |
533 | |
534 | /* |
535 | * if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT) |
536 | * goto out; |
537 | */ |
538 | EMIT2_off32(0x8B, 0x85, tcc_off); /* mov eax, dword ptr [rbp - tcc_off] */ |
539 | EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT); /* cmp eax, MAX_TAIL_CALL_CNT */ |
540 | |
541 | offset = ctx->tail_call_indirect_label - (prog + 2 - start); |
542 | EMIT2(X86_JAE, offset); /* jae out */ |
543 | EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */ |
544 | EMIT2_off32(0x89, 0x85, tcc_off); /* mov dword ptr [rbp - tcc_off], eax */ |
545 | |
546 | /* prog = array->ptrs[index]; */ |
547 | EMIT4_off32(0x48, 0x8B, 0x8C, 0xD6, /* mov rcx, [rsi + rdx * 8 + offsetof(...)] */ |
548 | offsetof(struct bpf_array, ptrs)); |
549 | |
550 | /* |
551 | * if (prog == NULL) |
552 | * goto out; |
553 | */ |
554 | EMIT3(0x48, 0x85, 0xC9); /* test rcx,rcx */ |
555 | |
556 | offset = ctx->tail_call_indirect_label - (prog + 2 - start); |
557 | EMIT2(X86_JE, offset); /* je out */ |
558 | |
559 | if (bpf_prog->aux->exception_boundary) { |
560 | 		pop_callee_regs(&prog, all_callee_regs_used); |
561 | 		pop_r12(&prog); |
562 | 	} else { |
563 | 		pop_callee_regs(&prog, callee_regs_used); |
564 | } |
565 | |
566 | EMIT1(0x58); /* pop rax */ |
567 | if (stack_depth) |
568 | EMIT3_off32(0x48, 0x81, 0xC4, /* add rsp, sd */ |
569 | round_up(stack_depth, 8)); |
570 | |
571 | /* goto *(prog->bpf_func + X86_TAIL_CALL_OFFSET); */ |
572 | EMIT4(0x48, 0x8B, 0x49, /* mov rcx, qword ptr [rcx + 32] */ |
573 | offsetof(struct bpf_prog, bpf_func)); |
574 | EMIT4(0x48, 0x83, 0xC1, /* add rcx, X86_TAIL_CALL_OFFSET */ |
575 | X86_TAIL_CALL_OFFSET); |
576 | /* |
577 | * Now we're ready to jump into next BPF program |
578 | * rdi == ctx (1st arg) |
579 | * rcx == prog->bpf_func + X86_TAIL_CALL_OFFSET |
580 | */ |
581 | 	emit_indirect_jump(&prog, 1 /* rcx */, ip + (prog - start)); |
582 | |
583 | /* out: */ |
584 | ctx->tail_call_indirect_label = prog - start; |
585 | *pprog = prog; |
586 | } |
587 | |
588 | static void emit_bpf_tail_call_direct(struct bpf_prog *bpf_prog, |
589 | struct bpf_jit_poke_descriptor *poke, |
590 | u8 **pprog, u8 *ip, |
591 | bool *callee_regs_used, u32 stack_depth, |
592 | struct jit_context *ctx) |
593 | { |
594 | int tcc_off = -4 - round_up(stack_depth, 8); |
595 | u8 *prog = *pprog, *start = *pprog; |
596 | int offset; |
597 | |
598 | /* |
599 | * if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT) |
600 | * goto out; |
601 | */ |
602 | EMIT2_off32(0x8B, 0x85, tcc_off); /* mov eax, dword ptr [rbp - tcc_off] */ |
603 | EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT); /* cmp eax, MAX_TAIL_CALL_CNT */ |
604 | |
605 | offset = ctx->tail_call_direct_label - (prog + 2 - start); |
606 | EMIT2(X86_JAE, offset); /* jae out */ |
607 | EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */ |
608 | EMIT2_off32(0x89, 0x85, tcc_off); /* mov dword ptr [rbp - tcc_off], eax */ |
609 | |
610 | poke->tailcall_bypass = ip + (prog - start); |
611 | poke->adj_off = X86_TAIL_CALL_OFFSET; |
612 | poke->tailcall_target = ip + ctx->tail_call_direct_label - X86_PATCH_SIZE; |
613 | poke->bypass_addr = (u8 *)poke->tailcall_target + X86_PATCH_SIZE; |
614 | |
615 | 	emit_jump(&prog, (u8 *)poke->tailcall_target + X86_PATCH_SIZE, |
616 | 		  poke->tailcall_bypass); |
617 | |
618 | if (bpf_prog->aux->exception_boundary) { |
619 | 		pop_callee_regs(&prog, all_callee_regs_used); |
620 | 		pop_r12(&prog); |
621 | 	} else { |
622 | 		pop_callee_regs(&prog, callee_regs_used); |
623 | } |
624 | |
625 | EMIT1(0x58); /* pop rax */ |
626 | if (stack_depth) |
627 | EMIT3_off32(0x48, 0x81, 0xC4, round_up(stack_depth, 8)); |
628 | |
629 | memcpy(prog, x86_nops[5], X86_PATCH_SIZE); |
630 | prog += X86_PATCH_SIZE; |
631 | |
632 | /* out: */ |
633 | ctx->tail_call_direct_label = prog - start; |
634 | |
635 | *pprog = prog; |
636 | } |
637 | |
638 | static void bpf_tail_call_direct_fixup(struct bpf_prog *prog) |
639 | { |
640 | struct bpf_jit_poke_descriptor *poke; |
641 | struct bpf_array *array; |
642 | struct bpf_prog *target; |
643 | int i, ret; |
644 | |
645 | for (i = 0; i < prog->aux->size_poke_tab; i++) { |
646 | poke = &prog->aux->poke_tab[i]; |
647 | if (poke->aux && poke->aux != prog->aux) |
648 | continue; |
649 | |
650 | WARN_ON_ONCE(READ_ONCE(poke->tailcall_target_stable)); |
651 | |
652 | if (poke->reason != BPF_POKE_REASON_TAIL_CALL) |
653 | continue; |
654 | |
655 | array = container_of(poke->tail_call.map, struct bpf_array, map); |
656 | mutex_lock(&array->aux->poke_mutex); |
657 | target = array->ptrs[poke->tail_call.key]; |
658 | if (target) { |
659 | 			ret = __bpf_arch_text_poke(poke->tailcall_target, |
660 | 						   BPF_MOD_JUMP, NULL, |
661 | 						   (u8 *)target->bpf_func + |
662 | 						   poke->adj_off); |
663 | 			BUG_ON(ret < 0); |
664 | 			ret = __bpf_arch_text_poke(poke->tailcall_bypass, |
665 | 						   BPF_MOD_JUMP, |
666 | 						   (u8 *)poke->tailcall_target + |
667 | X86_PATCH_SIZE, NULL); |
668 | BUG_ON(ret < 0); |
669 | } |
670 | WRITE_ONCE(poke->tailcall_target_stable, true); |
671 | 		mutex_unlock(&array->aux->poke_mutex); |
672 | } |
673 | } |
674 | |
675 | static void emit_mov_imm32(u8 **pprog, bool sign_propagate, |
676 | u32 dst_reg, const u32 imm32) |
677 | { |
678 | u8 *prog = *pprog; |
679 | u8 b1, b2, b3; |
680 | |
681 | /* |
682 | * Optimization: if imm32 is positive, use 'mov %eax, imm32' |
683 | * (which zero-extends imm32) to save 2 bytes. |
684 | */ |
685 | if (sign_propagate && (s32)imm32 < 0) { |
686 | /* 'mov %rax, imm32' sign extends imm32 */ |
687 | 		b1 = add_1mod(0x48, dst_reg); |
688 | b2 = 0xC7; |
689 | b3 = 0xC0; |
690 | EMIT3_off32(b1, b2, add_1reg(b3, dst_reg), imm32); |
691 | goto done; |
692 | } |
693 | |
694 | /* |
695 | * Optimization: if imm32 is zero, use 'xor %eax, %eax' |
696 | * to save 3 bytes. |
697 | */ |
698 | if (imm32 == 0) { |
699 | 		if (is_ereg(dst_reg)) |
700 | EMIT1(add_2mod(0x40, dst_reg, dst_reg)); |
701 | b2 = 0x31; /* xor */ |
702 | b3 = 0xC0; |
703 | EMIT2(b2, add_2reg(b3, dst_reg, dst_reg)); |
704 | goto done; |
705 | } |
706 | |
707 | /* mov %eax, imm32 */ |
708 | 	if (is_ereg(dst_reg)) |
709 | EMIT1(add_1mod(0x40, dst_reg)); |
710 | EMIT1_off32(add_1reg(0xB8, dst_reg), imm32); |
711 | done: |
712 | *pprog = prog; |
713 | } |
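
/*
 * Illustrative encodings for the three paths above, with dst_reg == BPF_REG_0:
 *   imm32 == -1, sign_propagate:  48 c7 c0 ff ff ff ff    mov rax, -1
 *   imm32 == 0:                   31 c0                   xor eax, eax
 *   imm32 == 5:                   b8 05 00 00 00          mov eax, 5
 */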
714 | |
715 | static void emit_mov_imm64(u8 **pprog, u32 dst_reg, |
716 | const u32 imm32_hi, const u32 imm32_lo) |
717 | { |
718 | u8 *prog = *pprog; |
719 | |
720 | 	if (is_uimm32(((u64)imm32_hi << 32) | (u32)imm32_lo)) { |
721 | 		/* |
722 | 		 * For emitting a plain u32, where the sign bit must not be |
723 | 		 * propagated, LLVM tends to load imm64 over mov32 |
724 | 		 * directly, so save a couple of bytes by just doing |
725 | 		 * 'mov %eax, imm32' instead. |
726 | 		 */ |
727 | 		emit_mov_imm32(&prog, false, dst_reg, imm32_lo); |
728 | } else { |
729 | /* movabsq rax, imm64 */ |
730 | EMIT2(add_1mod(0x48, dst_reg), add_1reg(0xB8, dst_reg)); |
731 | EMIT(imm32_lo, 4); |
732 | EMIT(imm32_hi, 4); |
733 | } |
734 | |
735 | *pprog = prog; |
736 | } |
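
/*
 * Illustrative example: a BPF_LD_IMM64 of 0x100000000 does not fit in u32, so
 * the else branch above emits 48 b8 00 00 00 00 01 00 00 00, i.e.
 * "movabs rax, 0x100000000" (for dst_reg == BPF_REG_0).
 */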
737 | |
738 | static void emit_mov_reg(u8 **pprog, bool is64, u32 dst_reg, u32 src_reg) |
739 | { |
740 | u8 *prog = *pprog; |
741 | |
742 | if (is64) { |
743 | /* mov dst, src */ |
744 | EMIT_mov(dst_reg, src_reg); |
745 | } else { |
746 | /* mov32 dst, src */ |
747 | 		if (is_ereg(dst_reg) || is_ereg(src_reg)) |
748 | EMIT1(add_2mod(0x40, dst_reg, src_reg)); |
749 | EMIT2(0x89, add_2reg(0xC0, dst_reg, src_reg)); |
750 | } |
751 | |
752 | *pprog = prog; |
753 | } |
754 | |
755 | static void emit_movsx_reg(u8 **pprog, int num_bits, bool is64, u32 dst_reg, |
756 | u32 src_reg) |
757 | { |
758 | u8 *prog = *pprog; |
759 | |
760 | if (is64) { |
761 | /* movs[b,w,l]q dst, src */ |
762 | if (num_bits == 8) |
763 | EMIT4(add_2mod(0x48, src_reg, dst_reg), 0x0f, 0xbe, |
764 | add_2reg(0xC0, src_reg, dst_reg)); |
765 | else if (num_bits == 16) |
766 | EMIT4(add_2mod(0x48, src_reg, dst_reg), 0x0f, 0xbf, |
767 | add_2reg(0xC0, src_reg, dst_reg)); |
768 | else if (num_bits == 32) |
769 | EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x63, |
770 | add_2reg(0xC0, src_reg, dst_reg)); |
771 | } else { |
772 | /* movs[b,w]l dst, src */ |
773 | if (num_bits == 8) { |
774 | EMIT4(add_2mod(0x40, src_reg, dst_reg), 0x0f, 0xbe, |
775 | add_2reg(0xC0, src_reg, dst_reg)); |
776 | } else if (num_bits == 16) { |
777 | 			if (is_ereg(dst_reg) || is_ereg(src_reg)) |
778 | EMIT1(add_2mod(0x40, src_reg, dst_reg)); |
779 | EMIT3(add_2mod(0x0f, src_reg, dst_reg), 0xbf, |
780 | add_2reg(0xC0, src_reg, dst_reg)); |
781 | } |
782 | } |
783 | |
784 | *pprog = prog; |
785 | } |
786 | |
787 | /* Emit the suffix (ModR/M etc) for addressing *(ptr_reg + off) and val_reg */ |
788 | static void emit_insn_suffix(u8 **pprog, u32 ptr_reg, u32 val_reg, int off) |
789 | { |
790 | u8 *prog = *pprog; |
791 | |
792 | 	if (is_imm8(off)) { |
793 | /* 1-byte signed displacement. |
794 | * |
795 | * If off == 0 we could skip this and save one extra byte, but |
796 | * special case of x86 R13 which always needs an offset is not |
797 | * worth the hassle |
798 | */ |
799 | EMIT2(add_2reg(0x40, ptr_reg, val_reg), off); |
800 | } else { |
801 | /* 4-byte signed displacement */ |
802 | EMIT1_off32(add_2reg(0x80, ptr_reg, val_reg), off); |
803 | } |
804 | *pprog = prog; |
805 | } |
806 | |
807 | /* |
808 | * Emit a REX byte if it will be necessary to address these registers |
809 | */ |
810 | static void maybe_emit_mod(u8 **pprog, u32 dst_reg, u32 src_reg, bool is64) |
811 | { |
812 | u8 *prog = *pprog; |
813 | |
814 | if (is64) |
815 | EMIT1(add_2mod(0x48, dst_reg, src_reg)); |
816 | 	else if (is_ereg(dst_reg) || is_ereg(src_reg)) |
817 | EMIT1(add_2mod(0x40, dst_reg, src_reg)); |
818 | *pprog = prog; |
819 | } |
820 | |
821 | /* |
822 | * Similar version of maybe_emit_mod() for a single register |
823 | */ |
824 | static void maybe_emit_1mod(u8 **pprog, u32 reg, bool is64) |
825 | { |
826 | u8 *prog = *pprog; |
827 | |
828 | if (is64) |
829 | EMIT1(add_1mod(0x48, reg)); |
830 | else if (is_ereg(reg)) |
831 | EMIT1(add_1mod(0x40, reg)); |
832 | *pprog = prog; |
833 | } |
834 | |
835 | /* LDX: dst_reg = *(u8*)(src_reg + off) */ |
836 | static void emit_ldx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off) |
837 | { |
838 | u8 *prog = *pprog; |
839 | |
840 | switch (size) { |
841 | case BPF_B: |
842 | /* Emit 'movzx rax, byte ptr [rax + off]' */ |
843 | EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6); |
844 | break; |
845 | case BPF_H: |
846 | /* Emit 'movzx rax, word ptr [rax + off]' */ |
847 | EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7); |
848 | break; |
849 | case BPF_W: |
850 | /* Emit 'mov eax, dword ptr [rax+0x14]' */ |
851 | 		if (is_ereg(dst_reg) || is_ereg(src_reg)) |
852 | EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B); |
853 | else |
854 | EMIT1(0x8B); |
855 | break; |
856 | case BPF_DW: |
857 | /* Emit 'mov rax, qword ptr [rax+0x14]' */ |
858 | EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B); |
859 | break; |
860 | } |
861 | 	emit_insn_suffix(&prog, src_reg, dst_reg, off); |
862 | *pprog = prog; |
863 | } |
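
/*
 * Illustrative example: emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_1, 8)
 * produces 48 8b 47 08, i.e. "mov rax, qword ptr [rdi + 8]": REX.W, opcode
 * 0x8B, then emit_insn_suffix() adds ModRM 0x47 (mod=01/disp8, reg=rax,
 * r/m=rdi) and the displacement byte 0x08.
 */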
864 | |
865 | /* LDSX: dst_reg = *(s8*)(src_reg + off) */ |
866 | static void emit_ldsx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off) |
867 | { |
868 | u8 *prog = *pprog; |
869 | |
870 | switch (size) { |
871 | case BPF_B: |
872 | /* Emit 'movsx rax, byte ptr [rax + off]' */ |
873 | EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xBE); |
874 | break; |
875 | case BPF_H: |
876 | /* Emit 'movsx rax, word ptr [rax + off]' */ |
877 | EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xBF); |
878 | break; |
879 | case BPF_W: |
880 | /* Emit 'movsx rax, dword ptr [rax+0x14]' */ |
881 | EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x63); |
882 | break; |
883 | } |
884 | 	emit_insn_suffix(&prog, src_reg, dst_reg, off); |
885 | *pprog = prog; |
886 | } |
887 | |
888 | /* STX: *(u8*)(dst_reg + off) = src_reg */ |
889 | static void emit_stx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off) |
890 | { |
891 | u8 *prog = *pprog; |
892 | |
893 | switch (size) { |
894 | case BPF_B: |
895 | /* Emit 'mov byte ptr [rax + off], al' */ |
896 | 		if (is_ereg(dst_reg) || is_ereg_8l(src_reg)) |
897 | /* Add extra byte for eregs or SIL,DIL,BPL in src_reg */ |
898 | EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88); |
899 | else |
900 | EMIT1(0x88); |
901 | break; |
902 | case BPF_H: |
903 | 		if (is_ereg(dst_reg) || is_ereg(src_reg)) |
904 | EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89); |
905 | else |
906 | EMIT2(0x66, 0x89); |
907 | break; |
908 | case BPF_W: |
909 | 		if (is_ereg(dst_reg) || is_ereg(src_reg)) |
910 | EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89); |
911 | else |
912 | EMIT1(0x89); |
913 | break; |
914 | case BPF_DW: |
915 | EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89); |
916 | break; |
917 | } |
918 | 	emit_insn_suffix(&prog, dst_reg, src_reg, off); |
919 | *pprog = prog; |
920 | } |
921 | |
922 | static int emit_atomic(u8 **pprog, u8 atomic_op, |
923 | u32 dst_reg, u32 src_reg, s16 off, u8 bpf_size) |
924 | { |
925 | u8 *prog = *pprog; |
926 | |
927 | EMIT1(0xF0); /* lock prefix */ |
928 | |
929 | 	maybe_emit_mod(&prog, dst_reg, src_reg, bpf_size == BPF_DW); |
930 | |
931 | /* emit opcode */ |
932 | switch (atomic_op) { |
933 | case BPF_ADD: |
934 | case BPF_AND: |
935 | case BPF_OR: |
936 | case BPF_XOR: |
937 | /* lock *(u32/u64*)(dst_reg + off) <op>= src_reg */ |
938 | EMIT1(simple_alu_opcodes[atomic_op]); |
939 | break; |
940 | case BPF_ADD | BPF_FETCH: |
941 | /* src_reg = atomic_fetch_add(dst_reg + off, src_reg); */ |
942 | EMIT2(0x0F, 0xC1); |
943 | break; |
944 | case BPF_XCHG: |
945 | /* src_reg = atomic_xchg(dst_reg + off, src_reg); */ |
946 | EMIT1(0x87); |
947 | break; |
948 | case BPF_CMPXCHG: |
949 | /* r0 = atomic_cmpxchg(dst_reg + off, r0, src_reg); */ |
950 | EMIT2(0x0F, 0xB1); |
951 | break; |
952 | default: |
953 | 		pr_err("bpf_jit: unknown atomic opcode %02x\n", atomic_op); |
954 | return -EFAULT; |
955 | } |
956 | |
957 | 	emit_insn_suffix(&prog, dst_reg, src_reg, off); |
958 | |
959 | *pprog = prog; |
960 | return 0; |
961 | } |
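
/*
 * Illustrative example: BPF_ADD | BPF_FETCH on BPF_DW with dst_reg == BPF_REG_1,
 * src_reg == BPF_REG_0 and off == 0 becomes
 *   f0 48 0f c1 47 00        lock xadd qword ptr [rdi + 0], rax
 * (the zero disp8 is kept on purpose, see the comment in emit_insn_suffix()).
 */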
962 | |
963 | bool ex_handler_bpf(const struct exception_table_entry *x, struct pt_regs *regs) |
964 | { |
965 | u32 reg = x->fixup >> 8; |
966 | |
967 | /* jump over faulting load and clear dest register */ |
968 | *(unsigned long *)((void *)regs + reg) = 0; |
969 | regs->ip += x->fixup & 0xff; |
970 | return true; |
971 | } |
972 | |
973 | static void detect_reg_usage(struct bpf_insn *insn, int insn_cnt, |
974 | bool *regs_used, bool *tail_call_seen) |
975 | { |
976 | int i; |
977 | |
978 | for (i = 1; i <= insn_cnt; i++, insn++) { |
979 | if (insn->code == (BPF_JMP | BPF_TAIL_CALL)) |
980 | *tail_call_seen = true; |
981 | if (insn->dst_reg == BPF_REG_6 || insn->src_reg == BPF_REG_6) |
982 | regs_used[0] = true; |
983 | if (insn->dst_reg == BPF_REG_7 || insn->src_reg == BPF_REG_7) |
984 | regs_used[1] = true; |
985 | if (insn->dst_reg == BPF_REG_8 || insn->src_reg == BPF_REG_8) |
986 | regs_used[2] = true; |
987 | if (insn->dst_reg == BPF_REG_9 || insn->src_reg == BPF_REG_9) |
988 | regs_used[3] = true; |
989 | } |
990 | } |
991 | |
992 | static void emit_nops(u8 **pprog, int len) |
993 | { |
994 | u8 *prog = *pprog; |
995 | int i, noplen; |
996 | |
997 | while (len > 0) { |
998 | noplen = len; |
999 | |
1000 | if (noplen > ASM_NOP_MAX) |
1001 | noplen = ASM_NOP_MAX; |
1002 | |
1003 | for (i = 0; i < noplen; i++) |
1004 | EMIT1(x86_nops[noplen][i]); |
1005 | len -= noplen; |
1006 | } |
1007 | |
1008 | *pprog = prog; |
1009 | } |
1010 | |
1011 | /* emit the 3-byte VEX prefix |
1012 | * |
1013 | * r: same as rex.r, extra bit for ModRM reg field |
1014 | * x: same as rex.x, extra bit for SIB index field |
1015 | * b: same as rex.b, extra bit for ModRM r/m, or SIB base |
1016 | * m: opcode map select, encoding escape bytes e.g. 0x0f38 |
1017 | * w: same as rex.w (32 bit or 64 bit) or opcode specific |
1018 | * src_reg2: additional source reg (encoded as BPF reg) |
1019 | * l: vector length (128 bit or 256 bit) or reserved |
1020 | * pp: opcode prefix (none, 0x66, 0xf2 or 0xf3) |
1021 | */ |
1022 | static void emit_3vex(u8 **pprog, bool r, bool x, bool b, u8 m, |
1023 | bool w, u8 src_reg2, bool l, u8 pp) |
1024 | { |
1025 | u8 *prog = *pprog; |
1026 | const u8 b0 = 0xc4; /* first byte of 3-byte VEX prefix */ |
1027 | u8 b1, b2; |
1028 | u8 vvvv = reg2hex[src_reg2]; |
1029 | |
1030 | /* reg2hex gives only the lower 3 bit of vvvv */ |
1031 | 	if (is_ereg(src_reg2)) |
1032 | vvvv |= 1 << 3; |
1033 | |
1034 | /* |
1035 | * 2nd byte of 3-byte VEX prefix |
1036 | * ~ means bit inverted encoding |
1037 | * |
1038 | * 7 0 |
1039 | * +---+---+---+---+---+---+---+---+ |
1040 | * |~R |~X |~B | m | |
1041 | * +---+---+---+---+---+---+---+---+ |
1042 | */ |
1043 | b1 = (!r << 7) | (!x << 6) | (!b << 5) | (m & 0x1f); |
1044 | /* |
1045 | * 3rd byte of 3-byte VEX prefix |
1046 | * |
1047 | * 7 0 |
1048 | * +---+---+---+---+---+---+---+---+ |
1049 | * | W | ~vvvv | L | pp | |
1050 | * +---+---+---+---+---+---+---+---+ |
1051 | */ |
1052 | b2 = (w << 7) | ((~vvvv & 0xf) << 3) | (l << 2) | (pp & 3); |
1053 | |
1054 | EMIT3(b0, b1, b2); |
1055 | *pprog = prog; |
1056 | } |
1057 | |
1058 | /* emit BMI2 shift instruction */ |
1059 | static void emit_shiftx(u8 **pprog, u32 dst_reg, u8 src_reg, bool is64, u8 op) |
1060 | { |
1061 | u8 *prog = *pprog; |
1062 | 	bool r = is_ereg(dst_reg); |
1063 | u8 m = 2; /* escape code 0f38 */ |
1064 | |
1065 | 	emit_3vex(&prog, r, false, r, m, is64, src_reg, false, op); |
1066 | EMIT2(0xf7, add_2reg(0xC0, dst_reg, dst_reg)); |
1067 | *pprog = prog; |
1068 | } |
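
/*
 * Illustrative example: emit_shiftx(&prog, BPF_REG_6, BPF_REG_2, true, 1)
 * (a 64-bit BPF_LSH with the count in R2/rsi) emits c4 e2 c9 f7 db, i.e.
 * "shlx rbx, rbx, rsi": the VEX bytes select map 0f38, W=1, vvvv=rsi and
 * prefix 0x66, then opcode 0xf7 with ModRM 0xdb (reg=rbx, r/m=rbx).
 */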
1069 | |
1070 | #define INSN_SZ_DIFF (((addrs[i] - addrs[i - 1]) - (prog - temp))) |
1071 | |
1072 | /* mov rax, qword ptr [rbp - rounded_stack_depth - 8] */ |
1073 | #define RESTORE_TAIL_CALL_CNT(stack) \ |
1074 | EMIT3_off32(0x48, 0x8B, 0x85, -round_up(stack, 8) - 8) |
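
/*
 * The slot read here is the "push rax" done at the end of emit_prologue() for
 * tail-call-reachable programs: rax (the tail call count) is pushed right
 * after "sub rsp, rounded_stack_depth", so it lives at
 * rbp - round_up(stack, 8) - 8; 48 8b 85 <disp32> is "mov rax, [rbp + disp32]".
 */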
1075 | |
1076 | static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image, |
1077 | int oldproglen, struct jit_context *ctx, bool jmp_padding) |
1078 | { |
1079 | bool tail_call_reachable = bpf_prog->aux->tail_call_reachable; |
1080 | struct bpf_insn *insn = bpf_prog->insnsi; |
1081 | bool callee_regs_used[4] = {}; |
1082 | int insn_cnt = bpf_prog->len; |
1083 | bool tail_call_seen = false; |
1084 | bool seen_exit = false; |
1085 | u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY]; |
1086 | int i, excnt = 0; |
1087 | int ilen, proglen = 0; |
1088 | u8 *prog = temp; |
1089 | int err; |
1090 | |
1091 | 	detect_reg_usage(insn, insn_cnt, callee_regs_used, |
1092 | 			 &tail_call_seen); |
1093 | |
1094 | /* tail call's presence in current prog implies it is reachable */ |
1095 | tail_call_reachable |= tail_call_seen; |
1096 | |
1097 | 	emit_prologue(&prog, bpf_prog->aux->stack_depth, |
1098 | 		      bpf_prog_was_classic(bpf_prog), tail_call_reachable, |
1099 | 		      bpf_is_subprog(bpf_prog), bpf_prog->aux->exception_cb); |
1100 | /* Exception callback will clobber callee regs for its own use, and |
1101 | * restore the original callee regs from main prog's stack frame. |
1102 | */ |
1103 | if (bpf_prog->aux->exception_boundary) { |
1104 | /* We also need to save r12, which is not mapped to any BPF |
1105 | * register, as we throw after entry into the kernel, which may |
1106 | * overwrite r12. |
1107 | */ |
1108 | 		push_r12(&prog); |
1109 | 		push_callee_regs(&prog, all_callee_regs_used); |
1110 | 	} else { |
1111 | 		push_callee_regs(&prog, callee_regs_used); |
1112 | } |
1113 | |
1114 | ilen = prog - temp; |
1115 | if (rw_image) |
1116 | memcpy(rw_image + proglen, temp, ilen); |
1117 | proglen += ilen; |
1118 | addrs[0] = proglen; |
1119 | prog = temp; |
1120 | |
1121 | for (i = 1; i <= insn_cnt; i++, insn++) { |
1122 | const s32 imm32 = insn->imm; |
1123 | u32 dst_reg = insn->dst_reg; |
1124 | u32 src_reg = insn->src_reg; |
1125 | u8 b2 = 0, b3 = 0; |
1126 | u8 *start_of_ldx; |
1127 | s64 jmp_offset; |
1128 | s16 insn_off; |
1129 | u8 jmp_cond; |
1130 | u8 *func; |
1131 | int nops; |
1132 | |
1133 | switch (insn->code) { |
1134 | /* ALU */ |
1135 | case BPF_ALU | BPF_ADD | BPF_X: |
1136 | case BPF_ALU | BPF_SUB | BPF_X: |
1137 | case BPF_ALU | BPF_AND | BPF_X: |
1138 | case BPF_ALU | BPF_OR | BPF_X: |
1139 | case BPF_ALU | BPF_XOR | BPF_X: |
1140 | case BPF_ALU64 | BPF_ADD | BPF_X: |
1141 | case BPF_ALU64 | BPF_SUB | BPF_X: |
1142 | case BPF_ALU64 | BPF_AND | BPF_X: |
1143 | case BPF_ALU64 | BPF_OR | BPF_X: |
1144 | case BPF_ALU64 | BPF_XOR | BPF_X: |
1145 | 			maybe_emit_mod(&prog, dst_reg, src_reg, |
1146 | BPF_CLASS(insn->code) == BPF_ALU64); |
1147 | b2 = simple_alu_opcodes[BPF_OP(insn->code)]; |
1148 | EMIT2(b2, add_2reg(0xC0, dst_reg, src_reg)); |
1149 | break; |
1150 | |
1151 | case BPF_ALU64 | BPF_MOV | BPF_X: |
1152 | case BPF_ALU | BPF_MOV | BPF_X: |
1153 | if (insn->off == 0) |
1154 | 				emit_mov_reg(&prog, |
1155 | BPF_CLASS(insn->code) == BPF_ALU64, |
1156 | dst_reg, src_reg); |
1157 | else |
1158 | 				emit_movsx_reg(&prog, insn->off, |
1159 | BPF_CLASS(insn->code) == BPF_ALU64, |
1160 | dst_reg, src_reg); |
1161 | break; |
1162 | |
1163 | /* neg dst */ |
1164 | case BPF_ALU | BPF_NEG: |
1165 | case BPF_ALU64 | BPF_NEG: |
1166 | 			maybe_emit_1mod(&prog, dst_reg, |
1167 | BPF_CLASS(insn->code) == BPF_ALU64); |
1168 | EMIT2(0xF7, add_1reg(0xD8, dst_reg)); |
1169 | break; |
1170 | |
1171 | case BPF_ALU | BPF_ADD | BPF_K: |
1172 | case BPF_ALU | BPF_SUB | BPF_K: |
1173 | case BPF_ALU | BPF_AND | BPF_K: |
1174 | case BPF_ALU | BPF_OR | BPF_K: |
1175 | case BPF_ALU | BPF_XOR | BPF_K: |
1176 | case BPF_ALU64 | BPF_ADD | BPF_K: |
1177 | case BPF_ALU64 | BPF_SUB | BPF_K: |
1178 | case BPF_ALU64 | BPF_AND | BPF_K: |
1179 | case BPF_ALU64 | BPF_OR | BPF_K: |
1180 | case BPF_ALU64 | BPF_XOR | BPF_K: |
1181 | 			maybe_emit_1mod(&prog, dst_reg, |
1182 | BPF_CLASS(insn->code) == BPF_ALU64); |
1183 | |
1184 | /* |
1185 | * b3 holds 'normal' opcode, b2 short form only valid |
1186 | * in case dst is eax/rax. |
1187 | */ |
1188 | switch (BPF_OP(insn->code)) { |
1189 | case BPF_ADD: |
1190 | b3 = 0xC0; |
1191 | b2 = 0x05; |
1192 | break; |
1193 | case BPF_SUB: |
1194 | b3 = 0xE8; |
1195 | b2 = 0x2D; |
1196 | break; |
1197 | case BPF_AND: |
1198 | b3 = 0xE0; |
1199 | b2 = 0x25; |
1200 | break; |
1201 | case BPF_OR: |
1202 | b3 = 0xC8; |
1203 | b2 = 0x0D; |
1204 | break; |
1205 | case BPF_XOR: |
1206 | b3 = 0xF0; |
1207 | b2 = 0x35; |
1208 | break; |
1209 | } |
1210 | |
1211 | 			if (is_imm8(imm32)) |
1212 | 				EMIT3(0x83, add_1reg(b3, dst_reg), imm32); |
1213 | 			else if (is_axreg(dst_reg)) |
1214 | EMIT1_off32(b2, imm32); |
1215 | else |
1216 | EMIT2_off32(0x81, add_1reg(b3, dst_reg), imm32); |
1217 | break; |
1218 | |
1219 | case BPF_ALU64 | BPF_MOV | BPF_K: |
1220 | case BPF_ALU | BPF_MOV | BPF_K: |
1221 | 			emit_mov_imm32(&prog, BPF_CLASS(insn->code) == BPF_ALU64, |
1222 | dst_reg, imm32); |
1223 | break; |
1224 | |
1225 | case BPF_LD | BPF_IMM | BPF_DW: |
1226 | 			emit_mov_imm64(&prog, dst_reg, insn[1].imm, insn[0].imm); |
1227 | insn++; |
1228 | i++; |
1229 | break; |
1230 | |
1231 | /* dst %= src, dst /= src, dst %= imm32, dst /= imm32 */ |
1232 | case BPF_ALU | BPF_MOD | BPF_X: |
1233 | case BPF_ALU | BPF_DIV | BPF_X: |
1234 | case BPF_ALU | BPF_MOD | BPF_K: |
1235 | case BPF_ALU | BPF_DIV | BPF_K: |
1236 | case BPF_ALU64 | BPF_MOD | BPF_X: |
1237 | case BPF_ALU64 | BPF_DIV | BPF_X: |
1238 | case BPF_ALU64 | BPF_MOD | BPF_K: |
1239 | case BPF_ALU64 | BPF_DIV | BPF_K: { |
1240 | bool is64 = BPF_CLASS(insn->code) == BPF_ALU64; |
1241 | |
1242 | if (dst_reg != BPF_REG_0) |
1243 | EMIT1(0x50); /* push rax */ |
1244 | if (dst_reg != BPF_REG_3) |
1245 | EMIT1(0x52); /* push rdx */ |
1246 | |
1247 | if (BPF_SRC(insn->code) == BPF_X) { |
1248 | if (src_reg == BPF_REG_0 || |
1249 | src_reg == BPF_REG_3) { |
1250 | /* mov r11, src_reg */ |
1251 | EMIT_mov(AUX_REG, src_reg); |
1252 | src_reg = AUX_REG; |
1253 | } |
1254 | } else { |
1255 | /* mov r11, imm32 */ |
1256 | EMIT3_off32(0x49, 0xC7, 0xC3, imm32); |
1257 | src_reg = AUX_REG; |
1258 | } |
1259 | |
1260 | if (dst_reg != BPF_REG_0) |
1261 | /* mov rax, dst_reg */ |
1262 | 				emit_mov_reg(&prog, is64, BPF_REG_0, dst_reg); |
1263 | |
1264 | if (insn->off == 0) { |
1265 | /* |
1266 | * xor edx, edx |
1267 | * equivalent to 'xor rdx, rdx', but one byte less |
1268 | */ |
1269 | EMIT2(0x31, 0xd2); |
1270 | |
1271 | /* div src_reg */ |
1272 | 				maybe_emit_1mod(&prog, src_reg, is64); |
1273 | EMIT2(0xF7, add_1reg(0xF0, src_reg)); |
1274 | } else { |
1275 | if (BPF_CLASS(insn->code) == BPF_ALU) |
1276 | EMIT1(0x99); /* cdq */ |
1277 | else |
1278 | EMIT2(0x48, 0x99); /* cqo */ |
1279 | |
1280 | /* idiv src_reg */ |
1281 | 				maybe_emit_1mod(&prog, src_reg, is64); |
1282 | EMIT2(0xF7, add_1reg(0xF8, src_reg)); |
1283 | } |
1284 | |
1285 | if (BPF_OP(insn->code) == BPF_MOD && |
1286 | dst_reg != BPF_REG_3) |
1287 | /* mov dst_reg, rdx */ |
1288 | 				emit_mov_reg(&prog, is64, dst_reg, BPF_REG_3); |
1289 | else if (BPF_OP(insn->code) == BPF_DIV && |
1290 | dst_reg != BPF_REG_0) |
1291 | /* mov dst_reg, rax */ |
1292 | 				emit_mov_reg(&prog, is64, dst_reg, BPF_REG_0); |
1293 | |
1294 | if (dst_reg != BPF_REG_3) |
1295 | EMIT1(0x5A); /* pop rdx */ |
1296 | if (dst_reg != BPF_REG_0) |
1297 | EMIT1(0x58); /* pop rax */ |
1298 | break; |
1299 | } |
1300 | |
1301 | case BPF_ALU | BPF_MUL | BPF_K: |
1302 | case BPF_ALU64 | BPF_MUL | BPF_K: |
1303 | 			maybe_emit_mod(&prog, dst_reg, dst_reg, |
1304 | BPF_CLASS(insn->code) == BPF_ALU64); |
1305 | |
1306 | 			if (is_imm8(imm32)) |
1307 | /* imul dst_reg, dst_reg, imm8 */ |
1308 | EMIT3(0x6B, add_2reg(0xC0, dst_reg, dst_reg), |
1309 | imm32); |
1310 | else |
1311 | /* imul dst_reg, dst_reg, imm32 */ |
1312 | EMIT2_off32(0x69, |
1313 | add_2reg(0xC0, dst_reg, dst_reg), |
1314 | imm32); |
1315 | break; |
1316 | |
1317 | case BPF_ALU | BPF_MUL | BPF_X: |
1318 | case BPF_ALU64 | BPF_MUL | BPF_X: |
1319 | 			maybe_emit_mod(&prog, src_reg, dst_reg, |
1320 | BPF_CLASS(insn->code) == BPF_ALU64); |
1321 | |
1322 | /* imul dst_reg, src_reg */ |
1323 | EMIT3(0x0F, 0xAF, add_2reg(0xC0, src_reg, dst_reg)); |
1324 | break; |
1325 | |
1326 | /* Shifts */ |
1327 | case BPF_ALU | BPF_LSH | BPF_K: |
1328 | case BPF_ALU | BPF_RSH | BPF_K: |
1329 | case BPF_ALU | BPF_ARSH | BPF_K: |
1330 | case BPF_ALU64 | BPF_LSH | BPF_K: |
1331 | case BPF_ALU64 | BPF_RSH | BPF_K: |
1332 | case BPF_ALU64 | BPF_ARSH | BPF_K: |
1333 | 			maybe_emit_1mod(&prog, dst_reg, |
1334 | BPF_CLASS(insn->code) == BPF_ALU64); |
1335 | |
1336 | b3 = simple_alu_opcodes[BPF_OP(insn->code)]; |
1337 | if (imm32 == 1) |
1338 | EMIT2(0xD1, add_1reg(b3, dst_reg)); |
1339 | else |
1340 | EMIT3(0xC1, add_1reg(b3, dst_reg), imm32); |
1341 | break; |
1342 | |
1343 | case BPF_ALU | BPF_LSH | BPF_X: |
1344 | case BPF_ALU | BPF_RSH | BPF_X: |
1345 | case BPF_ALU | BPF_ARSH | BPF_X: |
1346 | case BPF_ALU64 | BPF_LSH | BPF_X: |
1347 | case BPF_ALU64 | BPF_RSH | BPF_X: |
1348 | case BPF_ALU64 | BPF_ARSH | BPF_X: |
1349 | /* BMI2 shifts aren't better when shift count is already in rcx */ |
1350 | if (boot_cpu_has(X86_FEATURE_BMI2) && src_reg != BPF_REG_4) { |
1351 | /* shrx/sarx/shlx dst_reg, dst_reg, src_reg */ |
1352 | bool w = (BPF_CLASS(insn->code) == BPF_ALU64); |
1353 | u8 op; |
1354 | |
1355 | switch (BPF_OP(insn->code)) { |
1356 | case BPF_LSH: |
1357 | op = 1; /* prefix 0x66 */ |
1358 | break; |
1359 | case BPF_RSH: |
1360 | op = 3; /* prefix 0xf2 */ |
1361 | break; |
1362 | case BPF_ARSH: |
1363 | op = 2; /* prefix 0xf3 */ |
1364 | break; |
1365 | } |
1366 | |
1367 | 				emit_shiftx(&prog, dst_reg, src_reg, w, op); |
1368 | |
1369 | break; |
1370 | } |
1371 | |
1372 | if (src_reg != BPF_REG_4) { /* common case */ |
1373 | /* Check for bad case when dst_reg == rcx */ |
1374 | if (dst_reg == BPF_REG_4) { |
1375 | /* mov r11, dst_reg */ |
1376 | EMIT_mov(AUX_REG, dst_reg); |
1377 | dst_reg = AUX_REG; |
1378 | } else { |
1379 | EMIT1(0x51); /* push rcx */ |
1380 | } |
1381 | /* mov rcx, src_reg */ |
1382 | EMIT_mov(BPF_REG_4, src_reg); |
1383 | } |
1384 | |
1385 | /* shl %rax, %cl | shr %rax, %cl | sar %rax, %cl */ |
1386 | 			maybe_emit_1mod(&prog, dst_reg, |
1387 | BPF_CLASS(insn->code) == BPF_ALU64); |
1388 | |
1389 | b3 = simple_alu_opcodes[BPF_OP(insn->code)]; |
1390 | EMIT2(0xD3, add_1reg(b3, dst_reg)); |
1391 | |
1392 | if (src_reg != BPF_REG_4) { |
1393 | if (insn->dst_reg == BPF_REG_4) |
1394 | /* mov dst_reg, r11 */ |
1395 | EMIT_mov(insn->dst_reg, AUX_REG); |
1396 | else |
1397 | EMIT1(0x59); /* pop rcx */ |
1398 | } |
1399 | |
1400 | break; |
1401 | |
1402 | case BPF_ALU | BPF_END | BPF_FROM_BE: |
1403 | case BPF_ALU64 | BPF_END | BPF_FROM_LE: |
1404 | switch (imm32) { |
1405 | case 16: |
1406 | /* Emit 'ror %ax, 8' to swap lower 2 bytes */ |
1407 | EMIT1(0x66); |
1408 | 				if (is_ereg(dst_reg)) |
1409 | EMIT1(0x41); |
1410 | EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8); |
1411 | |
1412 | /* Emit 'movzwl eax, ax' */ |
1413 | 				if (is_ereg(dst_reg)) |
1414 | EMIT3(0x45, 0x0F, 0xB7); |
1415 | else |
1416 | EMIT2(0x0F, 0xB7); |
1417 | EMIT1(add_2reg(0xC0, dst_reg, dst_reg)); |
1418 | break; |
1419 | case 32: |
1420 | /* Emit 'bswap eax' to swap lower 4 bytes */ |
1421 | 				if (is_ereg(dst_reg)) |
1422 | EMIT2(0x41, 0x0F); |
1423 | else |
1424 | EMIT1(0x0F); |
1425 | EMIT1(add_1reg(0xC8, dst_reg)); |
1426 | break; |
1427 | case 64: |
1428 | /* Emit 'bswap rax' to swap 8 bytes */ |
1429 | EMIT3(add_1mod(0x48, dst_reg), 0x0F, |
1430 | add_1reg(0xC8, dst_reg)); |
1431 | break; |
1432 | } |
1433 | break; |
1434 | |
1435 | case BPF_ALU | BPF_END | BPF_FROM_LE: |
1436 | switch (imm32) { |
1437 | case 16: |
1438 | /* |
1439 | * Emit 'movzwl eax, ax' to zero extend 16-bit |
1440 | * into 64 bit |
1441 | */ |
1442 | 				if (is_ereg(dst_reg)) |
1443 | EMIT3(0x45, 0x0F, 0xB7); |
1444 | else |
1445 | EMIT2(0x0F, 0xB7); |
1446 | EMIT1(add_2reg(0xC0, dst_reg, dst_reg)); |
1447 | break; |
1448 | case 32: |
1449 | /* Emit 'mov eax, eax' to clear upper 32-bits */ |
1450 | 				if (is_ereg(dst_reg)) |
1451 | EMIT1(0x45); |
1452 | EMIT2(0x89, add_2reg(0xC0, dst_reg, dst_reg)); |
1453 | break; |
1454 | case 64: |
1455 | /* nop */ |
1456 | break; |
1457 | } |
1458 | break; |
1459 | |
1460 | /* speculation barrier */ |
1461 | case BPF_ST | BPF_NOSPEC: |
1462 | EMIT_LFENCE(); |
1463 | break; |
1464 | |
1465 | /* ST: *(u8*)(dst_reg + off) = imm */ |
1466 | case BPF_ST | BPF_MEM | BPF_B: |
1467 | 			if (is_ereg(dst_reg)) |
1468 | EMIT2(0x41, 0xC6); |
1469 | else |
1470 | EMIT1(0xC6); |
1471 | goto st; |
1472 | case BPF_ST | BPF_MEM | BPF_H: |
1473 | 			if (is_ereg(dst_reg)) |
1474 | EMIT3(0x66, 0x41, 0xC7); |
1475 | else |
1476 | EMIT2(0x66, 0xC7); |
1477 | goto st; |
1478 | case BPF_ST | BPF_MEM | BPF_W: |
1479 | 			if (is_ereg(dst_reg)) |
1480 | EMIT2(0x41, 0xC7); |
1481 | else |
1482 | EMIT1(0xC7); |
1483 | goto st; |
1484 | case BPF_ST | BPF_MEM | BPF_DW: |
1485 | EMIT2(add_1mod(0x48, dst_reg), 0xC7); |
1486 | |
1487 | st:			if (is_imm8(insn->off)) |
1488 | EMIT2(add_1reg(0x40, dst_reg), insn->off); |
1489 | else |
1490 | EMIT1_off32(add_1reg(0x80, dst_reg), insn->off); |
1491 | |
1492 | EMIT(imm32, bpf_size_to_x86_bytes(BPF_SIZE(insn->code))); |
1493 | break; |
1494 | |
1495 | /* STX: *(u8*)(dst_reg + off) = src_reg */ |
1496 | case BPF_STX | BPF_MEM | BPF_B: |
1497 | case BPF_STX | BPF_MEM | BPF_H: |
1498 | case BPF_STX | BPF_MEM | BPF_W: |
1499 | case BPF_STX | BPF_MEM | BPF_DW: |
1500 | 			emit_stx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off); |
1501 | break; |
1502 | |
1503 | /* LDX: dst_reg = *(u8*)(src_reg + off) */ |
1504 | case BPF_LDX | BPF_MEM | BPF_B: |
1505 | case BPF_LDX | BPF_PROBE_MEM | BPF_B: |
1506 | case BPF_LDX | BPF_MEM | BPF_H: |
1507 | case BPF_LDX | BPF_PROBE_MEM | BPF_H: |
1508 | case BPF_LDX | BPF_MEM | BPF_W: |
1509 | case BPF_LDX | BPF_PROBE_MEM | BPF_W: |
1510 | case BPF_LDX | BPF_MEM | BPF_DW: |
1511 | case BPF_LDX | BPF_PROBE_MEM | BPF_DW: |
1512 | /* LDXS: dst_reg = *(s8*)(src_reg + off) */ |
1513 | case BPF_LDX | BPF_MEMSX | BPF_B: |
1514 | case BPF_LDX | BPF_MEMSX | BPF_H: |
1515 | case BPF_LDX | BPF_MEMSX | BPF_W: |
1516 | case BPF_LDX | BPF_PROBE_MEMSX | BPF_B: |
1517 | case BPF_LDX | BPF_PROBE_MEMSX | BPF_H: |
1518 | case BPF_LDX | BPF_PROBE_MEMSX | BPF_W: |
1519 | insn_off = insn->off; |
1520 | |
1521 | if (BPF_MODE(insn->code) == BPF_PROBE_MEM || |
1522 | BPF_MODE(insn->code) == BPF_PROBE_MEMSX) { |
1523 | /* Conservatively check that src_reg + insn->off is a kernel address: |
1524 | * src_reg + insn->off >= TASK_SIZE_MAX + PAGE_SIZE |
1525 | * src_reg is used as scratch for src_reg += insn->off and restored |
1526 | * after emit_ldx if necessary |
1527 | */ |
1528 | |
1529 | u64 limit = TASK_SIZE_MAX + PAGE_SIZE; |
1530 | u8 *end_of_jmp; |
1531 | |
1532 | /* At end of these emitted checks, insn->off will have been added |
1533 | * to src_reg, so no need to do relative load with insn->off offset |
1534 | */ |
1535 | insn_off = 0; |
1536 | |
1537 | /* movabsq r11, limit */ |
1538 | EMIT2(add_1mod(0x48, AUX_REG), add_1reg(0xB8, AUX_REG)); |
1539 | EMIT((u32)limit, 4); |
1540 | EMIT(limit >> 32, 4); |
1541 | |
1542 | if (insn->off) { |
1543 | /* add src_reg, insn->off */ |
1544 | 				maybe_emit_1mod(&prog, src_reg, true); |
1545 | EMIT2_off32(0x81, add_1reg(0xC0, src_reg), insn->off); |
1546 | } |
1547 | |
1548 | /* cmp src_reg, r11 */ |
1549 | 			maybe_emit_mod(&prog, src_reg, AUX_REG, true); |
1550 | EMIT2(0x39, add_2reg(0xC0, src_reg, AUX_REG)); |
1551 | |
1552 | /* if unsigned '>=', goto load */ |
1553 | EMIT2(X86_JAE, 0); |
1554 | end_of_jmp = prog; |
1555 | |
1556 | /* xor dst_reg, dst_reg */ |
1557 | 			emit_mov_imm32(&prog, false, dst_reg, 0); |
1558 | /* jmp byte_after_ldx */ |
1559 | EMIT2(0xEB, 0); |
1560 | |
1561 | /* populate jmp_offset for JAE above to jump to start_of_ldx */ |
1562 | start_of_ldx = prog; |
1563 | end_of_jmp[-1] = start_of_ldx - end_of_jmp; |
1564 | } |
1565 | if (BPF_MODE(insn->code) == BPF_PROBE_MEMSX || |
1566 | BPF_MODE(insn->code) == BPF_MEMSX) |
1567 | 			emit_ldsx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn_off); |
1568 | 		else |
1569 | 			emit_ldx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn_off); |
1570 | if (BPF_MODE(insn->code) == BPF_PROBE_MEM || |
1571 | BPF_MODE(insn->code) == BPF_PROBE_MEMSX) { |
1572 | struct exception_table_entry *ex; |
1573 | u8 *_insn = image + proglen + (start_of_ldx - temp); |
1574 | s64 delta; |
1575 | |
1576 | /* populate jmp_offset for JMP above */ |
1577 | start_of_ldx[-1] = prog - start_of_ldx; |
1578 | |
1579 | if (insn->off && src_reg != dst_reg) { |
1580 | /* sub src_reg, insn->off |
1581 | * Restore src_reg after "add src_reg, insn->off" in prev |
1582 | * if statement. But if src_reg == dst_reg, emit_ldx |
1583 | * above already clobbered src_reg, so no need to restore. |
1584 | * If add src_reg, insn->off was unnecessary, no need to |
1585 | * restore either. |
1586 | */ |
1587 | 				maybe_emit_1mod(&prog, src_reg, true); |
1588 | EMIT2_off32(0x81, add_1reg(0xE8, src_reg), insn->off); |
1589 | } |
1590 | |
1591 | if (!bpf_prog->aux->extable) |
1592 | break; |
1593 | |
1594 | if (excnt >= bpf_prog->aux->num_exentries) { |
1595 | 				pr_err("ex gen bug\n"); |
1596 | return -EFAULT; |
1597 | } |
1598 | ex = &bpf_prog->aux->extable[excnt++]; |
1599 | |
1600 | delta = _insn - (u8 *)&ex->insn; |
1601 | 			if (!is_simm32(delta)) { |
1602 | 				pr_err("extable->insn doesn't fit into 32-bit\n"); |
1603 | return -EFAULT; |
1604 | } |
1605 | /* switch ex to rw buffer for writes */ |
1606 | ex = (void *)rw_image + ((void *)ex - (void *)image); |
1607 | |
1608 | ex->insn = delta; |
1609 | |
1610 | ex->data = EX_TYPE_BPF; |
1611 | |
1612 | if (dst_reg > BPF_REG_9) { |
1613 | 				pr_err("verifier error\n"); |
1614 | return -EFAULT; |
1615 | } |
1616 | /* |
1617 | * Compute size of x86 insn and its target dest x86 register. |
1618 | * ex_handler_bpf() will use lower 8 bits to adjust |
1619 | * pt_regs->ip to jump over this x86 instruction |
1620 | * and upper bits to figure out which pt_regs to zero out. |
1621 | * End result: x86 insn "mov rbx, qword ptr [rax+0x14]" |
1622 | * of 4 bytes will be ignored and rbx will be zero inited. |
1623 | */ |
1624 | ex->fixup = (prog - start_of_ldx) | (reg2pt_regs[dst_reg] << 8); |
1625 | } |
1626 | break; |
1627 | |
1628 | case BPF_STX | BPF_ATOMIC | BPF_W: |
1629 | case BPF_STX | BPF_ATOMIC | BPF_DW: |
1630 | if (insn->imm == (BPF_AND | BPF_FETCH) || |
1631 | insn->imm == (BPF_OR | BPF_FETCH) || |
1632 | insn->imm == (BPF_XOR | BPF_FETCH)) { |
1633 | bool is64 = BPF_SIZE(insn->code) == BPF_DW; |
1634 | u32 real_src_reg = src_reg; |
1635 | u32 real_dst_reg = dst_reg; |
1636 | u8 *branch_target; |
1637 | |
1638 | /* |
1639 | * Can't be implemented with a single x86 insn. |
1640 | * Need to do a CMPXCHG loop. |
1641 | */ |
1642 | |
1643 | /* Will need RAX as a CMPXCHG operand so save R0 */ |
1644 | 			emit_mov_reg(&prog, true, BPF_REG_AX, BPF_REG_0); |
1645 | if (src_reg == BPF_REG_0) |
1646 | real_src_reg = BPF_REG_AX; |
1647 | if (dst_reg == BPF_REG_0) |
1648 | real_dst_reg = BPF_REG_AX; |
1649 | |
1650 | branch_target = prog; |
1651 | /* Load old value */ |
1652 | 			emit_ldx(&prog, BPF_SIZE(insn->code), |
1653 | 				 BPF_REG_0, real_dst_reg, insn->off); |
1654 | /* |
1655 | * Perform the (commutative) operation locally, |
1656 | * put the result in the AUX_REG. |
1657 | */ |
1658 | 			emit_mov_reg(&prog, is64, AUX_REG, BPF_REG_0); |
1659 | 			maybe_emit_mod(&prog, AUX_REG, real_src_reg, is64); |
1660 | EMIT2(simple_alu_opcodes[BPF_OP(insn->imm)], |
1661 | add_2reg(0xC0, AUX_REG, real_src_reg)); |
1662 | /* Attempt to swap in new value */ |
1663 | 			err = emit_atomic(&prog, BPF_CMPXCHG, |
1664 | 					  real_dst_reg, AUX_REG, |
1665 | 					  insn->off, |
1666 | BPF_SIZE(insn->code)); |
1667 | if (WARN_ON(err)) |
1668 | return err; |
1669 | /* |
1670 | * ZF tells us whether we won the race. If it's |
1671 | * cleared we need to try again. |
1672 | */ |
1673 | EMIT2(X86_JNE, -(prog - branch_target) - 2); |
1674 | /* Return the pre-modification value */ |
1675 | 			emit_mov_reg(&prog, is64, real_src_reg, BPF_REG_0); |
1676 | /* Restore R0 after clobbering RAX */ |
1677 | 			emit_mov_reg(&prog, true, BPF_REG_0, BPF_REG_AX); |
1678 | break; |
1679 | } |
1680 | |
1681 | 			err = emit_atomic(&prog, insn->imm, dst_reg, src_reg, |
1682 | 					  insn->off, BPF_SIZE(insn->code)); |
1683 | if (err) |
1684 | return err; |
1685 | break; |
1686 | |
1687 | /* call */ |
1688 | case BPF_JMP | BPF_CALL: { |
1689 | int offs; |
1690 | |
1691 | func = (u8 *) __bpf_call_base + imm32; |
1692 | if (tail_call_reachable) { |
1693 | RESTORE_TAIL_CALL_CNT(bpf_prog->aux->stack_depth); |
1694 | if (!imm32) |
1695 | return -EINVAL; |
1696 | 				offs = 7 + x86_call_depth_emit_accounting(&prog, func); |
1697 | } else { |
1698 | if (!imm32) |
1699 | return -EINVAL; |
1700 | 				offs = x86_call_depth_emit_accounting(&prog, func); |
1701 | 			} |
1702 | 			if (emit_call(&prog, func, image + addrs[i - 1] + offs)) |
1703 | return -EINVAL; |
1704 | break; |
1705 | } |
1706 | |
1707 | case BPF_JMP | BPF_TAIL_CALL: |
1708 | if (imm32) |
emit_bpf_tail_call_direct(bpf_prog,
&bpf_prog->aux->poke_tab[imm32 - 1],
&prog, image + addrs[i - 1],
callee_regs_used,
bpf_prog->aux->stack_depth,
ctx);
else
emit_bpf_tail_call_indirect(bpf_prog,
&prog,
callee_regs_used,
bpf_prog->aux->stack_depth,
image + addrs[i - 1],
ctx);
1722 | break; |
1723 | |
1724 | /* cond jump */ |
1725 | case BPF_JMP | BPF_JEQ | BPF_X: |
1726 | case BPF_JMP | BPF_JNE | BPF_X: |
1727 | case BPF_JMP | BPF_JGT | BPF_X: |
1728 | case BPF_JMP | BPF_JLT | BPF_X: |
1729 | case BPF_JMP | BPF_JGE | BPF_X: |
1730 | case BPF_JMP | BPF_JLE | BPF_X: |
1731 | case BPF_JMP | BPF_JSGT | BPF_X: |
1732 | case BPF_JMP | BPF_JSLT | BPF_X: |
1733 | case BPF_JMP | BPF_JSGE | BPF_X: |
1734 | case BPF_JMP | BPF_JSLE | BPF_X: |
1735 | case BPF_JMP32 | BPF_JEQ | BPF_X: |
1736 | case BPF_JMP32 | BPF_JNE | BPF_X: |
1737 | case BPF_JMP32 | BPF_JGT | BPF_X: |
1738 | case BPF_JMP32 | BPF_JLT | BPF_X: |
1739 | case BPF_JMP32 | BPF_JGE | BPF_X: |
1740 | case BPF_JMP32 | BPF_JLE | BPF_X: |
1741 | case BPF_JMP32 | BPF_JSGT | BPF_X: |
1742 | case BPF_JMP32 | BPF_JSLT | BPF_X: |
1743 | case BPF_JMP32 | BPF_JSGE | BPF_X: |
1744 | case BPF_JMP32 | BPF_JSLE | BPF_X: |
1745 | /* cmp dst_reg, src_reg */ |
1746 | maybe_emit_mod(pprog: &prog, dst_reg, src_reg, |
1747 | BPF_CLASS(insn->code) == BPF_JMP); |
1748 | EMIT2(0x39, add_2reg(0xC0, dst_reg, src_reg)); |
1749 | goto emit_cond_jmp; |
1750 | |
1751 | case BPF_JMP | BPF_JSET | BPF_X: |
1752 | case BPF_JMP32 | BPF_JSET | BPF_X: |
1753 | /* test dst_reg, src_reg */ |
1754 | maybe_emit_mod(pprog: &prog, dst_reg, src_reg, |
1755 | BPF_CLASS(insn->code) == BPF_JMP); |
1756 | EMIT2(0x85, add_2reg(0xC0, dst_reg, src_reg)); |
1757 | goto emit_cond_jmp; |
1758 | |
1759 | case BPF_JMP | BPF_JSET | BPF_K: |
1760 | case BPF_JMP32 | BPF_JSET | BPF_K: |
1761 | /* test dst_reg, imm32 */ |
1762 | maybe_emit_1mod(pprog: &prog, reg: dst_reg, |
1763 | BPF_CLASS(insn->code) == BPF_JMP); |
1764 | EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32); |
1765 | goto emit_cond_jmp; |
1766 | |
1767 | case BPF_JMP | BPF_JEQ | BPF_K: |
1768 | case BPF_JMP | BPF_JNE | BPF_K: |
1769 | case BPF_JMP | BPF_JGT | BPF_K: |
1770 | case BPF_JMP | BPF_JLT | BPF_K: |
1771 | case BPF_JMP | BPF_JGE | BPF_K: |
1772 | case BPF_JMP | BPF_JLE | BPF_K: |
1773 | case BPF_JMP | BPF_JSGT | BPF_K: |
1774 | case BPF_JMP | BPF_JSLT | BPF_K: |
1775 | case BPF_JMP | BPF_JSGE | BPF_K: |
1776 | case BPF_JMP | BPF_JSLE | BPF_K: |
1777 | case BPF_JMP32 | BPF_JEQ | BPF_K: |
1778 | case BPF_JMP32 | BPF_JNE | BPF_K: |
1779 | case BPF_JMP32 | BPF_JGT | BPF_K: |
1780 | case BPF_JMP32 | BPF_JLT | BPF_K: |
1781 | case BPF_JMP32 | BPF_JGE | BPF_K: |
1782 | case BPF_JMP32 | BPF_JLE | BPF_K: |
1783 | case BPF_JMP32 | BPF_JSGT | BPF_K: |
1784 | case BPF_JMP32 | BPF_JSLT | BPF_K: |
1785 | case BPF_JMP32 | BPF_JSGE | BPF_K: |
1786 | case BPF_JMP32 | BPF_JSLE | BPF_K: |
1787 | /* test dst_reg, dst_reg to save one extra byte */ |
1788 | if (imm32 == 0) { |
1789 | maybe_emit_mod(pprog: &prog, dst_reg, src_reg: dst_reg, |
1790 | BPF_CLASS(insn->code) == BPF_JMP); |
1791 | EMIT2(0x85, add_2reg(0xC0, dst_reg, dst_reg)); |
1792 | goto emit_cond_jmp; |
1793 | } |
1794 | |
1795 | /* cmp dst_reg, imm8/32 */ |
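/* Opcode 0x83 /7 takes a sign-extended imm8 (shorter encoding),
 * while 0x81 /7 takes a full imm32, hence the is_imm8() check below.
 */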
1796 | maybe_emit_1mod(pprog: &prog, reg: dst_reg, |
1797 | BPF_CLASS(insn->code) == BPF_JMP); |
1798 | |
if (is_imm8(imm32))
1800 | EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32); |
1801 | else |
1802 | EMIT2_off32(0x81, add_1reg(0xF8, dst_reg), imm32); |
1803 | |
1804 | emit_cond_jmp: /* Convert BPF opcode to x86 */ |
1805 | switch (BPF_OP(insn->code)) { |
1806 | case BPF_JEQ: |
1807 | jmp_cond = X86_JE; |
1808 | break; |
1809 | case BPF_JSET: |
1810 | case BPF_JNE: |
1811 | jmp_cond = X86_JNE; |
1812 | break; |
1813 | case BPF_JGT: |
1814 | /* GT is unsigned '>', JA in x86 */ |
1815 | jmp_cond = X86_JA; |
1816 | break; |
1817 | case BPF_JLT: |
1818 | /* LT is unsigned '<', JB in x86 */ |
1819 | jmp_cond = X86_JB; |
1820 | break; |
1821 | case BPF_JGE: |
1822 | /* GE is unsigned '>=', JAE in x86 */ |
1823 | jmp_cond = X86_JAE; |
1824 | break; |
1825 | case BPF_JLE: |
1826 | /* LE is unsigned '<=', JBE in x86 */ |
1827 | jmp_cond = X86_JBE; |
1828 | break; |
1829 | case BPF_JSGT: |
1830 | /* Signed '>', GT in x86 */ |
1831 | jmp_cond = X86_JG; |
1832 | break; |
1833 | case BPF_JSLT: |
1834 | /* Signed '<', LT in x86 */ |
1835 | jmp_cond = X86_JL; |
1836 | break; |
1837 | case BPF_JSGE: |
1838 | /* Signed '>=', GE in x86 */ |
1839 | jmp_cond = X86_JGE; |
1840 | break; |
1841 | case BPF_JSLE: |
1842 | /* Signed '<=', LE in x86 */ |
1843 | jmp_cond = X86_JLE; |
1844 | break; |
1845 | default: /* to silence GCC warning */ |
1846 | return -EFAULT; |
1847 | } |
1848 | jmp_offset = addrs[i + insn->off] - addrs[i]; |
if (is_imm8(jmp_offset)) {
1850 | if (jmp_padding) { |
1851 | /* To keep the jmp_offset valid, the extra bytes are |
1852 | * padded before the jump insn, so we subtract the |
1853 | * 2 bytes of jmp_cond insn from INSN_SZ_DIFF. |
1854 | * |
1855 | * If the previous pass already emits an imm8 |
1856 | * jmp_cond, then this BPF insn won't shrink, so |
1857 | * "nops" is 0. |
1858 | * |
1859 | * On the other hand, if the previous pass emits an |
1860 | * imm32 jmp_cond, the extra 4 bytes(*) is padded to |
1861 | * keep the image from shrinking further. |
1862 | * |
1863 | * (*) imm32 jmp_cond is 6 bytes, and imm8 jmp_cond |
1864 | * is 2 bytes, so the size difference is 4 bytes. |
1865 | */ |
1866 | nops = INSN_SZ_DIFF - 2; |
1867 | if (nops != 0 && nops != 4) { |
1868 | pr_err("unexpected jmp_cond padding: %d bytes\n" , |
1869 | nops); |
1870 | return -EFAULT; |
1871 | } |
1872 | emit_nops(pprog: &prog, len: nops); |
1873 | } |
1874 | EMIT2(jmp_cond, jmp_offset); |
} else if (is_simm32(jmp_offset)) {
1876 | EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset); |
1877 | } else { |
1878 | pr_err("cond_jmp gen bug %llx\n" , jmp_offset); |
1879 | return -EFAULT; |
1880 | } |
1881 | |
1882 | break; |
1883 | |
1884 | case BPF_JMP | BPF_JA: |
1885 | case BPF_JMP32 | BPF_JA: |
1886 | if (BPF_CLASS(insn->code) == BPF_JMP) { |
1887 | if (insn->off == -1) |
1888 | /* -1 jmp instructions will always jump |
1889 | * backwards two bytes. Explicitly handling |
1890 | * this case avoids wasting too many passes |
1891 | * when there are long sequences of replaced |
1892 | * dead code. |
1893 | */ |
1894 | jmp_offset = -2; |
1895 | else |
1896 | jmp_offset = addrs[i + insn->off] - addrs[i]; |
1897 | } else { |
1898 | if (insn->imm == -1) |
1899 | jmp_offset = -2; |
1900 | else |
1901 | jmp_offset = addrs[i + insn->imm] - addrs[i]; |
1902 | } |
1903 | |
1904 | if (!jmp_offset) { |
1905 | /* |
1906 | * If jmp_padding is enabled, the extra nops will |
1907 | * be inserted. Otherwise, optimize out nop jumps. |
1908 | */ |
1909 | if (jmp_padding) { |
1910 | /* There are 3 possible conditions. |
1911 | * (1) This BPF_JA is already optimized out in |
1912 | * the previous run, so there is no need |
1913 | * to pad any extra byte (0 byte). |
1914 | * (2) The previous pass emits an imm8 jmp, |
1915 | * so we pad 2 bytes to match the previous |
1916 | * insn size. |
1917 | * (3) Similarly, the previous pass emits an |
1918 | * imm32 jmp, and 5 bytes is padded. |
1919 | */ |
1920 | nops = INSN_SZ_DIFF; |
1921 | if (nops != 0 && nops != 2 && nops != 5) { |
1922 | pr_err("unexpected nop jump padding: %d bytes\n" , |
1923 | nops); |
1924 | return -EFAULT; |
1925 | } |
1926 | emit_nops(pprog: &prog, len: nops); |
1927 | } |
1928 | break; |
1929 | } |
1930 | emit_jmp: |
if (is_imm8(jmp_offset)) {
1932 | if (jmp_padding) { |
1933 | /* To avoid breaking jmp_offset, the extra bytes |
1934 | * are padded before the actual jmp insn, so |
1935 | * 2 bytes is subtracted from INSN_SZ_DIFF. |
1936 | * |
1937 | * If the previous pass already emits an imm8 |
1938 | * jmp, there is nothing to pad (0 byte). |
1939 | * |
1940 | * If it emits an imm32 jmp (5 bytes) previously |
1941 | * and now an imm8 jmp (2 bytes), then we pad |
1942 | * (5 - 2 = 3) bytes to stop the image from |
1943 | * shrinking further. |
1944 | */ |
1945 | nops = INSN_SZ_DIFF - 2; |
1946 | if (nops != 0 && nops != 3) { |
1947 | pr_err("unexpected jump padding: %d bytes\n" , |
1948 | nops); |
1949 | return -EFAULT; |
1950 | } |
1951 | emit_nops(pprog: &prog, INSN_SZ_DIFF - 2); |
1952 | } |
1953 | EMIT2(0xEB, jmp_offset); |
} else if (is_simm32(jmp_offset)) {
1955 | EMIT1_off32(0xE9, jmp_offset); |
1956 | } else { |
1957 | pr_err("jmp gen bug %llx\n" , jmp_offset); |
1958 | return -EFAULT; |
1959 | } |
1960 | break; |
1961 | |
1962 | case BPF_JMP | BPF_EXIT: |
1963 | if (seen_exit) { |
1964 | jmp_offset = ctx->cleanup_addr - addrs[i]; |
1965 | goto emit_jmp; |
1966 | } |
1967 | seen_exit = true; |
1968 | /* Update cleanup_addr */ |
1969 | ctx->cleanup_addr = proglen; |
1970 | if (bpf_prog->aux->exception_boundary) { |
1971 | pop_callee_regs(pprog: &prog, callee_regs_used: all_callee_regs_used); |
1972 | pop_r12(pprog: &prog); |
1973 | } else { |
1974 | pop_callee_regs(pprog: &prog, callee_regs_used); |
1975 | } |
1976 | EMIT1(0xC9); /* leave */ |
1977 | emit_return(pprog: &prog, ip: image + addrs[i - 1] + (prog - temp)); |
1978 | break; |
1979 | |
1980 | default: |
1981 | /* |
1982 | * By design x86-64 JIT should support all BPF instructions. |
1983 | * This error will be seen if new instruction was added |
1984 | * to the interpreter, but not to the JIT, or if there is |
1985 | * junk in bpf_prog. |
1986 | */ |
1987 | pr_err("bpf_jit: unknown opcode %02x\n" , insn->code); |
1988 | return -EINVAL; |
1989 | } |
1990 | |
1991 | ilen = prog - temp; |
1992 | if (ilen > BPF_MAX_INSN_SIZE) { |
1993 | pr_err("bpf_jit: fatal insn size error\n" ); |
1994 | return -EFAULT; |
1995 | } |
1996 | |
1997 | if (image) { |
1998 | /* |
1999 | * When populating the image, assert that: |
2000 | * |
2001 | * i) We do not write beyond the allocated space, and |
2002 | * ii) addrs[i] did not change from the prior run, in order |
2003 | * to validate assumptions made for computing branch |
2004 | * displacements. |
2005 | */ |
2006 | if (unlikely(proglen + ilen > oldproglen || |
2007 | proglen + ilen != addrs[i])) { |
2008 | pr_err("bpf_jit: fatal error\n" ); |
2009 | return -EFAULT; |
2010 | } |
2011 | memcpy(rw_image + proglen, temp, ilen); |
2012 | } |
2013 | proglen += ilen; |
2014 | addrs[i] = proglen; |
2015 | prog = temp; |
2016 | } |
2017 | |
2018 | if (image && excnt != bpf_prog->aux->num_exentries) { |
2019 | pr_err("extable is not populated\n" ); |
2020 | return -EFAULT; |
2021 | } |
2022 | return proglen; |
2023 | } |
2024 | |
2025 | static void clean_stack_garbage(const struct btf_func_model *m, |
2026 | u8 **pprog, int nr_stack_slots, |
2027 | int stack_size) |
2028 | { |
2029 | int arg_size, off; |
2030 | u8 *prog; |
2031 | |
/* Generally speaking, the compiler will pass the arguments
* on the stack with "push" instructions, each taking 8 bytes
* of stack. In this case, there won't be garbage values
* when we copy the arguments from the origin stack frame to
* the current one in BPF_DW-sized chunks.
*
* However, sometimes the compiler only allocates 4 bytes on
* the stack for an argument. For now, this can only
* happen if there is a single on-stack argument and its size
* is no more than 4 bytes. In this case, there will be garbage
* values in the upper 4 bytes of the slot where we store the
* argument in the current stack frame.
2044 | * |
2045 | * arguments on origin stack: |
2046 | * |
2047 | * stack_arg_1(4-byte) xxx(4-byte) |
2048 | * |
2049 | * what we copy: |
2050 | * |
2051 | * stack_arg_1(8-byte): stack_arg_1(origin) xxx |
2052 | * |
2053 | * and the xxx is the garbage values which we should clean here. |
2054 | */ |
2055 | if (nr_stack_slots != 1) |
2056 | return; |
2057 | |
2058 | /* the size of the last argument */ |
2059 | arg_size = m->arg_size[m->nr_args - 1]; |
2060 | if (arg_size <= 4) { |
2061 | off = -(stack_size - 4); |
2062 | prog = *pprog; |
2063 | /* mov DWORD PTR [rbp + off], 0 */ |
if (!is_imm8(off))
2065 | EMIT2_off32(0xC7, 0x85, off); |
2066 | else |
2067 | EMIT3(0xC7, 0x45, off); |
2068 | EMIT(0, 4); |
2069 | *pprog = prog; |
2070 | } |
2071 | } |
2072 | |
2073 | /* get the count of the regs that are used to pass arguments */ |
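/* For example, for
 *   int foo(char, char, char, char, char, struct foo_struct, char);
 * from the comment in save_args() below, where the struct needs two
 * registers, this returns 6: the five leading chars use five regs,
 * the struct does not fit in the single remaining reg and goes on the
 * stack, and the trailing char still takes the sixth reg (r9).
 */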
2074 | static int get_nr_used_regs(const struct btf_func_model *m) |
2075 | { |
2076 | int i, arg_regs, nr_used_regs = 0; |
2077 | |
2078 | for (i = 0; i < min_t(int, m->nr_args, MAX_BPF_FUNC_ARGS); i++) { |
2079 | arg_regs = (m->arg_size[i] + 7) / 8; |
2080 | if (nr_used_regs + arg_regs <= 6) |
2081 | nr_used_regs += arg_regs; |
2082 | |
2083 | if (nr_used_regs >= 6) |
2084 | break; |
2085 | } |
2086 | |
2087 | return nr_used_regs; |
2088 | } |
2089 | |
2090 | static void save_args(const struct btf_func_model *m, u8 **prog, |
2091 | int stack_size, bool for_call_origin) |
2092 | { |
2093 | int arg_regs, first_off = 0, nr_regs = 0, nr_stack_slots = 0; |
2094 | int i, j; |
2095 | |
2096 | /* Store function arguments to stack. |
2097 | * For a function that accepts two pointers the sequence will be: |
2098 | * mov QWORD PTR [rbp-0x10],rdi |
2099 | * mov QWORD PTR [rbp-0x8],rsi |
2100 | */ |
2101 | for (i = 0; i < min_t(int, m->nr_args, MAX_BPF_FUNC_ARGS); i++) { |
2102 | arg_regs = (m->arg_size[i] + 7) / 8; |
2103 | |
/* According to Yonghong's research, struct members
* should be either all in registers or all on the stack.
* Meanwhile, the compiler will pass an argument in regs only
* if the remaining regs can hold the whole argument.
*
* The args can therefore end up out of order. For example:
*
* struct foo_struct {
* long a;
* int b;
* };
* int foo(char, char, char, char, char, struct foo_struct,
* char);
*
* args 1-5 and arg 7 will be passed in regs, and arg 6 will
* be passed on the stack.
2120 | */ |
2121 | if (nr_regs + arg_regs > 6) { |
2122 | /* copy function arguments from origin stack frame |
2123 | * into current stack frame. |
2124 | * |
2125 | * The starting address of the arguments on-stack |
2126 | * is: |
2127 | * rbp + 8(push rbp) + |
2128 | * 8(return addr of origin call) + |
2129 | * 8(return addr of the caller) |
2130 | * which means: rbp + 24 |
2131 | */ |
2132 | for (j = 0; j < arg_regs; j++) { |
emit_ldx(prog, BPF_DW, BPF_REG_0, BPF_REG_FP,
nr_stack_slots * 8 + 0x18);
emit_stx(prog, BPF_DW, BPF_REG_FP, BPF_REG_0,
-stack_size);
2137 | |
2138 | if (!nr_stack_slots) |
2139 | first_off = stack_size; |
2140 | stack_size -= 8; |
2141 | nr_stack_slots++; |
2142 | } |
2143 | } else { |
/* Only copy the on-stack arguments to the current
* 'stack_size' area and ignore the regs; this path
* prepares the on-stack arguments for the origin call.
2147 | */ |
2148 | if (for_call_origin) { |
2149 | nr_regs += arg_regs; |
2150 | continue; |
2151 | } |
2152 | |
2153 | /* copy the arguments from regs into stack */ |
2154 | for (j = 0; j < arg_regs; j++) { |
emit_stx(prog, BPF_DW, BPF_REG_FP,
nr_regs == 5 ? X86_REG_R9 : BPF_REG_1 + nr_regs,
-stack_size);
2158 | stack_size -= 8; |
2159 | nr_regs++; |
2160 | } |
2161 | } |
2162 | } |
2163 | |
clean_stack_garbage(m, prog, nr_stack_slots, first_off);
2165 | } |
2166 | |
2167 | static void restore_regs(const struct btf_func_model *m, u8 **prog, |
2168 | int stack_size) |
2169 | { |
2170 | int i, j, arg_regs, nr_regs = 0; |
2171 | |
2172 | /* Restore function arguments from stack. |
2173 | * For a function that accepts two pointers the sequence will be: |
2174 | * EMIT4(0x48, 0x8B, 0x7D, 0xF0); mov rdi,QWORD PTR [rbp-0x10] |
2175 | * EMIT4(0x48, 0x8B, 0x75, 0xF8); mov rsi,QWORD PTR [rbp-0x8] |
2176 | * |
2177 | * The logic here is similar to what we do in save_args() |
2178 | */ |
2179 | for (i = 0; i < min_t(int, m->nr_args, MAX_BPF_FUNC_ARGS); i++) { |
2180 | arg_regs = (m->arg_size[i] + 7) / 8; |
2181 | if (nr_regs + arg_regs <= 6) { |
2182 | for (j = 0; j < arg_regs; j++) { |
emit_ldx(prog, BPF_DW,
nr_regs == 5 ? X86_REG_R9 : BPF_REG_1 + nr_regs,
BPF_REG_FP,
-stack_size);
2187 | stack_size -= 8; |
2188 | nr_regs++; |
2189 | } |
2190 | } else { |
2191 | stack_size -= 8 * arg_regs; |
2192 | } |
2193 | |
2194 | if (nr_regs >= 6) |
2195 | break; |
2196 | } |
2197 | } |
2198 | |
2199 | static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog, |
2200 | struct bpf_tramp_link *l, int stack_size, |
2201 | int run_ctx_off, bool save_ret) |
2202 | { |
2203 | u8 *prog = *pprog; |
2204 | u8 *jmp_insn; |
2205 | int ctx_cookie_off = offsetof(struct bpf_tramp_run_ctx, bpf_cookie); |
2206 | struct bpf_prog *p = l->link.prog; |
2207 | u64 cookie = l->cookie; |
2208 | |
2209 | /* mov rdi, cookie */ |
2210 | emit_mov_imm64(pprog: &prog, dst_reg: BPF_REG_1, imm32_hi: (long) cookie >> 32, imm32_lo: (u32) (long) cookie); |
2211 | |
2212 | /* Prepare struct bpf_tramp_run_ctx. |
2213 | * |
2214 | * bpf_tramp_run_ctx is already preserved by |
2215 | * arch_prepare_bpf_trampoline(). |
2216 | * |
2217 | * mov QWORD PTR [rbp - run_ctx_off + ctx_cookie_off], rdi |
2218 | */ |
2219 | emit_stx(pprog: &prog, BPF_DW, BPF_REG_FP, src_reg: BPF_REG_1, off: -run_ctx_off + ctx_cookie_off); |
2220 | |
2221 | /* arg1: mov rdi, progs[i] */ |
2222 | emit_mov_imm64(pprog: &prog, dst_reg: BPF_REG_1, imm32_hi: (long) p >> 32, imm32_lo: (u32) (long) p); |
2223 | /* arg2: lea rsi, [rbp - ctx_cookie_off] */ |
2224 | if (!is_imm8(value: -run_ctx_off)) |
2225 | EMIT3_off32(0x48, 0x8D, 0xB5, -run_ctx_off); |
2226 | else |
2227 | EMIT4(0x48, 0x8D, 0x75, -run_ctx_off); |
2228 | |
2229 | if (emit_rsb_call(pprog: &prog, func: bpf_trampoline_enter(prog: p), ip: prog)) |
2230 | return -EINVAL; |
2231 | /* remember prog start time returned by __bpf_prog_enter */ |
2232 | emit_mov_reg(pprog: &prog, is64: true, dst_reg: BPF_REG_6, src_reg: BPF_REG_0); |
2233 | |
2234 | /* if (__bpf_prog_enter*(prog) == 0) |
2235 | * goto skip_exec_of_prog; |
2236 | */ |
2237 | EMIT3(0x48, 0x85, 0xC0); /* test rax,rax */ |
2238 | /* emit 2 nops that will be replaced with JE insn */ |
2239 | jmp_insn = prog; |
2240 | emit_nops(pprog: &prog, len: 2); |
2241 | |
2242 | /* arg1: lea rdi, [rbp - stack_size] */ |
if (!is_imm8(-stack_size))
2244 | EMIT3_off32(0x48, 0x8D, 0xBD, -stack_size); |
2245 | else |
2246 | EMIT4(0x48, 0x8D, 0x7D, -stack_size); |
2247 | /* arg2: progs[i]->insnsi for interpreter */ |
2248 | if (!p->jited) |
2249 | emit_mov_imm64(pprog: &prog, dst_reg: BPF_REG_2, |
2250 | imm32_hi: (long) p->insnsi >> 32, |
2251 | imm32_lo: (u32) (long) p->insnsi); |
2252 | /* call JITed bpf program or interpreter */ |
2253 | if (emit_rsb_call(pprog: &prog, func: p->bpf_func, ip: prog)) |
2254 | return -EINVAL; |
2255 | |
2256 | /* |
2257 | * BPF_TRAMP_MODIFY_RETURN trampolines can modify the return |
2258 | * of the previous call which is then passed on the stack to |
2259 | * the next BPF program. |
2260 | * |
2261 | * BPF_TRAMP_FENTRY trampoline may need to return the return |
2262 | * value of BPF_PROG_TYPE_STRUCT_OPS prog. |
2263 | */ |
2264 | if (save_ret) |
2265 | emit_stx(pprog: &prog, BPF_DW, BPF_REG_FP, src_reg: BPF_REG_0, off: -8); |
2266 | |
2267 | /* replace 2 nops with JE insn, since jmp target is known */ |
2268 | jmp_insn[0] = X86_JE; |
2269 | jmp_insn[1] = prog - jmp_insn - 2; |
2270 | |
2271 | /* arg1: mov rdi, progs[i] */ |
2272 | emit_mov_imm64(pprog: &prog, dst_reg: BPF_REG_1, imm32_hi: (long) p >> 32, imm32_lo: (u32) (long) p); |
2273 | /* arg2: mov rsi, rbx <- start time in nsec */ |
2274 | emit_mov_reg(pprog: &prog, is64: true, dst_reg: BPF_REG_2, src_reg: BPF_REG_6); |
2275 | /* arg3: lea rdx, [rbp - run_ctx_off] */ |
if (!is_imm8(-run_ctx_off))
2277 | EMIT3_off32(0x48, 0x8D, 0x95, -run_ctx_off); |
2278 | else |
2279 | EMIT4(0x48, 0x8D, 0x55, -run_ctx_off); |
2280 | if (emit_rsb_call(pprog: &prog, func: bpf_trampoline_exit(prog: p), ip: prog)) |
2281 | return -EINVAL; |
2282 | |
2283 | *pprog = prog; |
2284 | return 0; |
2285 | } |
2286 | |
2287 | static void emit_align(u8 **pprog, u32 align) |
2288 | { |
2289 | u8 *target, *prog = *pprog; |
2290 | |
2291 | target = PTR_ALIGN(prog, align); |
2292 | if (target != prog) |
2293 | emit_nops(pprog: &prog, len: target - prog); |
2294 | |
2295 | *pprog = prog; |
2296 | } |
2297 | |
2298 | static int emit_cond_near_jump(u8 **pprog, void *func, void *ip, u8 jmp_cond) |
2299 | { |
2300 | u8 *prog = *pprog; |
2301 | s64 offset; |
2302 | |
2303 | offset = func - (ip + 2 + 4); |
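/* The 0x0F jcc rel32 encoding is 2 opcode bytes plus a 4-byte
 * displacement measured from the end of the instruction, hence
 * "ip + 2 + 4" above.
 */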
if (!is_simm32(offset)) {
pr_err("Target %p is out of range\n", func);
2306 | return -EINVAL; |
2307 | } |
2308 | EMIT2_off32(0x0F, jmp_cond + 0x10, offset); |
2309 | *pprog = prog; |
2310 | return 0; |
2311 | } |
2312 | |
2313 | static int invoke_bpf(const struct btf_func_model *m, u8 **pprog, |
2314 | struct bpf_tramp_links *tl, int stack_size, |
2315 | int run_ctx_off, bool save_ret) |
2316 | { |
2317 | int i; |
2318 | u8 *prog = *pprog; |
2319 | |
2320 | for (i = 0; i < tl->nr_links; i++) { |
2321 | if (invoke_bpf_prog(m, pprog: &prog, l: tl->links[i], stack_size, |
2322 | run_ctx_off, save_ret)) |
2323 | return -EINVAL; |
2324 | } |
2325 | *pprog = prog; |
2326 | return 0; |
2327 | } |
2328 | |
2329 | static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog, |
2330 | struct bpf_tramp_links *tl, int stack_size, |
2331 | int run_ctx_off, u8 **branches) |
2332 | { |
2333 | u8 *prog = *pprog; |
2334 | int i; |
2335 | |
2336 | /* The first fmod_ret program will receive a garbage return value. |
2337 | * Set this to 0 to avoid confusing the program. |
2338 | */ |
2339 | emit_mov_imm32(pprog: &prog, sign_propagate: false, dst_reg: BPF_REG_0, imm32: 0); |
2340 | emit_stx(pprog: &prog, BPF_DW, BPF_REG_FP, src_reg: BPF_REG_0, off: -8); |
2341 | for (i = 0; i < tl->nr_links; i++) { |
2342 | if (invoke_bpf_prog(m, pprog: &prog, l: tl->links[i], stack_size, run_ctx_off, save_ret: true)) |
2343 | return -EINVAL; |
2344 | |
2345 | /* mod_ret prog stored return value into [rbp - 8]. Emit: |
2346 | * if (*(u64 *)(rbp - 8) != 0) |
2347 | * goto do_fexit; |
2348 | */ |
2349 | /* cmp QWORD PTR [rbp - 0x8], 0x0 */ |
2350 | EMIT4(0x48, 0x83, 0x7d, 0xf8); EMIT1(0x00); |
2351 | |
/* Save the location of the branch and generate 6 nops
* (4 bytes for an offset and 2 bytes for the jump). These nops
2354 | * are replaced with a conditional jump once do_fexit (i.e. the |
2355 | * start of the fexit invocation) is finalized. |
2356 | */ |
2357 | branches[i] = prog; |
2358 | emit_nops(pprog: &prog, len: 4 + 2); |
2359 | } |
2360 | |
2361 | *pprog = prog; |
2362 | return 0; |
2363 | } |
2364 | |
2365 | /* Example: |
2366 | * __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev); |
2367 | * its 'struct btf_func_model' will be nr_args=2 |
2368 | * The assembly code when eth_type_trans is executing after trampoline: |
2369 | * |
2370 | * push rbp |
2371 | * mov rbp, rsp |
2372 | * sub rsp, 16 // space for skb and dev |
2373 | * push rbx // temp regs to pass start time |
2374 | * mov qword ptr [rbp - 16], rdi // save skb pointer to stack |
2375 | * mov qword ptr [rbp - 8], rsi // save dev pointer to stack |
2376 | * call __bpf_prog_enter // rcu_read_lock and preempt_disable |
* mov rbx, rax // remember start time if bpf stats are enabled
2378 | * lea rdi, [rbp - 16] // R1==ctx of bpf prog |
2379 | * call addr_of_jited_FENTRY_prog |
2380 | * movabsq rdi, 64bit_addr_of_struct_bpf_prog // unused if bpf stats are off |
2381 | * mov rsi, rbx // prog start time |
2382 | * call __bpf_prog_exit // rcu_read_unlock, preempt_enable and stats math |
2383 | * mov rdi, qword ptr [rbp - 16] // restore skb pointer from stack |
2384 | * mov rsi, qword ptr [rbp - 8] // restore dev pointer from stack |
2385 | * pop rbx |
2386 | * leave |
2387 | * ret |
2388 | * |
2389 | * eth_type_trans has 5 byte nop at the beginning. These 5 bytes will be |
2390 | * replaced with 'call generated_bpf_trampoline'. When it returns |
2391 | * eth_type_trans will continue executing with original skb and dev pointers. |
2392 | * |
2393 | * The assembly code when eth_type_trans is called from trampoline: |
2394 | * |
2395 | * push rbp |
2396 | * mov rbp, rsp |
2397 | * sub rsp, 24 // space for skb, dev, return value |
2398 | * push rbx // temp regs to pass start time |
2399 | * mov qword ptr [rbp - 24], rdi // save skb pointer to stack |
2400 | * mov qword ptr [rbp - 16], rsi // save dev pointer to stack |
2401 | * call __bpf_prog_enter // rcu_read_lock and preempt_disable |
2402 | * mov rbx, rax // remember start time if bpf stats are enabled |
2403 | * lea rdi, [rbp - 24] // R1==ctx of bpf prog |
2404 | * call addr_of_jited_FENTRY_prog // bpf prog can access skb and dev |
2405 | * movabsq rdi, 64bit_addr_of_struct_bpf_prog // unused if bpf stats are off |
2406 | * mov rsi, rbx // prog start time |
2407 | * call __bpf_prog_exit // rcu_read_unlock, preempt_enable and stats math |
2408 | * mov rdi, qword ptr [rbp - 24] // restore skb pointer from stack |
2409 | * mov rsi, qword ptr [rbp - 16] // restore dev pointer from stack |
2410 | * call eth_type_trans+5 // execute body of eth_type_trans |
2411 | * mov qword ptr [rbp - 8], rax // save return value |
2412 | * call __bpf_prog_enter // rcu_read_lock and preempt_disable |
* mov rbx, rax // remember start time if bpf stats are enabled
2414 | * lea rdi, [rbp - 24] // R1==ctx of bpf prog |
2415 | * call addr_of_jited_FEXIT_prog // bpf prog can access skb, dev, return value |
2416 | * movabsq rdi, 64bit_addr_of_struct_bpf_prog // unused if bpf stats are off |
2417 | * mov rsi, rbx // prog start time |
2418 | * call __bpf_prog_exit // rcu_read_unlock, preempt_enable and stats math |
2419 | * mov rax, qword ptr [rbp - 8] // restore eth_type_trans's return value |
2420 | * pop rbx |
2421 | * leave |
2422 | * add rsp, 8 // skip eth_type_trans's frame |
2423 | * ret // return to its caller |
2424 | */ |
2425 | int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end, |
2426 | const struct btf_func_model *m, u32 flags, |
2427 | struct bpf_tramp_links *tlinks, |
2428 | void *func_addr) |
2429 | { |
2430 | int i, ret, nr_regs = m->nr_args, stack_size = 0; |
2431 | int regs_off, nregs_off, ip_off, run_ctx_off, arg_stack_off, rbx_off; |
2432 | struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY]; |
2433 | struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT]; |
2434 | struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN]; |
2435 | void *orig_call = func_addr; |
2436 | u8 **branches = NULL; |
2437 | u8 *prog; |
2438 | bool save_ret; |
2439 | |
2440 | /* extra registers for struct arguments */ |
2441 | for (i = 0; i < m->nr_args; i++) |
2442 | if (m->arg_flags[i] & BTF_FMODEL_STRUCT_ARG) |
2443 | nr_regs += (m->arg_size[i] + 7) / 8 - 1; |
2444 | |
/* x86-64 supports up to MAX_BPF_FUNC_ARGS arguments. The first 6
* are passed through regs, the rest are passed on the stack.
2447 | */ |
2448 | if (nr_regs > MAX_BPF_FUNC_ARGS) |
2449 | return -ENOTSUPP; |
2450 | |
2451 | /* Generated trampoline stack layout: |
2452 | * |
2453 | * RBP + 8 [ return address ] |
2454 | * RBP + 0 [ RBP ] |
2455 | * |
2456 | * RBP - 8 [ return value ] BPF_TRAMP_F_CALL_ORIG or |
2457 | * BPF_TRAMP_F_RET_FENTRY_RET flags |
2458 | * |
2459 | * [ reg_argN ] always |
2460 | * [ ... ] |
2461 | * RBP - regs_off [ reg_arg1 ] program's ctx pointer |
2462 | * |
2463 | * RBP - nregs_off [ regs count ] always |
2464 | * |
2465 | * RBP - ip_off [ traced function ] BPF_TRAMP_F_IP_ARG flag |
2466 | * |
2467 | * RBP - rbx_off [ rbx value ] always |
2468 | * |
2469 | * RBP - run_ctx_off [ bpf_tramp_run_ctx ] |
2470 | * |
2471 | * [ stack_argN ] BPF_TRAMP_F_CALL_ORIG |
2472 | * [ ... ] |
2473 | * [ stack_arg2 ] |
2474 | * RBP - arg_stack_off [ stack_arg1 ] |
2475 | * RSP [ tail_call_cnt ] BPF_TRAMP_F_TAIL_CALL_CTX |
2476 | */ |
2477 | |
2478 | /* room for return value of orig_call or fentry prog */ |
2479 | save_ret = flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET); |
2480 | if (save_ret) |
2481 | stack_size += 8; |
2482 | |
2483 | stack_size += nr_regs * 8; |
2484 | regs_off = stack_size; |
2485 | |
2486 | /* regs count */ |
2487 | stack_size += 8; |
2488 | nregs_off = stack_size; |
2489 | |
2490 | if (flags & BPF_TRAMP_F_IP_ARG) |
2491 | stack_size += 8; /* room for IP address argument */ |
2492 | |
2493 | ip_off = stack_size; |
2494 | |
2495 | stack_size += 8; |
2496 | rbx_off = stack_size; |
2497 | |
2498 | stack_size += (sizeof(struct bpf_tramp_run_ctx) + 7) & ~0x7; |
2499 | run_ctx_off = stack_size; |
2500 | |
2501 | if (nr_regs > 6 && (flags & BPF_TRAMP_F_CALL_ORIG)) { |
/* the space used to pass arguments on the stack */
stack_size += (nr_regs - get_nr_used_regs(m)) * 8;
/* make sure the stack pointer is 16-byte aligned if we
* need to pass arguments on the stack, which means
* [stack_size + 8(rbp) + 8(rip) + 8(origin rip)]
* should be 16-byte aligned. The following code depends on
* stack_size already being 8-byte aligned.
2509 | */ |
2510 | stack_size += (stack_size % 16) ? 0 : 8; |
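/* Example: stack_size == 32 -> add 8 so that 40 + 24 == 64 is
 * 16-byte aligned; stack_size == 40 -> 40 + 24 == 64 already is,
 * so nothing is added.
 */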
2511 | } |
2512 | |
2513 | arg_stack_off = stack_size; |
2514 | |
2515 | if (flags & BPF_TRAMP_F_SKIP_FRAME) { |
2516 | /* skip patched call instruction and point orig_call to actual |
2517 | * body of the kernel function. |
2518 | */ |
if (is_endbr(*(u32 *)orig_call))
2520 | orig_call += ENDBR_INSN_SIZE; |
2521 | orig_call += X86_PATCH_SIZE; |
2522 | } |
2523 | |
2524 | prog = image; |
2525 | |
2526 | EMIT_ENDBR(); |
2527 | /* |
2528 | * This is the direct-call trampoline, as such it needs accounting |
2529 | * for the __fentry__ call. |
2530 | */ |
2531 | x86_call_depth_emit_accounting(pprog: &prog, NULL); |
2532 | EMIT1(0x55); /* push rbp */ |
2533 | EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */ |
if (!is_imm8(stack_size))
2535 | /* sub rsp, stack_size */ |
2536 | EMIT3_off32(0x48, 0x81, 0xEC, stack_size); |
2537 | else |
2538 | /* sub rsp, stack_size */ |
2539 | EMIT4(0x48, 0x83, 0xEC, stack_size); |
2540 | if (flags & BPF_TRAMP_F_TAIL_CALL_CTX) |
2541 | EMIT1(0x50); /* push rax */ |
2542 | /* mov QWORD PTR [rbp - rbx_off], rbx */ |
2543 | emit_stx(pprog: &prog, BPF_DW, BPF_REG_FP, src_reg: BPF_REG_6, off: -rbx_off); |
2544 | |
2545 | /* Store number of argument registers of the traced function: |
2546 | * mov rax, nr_regs |
2547 | * mov QWORD PTR [rbp - nregs_off], rax |
2548 | */ |
2549 | emit_mov_imm64(pprog: &prog, dst_reg: BPF_REG_0, imm32_hi: 0, imm32_lo: (u32) nr_regs); |
2550 | emit_stx(pprog: &prog, BPF_DW, BPF_REG_FP, src_reg: BPF_REG_0, off: -nregs_off); |
2551 | |
2552 | if (flags & BPF_TRAMP_F_IP_ARG) { |
2553 | /* Store IP address of the traced function: |
2554 | * movabsq rax, func_addr |
2555 | * mov QWORD PTR [rbp - ip_off], rax |
2556 | */ |
2557 | emit_mov_imm64(pprog: &prog, dst_reg: BPF_REG_0, imm32_hi: (long) func_addr >> 32, imm32_lo: (u32) (long) func_addr); |
2558 | emit_stx(pprog: &prog, BPF_DW, BPF_REG_FP, src_reg: BPF_REG_0, off: -ip_off); |
2559 | } |
2560 | |
2561 | save_args(m, prog: &prog, stack_size: regs_off, for_call_origin: false); |
2562 | |
2563 | if (flags & BPF_TRAMP_F_CALL_ORIG) { |
2564 | /* arg1: mov rdi, im */ |
2565 | emit_mov_imm64(pprog: &prog, dst_reg: BPF_REG_1, imm32_hi: (long) im >> 32, imm32_lo: (u32) (long) im); |
2566 | if (emit_rsb_call(pprog: &prog, func: __bpf_tramp_enter, ip: prog)) { |
2567 | ret = -EINVAL; |
2568 | goto cleanup; |
2569 | } |
2570 | } |
2571 | |
2572 | if (fentry->nr_links) |
2573 | if (invoke_bpf(m, pprog: &prog, tl: fentry, stack_size: regs_off, run_ctx_off, |
2574 | save_ret: flags & BPF_TRAMP_F_RET_FENTRY_RET)) |
2575 | return -EINVAL; |
2576 | |
2577 | if (fmod_ret->nr_links) { |
branches = kcalloc(fmod_ret->nr_links, sizeof(u8 *),
2579 | GFP_KERNEL); |
2580 | if (!branches) |
2581 | return -ENOMEM; |
2582 | |
2583 | if (invoke_bpf_mod_ret(m, pprog: &prog, tl: fmod_ret, stack_size: regs_off, |
2584 | run_ctx_off, branches)) { |
2585 | ret = -EINVAL; |
2586 | goto cleanup; |
2587 | } |
2588 | } |
2589 | |
2590 | if (flags & BPF_TRAMP_F_CALL_ORIG) { |
2591 | restore_regs(m, prog: &prog, stack_size: regs_off); |
2592 | save_args(m, prog: &prog, stack_size: arg_stack_off, for_call_origin: true); |
2593 | |
2594 | if (flags & BPF_TRAMP_F_TAIL_CALL_CTX) |
2595 | /* Before calling the original function, restore the |
2596 | * tail_call_cnt from stack to rax. |
2597 | */ |
2598 | RESTORE_TAIL_CALL_CNT(stack_size); |
2599 | |
2600 | if (flags & BPF_TRAMP_F_ORIG_STACK) { |
2601 | emit_ldx(pprog: &prog, BPF_DW, dst_reg: BPF_REG_6, BPF_REG_FP, off: 8); |
2602 | EMIT2(0xff, 0xd3); /* call *rbx */ |
2603 | } else { |
2604 | /* call original function */ |
2605 | if (emit_rsb_call(pprog: &prog, func: orig_call, ip: prog)) { |
2606 | ret = -EINVAL; |
2607 | goto cleanup; |
2608 | } |
2609 | } |
2610 | /* remember return value in a stack for bpf prog to access */ |
2611 | emit_stx(pprog: &prog, BPF_DW, BPF_REG_FP, src_reg: BPF_REG_0, off: -8); |
2612 | im->ip_after_call = prog; |
2613 | memcpy(prog, x86_nops[5], X86_PATCH_SIZE); |
2614 | prog += X86_PATCH_SIZE; |
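/* The 5-byte nop emitted above sits between the call to the
 * original function and the fexit programs; the trampoline
 * teardown code can later patch it (via im->ip_after_call) into
 * a jump to im->ip_epilogue so the fexit programs are skipped
 * while the image is being torn down.
 */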
2615 | } |
2616 | |
2617 | if (fmod_ret->nr_links) { |
2618 | /* From Intel 64 and IA-32 Architectures Optimization |
2619 | * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler |
2620 | * Coding Rule 11: All branch targets should be 16-byte |
2621 | * aligned. |
2622 | */ |
2623 | emit_align(pprog: &prog, align: 16); |
2624 | /* Update the branches saved in invoke_bpf_mod_ret with the |
2625 | * aligned address of do_fexit. |
2626 | */ |
2627 | for (i = 0; i < fmod_ret->nr_links; i++) |
2628 | emit_cond_near_jump(pprog: &branches[i], func: prog, ip: branches[i], |
2629 | X86_JNE); |
2630 | } |
2631 | |
2632 | if (fexit->nr_links) |
2633 | if (invoke_bpf(m, pprog: &prog, tl: fexit, stack_size: regs_off, run_ctx_off, save_ret: false)) { |
2634 | ret = -EINVAL; |
2635 | goto cleanup; |
2636 | } |
2637 | |
2638 | if (flags & BPF_TRAMP_F_RESTORE_REGS) |
2639 | restore_regs(m, prog: &prog, stack_size: regs_off); |
2640 | |
2641 | /* This needs to be done regardless. If there were fmod_ret programs, |
2642 | * the return value is only updated on the stack and still needs to be |
2643 | * restored to R0. |
2644 | */ |
2645 | if (flags & BPF_TRAMP_F_CALL_ORIG) { |
2646 | im->ip_epilogue = prog; |
2647 | /* arg1: mov rdi, im */ |
2648 | emit_mov_imm64(pprog: &prog, dst_reg: BPF_REG_1, imm32_hi: (long) im >> 32, imm32_lo: (u32) (long) im); |
2649 | if (emit_rsb_call(pprog: &prog, func: __bpf_tramp_exit, ip: prog)) { |
2650 | ret = -EINVAL; |
2651 | goto cleanup; |
2652 | } |
2653 | } else if (flags & BPF_TRAMP_F_TAIL_CALL_CTX) |
2654 | /* Before running the original function, restore the |
2655 | * tail_call_cnt from stack to rax. |
2656 | */ |
2657 | RESTORE_TAIL_CALL_CNT(stack_size); |
2658 | |
2659 | /* restore return value of orig_call or fentry prog back into RAX */ |
2660 | if (save_ret) |
2661 | emit_ldx(pprog: &prog, BPF_DW, dst_reg: BPF_REG_0, BPF_REG_FP, off: -8); |
2662 | |
2663 | emit_ldx(pprog: &prog, BPF_DW, dst_reg: BPF_REG_6, BPF_REG_FP, off: -rbx_off); |
2664 | EMIT1(0xC9); /* leave */ |
2665 | if (flags & BPF_TRAMP_F_SKIP_FRAME) |
2666 | /* skip our return address and return to parent */ |
2667 | EMIT4(0x48, 0x83, 0xC4, 8); /* add rsp, 8 */ |
2668 | emit_return(pprog: &prog, ip: prog); |
2669 | /* Make sure the trampoline generation logic doesn't overflow */ |
2670 | if (WARN_ON_ONCE(prog > (u8 *)image_end - BPF_INSN_SAFETY)) { |
2671 | ret = -EFAULT; |
2672 | goto cleanup; |
2673 | } |
2674 | ret = prog - (u8 *)image; |
2675 | |
2676 | cleanup: |
kfree(branches);
2678 | return ret; |
2679 | } |
2680 | |
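/*
 * emit_bpf_dispatcher() emits a binary search over the sorted array of
 * program addresses in progs[a..b]: each internal node emits
 * "cmp rdx, progs[pivot]" plus a jg into the upper half, and each leaf
 * emits "cmp rdx, progs[i]; je progs[i]" followed by an indirect
 * "jmp rdx" as the fallback when the target is not one of the
 * dispatched programs.
 */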
2681 | static int emit_bpf_dispatcher(u8 **pprog, int a, int b, s64 *progs, u8 *image, u8 *buf) |
2682 | { |
2683 | u8 *jg_reloc, *prog = *pprog; |
2684 | int pivot, err, jg_bytes = 1; |
2685 | s64 jg_offset; |
2686 | |
2687 | if (a == b) { |
2688 | /* Leaf node of recursion, i.e. not a range of indices |
2689 | * anymore. |
2690 | */ |
2691 | EMIT1(add_1mod(0x48, BPF_REG_3)); /* cmp rdx,func */ |
if (!is_simm32(progs[a]))
2693 | return -1; |
2694 | EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3), |
2695 | progs[a]); |
2696 | err = emit_cond_near_jump(pprog: &prog, /* je func */ |
2697 | func: (void *)progs[a], ip: image + (prog - buf), |
2698 | X86_JE); |
2699 | if (err) |
2700 | return err; |
2701 | |
2702 | emit_indirect_jump(pprog: &prog, reg: 2 /* rdx */, ip: image + (prog - buf)); |
2703 | |
2704 | *pprog = prog; |
2705 | return 0; |
2706 | } |
2707 | |
2708 | /* Not a leaf node, so we pivot, and recursively descend into |
2709 | * the lower and upper ranges. |
2710 | */ |
2711 | pivot = (b - a) / 2; |
2712 | EMIT1(add_1mod(0x48, BPF_REG_3)); /* cmp rdx,func */ |
if (!is_simm32(progs[a + pivot]))
2714 | return -1; |
2715 | EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3), progs[a + pivot]); |
2716 | |
2717 | if (pivot > 2) { /* jg upper_part */ |
2718 | /* Require near jump. */ |
2719 | jg_bytes = 4; |
2720 | EMIT2_off32(0x0F, X86_JG + 0x10, 0); |
2721 | } else { |
2722 | EMIT2(X86_JG, 0); |
2723 | } |
2724 | jg_reloc = prog; |
2725 | |
2726 | err = emit_bpf_dispatcher(pprog: &prog, a, b: a + pivot, /* emit lower_part */ |
2727 | progs, image, buf); |
2728 | if (err) |
2729 | return err; |
2730 | |
2731 | /* From Intel 64 and IA-32 Architectures Optimization |
2732 | * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler |
2733 | * Coding Rule 11: All branch targets should be 16-byte |
2734 | * aligned. |
2735 | */ |
2736 | emit_align(pprog: &prog, align: 16); |
2737 | jg_offset = prog - jg_reloc; |
emit_code(jg_reloc - jg_bytes, jg_offset, jg_bytes);
2739 | |
2740 | err = emit_bpf_dispatcher(pprog: &prog, a: a + pivot + 1, /* emit upper_part */ |
2741 | b, progs, image, buf); |
2742 | if (err) |
2743 | return err; |
2744 | |
2745 | *pprog = prog; |
2746 | return 0; |
2747 | } |
2748 | |
2749 | static int cmp_ips(const void *a, const void *b) |
2750 | { |
2751 | const s64 *ipa = a; |
2752 | const s64 *ipb = b; |
2753 | |
2754 | if (*ipa > *ipb) |
2755 | return 1; |
2756 | if (*ipa < *ipb) |
2757 | return -1; |
2758 | return 0; |
2759 | } |
2760 | |
2761 | int arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_funcs) |
2762 | { |
2763 | u8 *prog = buf; |
2764 | |
sort(funcs, num_funcs, sizeof(funcs[0]), cmp_ips, NULL);
return emit_bpf_dispatcher(&prog, 0, num_funcs - 1, funcs, image, buf);
2767 | } |
2768 | |
2769 | struct x64_jit_data { |
struct bpf_binary_header *rw_header;
struct bpf_binary_header *header;
2772 | int *addrs; |
2773 | u8 *image; |
2774 | int proglen; |
2775 | struct jit_context ctx; |
2776 | }; |
2777 | |
2778 | #define MAX_PASSES 20 |
2779 | #define PADDING_PASSES (MAX_PASSES - 5) |
2780 | |
2781 | struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) |
2782 | { |
struct bpf_binary_header *rw_header = NULL;
struct bpf_binary_header *header = NULL;
2785 | struct bpf_prog *tmp, *orig_prog = prog; |
2786 | struct x64_jit_data *jit_data; |
2787 | int proglen, oldproglen = 0; |
2788 | struct jit_context ctx = {}; |
2789 | bool tmp_blinded = false; |
bool extra_pass = false;
2791 | bool padding = false; |
2792 | u8 *rw_image = NULL; |
2793 | u8 *image = NULL; |
2794 | int *addrs; |
2795 | int pass; |
2796 | int i; |
2797 | |
2798 | if (!prog->jit_requested) |
2799 | return orig_prog; |
2800 | |
tmp = bpf_jit_blind_constants(prog);
2802 | /* |
2803 | * If blinding was requested and we failed during blinding, |
2804 | * we must fall back to the interpreter. |
2805 | */ |
if (IS_ERR(tmp))
2807 | return orig_prog; |
2808 | if (tmp != prog) { |
2809 | tmp_blinded = true; |
2810 | prog = tmp; |
2811 | } |
2812 | |
2813 | jit_data = prog->aux->jit_data; |
2814 | if (!jit_data) { |
jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
2816 | if (!jit_data) { |
2817 | prog = orig_prog; |
2818 | goto out; |
2819 | } |
2820 | prog->aux->jit_data = jit_data; |
2821 | } |
2822 | addrs = jit_data->addrs; |
2823 | if (addrs) { |
2824 | ctx = jit_data->ctx; |
2825 | oldproglen = jit_data->proglen; |
2826 | image = jit_data->image; |
2827 | header = jit_data->header; |
2828 | rw_header = jit_data->rw_header; |
2829 | rw_image = (void *)rw_header + ((void *)image - (void *)header); |
2830 | extra_pass = true; |
2831 | padding = true; |
2832 | goto skip_init_addrs; |
2833 | } |
addrs = kvmalloc_array(prog->len + 1, sizeof(*addrs), GFP_KERNEL);
2835 | if (!addrs) { |
2836 | prog = orig_prog; |
2837 | goto out_addrs; |
2838 | } |
2839 | |
2840 | /* |
* Before the first pass, make a rough estimate of addrs[]:
* each BPF instruction is translated to less than 64 bytes.
2843 | */ |
2844 | for (proglen = 0, i = 0; i <= prog->len; i++) { |
2845 | proglen += 64; |
2846 | addrs[i] = proglen; |
2847 | } |
2848 | ctx.cleanup_addr = proglen; |
2849 | skip_init_addrs: |
2850 | |
2851 | /* |
2852 | * JITed image shrinks with every pass and the loop iterates |
2853 | * until the image stops shrinking. Very large BPF programs |
2854 | * may converge on the last pass. In such case do one more |
2855 | * pass to emit the final image. |
2856 | */ |
2857 | for (pass = 0; pass < MAX_PASSES || image; pass++) { |
2858 | if (!padding && pass >= PADDING_PASSES) |
2859 | padding = true; |
2860 | proglen = do_jit(bpf_prog: prog, addrs, image, rw_image, oldproglen, ctx: &ctx, jmp_padding: padding); |
2861 | if (proglen <= 0) { |
2862 | out_image: |
2863 | image = NULL; |
2864 | if (header) { |
2865 | bpf_arch_text_copy(dst: &header->size, src: &rw_header->size, |
2866 | len: sizeof(rw_header->size)); |
2867 | bpf_jit_binary_pack_free(ro_header: header, rw_header); |
2868 | } |
2869 | /* Fall back to interpreter mode */ |
2870 | prog = orig_prog; |
2871 | if (extra_pass) { |
2872 | prog->bpf_func = NULL; |
2873 | prog->jited = 0; |
2874 | prog->jited_len = 0; |
2875 | } |
2876 | goto out_addrs; |
2877 | } |
2878 | if (image) { |
2879 | if (proglen != oldproglen) { |
2880 | pr_err("bpf_jit: proglen=%d != oldproglen=%d\n" , |
2881 | proglen, oldproglen); |
2882 | goto out_image; |
2883 | } |
2884 | break; |
2885 | } |
2886 | if (proglen == oldproglen) { |
2887 | /* |
2888 | * The number of entries in extable is the number of BPF_LDX |
2889 | * insns that access kernel memory via "pointer to BTF type". |
2890 | * The verifier changed their opcode from LDX|MEM|size |
2891 | * to LDX|PROBE_MEM|size to make JITing easier. |
2892 | */ |
2893 | u32 align = __alignof__(struct exception_table_entry); |
2894 | u32 extable_size = prog->aux->num_exentries * |
2895 | sizeof(struct exception_table_entry); |
2896 | |
2897 | /* allocate module memory for x86 insns and extable */ |
header = bpf_jit_binary_pack_alloc(roundup(proglen, align) + extable_size,
&image, align, &rw_header, &rw_image,
jit_fill_hole);
2901 | if (!header) { |
2902 | prog = orig_prog; |
2903 | goto out_addrs; |
2904 | } |
2905 | prog->aux->extable = (void *) image + roundup(proglen, align); |
2906 | } |
2907 | oldproglen = proglen; |
2908 | cond_resched(); |
2909 | } |
2910 | |
2911 | if (bpf_jit_enable > 1) |
bpf_jit_dump(prog->len, proglen, pass + 1, rw_image);
2913 | |
2914 | if (image) { |
2915 | if (!prog->is_func || extra_pass) { |
2916 | /* |
2917 | * bpf_jit_binary_pack_finalize fails in two scenarios: |
2918 | * 1) header is not pointing to proper module memory; |
2919 | * 2) the arch doesn't support bpf_arch_text_copy(). |
2920 | * |
2921 | * Both cases are serious bugs and justify WARN_ON. |
2922 | */ |
2923 | if (WARN_ON(bpf_jit_binary_pack_finalize(prog, header, rw_header))) { |
2924 | /* header has been freed */ |
2925 | header = NULL; |
2926 | goto out_image; |
2927 | } |
2928 | |
2929 | bpf_tail_call_direct_fixup(prog); |
2930 | } else { |
2931 | jit_data->addrs = addrs; |
2932 | jit_data->ctx = ctx; |
2933 | jit_data->proglen = proglen; |
2934 | jit_data->image = image; |
2935 | jit_data->header = header; |
2936 | jit_data->rw_header = rw_header; |
2937 | } |
2938 | prog->bpf_func = (void *)image; |
2939 | prog->jited = 1; |
2940 | prog->jited_len = proglen; |
2941 | } else { |
2942 | prog = orig_prog; |
2943 | } |
2944 | |
2945 | if (!image || !prog->is_func || extra_pass) { |
2946 | if (image) |
bpf_prog_fill_jited_linfo(prog, addrs + 1);
2948 | out_addrs: |
kvfree(addrs);
kfree(jit_data);
2951 | prog->aux->jit_data = NULL; |
2952 | } |
2953 | out: |
2954 | if (tmp_blinded) |
bpf_jit_prog_release_other(prog, prog == orig_prog ?
2956 | tmp : orig_prog); |
2957 | return prog; |
2958 | } |
2959 | |
2960 | bool bpf_jit_supports_kfunc_call(void) |
2961 | { |
2962 | return true; |
2963 | } |
2964 | |
2965 | void *bpf_arch_text_copy(void *dst, void *src, size_t len) |
2966 | { |
if (text_poke_copy(dst, src, len) == NULL)
return ERR_PTR(-EINVAL);
2969 | return dst; |
2970 | } |
2971 | |
2972 | /* Indicate the JIT backend supports mixing bpf2bpf and tailcalls. */ |
2973 | bool bpf_jit_supports_subprog_tailcalls(void) |
2974 | { |
2975 | return true; |
2976 | } |
2977 | |
2978 | void bpf_jit_free(struct bpf_prog *prog) |
2979 | { |
2980 | if (prog->jited) { |
2981 | struct x64_jit_data *jit_data = prog->aux->jit_data; |
2982 | struct bpf_binary_header *hdr; |
2983 | |
2984 | /* |
2985 | * If we fail the final pass of JIT (from jit_subprogs), |
2986 | * the program may not be finalized yet. Call finalize here |
2987 | * before freeing it. |
2988 | */ |
2989 | if (jit_data) { |
bpf_jit_binary_pack_finalize(prog, jit_data->header,
jit_data->rw_header);
kvfree(jit_data->addrs);
kfree(jit_data);
2994 | } |
hdr = bpf_jit_binary_pack_hdr(prog);
bpf_jit_binary_pack_free(hdr, NULL);
2997 | WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(prog)); |
2998 | } |
2999 | |
bpf_prog_unlock_free(prog);
3001 | } |
3002 | |
3003 | bool bpf_jit_supports_exceptions(void) |
3004 | { |
3005 | /* We unwind through both kernel frames (starting from within bpf_throw |
3006 | * call) and BPF frames. Therefore we require ORC unwinder to be enabled |
3007 | * to walk kernel frames and reach BPF frames in the stack trace. |
3008 | */ |
3009 | return IS_ENABLED(CONFIG_UNWINDER_ORC); |
3010 | } |
3011 | |
3012 | void arch_bpf_stack_walk(bool (*consume_fn)(void *cookie, u64 ip, u64 sp, u64 bp), void *cookie) |
3013 | { |
3014 | #if defined(CONFIG_UNWINDER_ORC) |
3015 | struct unwind_state state; |
3016 | unsigned long addr; |
3017 | |
3018 | for (unwind_start(state: &state, current, NULL, NULL); !unwind_done(state: &state); |
3019 | unwind_next_frame(state: &state)) { |
3020 | addr = unwind_get_return_address(state: &state); |
3021 | if (!addr || !consume_fn(cookie, (u64)addr, (u64)state.sp, (u64)state.bp)) |
3022 | break; |
3023 | } |
3024 | return; |
3025 | #endif |
3026 | WARN(1, "verification of programs using bpf_throw should have failed\n" ); |
3027 | } |
3028 | |