1 | // SPDX-License-Identifier: GPL-2.0-or-later |
2 | /* |
3 | * Copyright (C) 2015 Josh Poimboeuf <jpoimboe@redhat.com> |
4 | */ |
5 | |
6 | #include <stdio.h> |
7 | #include <stdlib.h> |
8 | |
9 | #define unlikely(cond) (cond) |
10 | #include <asm/insn.h> |
11 | #include "../../../arch/x86/lib/inat.c" |
12 | #include "../../../arch/x86/lib/insn.c" |
13 | |
14 | #define CONFIG_64BIT 1 |
15 | #include <asm/nops.h> |
16 | |
17 | #include <asm/orc_types.h> |
18 | #include <objtool/check.h> |
19 | #include <objtool/elf.h> |
20 | #include <objtool/arch.h> |
21 | #include <objtool/warn.h> |
22 | #include <objtool/endianness.h> |
23 | #include <objtool/builtin.h> |
24 | #include <arch/elf.h> |
25 | |
int arch_ftrace_match(char *name)
{
	/* On x86 the compiler-emitted ftrace call target is __fentry__. */
	return strcmp(name, "__fentry__") == 0;
}
30 | |
31 | static int is_x86_64(const struct elf *elf) |
32 | { |
33 | switch (elf->ehdr.e_machine) { |
34 | case EM_X86_64: |
35 | return 1; |
36 | case EM_386: |
37 | return 0; |
38 | default: |
39 | WARN("unexpected ELF machine type %d" , elf->ehdr.e_machine); |
40 | return -1; |
41 | } |
42 | } |
43 | |
44 | bool arch_callee_saved_reg(unsigned char reg) |
45 | { |
46 | switch (reg) { |
47 | case CFI_BP: |
48 | case CFI_BX: |
49 | case CFI_R12: |
50 | case CFI_R13: |
51 | case CFI_R14: |
52 | case CFI_R15: |
53 | return true; |
54 | |
55 | case CFI_AX: |
56 | case CFI_CX: |
57 | case CFI_DX: |
58 | case CFI_SI: |
59 | case CFI_DI: |
60 | case CFI_SP: |
61 | case CFI_R8: |
62 | case CFI_R9: |
63 | case CFI_R10: |
64 | case CFI_R11: |
65 | case CFI_RA: |
66 | default: |
67 | return false; |
68 | } |
69 | } |
70 | |
/*
 * A PC-relative relocation is taken relative to the end of the 4-byte
 * displacement field, hence the +4 adjustment to the addend.
 */
unsigned long arch_dest_reloc_offset(int addend)
{
	return (unsigned long)(addend + 4);
}
75 | |
76 | unsigned long arch_jump_destination(struct instruction *insn) |
77 | { |
78 | return insn->offset + insn->len + insn->immediate; |
79 | } |
80 | |
81 | bool arch_pc_relative_reloc(struct reloc *reloc) |
82 | { |
83 | /* |
84 | * All relocation types where P (the address of the target) |
85 | * is included in the computation. |
86 | */ |
87 | switch (reloc_type(reloc)) { |
88 | case R_X86_64_PC8: |
89 | case R_X86_64_PC16: |
90 | case R_X86_64_PC32: |
91 | case R_X86_64_PC64: |
92 | |
93 | case R_X86_64_PLT32: |
94 | case R_X86_64_GOTPC32: |
95 | case R_X86_64_GOTPCREL: |
96 | return true; |
97 | |
98 | default: |
99 | break; |
100 | } |
101 | |
102 | return false; |
103 | } |
104 | |
/*
 * Allocate a stack_op, append it to the instruction's ops list, and run
 * the caller-supplied braced initializer exactly once.
 *
 * The "else for" construct makes ADD_OP(op) { ... } behave like a single
 * statement: the for-clause links the new op into the list and advances
 * the tail pointer, the braced body executes once, then "op = NULL"
 * terminates the loop.  On allocation failure the enclosing function
 * returns -1.  Requires a local "struct stack_op **ops_list" in scope.
 */
#define ADD_OP(op) \
	if (!(op = calloc(1, sizeof(*op)))) \
		return -1; \
	else for (*ops_list = op, ops_list = &op->next; op; op = NULL)

/*
 * Helpers to decode ModRM/SIB:
 *
 * r/m| AX  CX  DX  BX |  SP |  BP |  SI  DI |
 *    | R8  R9 R10 R11 | R12 | R13 | R14 R15 |
 * Mod+----------------+-----+-----+---------+
 * 00 |    [r/m]       |[SIB]|[IP+]|  [r/m]  |
 * 01 |  [r/m + d8]    |[S+d]|   [r/m + d8]  |
 * 10 |  [r/m + d32]   |[S+D]|   [r/m + d32] |
 * 11 |                   r/ m               |
 */

/* ModRM.mod: values 0-2 are memory forms, 3 is a register operand. */
#define mod_is_mem()	(modrm_mod != 3)
#define mod_is_reg()	(modrm_mod == 3)

/* mod=00 with r/m=101 (the BP encoding) is RIP-relative in 64-bit mode. */
#define is_RIP()	((modrm_rm & 7) == CFI_BP && modrm_mod == 0)

/* r/m=100 (the SP encoding) in a memory form means a SIB byte follows. */
#define have_SIB()	((modrm_rm & 7) == CFI_SP && mod_is_mem())

/*
 * Does the r/m operand name register @reg?  With a SIB byte only the
 * trivial form counts: base == @reg with no index (index encoding SP
 * means "none").
 */
#define rm_is(reg)	(have_SIB() ? \
			 sib_base == (reg) && sib_index == CFI_SP : \
			 modrm_rm == (reg))

/* @reg used as a memory base (excluding RIP-relative) / as a register. */
#define rm_is_mem(reg)	(mod_is_mem() && !is_RIP() && rm_is(reg))
#define rm_is_reg(reg)	(mod_is_reg() && modrm_rm == (reg))
134 | |
135 | static bool has_notrack_prefix(struct insn *insn) |
136 | { |
137 | int i; |
138 | |
139 | for (i = 0; i < insn->prefixes.nbytes; i++) { |
140 | if (insn->prefixes.bytes[i] == 0x3e) |
141 | return true; |
142 | } |
143 | |
144 | return false; |
145 | } |
146 | |
147 | int arch_decode_instruction(struct objtool_file *file, const struct section *sec, |
148 | unsigned long offset, unsigned int maxlen, |
149 | struct instruction *insn) |
150 | { |
151 | struct stack_op **ops_list = &insn->stack_ops; |
152 | const struct elf *elf = file->elf; |
153 | struct insn ins; |
154 | int x86_64, ret; |
155 | unsigned char op1, op2, op3, prefix, |
156 | rex = 0, rex_b = 0, rex_r = 0, rex_w = 0, rex_x = 0, |
157 | modrm = 0, modrm_mod = 0, modrm_rm = 0, modrm_reg = 0, |
158 | sib = 0, /* sib_scale = 0, */ sib_index = 0, sib_base = 0; |
159 | struct stack_op *op = NULL; |
160 | struct symbol *sym; |
161 | u64 imm; |
162 | |
163 | x86_64 = is_x86_64(elf); |
164 | if (x86_64 == -1) |
165 | return -1; |
166 | |
167 | ret = insn_decode(insn: &ins, kaddr: sec->data->d_buf + offset, buf_len: maxlen, |
168 | m: x86_64 ? INSN_MODE_64 : INSN_MODE_32); |
169 | if (ret < 0) { |
170 | WARN("can't decode instruction at %s:0x%lx" , sec->name, offset); |
171 | return -1; |
172 | } |
173 | |
174 | insn->len = ins.length; |
175 | insn->type = INSN_OTHER; |
176 | |
177 | if (ins.vex_prefix.nbytes) |
178 | return 0; |
179 | |
180 | prefix = ins.prefixes.bytes[0]; |
181 | |
182 | op1 = ins.opcode.bytes[0]; |
183 | op2 = ins.opcode.bytes[1]; |
184 | op3 = ins.opcode.bytes[2]; |
185 | |
186 | if (ins.rex_prefix.nbytes) { |
187 | rex = ins.rex_prefix.bytes[0]; |
188 | rex_w = X86_REX_W(rex) >> 3; |
189 | rex_r = X86_REX_R(rex) >> 2; |
190 | rex_x = X86_REX_X(rex) >> 1; |
191 | rex_b = X86_REX_B(rex); |
192 | } |
193 | |
194 | if (ins.modrm.nbytes) { |
195 | modrm = ins.modrm.bytes[0]; |
196 | modrm_mod = X86_MODRM_MOD(modrm); |
197 | modrm_reg = X86_MODRM_REG(modrm) + 8*rex_r; |
198 | modrm_rm = X86_MODRM_RM(modrm) + 8*rex_b; |
199 | } |
200 | |
201 | if (ins.sib.nbytes) { |
202 | sib = ins.sib.bytes[0]; |
203 | /* sib_scale = X86_SIB_SCALE(sib); */ |
204 | sib_index = X86_SIB_INDEX(sib) + 8*rex_x; |
205 | sib_base = X86_SIB_BASE(sib) + 8*rex_b; |
206 | } |
207 | |
208 | switch (op1) { |
209 | |
210 | case 0x1: |
211 | case 0x29: |
212 | if (rex_w && rm_is_reg(CFI_SP)) { |
213 | |
214 | /* add/sub reg, %rsp */ |
215 | ADD_OP(op) { |
216 | op->src.type = OP_SRC_ADD; |
217 | op->src.reg = modrm_reg; |
218 | op->dest.type = OP_DEST_REG; |
219 | op->dest.reg = CFI_SP; |
220 | } |
221 | } |
222 | break; |
223 | |
224 | case 0x50 ... 0x57: |
225 | |
226 | /* push reg */ |
227 | ADD_OP(op) { |
228 | op->src.type = OP_SRC_REG; |
229 | op->src.reg = (op1 & 0x7) + 8*rex_b; |
230 | op->dest.type = OP_DEST_PUSH; |
231 | } |
232 | |
233 | break; |
234 | |
235 | case 0x58 ... 0x5f: |
236 | |
237 | /* pop reg */ |
238 | ADD_OP(op) { |
239 | op->src.type = OP_SRC_POP; |
240 | op->dest.type = OP_DEST_REG; |
241 | op->dest.reg = (op1 & 0x7) + 8*rex_b; |
242 | } |
243 | |
244 | break; |
245 | |
246 | case 0x68: |
247 | case 0x6a: |
248 | /* push immediate */ |
249 | ADD_OP(op) { |
250 | op->src.type = OP_SRC_CONST; |
251 | op->dest.type = OP_DEST_PUSH; |
252 | } |
253 | break; |
254 | |
255 | case 0x70 ... 0x7f: |
256 | insn->type = INSN_JUMP_CONDITIONAL; |
257 | break; |
258 | |
259 | case 0x80 ... 0x83: |
260 | /* |
261 | * 1000 00sw : mod OP r/m : immediate |
262 | * |
263 | * s - sign extend immediate |
264 | * w - imm8 / imm32 |
265 | * |
266 | * OP: 000 ADD 100 AND |
267 | * 001 OR 101 SUB |
268 | * 010 ADC 110 XOR |
269 | * 011 SBB 111 CMP |
270 | */ |
271 | |
272 | /* 64bit only */ |
273 | if (!rex_w) |
274 | break; |
275 | |
276 | /* %rsp target only */ |
277 | if (!rm_is_reg(CFI_SP)) |
278 | break; |
279 | |
280 | imm = ins.immediate.value; |
281 | if (op1 & 2) { /* sign extend */ |
282 | if (op1 & 1) { /* imm32 */ |
283 | imm <<= 32; |
284 | imm = (s64)imm >> 32; |
285 | } else { /* imm8 */ |
286 | imm <<= 56; |
287 | imm = (s64)imm >> 56; |
288 | } |
289 | } |
290 | |
291 | switch (modrm_reg & 7) { |
292 | case 5: |
293 | imm = -imm; |
294 | fallthrough; |
295 | case 0: |
296 | /* add/sub imm, %rsp */ |
297 | ADD_OP(op) { |
298 | op->src.type = OP_SRC_ADD; |
299 | op->src.reg = CFI_SP; |
300 | op->src.offset = imm; |
301 | op->dest.type = OP_DEST_REG; |
302 | op->dest.reg = CFI_SP; |
303 | } |
304 | break; |
305 | |
306 | case 4: |
307 | /* and imm, %rsp */ |
308 | ADD_OP(op) { |
309 | op->src.type = OP_SRC_AND; |
310 | op->src.reg = CFI_SP; |
311 | op->src.offset = ins.immediate.value; |
312 | op->dest.type = OP_DEST_REG; |
313 | op->dest.reg = CFI_SP; |
314 | } |
315 | break; |
316 | |
317 | default: |
318 | /* WARN ? */ |
319 | break; |
320 | } |
321 | |
322 | break; |
323 | |
324 | case 0x89: |
325 | if (!rex_w) |
326 | break; |
327 | |
328 | if (modrm_reg == CFI_SP) { |
329 | |
330 | if (mod_is_reg()) { |
331 | /* mov %rsp, reg */ |
332 | ADD_OP(op) { |
333 | op->src.type = OP_SRC_REG; |
334 | op->src.reg = CFI_SP; |
335 | op->dest.type = OP_DEST_REG; |
336 | op->dest.reg = modrm_rm; |
337 | } |
338 | break; |
339 | |
340 | } else { |
341 | /* skip RIP relative displacement */ |
342 | if (is_RIP()) |
343 | break; |
344 | |
345 | /* skip nontrivial SIB */ |
346 | if (have_SIB()) { |
347 | modrm_rm = sib_base; |
348 | if (sib_index != CFI_SP) |
349 | break; |
350 | } |
351 | |
352 | /* mov %rsp, disp(%reg) */ |
353 | ADD_OP(op) { |
354 | op->src.type = OP_SRC_REG; |
355 | op->src.reg = CFI_SP; |
356 | op->dest.type = OP_DEST_REG_INDIRECT; |
357 | op->dest.reg = modrm_rm; |
358 | op->dest.offset = ins.displacement.value; |
359 | } |
360 | break; |
361 | } |
362 | |
363 | break; |
364 | } |
365 | |
366 | if (rm_is_reg(CFI_SP)) { |
367 | |
368 | /* mov reg, %rsp */ |
369 | ADD_OP(op) { |
370 | op->src.type = OP_SRC_REG; |
371 | op->src.reg = modrm_reg; |
372 | op->dest.type = OP_DEST_REG; |
373 | op->dest.reg = CFI_SP; |
374 | } |
375 | break; |
376 | } |
377 | |
378 | fallthrough; |
379 | case 0x88: |
380 | if (!rex_w) |
381 | break; |
382 | |
383 | if (rm_is_mem(CFI_BP)) { |
384 | |
385 | /* mov reg, disp(%rbp) */ |
386 | ADD_OP(op) { |
387 | op->src.type = OP_SRC_REG; |
388 | op->src.reg = modrm_reg; |
389 | op->dest.type = OP_DEST_REG_INDIRECT; |
390 | op->dest.reg = CFI_BP; |
391 | op->dest.offset = ins.displacement.value; |
392 | } |
393 | break; |
394 | } |
395 | |
396 | if (rm_is_mem(CFI_SP)) { |
397 | |
398 | /* mov reg, disp(%rsp) */ |
399 | ADD_OP(op) { |
400 | op->src.type = OP_SRC_REG; |
401 | op->src.reg = modrm_reg; |
402 | op->dest.type = OP_DEST_REG_INDIRECT; |
403 | op->dest.reg = CFI_SP; |
404 | op->dest.offset = ins.displacement.value; |
405 | } |
406 | break; |
407 | } |
408 | |
409 | break; |
410 | |
411 | case 0x8b: |
412 | if (!rex_w) |
413 | break; |
414 | |
415 | if (rm_is_mem(CFI_BP)) { |
416 | |
417 | /* mov disp(%rbp), reg */ |
418 | ADD_OP(op) { |
419 | op->src.type = OP_SRC_REG_INDIRECT; |
420 | op->src.reg = CFI_BP; |
421 | op->src.offset = ins.displacement.value; |
422 | op->dest.type = OP_DEST_REG; |
423 | op->dest.reg = modrm_reg; |
424 | } |
425 | break; |
426 | } |
427 | |
428 | if (rm_is_mem(CFI_SP)) { |
429 | |
430 | /* mov disp(%rsp), reg */ |
431 | ADD_OP(op) { |
432 | op->src.type = OP_SRC_REG_INDIRECT; |
433 | op->src.reg = CFI_SP; |
434 | op->src.offset = ins.displacement.value; |
435 | op->dest.type = OP_DEST_REG; |
436 | op->dest.reg = modrm_reg; |
437 | } |
438 | break; |
439 | } |
440 | |
441 | break; |
442 | |
443 | case 0x8d: |
444 | if (mod_is_reg()) { |
445 | WARN("invalid LEA encoding at %s:0x%lx" , sec->name, offset); |
446 | break; |
447 | } |
448 | |
449 | /* skip non 64bit ops */ |
450 | if (!rex_w) |
451 | break; |
452 | |
453 | /* skip RIP relative displacement */ |
454 | if (is_RIP()) |
455 | break; |
456 | |
457 | /* skip nontrivial SIB */ |
458 | if (have_SIB()) { |
459 | modrm_rm = sib_base; |
460 | if (sib_index != CFI_SP) |
461 | break; |
462 | } |
463 | |
464 | /* lea disp(%src), %dst */ |
465 | ADD_OP(op) { |
466 | op->src.offset = ins.displacement.value; |
467 | if (!op->src.offset) { |
468 | /* lea (%src), %dst */ |
469 | op->src.type = OP_SRC_REG; |
470 | } else { |
471 | /* lea disp(%src), %dst */ |
472 | op->src.type = OP_SRC_ADD; |
473 | } |
474 | op->src.reg = modrm_rm; |
475 | op->dest.type = OP_DEST_REG; |
476 | op->dest.reg = modrm_reg; |
477 | } |
478 | break; |
479 | |
480 | case 0x8f: |
481 | /* pop to mem */ |
482 | ADD_OP(op) { |
483 | op->src.type = OP_SRC_POP; |
484 | op->dest.type = OP_DEST_MEM; |
485 | } |
486 | break; |
487 | |
488 | case 0x90: |
489 | insn->type = INSN_NOP; |
490 | break; |
491 | |
492 | case 0x9c: |
493 | /* pushf */ |
494 | ADD_OP(op) { |
495 | op->src.type = OP_SRC_CONST; |
496 | op->dest.type = OP_DEST_PUSHF; |
497 | } |
498 | break; |
499 | |
500 | case 0x9d: |
501 | /* popf */ |
502 | ADD_OP(op) { |
503 | op->src.type = OP_SRC_POPF; |
504 | op->dest.type = OP_DEST_MEM; |
505 | } |
506 | break; |
507 | |
508 | case 0x0f: |
509 | |
510 | if (op2 == 0x01) { |
511 | |
512 | switch (insn_last_prefix_id(insn: &ins)) { |
513 | case INAT_PFX_REPE: |
514 | case INAT_PFX_REPNE: |
515 | if (modrm == 0xca) |
516 | /* eretu/erets */ |
517 | insn->type = INSN_CONTEXT_SWITCH; |
518 | break; |
519 | default: |
520 | if (modrm == 0xca) |
521 | insn->type = INSN_CLAC; |
522 | else if (modrm == 0xcb) |
523 | insn->type = INSN_STAC; |
524 | break; |
525 | } |
526 | } else if (op2 >= 0x80 && op2 <= 0x8f) { |
527 | |
528 | insn->type = INSN_JUMP_CONDITIONAL; |
529 | |
530 | } else if (op2 == 0x05 || op2 == 0x07 || op2 == 0x34 || |
531 | op2 == 0x35) { |
532 | |
533 | /* sysenter, sysret */ |
534 | insn->type = INSN_CONTEXT_SWITCH; |
535 | |
536 | } else if (op2 == 0x0b || op2 == 0xb9) { |
537 | |
538 | /* ud2 */ |
539 | insn->type = INSN_BUG; |
540 | |
541 | } else if (op2 == 0x0d || op2 == 0x1f) { |
542 | |
543 | /* nopl/nopw */ |
544 | insn->type = INSN_NOP; |
545 | |
546 | } else if (op2 == 0x1e) { |
547 | |
548 | if (prefix == 0xf3 && (modrm == 0xfa || modrm == 0xfb)) |
549 | insn->type = INSN_ENDBR; |
550 | |
551 | |
552 | } else if (op2 == 0x38 && op3 == 0xf8) { |
553 | if (ins.prefixes.nbytes == 1 && |
554 | ins.prefixes.bytes[0] == 0xf2) { |
555 | /* ENQCMD cannot be used in the kernel. */ |
556 | WARN("ENQCMD instruction at %s:%lx" , sec->name, |
557 | offset); |
558 | } |
559 | |
560 | } else if (op2 == 0xa0 || op2 == 0xa8) { |
561 | |
562 | /* push fs/gs */ |
563 | ADD_OP(op) { |
564 | op->src.type = OP_SRC_CONST; |
565 | op->dest.type = OP_DEST_PUSH; |
566 | } |
567 | |
568 | } else if (op2 == 0xa1 || op2 == 0xa9) { |
569 | |
570 | /* pop fs/gs */ |
571 | ADD_OP(op) { |
572 | op->src.type = OP_SRC_POP; |
573 | op->dest.type = OP_DEST_MEM; |
574 | } |
575 | } |
576 | |
577 | break; |
578 | |
579 | case 0xc9: |
580 | /* |
581 | * leave |
582 | * |
583 | * equivalent to: |
584 | * mov bp, sp |
585 | * pop bp |
586 | */ |
587 | ADD_OP(op) { |
588 | op->src.type = OP_SRC_REG; |
589 | op->src.reg = CFI_BP; |
590 | op->dest.type = OP_DEST_REG; |
591 | op->dest.reg = CFI_SP; |
592 | } |
593 | ADD_OP(op) { |
594 | op->src.type = OP_SRC_POP; |
595 | op->dest.type = OP_DEST_REG; |
596 | op->dest.reg = CFI_BP; |
597 | } |
598 | break; |
599 | |
600 | case 0xcc: |
601 | /* int3 */ |
602 | insn->type = INSN_TRAP; |
603 | break; |
604 | |
605 | case 0xe3: |
606 | /* jecxz/jrcxz */ |
607 | insn->type = INSN_JUMP_CONDITIONAL; |
608 | break; |
609 | |
610 | case 0xe9: |
611 | case 0xeb: |
612 | insn->type = INSN_JUMP_UNCONDITIONAL; |
613 | break; |
614 | |
615 | case 0xc2: |
616 | case 0xc3: |
617 | insn->type = INSN_RETURN; |
618 | break; |
619 | |
620 | case 0xc7: /* mov imm, r/m */ |
621 | if (!opts.noinstr) |
622 | break; |
623 | |
624 | if (ins.length == 3+4+4 && !strncmp(sec->name, ".init.text" , 10)) { |
625 | struct reloc *immr, *disp; |
626 | struct symbol *func; |
627 | int idx; |
628 | |
629 | immr = find_reloc_by_dest(elf, (void *)sec, offset+3); |
630 | disp = find_reloc_by_dest(elf, (void *)sec, offset+7); |
631 | |
632 | if (!immr || strcmp(immr->sym->name, "pv_ops" )) |
633 | break; |
634 | |
635 | idx = (reloc_addend(immr) + 8) / sizeof(void *); |
636 | |
637 | func = disp->sym; |
638 | if (disp->sym->type == STT_SECTION) |
639 | func = find_symbol_by_offset(disp->sym->sec, reloc_addend(disp)); |
640 | if (!func) { |
641 | WARN("no func for pv_ops[]" ); |
642 | return -1; |
643 | } |
644 | |
645 | objtool_pv_add(file, idx, func); |
646 | } |
647 | |
648 | break; |
649 | |
650 | case 0xcf: /* iret */ |
651 | /* |
652 | * Handle sync_core(), which has an IRET to self. |
653 | * All other IRET are in STT_NONE entry code. |
654 | */ |
655 | sym = find_symbol_containing(sec, offset); |
656 | if (sym && sym->type == STT_FUNC) { |
657 | ADD_OP(op) { |
658 | /* add $40, %rsp */ |
659 | op->src.type = OP_SRC_ADD; |
660 | op->src.reg = CFI_SP; |
661 | op->src.offset = 5*8; |
662 | op->dest.type = OP_DEST_REG; |
663 | op->dest.reg = CFI_SP; |
664 | } |
665 | break; |
666 | } |
667 | |
668 | fallthrough; |
669 | |
670 | case 0xca: /* retf */ |
671 | case 0xcb: /* retf */ |
672 | insn->type = INSN_CONTEXT_SWITCH; |
673 | break; |
674 | |
675 | case 0xe0: /* loopne */ |
676 | case 0xe1: /* loope */ |
677 | case 0xe2: /* loop */ |
678 | insn->type = INSN_JUMP_CONDITIONAL; |
679 | break; |
680 | |
681 | case 0xe8: |
682 | insn->type = INSN_CALL; |
683 | /* |
684 | * For the impact on the stack, a CALL behaves like |
685 | * a PUSH of an immediate value (the return address). |
686 | */ |
687 | ADD_OP(op) { |
688 | op->src.type = OP_SRC_CONST; |
689 | op->dest.type = OP_DEST_PUSH; |
690 | } |
691 | break; |
692 | |
693 | case 0xfc: |
694 | insn->type = INSN_CLD; |
695 | break; |
696 | |
697 | case 0xfd: |
698 | insn->type = INSN_STD; |
699 | break; |
700 | |
701 | case 0xff: |
702 | if (modrm_reg == 2 || modrm_reg == 3) { |
703 | |
704 | insn->type = INSN_CALL_DYNAMIC; |
705 | if (has_notrack_prefix(insn: &ins)) |
706 | WARN("notrack prefix found at %s:0x%lx" , sec->name, offset); |
707 | |
708 | } else if (modrm_reg == 4) { |
709 | |
710 | insn->type = INSN_JUMP_DYNAMIC; |
711 | if (has_notrack_prefix(insn: &ins)) |
712 | WARN("notrack prefix found at %s:0x%lx" , sec->name, offset); |
713 | |
714 | } else if (modrm_reg == 5) { |
715 | |
716 | /* jmpf */ |
717 | insn->type = INSN_CONTEXT_SWITCH; |
718 | |
719 | } else if (modrm_reg == 6) { |
720 | |
721 | /* push from mem */ |
722 | ADD_OP(op) { |
723 | op->src.type = OP_SRC_CONST; |
724 | op->dest.type = OP_DEST_PUSH; |
725 | } |
726 | } |
727 | |
728 | break; |
729 | |
730 | default: |
731 | break; |
732 | } |
733 | |
734 | insn->immediate = ins.immediate.nbytes ? ins.immediate.value : 0; |
735 | |
736 | return 0; |
737 | } |
738 | |
739 | void arch_initial_func_cfi_state(struct cfi_init_state *state) |
740 | { |
741 | int i; |
742 | |
743 | for (i = 0; i < CFI_NUM_REGS; i++) { |
744 | state->regs[i].base = CFI_UNDEFINED; |
745 | state->regs[i].offset = 0; |
746 | } |
747 | |
748 | /* initial CFA (call frame address) */ |
749 | state->cfa.base = CFI_SP; |
750 | state->cfa.offset = 8; |
751 | |
752 | /* initial RA (return address) */ |
753 | state->regs[CFI_RA].base = CFI_CFA; |
754 | state->regs[CFI_RA].offset = -8; |
755 | } |
756 | |
757 | const char *arch_nop_insn(int len) |
758 | { |
759 | static const char nops[5][5] = { |
760 | { BYTES_NOP1 }, |
761 | { BYTES_NOP2 }, |
762 | { BYTES_NOP3 }, |
763 | { BYTES_NOP4 }, |
764 | { BYTES_NOP5 }, |
765 | }; |
766 | |
767 | if (len < 1 || len > 5) { |
768 | WARN("invalid NOP size: %d\n" , len); |
769 | return NULL; |
770 | } |
771 | |
772 | return nops[len-1]; |
773 | } |
774 | |
775 | #define BYTE_RET 0xC3 |
776 | |
777 | const char *arch_ret_insn(int len) |
778 | { |
779 | static const char ret[5][5] = { |
780 | { BYTE_RET }, |
781 | { BYTE_RET, 0xcc }, |
782 | { BYTE_RET, 0xcc, BYTES_NOP1 }, |
783 | { BYTE_RET, 0xcc, BYTES_NOP2 }, |
784 | { BYTE_RET, 0xcc, BYTES_NOP3 }, |
785 | }; |
786 | |
787 | if (len < 1 || len > 5) { |
788 | WARN("invalid RET size: %d\n" , len); |
789 | return NULL; |
790 | } |
791 | |
792 | return ret[len-1]; |
793 | } |
794 | |
795 | int arch_decode_hint_reg(u8 sp_reg, int *base) |
796 | { |
797 | switch (sp_reg) { |
798 | case ORC_REG_UNDEFINED: |
799 | *base = CFI_UNDEFINED; |
800 | break; |
801 | case ORC_REG_SP: |
802 | *base = CFI_SP; |
803 | break; |
804 | case ORC_REG_BP: |
805 | *base = CFI_BP; |
806 | break; |
807 | case ORC_REG_SP_INDIRECT: |
808 | *base = CFI_SP_INDIRECT; |
809 | break; |
810 | case ORC_REG_R10: |
811 | *base = CFI_R10; |
812 | break; |
813 | case ORC_REG_R13: |
814 | *base = CFI_R13; |
815 | break; |
816 | case ORC_REG_DI: |
817 | *base = CFI_DI; |
818 | break; |
819 | case ORC_REG_DX: |
820 | *base = CFI_DX; |
821 | break; |
822 | default: |
823 | return -1; |
824 | } |
825 | |
826 | return 0; |
827 | } |
828 | |
829 | bool arch_is_retpoline(struct symbol *sym) |
830 | { |
831 | return !strncmp(sym->name, "__x86_indirect_" , 15); |
832 | } |
833 | |
834 | bool arch_is_rethunk(struct symbol *sym) |
835 | { |
836 | return !strcmp(sym->name, "__x86_return_thunk" ); |
837 | } |
838 | |
839 | bool arch_is_embedded_insn(struct symbol *sym) |
840 | { |
841 | return !strcmp(sym->name, "retbleed_return_thunk" ) || |
842 | !strcmp(sym->name, "srso_safe_ret" ); |
843 | } |
844 | |