1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | #define pr_fmt(fmt) "SMP alternatives: " fmt |
3 | |
4 | #include <linux/module.h> |
5 | #include <linux/sched.h> |
6 | #include <linux/perf_event.h> |
7 | #include <linux/mutex.h> |
8 | #include <linux/list.h> |
9 | #include <linux/stringify.h> |
10 | #include <linux/highmem.h> |
11 | #include <linux/mm.h> |
12 | #include <linux/vmalloc.h> |
13 | #include <linux/memory.h> |
14 | #include <linux/stop_machine.h> |
15 | #include <linux/slab.h> |
16 | #include <linux/kdebug.h> |
17 | #include <linux/kprobes.h> |
18 | #include <linux/mmu_context.h> |
19 | #include <linux/bsearch.h> |
20 | #include <linux/sync_core.h> |
21 | #include <asm/text-patching.h> |
22 | #include <asm/alternative.h> |
23 | #include <asm/sections.h> |
24 | #include <asm/mce.h> |
25 | #include <asm/nmi.h> |
26 | #include <asm/cacheflush.h> |
27 | #include <asm/tlbflush.h> |
28 | #include <asm/insn.h> |
29 | #include <asm/io.h> |
30 | #include <asm/fixmap.h> |
31 | #include <asm/paravirt.h> |
32 | #include <asm/asm-prototypes.h> |
33 | |
34 | int __read_mostly alternatives_patched; |
35 | |
36 | EXPORT_SYMBOL_GPL(alternatives_patched); |
37 | |
38 | #define MAX_PATCH_LEN (255-1) |
39 | |
40 | #define DA_ALL (~0) |
41 | #define DA_ALT 0x01 |
42 | #define DA_RET 0x02 |
43 | #define DA_RETPOLINE 0x04 |
44 | #define DA_ENDBR 0x08 |
45 | #define DA_SMP 0x10 |
46 | |
47 | static unsigned int __initdata_or_module debug_alternative; |
48 | |
49 | static int __init debug_alt(char *str) |
50 | { |
51 | if (str && *str == '=') |
52 | str++; |
53 | |
	if (!str || kstrtouint(str, 0, &debug_alternative))
55 | debug_alternative = DA_ALL; |
56 | |
57 | return 1; |
58 | } |
59 | __setup("debug-alternative" , debug_alt); |
60 | |
61 | static int noreplace_smp; |
62 | |
63 | static int __init setup_noreplace_smp(char *str) |
64 | { |
65 | noreplace_smp = 1; |
66 | return 1; |
67 | } |
68 | __setup("noreplace-smp" , setup_noreplace_smp); |
69 | |
70 | #define DPRINTK(type, fmt, args...) \ |
71 | do { \ |
72 | if (debug_alternative & DA_##type) \ |
73 | printk(KERN_DEBUG pr_fmt(fmt) "\n", ##args); \ |
74 | } while (0) |
75 | |
76 | #define DUMP_BYTES(type, buf, len, fmt, args...) \ |
77 | do { \ |
78 | if (unlikely(debug_alternative & DA_##type)) { \ |
79 | int j; \ |
80 | \ |
81 | if (!(len)) \ |
82 | break; \ |
83 | \ |
84 | printk(KERN_DEBUG pr_fmt(fmt), ##args); \ |
85 | for (j = 0; j < (len) - 1; j++) \ |
86 | printk(KERN_CONT "%02hhx ", buf[j]); \ |
87 | printk(KERN_CONT "%02hhx\n", buf[j]); \ |
88 | } \ |
89 | } while (0) |
90 | |
91 | static const unsigned char x86nops[] = |
92 | { |
93 | BYTES_NOP1, |
94 | BYTES_NOP2, |
95 | BYTES_NOP3, |
96 | BYTES_NOP4, |
97 | BYTES_NOP5, |
98 | BYTES_NOP6, |
99 | BYTES_NOP7, |
100 | BYTES_NOP8, |
101 | #ifdef CONFIG_64BIT |
102 | BYTES_NOP9, |
103 | BYTES_NOP10, |
104 | BYTES_NOP11, |
105 | #endif |
106 | }; |
107 | |
108 | const unsigned char * const x86_nops[ASM_NOP_MAX+1] = |
109 | { |
110 | NULL, |
111 | x86nops, |
112 | x86nops + 1, |
113 | x86nops + 1 + 2, |
114 | x86nops + 1 + 2 + 3, |
115 | x86nops + 1 + 2 + 3 + 4, |
116 | x86nops + 1 + 2 + 3 + 4 + 5, |
117 | x86nops + 1 + 2 + 3 + 4 + 5 + 6, |
118 | x86nops + 1 + 2 + 3 + 4 + 5 + 6 + 7, |
119 | #ifdef CONFIG_64BIT |
120 | x86nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8, |
121 | x86nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9, |
122 | x86nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10, |
123 | #endif |
124 | }; |
125 | |
126 | /* |
127 | * Fill the buffer with a single effective instruction of size @len. |
128 | * |
129 | * In order not to issue an ORC stack depth tracking CFI entry (Call Frame Info) |
130 | * for every single-byte NOP, try to generate the maximally available NOP of |
131 | * size <= ASM_NOP_MAX such that only a single CFI entry is generated (vs one for |
 * each single-byte NOP). If @len to fill out is > ASM_NOP_MAX, pad with INT3 and
133 | * *jump* over instead of executing long and daft NOPs. |
134 | */ |
135 | static void __init_or_module add_nop(u8 *instr, unsigned int len) |
136 | { |
137 | u8 *target = instr + len; |
138 | |
139 | if (!len) |
140 | return; |
141 | |
142 | if (len <= ASM_NOP_MAX) { |
143 | memcpy(instr, x86_nops[len], len); |
144 | return; |
145 | } |
146 | |
147 | if (len < 128) { |
		__text_gen_insn(instr, JMP8_INSN_OPCODE, instr, target, JMP8_INSN_SIZE);
149 | instr += JMP8_INSN_SIZE; |
150 | } else { |
		__text_gen_insn(instr, JMP32_INSN_OPCODE, instr, target, JMP32_INSN_SIZE);
152 | instr += JMP32_INSN_SIZE; |
153 | } |
154 | |
155 | for (;instr < target; instr++) |
156 | *instr = INT3_INSN_OPCODE; |
157 | } |
158 | |
159 | extern s32 __retpoline_sites[], __retpoline_sites_end[]; |
160 | extern s32 __return_sites[], __return_sites_end[]; |
161 | extern s32 __cfi_sites[], __cfi_sites_end[]; |
162 | extern s32 __ibt_endbr_seal[], __ibt_endbr_seal_end[]; |
163 | extern struct alt_instr __alt_instructions[], __alt_instructions_end[]; |
164 | extern s32 __smp_locks[], __smp_locks_end[]; |
165 | void text_poke_early(void *addr, const void *opcode, size_t len); |
166 | |
167 | /* |
168 | * Matches NOP and NOPL, not any of the other possible NOPs. |
169 | */ |
170 | static bool insn_is_nop(struct insn *insn) |
171 | { |
172 | /* Anything NOP, but no REP NOP */ |
173 | if (insn->opcode.bytes[0] == 0x90 && |
174 | (!insn->prefixes.nbytes || insn->prefixes.bytes[0] != 0xF3)) |
175 | return true; |
176 | |
177 | /* NOPL */ |
178 | if (insn->opcode.bytes[0] == 0x0F && insn->opcode.bytes[1] == 0x1F) |
179 | return true; |
180 | |
181 | /* TODO: more nops */ |
182 | |
183 | return false; |
184 | } |
185 | |
186 | /* |
187 | * Find the offset of the first non-NOP instruction starting at @offset |
188 | * but no further than @len. |
189 | */ |
190 | static int skip_nops(u8 *instr, int offset, int len) |
191 | { |
192 | struct insn insn; |
193 | |
194 | for (; offset < len; offset += insn.length) { |
195 | if (insn_decode_kernel(&insn, &instr[offset])) |
196 | break; |
197 | |
		if (!insn_is_nop(&insn))
199 | break; |
200 | } |
201 | |
202 | return offset; |
203 | } |
204 | |
205 | /* |
206 | * Optimize a sequence of NOPs, possibly preceded by an unconditional jump |
207 | * to the end of the NOP sequence into a single NOP. |
208 | */ |
209 | static bool __init_or_module |
210 | __optimize_nops(u8 *instr, size_t len, struct insn *insn, int *next, int *prev, int *target) |
211 | { |
212 | int i = *next - insn->length; |
213 | |
214 | switch (insn->opcode.bytes[0]) { |
215 | case JMP8_INSN_OPCODE: |
216 | case JMP32_INSN_OPCODE: |
217 | *prev = i; |
218 | *target = *next + insn->immediate.value; |
219 | return false; |
220 | } |
221 | |
222 | if (insn_is_nop(insn)) { |
223 | int nop = i; |
224 | |
		*next = skip_nops(instr, *next, len);
226 | if (*target && *next == *target) |
227 | nop = *prev; |
228 | |
		add_nop(instr + nop, *next - nop);
		DUMP_BYTES(ALT, instr, len, "%px: [%d:%d) optimized NOPs: ", instr, nop, *next);
231 | return true; |
232 | } |
233 | |
234 | *target = 0; |
235 | return false; |
236 | } |
237 | |
238 | /* |
239 | * "noinline" to cause control flow change and thus invalidate I$ and |
240 | * cause refetch after modification. |
241 | */ |
242 | static void __init_or_module noinline optimize_nops(u8 *instr, size_t len) |
243 | { |
244 | int prev, target = 0; |
245 | |
246 | for (int next, i = 0; i < len; i = next) { |
247 | struct insn insn; |
248 | |
249 | if (insn_decode_kernel(&insn, &instr[i])) |
250 | return; |
251 | |
252 | next = i + insn.length; |
253 | |
		__optimize_nops(instr, len, &insn, &next, &prev, &target);
255 | } |
256 | } |
257 | |
258 | /* |
259 | * In this context, "source" is where the instructions are placed in the |
260 | * section .altinstr_replacement, for example during kernel build by the |
261 | * toolchain. |
262 | * "Destination" is where the instructions are being patched in by this |
263 | * machinery. |
264 | * |
265 | * The source offset is: |
266 | * |
267 | * src_imm = target - src_next_ip (1) |
268 | * |
269 | * and the target offset is: |
270 | * |
271 | * dst_imm = target - dst_next_ip (2) |
272 | * |
273 | * so rework (1) as an expression for target like: |
274 | * |
275 | * target = src_imm + src_next_ip (1a) |
276 | * |
277 | * and substitute in (2) to get: |
278 | * |
279 | * dst_imm = (src_imm + src_next_ip) - dst_next_ip (3) |
280 | * |
281 | * Now, since the instruction stream is 'identical' at src and dst (it |
282 | * is being copied after all) it can be stated that: |
283 | * |
284 | * src_next_ip = src + ip_offset |
285 | * dst_next_ip = dst + ip_offset (4) |
286 | * |
287 | * Substitute (4) in (3) and observe ip_offset being cancelled out to |
288 | * obtain: |
289 | * |
290 | * dst_imm = src_imm + (src + ip_offset) - (dst + ip_offset) |
291 | * = src_imm + src - dst + ip_offset - ip_offset |
292 | * = src_imm + src - dst (5) |
293 | * |
294 | * IOW, only the relative displacement of the code block matters. |
295 | */ |
296 | |
297 | #define apply_reloc_n(n_, p_, d_) \ |
298 | do { \ |
299 | s32 v = *(s##n_ *)(p_); \ |
300 | v += (d_); \ |
301 | BUG_ON((v >> 31) != (v >> (n_-1))); \ |
302 | *(s##n_ *)(p_) = (s##n_)v; \ |
303 | } while (0) |
304 | |
305 | |
306 | static __always_inline |
307 | void apply_reloc(int n, void *ptr, uintptr_t diff) |
308 | { |
309 | switch (n) { |
310 | case 1: apply_reloc_n(8, ptr, diff); break; |
311 | case 2: apply_reloc_n(16, ptr, diff); break; |
312 | case 4: apply_reloc_n(32, ptr, diff); break; |
313 | default: BUG(); |
314 | } |
315 | } |
316 | |
317 | static __always_inline |
318 | bool need_reloc(unsigned long offset, u8 *src, size_t src_len) |
319 | { |
320 | u8 *target = src + offset; |
321 | /* |
322 | * If the target is inside the patched block, it's relative to the |
323 | * block itself and does not need relocation. |
324 | */ |
325 | return (target < src || target > src + src_len); |
326 | } |
327 | |
328 | static void __init_or_module noinline |
329 | apply_relocation(u8 *buf, size_t len, u8 *dest, u8 *src, size_t src_len) |
330 | { |
331 | int prev, target = 0; |
332 | |
333 | for (int next, i = 0; i < len; i = next) { |
334 | struct insn insn; |
335 | |
336 | if (WARN_ON_ONCE(insn_decode_kernel(&insn, &buf[i]))) |
337 | return; |
338 | |
339 | next = i + insn.length; |
340 | |
		if (__optimize_nops(buf, len, &insn, &next, &prev, &target))
342 | continue; |
343 | |
344 | switch (insn.opcode.bytes[0]) { |
345 | case 0x0f: |
346 | if (insn.opcode.bytes[1] < 0x80 || |
347 | insn.opcode.bytes[1] > 0x8f) |
348 | break; |
349 | |
350 | fallthrough; /* Jcc.d32 */ |
351 | case 0x70 ... 0x7f: /* Jcc.d8 */ |
352 | case JMP8_INSN_OPCODE: |
353 | case JMP32_INSN_OPCODE: |
354 | case CALL_INSN_OPCODE: |
			if (need_reloc(next + insn.immediate.value, src, src_len)) {
				apply_reloc(insn.immediate.nbytes,
					    buf + i + insn_offset_immediate(&insn),
					    src - dest);
359 | } |
360 | |
361 | /* |
362 | * Where possible, convert JMP.d32 into JMP.d8. |
363 | */ |
364 | if (insn.opcode.bytes[0] == JMP32_INSN_OPCODE) { |
365 | s32 imm = insn.immediate.value; |
366 | imm += src - dest; |
367 | imm += JMP32_INSN_SIZE - JMP8_INSN_SIZE; |
368 | if ((imm >> 31) == (imm >> 7)) { |
369 | buf[i+0] = JMP8_INSN_OPCODE; |
370 | buf[i+1] = (s8)imm; |
371 | |
372 | memset(&buf[i+2], INT3_INSN_OPCODE, insn.length - 2); |
373 | } |
374 | } |
375 | break; |
376 | } |
377 | |
		if (insn_rip_relative(&insn)) {
			if (need_reloc(next + insn.displacement.value, src, src_len)) {
				apply_reloc(insn.displacement.nbytes,
					    buf + i + insn_offset_displacement(&insn),
					    src - dest);
383 | } |
384 | } |
385 | } |
386 | } |
387 | |
388 | /* |
389 | * Replace instructions with better alternatives for this CPU type. This runs |
390 | * before SMP is initialized to avoid SMP problems with self modifying code. |
 * This implies that asymmetric systems where APs have fewer capabilities than
392 | * the boot processor are not handled. Tough. Make sure you disable such |
393 | * features by hand. |
394 | * |
395 | * Marked "noinline" to cause control flow change and thus insn cache |
396 | * to refetch changed I$ lines. |
397 | */ |
398 | void __init_or_module noinline apply_alternatives(struct alt_instr *start, |
399 | struct alt_instr *end) |
400 | { |
401 | struct alt_instr *a; |
402 | u8 *instr, *replacement; |
403 | u8 insn_buff[MAX_PATCH_LEN]; |
404 | |
	DPRINTK(ALT, "alt table %px, -> %px", start, end);
406 | |
407 | /* |
408 | * In the case CONFIG_X86_5LEVEL=y, KASAN_SHADOW_START is defined using |
409 | * cpu_feature_enabled(X86_FEATURE_LA57) and is therefore patched here. |
410 | * During the process, KASAN becomes confused seeing partial LA57 |
411 | * conversion and triggers a false-positive out-of-bound report. |
412 | * |
413 | * Disable KASAN until the patching is complete. |
414 | */ |
415 | kasan_disable_current(); |
416 | |
417 | /* |
418 | * The scan order should be from start to end. A later scanned |
419 | * alternative code can overwrite previously scanned alternative code. |
420 | * Some kernel functions (e.g. memcpy, memset, etc) use this order to |
421 | * patch code. |
422 | * |
423 | * So be careful if you want to change the scan order to any other |
424 | * order. |
425 | */ |
426 | for (a = start; a < end; a++) { |
427 | int insn_buff_sz = 0; |
428 | |
429 | instr = (u8 *)&a->instr_offset + a->instr_offset; |
430 | replacement = (u8 *)&a->repl_offset + a->repl_offset; |
431 | BUG_ON(a->instrlen > sizeof(insn_buff)); |
432 | BUG_ON(a->cpuid >= (NCAPINTS + NBUGINTS) * 32); |
433 | |
434 | /* |
435 | * Patch if either: |
436 | * - feature is present |
437 | * - feature not present but ALT_FLAG_NOT is set to mean, |
438 | * patch if feature is *NOT* present. |
439 | */ |
440 | if (!boot_cpu_has(a->cpuid) == !(a->flags & ALT_FLAG_NOT)) { |
			optimize_nops(instr, a->instrlen);
442 | continue; |
443 | } |
444 | |
		DPRINTK(ALT, "feat: %s%d*32+%d, old: (%pS (%px) len: %d), repl: (%px, len: %d)",
			(a->flags & ALT_FLAG_NOT) ? "!" : "",
447 | a->cpuid >> 5, |
448 | a->cpuid & 0x1f, |
449 | instr, instr, a->instrlen, |
450 | replacement, a->replacementlen); |
451 | |
452 | memcpy(insn_buff, replacement, a->replacementlen); |
453 | insn_buff_sz = a->replacementlen; |
454 | |
455 | for (; insn_buff_sz < a->instrlen; insn_buff_sz++) |
456 | insn_buff[insn_buff_sz] = 0x90; |
457 | |
		apply_relocation(insn_buff, a->instrlen, instr, replacement, a->replacementlen);
459 | |
		DUMP_BYTES(ALT, instr, a->instrlen, "%px: old_insn: ", instr);
		DUMP_BYTES(ALT, replacement, a->replacementlen, "%px: rpl_insn: ", replacement);
		DUMP_BYTES(ALT, insn_buff, insn_buff_sz, "%px: final_insn: ", instr);
463 | |
		text_poke_early(instr, insn_buff, insn_buff_sz);
465 | } |
466 | |
467 | kasan_enable_current(); |
468 | } |
469 | |
470 | static inline bool is_jcc32(struct insn *insn) |
471 | { |
472 | /* Jcc.d32 second opcode byte is in the range: 0x80-0x8f */ |
473 | return insn->opcode.bytes[0] == 0x0f && (insn->opcode.bytes[1] & 0xf0) == 0x80; |
474 | } |
475 | |
476 | #if defined(CONFIG_RETPOLINE) && defined(CONFIG_OBJTOOL) |
477 | |
478 | /* |
479 | * CALL/JMP *%\reg |
480 | */ |
481 | static int emit_indirect(int op, int reg, u8 *bytes) |
482 | { |
483 | int i = 0; |
484 | u8 modrm; |
485 | |
486 | switch (op) { |
487 | case CALL_INSN_OPCODE: |
488 | modrm = 0x10; /* Reg = 2; CALL r/m */ |
489 | break; |
490 | |
491 | case JMP32_INSN_OPCODE: |
492 | modrm = 0x20; /* Reg = 4; JMP r/m */ |
493 | break; |
494 | |
495 | default: |
496 | WARN_ON_ONCE(1); |
497 | return -1; |
498 | } |
499 | |
500 | if (reg >= 8) { |
501 | bytes[i++] = 0x41; /* REX.B prefix */ |
502 | reg -= 8; |
503 | } |
504 | |
505 | modrm |= 0xc0; /* Mod = 3 */ |
506 | modrm += reg; |
507 | |
508 | bytes[i++] = 0xff; /* opcode */ |
509 | bytes[i++] = modrm; |
510 | |
511 | return i; |
512 | } |
513 | |
514 | static int emit_call_track_retpoline(void *addr, struct insn *insn, int reg, u8 *bytes) |
515 | { |
516 | u8 op = insn->opcode.bytes[0]; |
517 | int i = 0; |
518 | |
519 | /* |
520 | * Clang does 'weird' Jcc __x86_indirect_thunk_r11 conditional |
521 | * tail-calls. Deal with them. |
522 | */ |
523 | if (is_jcc32(insn)) { |
524 | bytes[i++] = op; |
525 | op = insn->opcode.bytes[1]; |
526 | goto clang_jcc; |
527 | } |
528 | |
529 | if (insn->length == 6) |
530 | bytes[i++] = 0x2e; /* CS-prefix */ |
531 | |
532 | switch (op) { |
533 | case CALL_INSN_OPCODE: |
		__text_gen_insn(bytes+i, op, addr+i,
				__x86_indirect_call_thunk_array[reg],
				CALL_INSN_SIZE);
537 | i += CALL_INSN_SIZE; |
538 | break; |
539 | |
540 | case JMP32_INSN_OPCODE: |
541 | clang_jcc: |
		__text_gen_insn(bytes+i, op, addr+i,
				__x86_indirect_jump_thunk_array[reg],
				JMP32_INSN_SIZE);
545 | i += JMP32_INSN_SIZE; |
546 | break; |
547 | |
548 | default: |
		WARN(1, "%pS %px %*ph\n", addr, addr, 6, addr);
550 | return -1; |
551 | } |
552 | |
553 | WARN_ON_ONCE(i != insn->length); |
554 | |
555 | return i; |
556 | } |
557 | |
558 | /* |
559 | * Rewrite the compiler generated retpoline thunk calls. |
560 | * |
561 | * For spectre_v2=off (!X86_FEATURE_RETPOLINE), rewrite them into immediate |
562 | * indirect instructions, avoiding the extra indirection. |
563 | * |
564 | * For example, convert: |
565 | * |
566 | * CALL __x86_indirect_thunk_\reg |
567 | * |
568 | * into: |
569 | * |
570 | * CALL *%\reg |
571 | * |
572 | * It also tries to inline spectre_v2=retpoline,lfence when size permits. |
573 | */ |
574 | static int patch_retpoline(void *addr, struct insn *insn, u8 *bytes) |
575 | { |
576 | retpoline_thunk_t *target; |
577 | int reg, ret, i = 0; |
578 | u8 op, cc; |
579 | |
580 | target = addr + insn->length + insn->immediate.value; |
581 | reg = target - __x86_indirect_thunk_array; |
582 | |
583 | if (WARN_ON_ONCE(reg & ~0xf)) |
584 | return -1; |
585 | |
586 | /* If anyone ever does: CALL/JMP *%rsp, we're in deep trouble. */ |
587 | BUG_ON(reg == 4); |
588 | |
589 | if (cpu_feature_enabled(X86_FEATURE_RETPOLINE) && |
590 | !cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) { |
591 | if (cpu_feature_enabled(X86_FEATURE_CALL_DEPTH)) |
592 | return emit_call_track_retpoline(addr, insn, reg, bytes); |
593 | |
594 | return -1; |
595 | } |
596 | |
597 | op = insn->opcode.bytes[0]; |
598 | |
599 | /* |
600 | * Convert: |
601 | * |
602 | * Jcc.d32 __x86_indirect_thunk_\reg |
603 | * |
604 | * into: |
605 | * |
606 | * Jncc.d8 1f |
607 | * [ LFENCE ] |
608 | * JMP *%\reg |
609 | * [ NOP ] |
610 | * 1: |
611 | */ |
612 | if (is_jcc32(insn)) { |
613 | cc = insn->opcode.bytes[1] & 0xf; |
614 | cc ^= 1; /* invert condition */ |
615 | |
616 | bytes[i++] = 0x70 + cc; /* Jcc.d8 */ |
617 | bytes[i++] = insn->length - 2; /* sizeof(Jcc.d8) == 2 */ |
618 | |
619 | /* Continue as if: JMP.d32 __x86_indirect_thunk_\reg */ |
620 | op = JMP32_INSN_OPCODE; |
621 | } |
622 | |
623 | /* |
624 | * For RETPOLINE_LFENCE: prepend the indirect CALL/JMP with an LFENCE. |
625 | */ |
626 | if (cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) { |
627 | bytes[i++] = 0x0f; |
628 | bytes[i++] = 0xae; |
629 | bytes[i++] = 0xe8; /* LFENCE */ |
630 | } |
631 | |
	ret = emit_indirect(op, reg, bytes + i);
633 | if (ret < 0) |
634 | return ret; |
635 | i += ret; |
636 | |
637 | /* |
638 | * The compiler is supposed to EMIT an INT3 after every unconditional |
639 | * JMP instruction due to AMD BTC. However, if the compiler is too old |
640 | * or SLS isn't enabled, we still need an INT3 after indirect JMPs |
641 | * even on Intel. |
642 | */ |
643 | if (op == JMP32_INSN_OPCODE && i < insn->length) |
644 | bytes[i++] = INT3_INSN_OPCODE; |
645 | |
646 | for (; i < insn->length;) |
647 | bytes[i++] = BYTES_NOP1; |
648 | |
649 | return i; |
650 | } |
651 | |
652 | /* |
653 | * Generated by 'objtool --retpoline'. |
654 | */ |
655 | void __init_or_module noinline apply_retpolines(s32 *start, s32 *end) |
656 | { |
657 | s32 *s; |
658 | |
659 | for (s = start; s < end; s++) { |
660 | void *addr = (void *)s + *s; |
661 | struct insn insn; |
662 | int len, ret; |
663 | u8 bytes[16]; |
664 | u8 op1, op2; |
665 | |
666 | ret = insn_decode_kernel(&insn, addr); |
667 | if (WARN_ON_ONCE(ret < 0)) |
668 | continue; |
669 | |
670 | op1 = insn.opcode.bytes[0]; |
671 | op2 = insn.opcode.bytes[1]; |
672 | |
673 | switch (op1) { |
674 | case CALL_INSN_OPCODE: |
675 | case JMP32_INSN_OPCODE: |
676 | break; |
677 | |
678 | case 0x0f: /* escape */ |
679 | if (op2 >= 0x80 && op2 <= 0x8f) |
680 | break; |
681 | fallthrough; |
682 | default: |
683 | WARN_ON_ONCE(1); |
684 | continue; |
685 | } |
686 | |
		DPRINTK(RETPOLINE, "retpoline at: %pS (%px) len: %d to: %pS",
688 | addr, addr, insn.length, |
689 | addr + insn.length + insn.immediate.value); |
690 | |
		len = patch_retpoline(addr, &insn, bytes);
692 | if (len == insn.length) { |
			optimize_nops(bytes, len);
			DUMP_BYTES(RETPOLINE, ((u8*)addr), len, "%px: orig: ", addr);
			DUMP_BYTES(RETPOLINE, ((u8*)bytes), len, "%px: repl: ", addr);
			text_poke_early(addr, bytes, len);
697 | } |
698 | } |
699 | } |
700 | |
701 | #ifdef CONFIG_RETHUNK |
702 | |
703 | /* |
704 | * Rewrite the compiler generated return thunk tail-calls. |
705 | * |
706 | * For example, convert: |
707 | * |
708 | * JMP __x86_return_thunk |
709 | * |
710 | * into: |
711 | * |
712 | * RET |
713 | */ |
714 | static int patch_return(void *addr, struct insn *insn, u8 *bytes) |
715 | { |
716 | int i = 0; |
717 | |
718 | /* Patch the custom return thunks... */ |
719 | if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) { |
720 | i = JMP32_INSN_SIZE; |
		__text_gen_insn(bytes, JMP32_INSN_OPCODE, addr, x86_return_thunk, i);
722 | } else { |
723 | /* ... or patch them out if not needed. */ |
724 | bytes[i++] = RET_INSN_OPCODE; |
725 | } |
726 | |
727 | for (; i < insn->length;) |
728 | bytes[i++] = INT3_INSN_OPCODE; |
729 | return i; |
730 | } |
731 | |
732 | void __init_or_module noinline apply_returns(s32 *start, s32 *end) |
733 | { |
734 | s32 *s; |
735 | |
736 | if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) |
737 | static_call_force_reinit(); |
738 | |
739 | for (s = start; s < end; s++) { |
740 | void *dest = NULL, *addr = (void *)s + *s; |
741 | struct insn insn; |
742 | int len, ret; |
743 | u8 bytes[16]; |
744 | u8 op; |
745 | |
746 | ret = insn_decode_kernel(&insn, addr); |
747 | if (WARN_ON_ONCE(ret < 0)) |
748 | continue; |
749 | |
750 | op = insn.opcode.bytes[0]; |
751 | if (op == JMP32_INSN_OPCODE) |
752 | dest = addr + insn.length + insn.immediate.value; |
753 | |
		if (__static_call_fixup(addr, op, dest) ||
		    WARN_ONCE(dest != &__x86_return_thunk,
			      "missing return thunk: %pS-%pS: %*ph",
757 | addr, dest, 5, addr)) |
758 | continue; |
759 | |
		DPRINTK(RET, "return thunk at: %pS (%px) len: %d to: %pS",
761 | addr, addr, insn.length, |
762 | addr + insn.length + insn.immediate.value); |
763 | |
		len = patch_return(addr, &insn, bytes);
765 | if (len == insn.length) { |
			DUMP_BYTES(RET, ((u8*)addr), len, "%px: orig: ", addr);
			DUMP_BYTES(RET, ((u8*)bytes), len, "%px: repl: ", addr);
			text_poke_early(addr, bytes, len);
769 | } |
770 | } |
771 | } |
772 | #else |
773 | void __init_or_module noinline apply_returns(s32 *start, s32 *end) { } |
774 | #endif /* CONFIG_RETHUNK */ |
775 | |
776 | #else /* !CONFIG_RETPOLINE || !CONFIG_OBJTOOL */ |
777 | |
778 | void __init_or_module noinline apply_retpolines(s32 *start, s32 *end) { } |
779 | void __init_or_module noinline apply_returns(s32 *start, s32 *end) { } |
780 | |
781 | #endif /* CONFIG_RETPOLINE && CONFIG_OBJTOOL */ |
782 | |
783 | #ifdef CONFIG_X86_KERNEL_IBT |
784 | |
785 | static void poison_cfi(void *addr); |
786 | |
787 | static void __init_or_module poison_endbr(void *addr, bool warn) |
788 | { |
789 | u32 endbr, poison = gen_endbr_poison(); |
790 | |
791 | if (WARN_ON_ONCE(get_kernel_nofault(endbr, addr))) |
792 | return; |
793 | |
	if (!is_endbr(endbr)) {
795 | WARN_ON_ONCE(warn); |
796 | return; |
797 | } |
798 | |
	DPRINTK(ENDBR, "ENDBR at: %pS (%px)", addr, addr);
800 | |
801 | /* |
802 | * When we have IBT, the lack of ENDBR will trigger #CP |
803 | */ |
	DUMP_BYTES(ENDBR, ((u8*)addr), 4, "%px: orig: ", addr);
	DUMP_BYTES(ENDBR, ((u8*)&poison), 4, "%px: repl: ", addr);
	text_poke_early(addr, &poison, 4);
807 | } |
808 | |
809 | /* |
810 | * Generated by: objtool --ibt |
811 | * |
812 | * Seal the functions for indirect calls by clobbering the ENDBR instructions |
813 | * and the kCFI hash value. |
814 | */ |
815 | void __init_or_module noinline apply_seal_endbr(s32 *start, s32 *end) |
816 | { |
817 | s32 *s; |
818 | |
819 | for (s = start; s < end; s++) { |
820 | void *addr = (void *)s + *s; |
821 | |
		poison_endbr(addr, true);
823 | if (IS_ENABLED(CONFIG_FINEIBT)) |
			poison_cfi(addr - 16);
825 | } |
826 | } |
827 | |
828 | #else |
829 | |
830 | void __init_or_module apply_seal_endbr(s32 *start, s32 *end) { } |
831 | |
832 | #endif /* CONFIG_X86_KERNEL_IBT */ |
833 | |
834 | #ifdef CONFIG_FINEIBT |
835 | |
836 | enum cfi_mode { |
837 | CFI_DEFAULT, |
838 | CFI_OFF, |
839 | CFI_KCFI, |
840 | CFI_FINEIBT, |
841 | }; |
842 | |
843 | static enum cfi_mode cfi_mode __ro_after_init = CFI_DEFAULT; |
844 | static bool cfi_rand __ro_after_init = true; |
845 | static u32 cfi_seed __ro_after_init; |
846 | |
847 | /* |
848 | * Re-hash the CFI hash with a boot-time seed while making sure the result is |
849 | * not a valid ENDBR instruction. |
850 | */ |
851 | static u32 cfi_rehash(u32 hash) |
852 | { |
853 | hash ^= cfi_seed; |
854 | while (unlikely(is_endbr(hash) || is_endbr(-hash))) { |
855 | bool lsb = hash & 1; |
856 | hash >>= 1; |
857 | if (lsb) |
858 | hash ^= 0x80200003; |
859 | } |
860 | return hash; |
861 | } |
862 | |
863 | static __init int cfi_parse_cmdline(char *str) |
864 | { |
865 | if (!str) |
866 | return -EINVAL; |
867 | |
868 | while (str) { |
869 | char *next = strchr(str, ','); |
870 | if (next) { |
871 | *next = 0; |
872 | next++; |
873 | } |
874 | |
		if (!strcmp(str, "auto")) {
876 | cfi_mode = CFI_DEFAULT; |
		} else if (!strcmp(str, "off")) {
878 | cfi_mode = CFI_OFF; |
879 | cfi_rand = false; |
		} else if (!strcmp(str, "kcfi")) {
881 | cfi_mode = CFI_KCFI; |
		} else if (!strcmp(str, "fineibt")) {
883 | cfi_mode = CFI_FINEIBT; |
		} else if (!strcmp(str, "norand")) {
885 | cfi_rand = false; |
886 | } else { |
			pr_err("Ignoring unknown cfi option (%s).", str);
888 | } |
889 | |
890 | str = next; |
891 | } |
892 | |
893 | return 0; |
894 | } |
early_param("cfi", cfi_parse_cmdline);
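
/*
 * Example: booting with "cfi=fineibt,norand" selects FineIBT and keeps the
 * compile-time kCFI hashes (no boot-time re-hashing).
 */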
896 | |
897 | /* |
 * kCFI                                         FineIBT
 *
 * __cfi_\func:                                 __cfi_\func:
 *      movl   $0x12345678,%eax         // 5         endbr64                         // 4
 *      nop                                           subl   $0x12345678,%r10d        // 7
 *      nop                                           jz     1f                       // 2
 *      nop                                           ud2                             // 2
 *      nop                                      1:   nop                             // 1
 *      nop
 *      nop
 *      nop
 *      nop
 *      nop
 *      nop
 *      nop
 *
 *
 * caller:                                      caller:
 *      movl   $(-0x12345678),%r10d     // 6         movl   $0x12345678,%r10d         // 6
 *      addl   $-15(%r11),%r10d         // 4         sub    $16,%r11                  // 4
 *      je     1f                       // 2         nop4                             // 4
 *      ud2                             // 2
 * 1:   call   __x86_indirect_thunk_r11 // 5         call   *%r11; nop2;              // 5
921 | * |
922 | */ |
923 | |
924 | asm( ".pushsection .rodata \n" |
925 | "fineibt_preamble_start: \n" |
926 | " endbr64 \n" |
927 | " subl $0x12345678, %r10d \n" |
928 | " je fineibt_preamble_end \n" |
929 | " ud2 \n" |
930 | " nop \n" |
931 | "fineibt_preamble_end: \n" |
932 | ".popsection\n" |
933 | ); |
934 | |
935 | extern u8 fineibt_preamble_start[]; |
936 | extern u8 fineibt_preamble_end[]; |
937 | |
938 | #define fineibt_preamble_size (fineibt_preamble_end - fineibt_preamble_start) |
939 | #define fineibt_preamble_hash 7 |
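
/*
 * The hash immediate lives at offset 7 in the preamble: 4 bytes of ENDBR64
 * plus the 3-byte "41 81 ea" encoding of SUBL $imm32,%r10d.
 */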
940 | |
941 | asm( ".pushsection .rodata \n" |
942 | "fineibt_caller_start: \n" |
943 | " movl $0x12345678, %r10d \n" |
944 | " sub $16, %r11 \n" |
945 | ASM_NOP4 |
946 | "fineibt_caller_end: \n" |
947 | ".popsection \n" |
948 | ); |
949 | |
950 | extern u8 fineibt_caller_start[]; |
951 | extern u8 fineibt_caller_end[]; |
952 | |
953 | #define fineibt_caller_size (fineibt_caller_end - fineibt_caller_start) |
954 | #define fineibt_caller_hash 2 |
955 | |
956 | #define fineibt_caller_jmp (fineibt_caller_size - 2) |
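
/*
 * In the caller sequence the hash immediate follows the 2-byte "41 ba" of
 * MOVL $imm32,%r10d, hence fineibt_caller_hash == 2, and a JMP.d8 with a
 * displacement of fineibt_caller_size - 2 skips the rest of the sequence
 * (see cfi_disable_callers() below).
 */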
957 | |
958 | static u32 decode_preamble_hash(void *addr) |
959 | { |
960 | u8 *p = addr; |
961 | |
962 | /* b8 78 56 34 12 mov $0x12345678,%eax */ |
963 | if (p[0] == 0xb8) |
964 | return *(u32 *)(addr + 1); |
965 | |
966 | return 0; /* invalid hash value */ |
967 | } |
968 | |
969 | static u32 decode_caller_hash(void *addr) |
970 | { |
971 | u8 *p = addr; |
972 | |
973 | /* 41 ba 78 56 34 12 mov $0x12345678,%r10d */ |
974 | if (p[0] == 0x41 && p[1] == 0xba) |
975 | return -*(u32 *)(addr + 2); |
976 | |
	/* eb 0c 78 56 34 12       jmp.d8  +12 */
978 | if (p[0] == JMP8_INSN_OPCODE && p[1] == fineibt_caller_jmp) |
979 | return -*(u32 *)(addr + 2); |
980 | |
981 | return 0; /* invalid hash value */ |
982 | } |
983 | |
984 | /* .retpoline_sites */ |
985 | static int cfi_disable_callers(s32 *start, s32 *end) |
986 | { |
987 | /* |
	 * Disable kCFI by patching in a JMP.d8; this leaves the hash immediate
	 * intact for later usage. Also see decode_caller_hash() and
990 | * cfi_rewrite_callers(). |
991 | */ |
992 | const u8 jmp[] = { JMP8_INSN_OPCODE, fineibt_caller_jmp }; |
993 | s32 *s; |
994 | |
995 | for (s = start; s < end; s++) { |
996 | void *addr = (void *)s + *s; |
997 | u32 hash; |
998 | |
999 | addr -= fineibt_caller_size; |
1000 | hash = decode_caller_hash(addr); |
1001 | if (!hash) /* nocfi callers */ |
1002 | continue; |
1003 | |
1004 | text_poke_early(addr, jmp, 2); |
1005 | } |
1006 | |
1007 | return 0; |
1008 | } |
1009 | |
1010 | static int cfi_enable_callers(s32 *start, s32 *end) |
1011 | { |
1012 | /* |
1013 | * Re-enable kCFI, undo what cfi_disable_callers() did. |
1014 | */ |
1015 | const u8 mov[] = { 0x41, 0xba }; |
1016 | s32 *s; |
1017 | |
1018 | for (s = start; s < end; s++) { |
1019 | void *addr = (void *)s + *s; |
1020 | u32 hash; |
1021 | |
1022 | addr -= fineibt_caller_size; |
1023 | hash = decode_caller_hash(addr); |
1024 | if (!hash) /* nocfi callers */ |
1025 | continue; |
1026 | |
1027 | text_poke_early(addr, mov, 2); |
1028 | } |
1029 | |
1030 | return 0; |
1031 | } |
1032 | |
1033 | /* .cfi_sites */ |
1034 | static int cfi_rand_preamble(s32 *start, s32 *end) |
1035 | { |
1036 | s32 *s; |
1037 | |
1038 | for (s = start; s < end; s++) { |
1039 | void *addr = (void *)s + *s; |
1040 | u32 hash; |
1041 | |
1042 | hash = decode_preamble_hash(addr); |
		if (WARN(!hash, "no CFI hash found at: %pS %px %*ph\n",
1044 | addr, addr, 5, addr)) |
1045 | return -EINVAL; |
1046 | |
1047 | hash = cfi_rehash(hash); |
1048 | text_poke_early(addr + 1, &hash, 4); |
1049 | } |
1050 | |
1051 | return 0; |
1052 | } |
1053 | |
1054 | static int cfi_rewrite_preamble(s32 *start, s32 *end) |
1055 | { |
1056 | s32 *s; |
1057 | |
1058 | for (s = start; s < end; s++) { |
1059 | void *addr = (void *)s + *s; |
1060 | u32 hash; |
1061 | |
1062 | hash = decode_preamble_hash(addr); |
		if (WARN(!hash, "no CFI hash found at: %pS %px %*ph\n",
1064 | addr, addr, 5, addr)) |
1065 | return -EINVAL; |
1066 | |
1067 | text_poke_early(addr, fineibt_preamble_start, fineibt_preamble_size); |
1068 | WARN_ON(*(u32 *)(addr + fineibt_preamble_hash) != 0x12345678); |
1069 | text_poke_early(addr + fineibt_preamble_hash, &hash, 4); |
1070 | } |
1071 | |
1072 | return 0; |
1073 | } |
1074 | |
1075 | static void cfi_rewrite_endbr(s32 *start, s32 *end) |
1076 | { |
1077 | s32 *s; |
1078 | |
1079 | for (s = start; s < end; s++) { |
1080 | void *addr = (void *)s + *s; |
1081 | |
1082 | poison_endbr(addr+16, false); |
1083 | } |
1084 | } |
1085 | |
1086 | /* .retpoline_sites */ |
1087 | static int cfi_rand_callers(s32 *start, s32 *end) |
1088 | { |
1089 | s32 *s; |
1090 | |
1091 | for (s = start; s < end; s++) { |
1092 | void *addr = (void *)s + *s; |
1093 | u32 hash; |
1094 | |
1095 | addr -= fineibt_caller_size; |
1096 | hash = decode_caller_hash(addr); |
1097 | if (hash) { |
1098 | hash = -cfi_rehash(hash); |
1099 | text_poke_early(addr + 2, &hash, 4); |
1100 | } |
1101 | } |
1102 | |
1103 | return 0; |
1104 | } |
1105 | |
1106 | static int cfi_rewrite_callers(s32 *start, s32 *end) |
1107 | { |
1108 | s32 *s; |
1109 | |
1110 | for (s = start; s < end; s++) { |
1111 | void *addr = (void *)s + *s; |
1112 | u32 hash; |
1113 | |
1114 | addr -= fineibt_caller_size; |
1115 | hash = decode_caller_hash(addr); |
1116 | if (hash) { |
1117 | text_poke_early(addr, fineibt_caller_start, fineibt_caller_size); |
1118 | WARN_ON(*(u32 *)(addr + fineibt_caller_hash) != 0x12345678); |
1119 | text_poke_early(addr + fineibt_caller_hash, &hash, 4); |
1120 | } |
1121 | /* rely on apply_retpolines() */ |
1122 | } |
1123 | |
1124 | return 0; |
1125 | } |
1126 | |
1127 | static void __apply_fineibt(s32 *start_retpoline, s32 *end_retpoline, |
1128 | s32 *start_cfi, s32 *end_cfi, bool builtin) |
1129 | { |
1130 | int ret; |
1131 | |
	if (WARN_ONCE(fineibt_preamble_size != 16,
		      "FineIBT preamble wrong size: %ld", fineibt_preamble_size))
1134 | return; |
1135 | |
1136 | if (cfi_mode == CFI_DEFAULT) { |
1137 | cfi_mode = CFI_KCFI; |
1138 | if (HAS_KERNEL_IBT && cpu_feature_enabled(X86_FEATURE_IBT)) |
1139 | cfi_mode = CFI_FINEIBT; |
1140 | } |
1141 | |
1142 | /* |
1143 | * Rewrite the callers to not use the __cfi_ stubs, such that we might |
1144 | * rewrite them. This disables all CFI. If this succeeds but any of the |
1145 | * later stages fails, we're without CFI. |
1146 | */ |
1147 | ret = cfi_disable_callers(start_retpoline, end_retpoline); |
1148 | if (ret) |
1149 | goto err; |
1150 | |
1151 | if (cfi_rand) { |
1152 | if (builtin) |
1153 | cfi_seed = get_random_u32(); |
1154 | |
1155 | ret = cfi_rand_preamble(start_cfi, end_cfi); |
1156 | if (ret) |
1157 | goto err; |
1158 | |
1159 | ret = cfi_rand_callers(start_retpoline, end_retpoline); |
1160 | if (ret) |
1161 | goto err; |
1162 | } |
1163 | |
1164 | switch (cfi_mode) { |
1165 | case CFI_OFF: |
1166 | if (builtin) |
			pr_info("Disabling CFI\n");
1168 | return; |
1169 | |
1170 | case CFI_KCFI: |
1171 | ret = cfi_enable_callers(start_retpoline, end_retpoline); |
1172 | if (ret) |
1173 | goto err; |
1174 | |
1175 | if (builtin) |
			pr_info("Using kCFI\n");
1177 | return; |
1178 | |
1179 | case CFI_FINEIBT: |
1180 | /* place the FineIBT preamble at func()-16 */ |
1181 | ret = cfi_rewrite_preamble(start_cfi, end_cfi); |
1182 | if (ret) |
1183 | goto err; |
1184 | |
1185 | /* rewrite the callers to target func()-16 */ |
1186 | ret = cfi_rewrite_callers(start_retpoline, end_retpoline); |
1187 | if (ret) |
1188 | goto err; |
1189 | |
1190 | /* now that nobody targets func()+0, remove ENDBR there */ |
1191 | cfi_rewrite_endbr(start_cfi, end_cfi); |
1192 | |
1193 | if (builtin) |
			pr_info("Using FineIBT CFI\n");
1195 | return; |
1196 | |
1197 | default: |
1198 | break; |
1199 | } |
1200 | |
1201 | err: |
	pr_err("Something went horribly wrong trying to rewrite the CFI implementation.\n");
1203 | } |
1204 | |
1205 | static inline void poison_hash(void *addr) |
1206 | { |
1207 | *(u32 *)addr = 0; |
1208 | } |
1209 | |
1210 | static void poison_cfi(void *addr) |
1211 | { |
1212 | switch (cfi_mode) { |
1213 | case CFI_FINEIBT: |
1214 | /* |
1215 | * __cfi_\func: |
1216 | * osp nopl (%rax) |
1217 | * subl $0, %r10d |
1218 | * jz 1f |
1219 | * ud2 |
1220 | * 1: nop |
1221 | */ |
1222 | poison_endbr(addr, false); |
1223 | poison_hash(addr + fineibt_preamble_hash); |
1224 | break; |
1225 | |
1226 | case CFI_KCFI: |
1227 | /* |
1228 | * __cfi_\func: |
1229 | * movl $0, %eax |
1230 | * .skip 11, 0x90 |
1231 | */ |
1232 | poison_hash(addr + 1); |
1233 | break; |
1234 | |
1235 | default: |
1236 | break; |
1237 | } |
1238 | } |
1239 | |
1240 | #else |
1241 | |
1242 | static void __apply_fineibt(s32 *start_retpoline, s32 *end_retpoline, |
1243 | s32 *start_cfi, s32 *end_cfi, bool builtin) |
1244 | { |
1245 | } |
1246 | |
1247 | #ifdef CONFIG_X86_KERNEL_IBT |
1248 | static void poison_cfi(void *addr) { } |
1249 | #endif |
1250 | |
1251 | #endif |
1252 | |
1253 | void apply_fineibt(s32 *start_retpoline, s32 *end_retpoline, |
1254 | s32 *start_cfi, s32 *end_cfi) |
1255 | { |
1256 | return __apply_fineibt(start_retpoline, end_retpoline, |
1257 | start_cfi, end_cfi, |
1258 | /* .builtin = */ false); |
1259 | } |
1260 | |
1261 | #ifdef CONFIG_SMP |
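/*
 * Each .smp_locks entry points at the one-byte prefix of a lock-prefixed
 * instruction: 0xf0 (LOCK) while running SMP, rewritten to the harmless
 * 0x3e (DS override) for UP, e.g. "f0 0f b1 ..." (lock cmpxchg) becomes
 * "3e 0f b1 ...".
 */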
1262 | static void alternatives_smp_lock(const s32 *start, const s32 *end, |
1263 | u8 *text, u8 *text_end) |
1264 | { |
1265 | const s32 *poff; |
1266 | |
1267 | for (poff = start; poff < end; poff++) { |
1268 | u8 *ptr = (u8 *)poff + *poff; |
1269 | |
1270 | if (!*poff || ptr < text || ptr >= text_end) |
1271 | continue; |
1272 | /* turn DS segment override prefix into lock prefix */ |
1273 | if (*ptr == 0x3e) |
			text_poke(ptr, ((unsigned char []){0xf0}), 1);
1275 | } |
1276 | } |
1277 | |
1278 | static void alternatives_smp_unlock(const s32 *start, const s32 *end, |
1279 | u8 *text, u8 *text_end) |
1280 | { |
1281 | const s32 *poff; |
1282 | |
1283 | for (poff = start; poff < end; poff++) { |
1284 | u8 *ptr = (u8 *)poff + *poff; |
1285 | |
1286 | if (!*poff || ptr < text || ptr >= text_end) |
1287 | continue; |
1288 | /* turn lock prefix into DS segment override prefix */ |
1289 | if (*ptr == 0xf0) |
			text_poke(ptr, ((unsigned char []){0x3E}), 1);
1291 | } |
1292 | } |
1293 | |
1294 | struct smp_alt_module { |
1295 | /* what is this ??? */ |
1296 | struct module *mod; |
1297 | char *name; |
1298 | |
1299 | /* ptrs to lock prefixes */ |
1300 | const s32 *locks; |
1301 | const s32 *locks_end; |
1302 | |
1303 | /* .text segment, needed to avoid patching init code ;) */ |
1304 | u8 *text; |
1305 | u8 *text_end; |
1306 | |
1307 | struct list_head next; |
1308 | }; |
1309 | static LIST_HEAD(smp_alt_modules); |
1310 | static bool uniproc_patched = false; /* protected by text_mutex */ |
1311 | |
1312 | void __init_or_module alternatives_smp_module_add(struct module *mod, |
1313 | char *name, |
1314 | void *locks, void *locks_end, |
1315 | void *text, void *text_end) |
1316 | { |
1317 | struct smp_alt_module *smp; |
1318 | |
1319 | mutex_lock(&text_mutex); |
1320 | if (!uniproc_patched) |
1321 | goto unlock; |
1322 | |
1323 | if (num_possible_cpus() == 1) |
1324 | /* Don't bother remembering, we'll never have to undo it. */ |
1325 | goto smp_unlock; |
1326 | |
	smp = kzalloc(sizeof(*smp), GFP_KERNEL);
1328 | if (NULL == smp) |
1329 | /* we'll run the (safe but slow) SMP code then ... */ |
1330 | goto unlock; |
1331 | |
1332 | smp->mod = mod; |
1333 | smp->name = name; |
1334 | smp->locks = locks; |
1335 | smp->locks_end = locks_end; |
1336 | smp->text = text; |
1337 | smp->text_end = text_end; |
	DPRINTK(SMP, "locks %p -> %p, text %p -> %p, name %s\n",
1339 | smp->locks, smp->locks_end, |
1340 | smp->text, smp->text_end, smp->name); |
1341 | |
	list_add_tail(&smp->next, &smp_alt_modules);
1343 | smp_unlock: |
	alternatives_smp_unlock(locks, locks_end, text, text_end);
1345 | unlock: |
	mutex_unlock(&text_mutex);
1347 | } |
1348 | |
1349 | void __init_or_module alternatives_smp_module_del(struct module *mod) |
1350 | { |
1351 | struct smp_alt_module *item; |
1352 | |
1353 | mutex_lock(&text_mutex); |
1354 | list_for_each_entry(item, &smp_alt_modules, next) { |
1355 | if (mod != item->mod) |
1356 | continue; |
		list_del(&item->next);
		kfree(item);
1359 | break; |
1360 | } |
	mutex_unlock(&text_mutex);
1362 | } |
1363 | |
1364 | void alternatives_enable_smp(void) |
1365 | { |
1366 | struct smp_alt_module *mod; |
1367 | |
1368 | /* Why bother if there are no other CPUs? */ |
1369 | BUG_ON(num_possible_cpus() == 1); |
1370 | |
1371 | mutex_lock(&text_mutex); |
1372 | |
1373 | if (uniproc_patched) { |
		pr_info("switching to SMP code\n");
1375 | BUG_ON(num_online_cpus() != 1); |
		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
		clear_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
1378 | list_for_each_entry(mod, &smp_alt_modules, next) |
			alternatives_smp_lock(mod->locks, mod->locks_end,
					      mod->text, mod->text_end);
1381 | uniproc_patched = false; |
1382 | } |
	mutex_unlock(&text_mutex);
1384 | } |
1385 | |
1386 | /* |
1387 | * Return 1 if the address range is reserved for SMP-alternatives. |
1388 | * Must hold text_mutex. |
1389 | */ |
1390 | int alternatives_text_reserved(void *start, void *end) |
1391 | { |
1392 | struct smp_alt_module *mod; |
1393 | const s32 *poff; |
1394 | u8 *text_start = start; |
1395 | u8 *text_end = end; |
1396 | |
1397 | lockdep_assert_held(&text_mutex); |
1398 | |
1399 | list_for_each_entry(mod, &smp_alt_modules, next) { |
1400 | if (mod->text > text_end || mod->text_end < text_start) |
1401 | continue; |
1402 | for (poff = mod->locks; poff < mod->locks_end; poff++) { |
1403 | const u8 *ptr = (const u8 *)poff + *poff; |
1404 | |
1405 | if (text_start <= ptr && text_end > ptr) |
1406 | return 1; |
1407 | } |
1408 | } |
1409 | |
1410 | return 0; |
1411 | } |
1412 | #endif /* CONFIG_SMP */ |
1413 | |
1414 | #ifdef CONFIG_PARAVIRT |
1415 | |
1416 | /* Use this to add nops to a buffer, then text_poke the whole buffer. */ |
1417 | static void __init_or_module add_nops(void *insns, unsigned int len) |
1418 | { |
1419 | while (len > 0) { |
1420 | unsigned int noplen = len; |
1421 | if (noplen > ASM_NOP_MAX) |
1422 | noplen = ASM_NOP_MAX; |
1423 | memcpy(insns, x86_nops[noplen], noplen); |
1424 | insns += noplen; |
1425 | len -= noplen; |
1426 | } |
1427 | } |
1428 | |
1429 | void __init_or_module apply_paravirt(struct paravirt_patch_site *start, |
1430 | struct paravirt_patch_site *end) |
1431 | { |
1432 | struct paravirt_patch_site *p; |
1433 | char insn_buff[MAX_PATCH_LEN]; |
1434 | |
1435 | for (p = start; p < end; p++) { |
1436 | unsigned int used; |
1437 | |
1438 | BUG_ON(p->len > MAX_PATCH_LEN); |
1439 | /* prep the buffer with the original instructions */ |
1440 | memcpy(insn_buff, p->instr, p->len); |
		used = paravirt_patch(p->type, insn_buff, (unsigned long)p->instr, p->len);
1442 | |
1443 | BUG_ON(used > p->len); |
1444 | |
1445 | /* Pad the rest with nops */ |
		add_nops(insn_buff + used, p->len - used);
		text_poke_early(p->instr, insn_buff, p->len);
1448 | } |
1449 | } |
1450 | extern struct paravirt_patch_site __start_parainstructions[], |
1451 | __stop_parainstructions[]; |
1452 | #endif /* CONFIG_PARAVIRT */ |
1453 | |
1454 | /* |
1455 | * Self-test for the INT3 based CALL emulation code. |
1456 | * |
1457 | * This exercises int3_emulate_call() to make sure INT3 pt_regs are set up |
1458 | * properly and that there is a stack gap between the INT3 frame and the |
1459 | * previous context. Without this gap doing a virtual PUSH on the interrupted |
1460 | * stack would corrupt the INT3 IRET frame. |
1461 | * |
1462 | * See entry_{32,64}.S for more details. |
1463 | */ |
1464 | |
1465 | /* |
1466 | * We define the int3_magic() function in assembly to control the calling |
1467 | * convention such that we can 'call' it from assembly. |
1468 | */ |
1469 | |
1470 | extern void int3_magic(unsigned int *ptr); /* defined in asm */ |
1471 | |
1472 | asm ( |
1473 | " .pushsection .init.text, \"ax\", @progbits\n" |
1474 | " .type int3_magic, @function\n" |
1475 | "int3_magic:\n" |
1476 | ANNOTATE_NOENDBR |
1477 | " movl $1, (%" _ASM_ARG1 ")\n" |
1478 | ASM_RET |
1479 | " .size int3_magic, .-int3_magic\n" |
1480 | " .popsection\n" |
1481 | ); |
1482 | |
1483 | extern void int3_selftest_ip(void); /* defined in asm below */ |
1484 | |
1485 | static int __init |
1486 | int3_exception_notify(struct notifier_block *self, unsigned long val, void *data) |
1487 | { |
1488 | unsigned long selftest = (unsigned long)&int3_selftest_ip; |
1489 | struct die_args *args = data; |
1490 | struct pt_regs *regs = args->regs; |
1491 | |
1492 | OPTIMIZER_HIDE_VAR(selftest); |
1493 | |
1494 | if (!regs || user_mode(regs)) |
1495 | return NOTIFY_DONE; |
1496 | |
1497 | if (val != DIE_INT3) |
1498 | return NOTIFY_DONE; |
1499 | |
1500 | if (regs->ip - INT3_INSN_SIZE != selftest) |
1501 | return NOTIFY_DONE; |
1502 | |
	int3_emulate_call(regs, (unsigned long)&int3_magic);
1504 | return NOTIFY_STOP; |
1505 | } |
1506 | |
1507 | /* Must be noinline to ensure uniqueness of int3_selftest_ip. */ |
1508 | static noinline void __init int3_selftest(void) |
1509 | { |
1510 | static __initdata struct notifier_block int3_exception_nb = { |
1511 | .notifier_call = int3_exception_notify, |
1512 | .priority = INT_MAX-1, /* last */ |
1513 | }; |
1514 | unsigned int val = 0; |
1515 | |
1516 | BUG_ON(register_die_notifier(&int3_exception_nb)); |
1517 | |
1518 | /* |
1519 | * Basically: int3_magic(&val); but really complicated :-) |
1520 | * |
1521 | * INT3 padded with NOP to CALL_INSN_SIZE. The int3_exception_nb |
1522 | * notifier above will emulate CALL for us. |
1523 | */ |
1524 | asm volatile ("int3_selftest_ip:\n\t" |
1525 | ANNOTATE_NOENDBR |
1526 | " int3; nop; nop; nop; nop\n\t" |
1527 | : ASM_CALL_CONSTRAINT |
		      : __ASM_SEL_RAW(a, D) (&val)
		      : "memory");
1530 | |
1531 | BUG_ON(val != 1); |
1532 | |
	unregister_die_notifier(&int3_exception_nb);
1534 | } |
1535 | |
1536 | static __initdata int __alt_reloc_selftest_addr; |
1537 | |
1538 | extern void __init __alt_reloc_selftest(void *arg); |
1539 | __visible noinline void __init __alt_reloc_selftest(void *arg) |
1540 | { |
1541 | WARN_ON(arg != &__alt_reloc_selftest_addr); |
1542 | } |
1543 | |
1544 | static noinline void __init alt_reloc_selftest(void) |
1545 | { |
1546 | /* |
1547 | * Tests apply_relocation(). |
1548 | * |
1549 | * This has a relative immediate (CALL) in a place other than the first |
1550 | * instruction and additionally on x86_64 we get a RIP-relative LEA: |
1551 | * |
1552 | * lea 0x0(%rip),%rdi # 5d0: R_X86_64_PC32 .init.data+0x5566c |
1553 | * call +0 # 5d5: R_X86_64_PLT32 __alt_reloc_selftest-0x4 |
1554 | * |
1555 | * Getting this wrong will either crash and burn or tickle the WARN |
1556 | * above. |
1557 | */ |
1558 | asm_inline volatile ( |
		ALTERNATIVE("", "lea %[mem], %%" _ASM_ARG1 "; call __alt_reloc_selftest;", X86_FEATURE_ALWAYS)
1560 | : /* output */ |
1561 | : [mem] "m" (__alt_reloc_selftest_addr) |
1562 | : _ASM_ARG1 |
1563 | ); |
1564 | } |
1565 | |
1566 | void __init alternative_instructions(void) |
1567 | { |
1568 | int3_selftest(); |
1569 | |
1570 | /* |
1571 | * The patching is not fully atomic, so try to avoid local |
1572 | * interruptions that might execute the to be patched code. |
1573 | * Other CPUs are not running. |
1574 | */ |
1575 | stop_nmi(); |
1576 | |
1577 | /* |
1578 | * Don't stop machine check exceptions while patching. |
1579 | * MCEs only happen when something got corrupted and in this |
1580 | * case we must do something about the corruption. |
1581 | * Ignoring it is worse than an unlikely patching race. |
1582 | * Also machine checks tend to be broadcast and if one CPU |
1583 | * goes into machine check the others follow quickly, so we don't |
 * expect a machine check to cause undue problems during code
1585 | * patching. |
1586 | */ |
1587 | |
1588 | /* |
1589 | * Paravirt patching and alternative patching can be combined to |
1590 | * replace a function call with a short direct code sequence (e.g. |
1591 | * by setting a constant return value instead of doing that in an |
1592 | * external function). |
1593 | * In order to make this work the following sequence is required: |
1594 | * 1. set (artificial) features depending on used paravirt |
1595 | * functions which can later influence alternative patching |
1596 | * 2. apply paravirt patching (generally replacing an indirect |
1597 | * function call with a direct one) |
1598 | * 3. apply alternative patching (e.g. replacing a direct function |
1599 | * call with a custom code sequence) |
1600 | * Doing paravirt patching after alternative patching would clobber |
1601 | * the optimization of the custom code with a function call again. |
1602 | */ |
1603 | paravirt_set_cap(); |
1604 | |
1605 | /* |
1606 | * First patch paravirt functions, such that we overwrite the indirect |
1607 | * call with the direct call. |
1608 | */ |
	apply_paravirt(__parainstructions, __parainstructions_end);
1610 | |
	__apply_fineibt(__retpoline_sites, __retpoline_sites_end,
			__cfi_sites, __cfi_sites_end, true);
1613 | |
1614 | /* |
1615 | * Rewrite the retpolines, must be done before alternatives since |
1616 | * those can rewrite the retpoline thunks. |
1617 | */ |
	apply_retpolines(__retpoline_sites, __retpoline_sites_end);
	apply_returns(__return_sites, __return_sites_end);
1620 | |
1621 | /* |
1622 | * Then patch alternatives, such that those paravirt calls that are in |
1623 | * alternatives can be overwritten by their immediate fragments. |
1624 | */ |
	apply_alternatives(__alt_instructions, __alt_instructions_end);
1626 | |
1627 | /* |
1628 | * Now all calls are established. Apply the call thunks if |
1629 | * required. |
1630 | */ |
1631 | callthunks_patch_builtin_calls(); |
1632 | |
1633 | /* |
1634 | * Seal all functions that do not have their address taken. |
1635 | */ |
	apply_seal_endbr(__ibt_endbr_seal, __ibt_endbr_seal_end);
1637 | |
1638 | #ifdef CONFIG_SMP |
1639 | /* Patch to UP if other cpus not imminent. */ |
1640 | if (!noreplace_smp && (num_present_cpus() == 1 || setup_max_cpus <= 1)) { |
1641 | uniproc_patched = true; |
		alternatives_smp_module_add(NULL, "core kernel",
					    __smp_locks, __smp_locks_end,
					    _text, _etext);
1645 | } |
1646 | |
1647 | if (!uniproc_patched || num_possible_cpus() == 1) { |
		free_init_pages("SMP alternatives",
				(unsigned long)__smp_locks,
				(unsigned long)__smp_locks_end);
1651 | } |
1652 | #endif |
1653 | |
1654 | restart_nmi(); |
1655 | alternatives_patched = 1; |
1656 | |
1657 | alt_reloc_selftest(); |
1658 | } |
1659 | |
1660 | /** |
1661 | * text_poke_early - Update instructions on a live kernel at boot time |
1662 | * @addr: address to modify |
1663 | * @opcode: source of the copy |
1664 | * @len: length to copy |
1665 | * |
1666 | * When you use this code to patch more than one byte of an instruction |
1667 | * you need to make sure that other CPUs cannot execute this code in parallel. |
1668 | * Also no thread must be currently preempted in the middle of these |
1669 | * instructions. And on the local CPU you need to be protected against NMI or |
1670 | * MCE handlers seeing an inconsistent instruction while you patch. |
1671 | */ |
1672 | void __init_or_module text_poke_early(void *addr, const void *opcode, |
1673 | size_t len) |
1674 | { |
1675 | unsigned long flags; |
1676 | |
1677 | if (boot_cpu_has(X86_FEATURE_NX) && |
	    is_module_text_address((unsigned long)addr)) {
1679 | /* |
		 * Module text is initially marked as non-executable, so the
1681 | * code cannot be running and speculative code-fetches are |
1682 | * prevented. Just change the code. |
1683 | */ |
1684 | memcpy(addr, opcode, len); |
1685 | } else { |
1686 | local_irq_save(flags); |
1687 | memcpy(addr, opcode, len); |
1688 | local_irq_restore(flags); |
1689 | sync_core(); |
1690 | |
1691 | /* |
1692 | * Could also do a CLFLUSH here to speed up CPU recovery; but |
1693 | * that causes hangs on some VIA CPUs. |
1694 | */ |
1695 | } |
1696 | } |
1697 | |
1698 | typedef struct { |
1699 | struct mm_struct *mm; |
1700 | } temp_mm_state_t; |
1701 | |
1702 | /* |
 * Using a temporary mm allows setting temporary mappings that are not accessible
1704 | * by other CPUs. Such mappings are needed to perform sensitive memory writes |
1705 | * that override the kernel memory protections (e.g., W^X), without exposing the |
1706 | * temporary page-table mappings that are required for these write operations to |
 * other CPUs. Using a temporary mm also avoids TLB shootdowns when the
1708 | * mapping is torn down. |
1709 | * |
1710 | * Context: The temporary mm needs to be used exclusively by a single core. To |
1711 | * harden security IRQs must be disabled while the temporary mm is |
1712 | * loaded, thereby preventing interrupt handler bugs from overriding |
1713 | * the kernel memory protection. |
1714 | */ |
1715 | static inline temp_mm_state_t use_temporary_mm(struct mm_struct *mm) |
1716 | { |
1717 | temp_mm_state_t temp_state; |
1718 | |
1719 | lockdep_assert_irqs_disabled(); |
1720 | |
1721 | /* |
1722 | * Make sure not to be in TLB lazy mode, as otherwise we'll end up |
1723 | * with a stale address space WITHOUT being in lazy mode after |
1724 | * restoring the previous mm. |
1725 | */ |
1726 | if (this_cpu_read(cpu_tlbstate_shared.is_lazy)) |
1727 | leave_mm(smp_processor_id()); |
1728 | |
1729 | temp_state.mm = this_cpu_read(cpu_tlbstate.loaded_mm); |
	switch_mm_irqs_off(NULL, mm, current);
1731 | |
1732 | /* |
1733 | * If breakpoints are enabled, disable them while the temporary mm is |
1734 | * used. Userspace might set up watchpoints on addresses that are used |
1735 | * in the temporary mm, which would lead to wrong signals being sent or |
1736 | * crashes. |
1737 | * |
1738 | * Note that breakpoints are not disabled selectively, which also causes |
1739 | * kernel breakpoints (e.g., perf's) to be disabled. This might be |
1740 | * undesirable, but still seems reasonable as the code that runs in the |
1741 | * temporary mm should be short. |
1742 | */ |
1743 | if (hw_breakpoint_active()) |
1744 | hw_breakpoint_disable(); |
1745 | |
1746 | return temp_state; |
1747 | } |
1748 | |
1749 | static inline void unuse_temporary_mm(temp_mm_state_t prev_state) |
1750 | { |
1751 | lockdep_assert_irqs_disabled(); |
	switch_mm_irqs_off(NULL, prev_state.mm, current);
1753 | |
1754 | /* |
1755 | * Restore the breakpoints if they were disabled before the temporary mm |
1756 | * was loaded. |
1757 | */ |
1758 | if (hw_breakpoint_active()) |
1759 | hw_breakpoint_restore(); |
1760 | } |
1761 | |
1762 | __ro_after_init struct mm_struct *poking_mm; |
1763 | __ro_after_init unsigned long poking_addr; |
1764 | |
1765 | static void text_poke_memcpy(void *dst, const void *src, size_t len) |
1766 | { |
1767 | memcpy(dst, src, len); |
1768 | } |
1769 | |
1770 | static void text_poke_memset(void *dst, const void *src, size_t len) |
1771 | { |
1772 | int c = *(const int *)src; |
1773 | |
1774 | memset(dst, c, len); |
1775 | } |
1776 | |
1777 | typedef void text_poke_f(void *dst, const void *src, size_t len); |
1778 | |
1779 | static void *__text_poke(text_poke_f func, void *addr, const void *src, size_t len) |
1780 | { |
1781 | bool cross_page_boundary = offset_in_page(addr) + len > PAGE_SIZE; |
1782 | struct page *pages[2] = {NULL}; |
1783 | temp_mm_state_t prev; |
1784 | unsigned long flags; |
1785 | pte_t pte, *ptep; |
1786 | spinlock_t *ptl; |
1787 | pgprot_t pgprot; |
1788 | |
1789 | /* |
1790 | * While boot memory allocator is running we cannot use struct pages as |
1791 | * they are not yet initialized. There is no way to recover. |
1792 | */ |
1793 | BUG_ON(!after_bootmem); |
1794 | |
	if (!core_kernel_text((unsigned long)addr)) {
1796 | pages[0] = vmalloc_to_page(addr); |
1797 | if (cross_page_boundary) |
			pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
1799 | } else { |
1800 | pages[0] = virt_to_page(addr); |
1801 | WARN_ON(!PageReserved(pages[0])); |
1802 | if (cross_page_boundary) |
1803 | pages[1] = virt_to_page(addr + PAGE_SIZE); |
1804 | } |
1805 | /* |
1806 | * If something went wrong, crash and burn since recovery paths are not |
1807 | * implemented. |
1808 | */ |
1809 | BUG_ON(!pages[0] || (cross_page_boundary && !pages[1])); |
1810 | |
1811 | /* |
1812 | * Map the page without the global bit, as TLB flushing is done with |
1813 | * flush_tlb_mm_range(), which is intended for non-global PTEs. |
1814 | */ |
1815 | pgprot = __pgprot(pgprot_val(PAGE_KERNEL) & ~_PAGE_GLOBAL); |
1816 | |
1817 | /* |
1818 | * The lock is not really needed, but taking it avoids open-coding the lookup. |
1819 | */ |
1820 | ptep = get_locked_pte(poking_mm, poking_addr, &ptl); |
1821 | |
1822 | /* |
1823 | * This must not fail; preallocated in poking_init(). |
1824 | */ |
1825 | VM_BUG_ON(!ptep); |
1826 | |
1827 | local_irq_save(flags); |
1828 | |
1829 | pte = mk_pte(pages[0], pgprot); |
1830 | set_pte_at(poking_mm, poking_addr, ptep, pte); |
1831 | |
1832 | if (cross_page_boundary) { |
1833 | pte = mk_pte(pages[1], pgprot); |
1834 | set_pte_at(poking_mm, poking_addr + PAGE_SIZE, ptep + 1, pte); |
1835 | } |
1836 | |
1837 | /* |
1838 | * Loading the temporary mm behaves as a compiler barrier, which |
1839 | * guarantees that the PTE will be set at the time memcpy() is done. |
1840 | */ |
1841 | prev = use_temporary_mm(poking_mm); |
1842 | |
1843 | kasan_disable_current(); |
1844 | func((u8 *)poking_addr + offset_in_page(addr), src, len); |
1845 | kasan_enable_current(); |
1846 | |
1847 | /* |
1848 | * Ensure that the PTE is only cleared after the instructions of memcpy |
1849 | * were issued by using a compiler barrier. |
1850 | */ |
1851 | barrier(); |
1852 | |
1853 | pte_clear(poking_mm, poking_addr, ptep); |
1854 | if (cross_page_boundary) |
1855 | pte_clear(poking_mm, poking_addr + PAGE_SIZE, ptep + 1); |
1856 | |
1857 | /* |
1858 | * Loading the previous page-table hierarchy requires a serializing |
1859 | * instruction that already allows the core to see the updated version. |
1860 | * Xen-PV is assumed to serialize execution in a similar manner. |
1861 | */ |
1862 | unuse_temporary_mm(prev); |
1863 | |
1864 | /* |
1865 | * Flushing the TLB might involve IPIs, which would require enabled |
1866 | * IRQs. No IPIs are needed here, though, since the mm is no longer in use at this point. |
1867 | */ |
1868 | flush_tlb_mm_range(poking_mm, poking_addr, poking_addr + |
1869 | (cross_page_boundary ? 2 : 1) * PAGE_SIZE, |
1870 | PAGE_SHIFT, false); |
1871 | |
1872 | if (func == text_poke_memcpy) { |
1873 | /* |
1874 | * If the text does not match what we just wrote then something is |
1875 | * fundamentally screwy; there's nothing we can really do about that. |
1876 | */ |
1877 | BUG_ON(memcmp(addr, src, len)); |
1878 | } |
1879 | |
1880 | local_irq_restore(flags); |
1881 | pte_unmap_unlock(ptep, ptl); |
1882 | return addr; |
1883 | } |
1884 | |
1885 | /** |
1886 | * text_poke - Update instructions on a live kernel |
1887 | * @addr: address to modify |
1888 | * @opcode: source of the copy |
1889 | * @len: length to copy |
1890 | * |
1891 | * Only atomic text poke/set should be allowed when not doing early patching. |
1892 | * It means the size must be writable atomically and the address must be aligned |
1893 | * in a way that permits an atomic write. It also makes sure we fit on a single |
1894 | * page. |
1895 | * |
1896 | * Note that the caller must ensure that if the modified code is part of a |
1897 | * module, the module would not be removed during poking. This can be achieved |
1898 | * by registering a module notifier, and ordering module removal and patching |
1899 | * through a mutex. |
1900 | */ |
1901 | void *text_poke(void *addr, const void *opcode, size_t len) |
1902 | { |
1903 | lockdep_assert_held(&text_mutex); |
1904 | |
1905 | return __text_poke(text_poke_memcpy, addr, opcode, len); |
1906 | } |
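
/*
 * Illustrative sketch (not part of the upstream file): a minimal text_poke()
 * call. The patch site is hypothetical; the caller must hold text_mutex.
 */
static void __maybe_unused text_poke_usage_sketch(void *addr)
{
	mutex_lock(&text_mutex);
	text_poke(addr, x86_nops[5], 5);	/* overwrite 5 bytes with a NOP5 */
	mutex_unlock(&text_mutex);
}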
1907 | |
1908 | /** |
1909 | * text_poke_kgdb - Update instructions on a live kernel by kgdb |
1910 | * @addr: address to modify |
1911 | * @opcode: source of the copy |
1912 | * @len: length to copy |
1913 | * |
1914 | * Only atomic text poke/set should be allowed when not doing early patching. |
1915 | * It means the size must be writable atomically and the address must be aligned |
1916 | * in a way that permits an atomic write. It also makes sure we fit on a single |
1917 | * page. |
1918 | * |
1919 | * Context: should only be used by kgdb, which ensures no other core is running, |
1920 | * despite the fact it does not hold the text_mutex. |
1921 | */ |
1922 | void *text_poke_kgdb(void *addr, const void *opcode, size_t len) |
1923 | { |
1924 | return __text_poke(text_poke_memcpy, addr, opcode, len); |
1925 | } |
1926 | |
1927 | void *text_poke_copy_locked(void *addr, const void *opcode, size_t len, |
1928 | bool core_ok) |
1929 | { |
1930 | unsigned long start = (unsigned long)addr; |
1931 | size_t patched = 0; |
1932 | |
1933 | if (WARN_ON_ONCE(!core_ok && core_kernel_text(start))) |
1934 | return NULL; |
1935 | |
1936 | while (patched < len) { |
1937 | unsigned long ptr = start + patched; |
1938 | size_t s; |
1939 | |
1940 | s = min_t(size_t, PAGE_SIZE * 2 - offset_in_page(ptr), len - patched); |
1941 | |
1942 | __text_poke(text_poke_memcpy, (void *)ptr, opcode + patched, s); |
1943 | patched += s; |
1944 | } |
1945 | return addr; |
1946 | } |
1947 | |
1948 | /** |
1949 | * text_poke_copy - Copy instructions into (an unused part of) RX memory |
1950 | * @addr: address to modify |
1951 | * @opcode: source of the copy |
1952 | * @len: length to copy, could be more than 2x PAGE_SIZE |
1953 | * |
1954 | * Not safe against concurrent execution; useful for JITs to dump |
1955 | * new code blocks into unused regions of RX memory. Can be used in |
1956 | * conjunction with synchronize_rcu_tasks() to wait for existing |
1957 | * execution to quiesce after having made sure no existing functions |
1958 | * pointers are live. |
1959 | */ |
1960 | void *text_poke_copy(void *addr, const void *opcode, size_t len) |
1961 | { |
1962 | mutex_lock(&text_mutex); |
1963 | addr = text_poke_copy_locked(addr, opcode, len, false); |
1964 | mutex_unlock(&text_mutex); |
1965 | return addr; |
1966 | } |
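
/*
 * Illustrative sketch (not part of the upstream file): a JIT-style caller
 * dumping freshly generated instructions into an unused RX region. The
 * destination and buffer are hypothetical; the destination must not be core
 * kernel text and must not be executed concurrently with the copy.
 */
static void __maybe_unused text_poke_copy_usage_sketch(void *rx_dst,
							const u8 *insns, size_t size)
{
	if (!text_poke_copy(rx_dst, insns, size))
		pr_warn("text_poke_copy refused core kernel text\n");
}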
1967 | |
1968 | /** |
1969 | * text_poke_set - memset into (an unused part of) RX memory |
1970 | * @addr: address to modify |
1971 | * @c: the byte to fill the area with |
1972 | * @len: length to copy, could be more than 2x PAGE_SIZE |
1973 | * |
1974 | * This is useful to overwrite unused regions of RX memory with illegal |
1975 | * instructions. |
1976 | */ |
1977 | void *text_poke_set(void *addr, int c, size_t len) |
1978 | { |
1979 | unsigned long start = (unsigned long)addr; |
1980 | size_t patched = 0; |
1981 | |
1982 | if (WARN_ON_ONCE(core_kernel_text(start))) |
1983 | return NULL; |
1984 | |
1985 | mutex_lock(&text_mutex); |
1986 | while (patched < len) { |
1987 | unsigned long ptr = start + patched; |
1988 | size_t s; |
1989 | |
1990 | s = min_t(size_t, PAGE_SIZE * 2 - offset_in_page(ptr), len - patched); |
1991 | |
1992 | __text_poke(text_poke_memset, (void *)ptr, (void *)&c, s); |
1993 | patched += s; |
1994 | } |
1995 | mutex_unlock(&text_mutex); |
1996 | return addr; |
1997 | } |
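
/*
 * Illustrative sketch (not part of the upstream file): poisoning a retired
 * RX region with INT3 so any stale reference traps instead of running
 * leftover instructions. @rx_dst is hypothetical and must not be core
 * kernel text.
 */
static void __maybe_unused text_poke_set_usage_sketch(void *rx_dst, size_t size)
{
	text_poke_set(rx_dst, INT3_INSN_OPCODE, size);
}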
1998 | |
1999 | static void do_sync_core(void *info) |
2000 | { |
2001 | sync_core(); |
2002 | } |
2003 | |
2004 | void text_poke_sync(void) |
2005 | { |
2006 | on_each_cpu(do_sync_core, NULL, 1); |
2007 | } |
2008 | |
2009 | /* |
2010 | * NOTE: crazy scheme to allow patching Jcc.d32 but not increase the size of |
2011 | * this thing. When len == 6 everything is prefixed with 0x0f and we map |
2012 | * opcode to Jcc.d8, using len to distinguish. |
2013 | */ |
2014 | struct text_poke_loc { |
2015 | /* addr := _stext + rel_addr */ |
2016 | s32 rel_addr; |
2017 | s32 disp; |
2018 | u8 len; |
2019 | u8 opcode; |
2020 | const u8 text[POKE_MAX_OPCODE_SIZE]; |
2021 | /* see text_poke_bp_batch() */ |
2022 | u8 old; |
2023 | }; |
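
/*
 * Worked example of the len == 6 convention (illustrative): "JE .+disp32" is
 * encoded as 0f 84 <disp32>. Only the five bytes after the 0x0f prefix are
 * stored in ->text, and ->opcode is remapped to the short form 0x74 (JE.d8);
 * len == 6 is what tells poke_int3_handler() and text_poke_bp_batch() to
 * account for the 0x0f prefix again.
 */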
2024 | |
2025 | struct bp_patching_desc { |
2026 | struct text_poke_loc *vec; |
2027 | int nr_entries; |
2028 | atomic_t refs; |
2029 | }; |
2030 | |
2031 | static struct bp_patching_desc bp_desc; |
2032 | |
2033 | static __always_inline |
2034 | struct bp_patching_desc *try_get_desc(void) |
2035 | { |
2036 | struct bp_patching_desc *desc = &bp_desc; |
2037 | |
2038 | if (!raw_atomic_inc_not_zero(&desc->refs)) |
2039 | return NULL; |
2040 | |
2041 | return desc; |
2042 | } |
2043 | |
2044 | static __always_inline void put_desc(void) |
2045 | { |
2046 | struct bp_patching_desc *desc = &bp_desc; |
2047 | |
2048 | smp_mb__before_atomic(); |
2049 | raw_atomic_dec(&desc->refs); |
2050 | } |
2051 | |
2052 | static __always_inline void *text_poke_addr(struct text_poke_loc *tp) |
2053 | { |
2054 | return _stext + tp->rel_addr; |
2055 | } |
2056 | |
2057 | static __always_inline int patch_cmp(const void *key, const void *elt) |
2058 | { |
2059 | struct text_poke_loc *tp = (struct text_poke_loc *) elt; |
2060 | |
2061 | if (key < text_poke_addr(tp)) |
2062 | return -1; |
2063 | if (key > text_poke_addr(tp)) |
2064 | return 1; |
2065 | return 0; |
2066 | } |
2067 | |
2068 | noinstr int poke_int3_handler(struct pt_regs *regs) |
2069 | { |
2070 | struct bp_patching_desc *desc; |
2071 | struct text_poke_loc *tp; |
2072 | int ret = 0; |
2073 | void *ip; |
2074 | |
2075 | if (user_mode(regs)) |
2076 | return 0; |
2077 | |
2078 | /* |
2079 | * Having observed our INT3 instruction, we now must observe |
2080 | * bp_desc with non-zero refcount: |
2081 | * |
2082 | * bp_desc.refs = 1 INT3 |
2083 | * WMB RMB |
2084 | * write INT3 if (bp_desc.refs != 0) |
2085 | */ |
2086 | smp_rmb(); |
2087 | |
2088 | desc = try_get_desc(); |
2089 | if (!desc) |
2090 | return 0; |
2091 | |
2092 | /* |
2093 | * Discount the INT3. See text_poke_bp_batch(). |
2094 | */ |
2095 | ip = (void *) regs->ip - INT3_INSN_SIZE; |
2096 | |
2097 | /* |
2098 | * Skip the binary search if there is a single member in the vector. |
2099 | */ |
2100 | if (unlikely(desc->nr_entries > 1)) { |
2101 | tp = __inline_bsearch(ip, desc->vec, desc->nr_entries, |
2102 | sizeof(struct text_poke_loc), |
2103 | patch_cmp); |
2104 | if (!tp) |
2105 | goto out_put; |
2106 | } else { |
2107 | tp = desc->vec; |
2108 | if (text_poke_addr(tp) != ip) |
2109 | goto out_put; |
2110 | } |
2111 | |
2112 | ip += tp->len; |
2113 | |
2114 | switch (tp->opcode) { |
2115 | case INT3_INSN_OPCODE: |
2116 | /* |
2117 | * Someone poked an explicit INT3, they'll want to handle it, |
2118 | * do not consume. |
2119 | */ |
2120 | goto out_put; |
2121 | |
2122 | case RET_INSN_OPCODE: |
2123 | int3_emulate_ret(regs); |
2124 | break; |
2125 | |
2126 | case CALL_INSN_OPCODE: |
2127 | int3_emulate_call(regs, (long)ip + tp->disp); |
2128 | break; |
2129 | |
2130 | case JMP32_INSN_OPCODE: |
2131 | case JMP8_INSN_OPCODE: |
2132 | int3_emulate_jmp(regs, (long)ip + tp->disp); |
2133 | break; |
2134 | |
2135 | case 0x70 ... 0x7f: /* Jcc */ |
2136 | int3_emulate_jcc(regs, tp->opcode & 0xf, (long)ip, tp->disp); |
2137 | break; |
2138 | |
2139 | default: |
2140 | BUG(); |
2141 | } |
2142 | |
2143 | ret = 1; |
2144 | |
2145 | out_put: |
2146 | put_desc(); |
2147 | return ret; |
2148 | } |
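
/*
 * Worked example (illustrative): suppose a 5-byte CALL is being installed at
 * @addr. A CPU that hits the temporary INT3 enters with regs->ip == addr +
 * INT3_INSN_SIZE; the handler above recovers @addr, advances ip by tp->len to
 * the following instruction, and emulates "CALL addr + 5 + tp->disp", which
 * is exactly what the finished instruction will do once patching completes.
 */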
2149 | |
2150 | #define TP_VEC_MAX (PAGE_SIZE / sizeof(struct text_poke_loc)) |
2151 | static struct text_poke_loc tp_vec[TP_VEC_MAX]; |
2152 | static int tp_vec_nr; |
2153 | |
2154 | /** |
2155 | * text_poke_bp_batch() -- update instructions on live kernel on SMP |
2156 | * @tp: vector of instructions to patch |
2157 | * @nr_entries: number of entries in the vector |
2158 | * |
2159 | * Modify multi-byte instructions by using an int3 breakpoint on SMP. |
2160 | * We completely avoid stop_machine() here, and achieve the |
2161 | * synchronization using int3 breakpoint. |
2162 | * |
2163 | * The way it is done: |
2164 | * - For each entry in the vector: |
2165 | *   - add an int3 trap to the address that will be patched |
2166 | * - sync cores |
2167 | * - For each entry in the vector: |
2168 | * - update all but the first byte of the patched range |
2169 | * - sync cores |
2170 | * - For each entry in the vector: |
2171 | * - replace the first byte (int3) by the first byte of |
2172 | * replacing opcode |
2173 | * - sync cores |
2174 | */ |
2175 | static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries) |
2176 | { |
2177 | unsigned char int3 = INT3_INSN_OPCODE; |
2178 | unsigned int i; |
2179 | int do_sync; |
2180 | |
2181 | lockdep_assert_held(&text_mutex); |
2182 | |
2183 | bp_desc.vec = tp; |
2184 | bp_desc.nr_entries = nr_entries; |
2185 | |
2186 | /* |
2187 | * Corresponds to the implicit memory barrier in try_get_desc() to |
2188 | * ensure reading a non-zero refcount provides up to date bp_desc data. |
2189 | */ |
2190 | atomic_set_release(&bp_desc.refs, 1); |
2191 | |
2192 | /* |
2193 | * Function tracing can enable thousands of places that need to be |
2194 | * updated. This can take quite some time, and with full kernel debugging |
2195 | * enabled, this could cause the softlockup watchdog to trigger. |
2196 | * This function gets called once for every 256 entries queued to be patched. |
2197 | * Call cond_resched() here to make sure that other tasks can get scheduled |
2198 | * while processing all the functions being patched. |
2199 | */ |
2200 | cond_resched(); |
2201 | |
2202 | /* |
2203 | * Corresponding read barrier in int3 notifier for making sure the |
2204 | * nr_entries and handler are correctly ordered wrt. patching. |
2205 | */ |
2206 | smp_wmb(); |
2207 | |
2208 | /* |
2209 | * First step: add an int3 trap to the address that will be patched. |
2210 | */ |
2211 | for (i = 0; i < nr_entries; i++) { |
2212 | tp[i].old = *(u8 *)text_poke_addr(&tp[i]); |
2213 | text_poke(text_poke_addr(&tp[i]), &int3, INT3_INSN_SIZE); |
2214 | } |
2215 | |
2216 | text_poke_sync(); |
2217 | |
2218 | /* |
2219 | * Second step: update all but the first byte of the patched range. |
2220 | */ |
2221 | for (do_sync = 0, i = 0; i < nr_entries; i++) { |
2222 | u8 old[POKE_MAX_OPCODE_SIZE+1] = { tp[i].old, }; |
2223 | u8 _new[POKE_MAX_OPCODE_SIZE+1]; |
2224 | const u8 *new = tp[i].text; |
2225 | int len = tp[i].len; |
2226 | |
2227 | if (len - INT3_INSN_SIZE > 0) { |
2228 | memcpy(old + INT3_INSN_SIZE, |
2229 | text_poke_addr(&tp[i]) + INT3_INSN_SIZE, |
2230 | len - INT3_INSN_SIZE); |
2231 | |
2232 | if (len == 6) { |
2233 | _new[0] = 0x0f; |
2234 | memcpy(_new + 1, new, 5); |
2235 | new = _new; |
2236 | } |
2237 | |
2238 | text_poke(text_poke_addr(&tp[i]) + INT3_INSN_SIZE, |
2239 | new + INT3_INSN_SIZE, |
2240 | len - INT3_INSN_SIZE); |
2241 | |
2242 | do_sync++; |
2243 | } |
2244 | |
2245 | /* |
2246 | * Emit a perf event to record the text poke, primarily to |
2247 | * support Intel PT decoding which must walk the executable code |
2248 | * to reconstruct the trace. The flow up to here is: |
2249 | * - write INT3 byte |
2250 | * - IPI-SYNC |
2251 | * - write instruction tail |
2252 | * At this point the actual control flow will be through the |
2253 | * INT3 and handler and not hit the old or new instruction. |
2254 | * Intel PT outputs FUP/TIP packets for the INT3, so the flow |
2255 | * can still be decoded. Subsequently: |
2256 | * - emit RECORD_TEXT_POKE with the new instruction |
2257 | * - IPI-SYNC |
2258 | * - write first byte |
2259 | * - IPI-SYNC |
2260 | * So before the text poke event timestamp, the decoder will see |
2261 | * either the old instruction flow or FUP/TIP of INT3. After the |
2262 | * text poke event timestamp, the decoder will see either the |
2263 | * new instruction flow or FUP/TIP of INT3. Thus decoders can |
2264 | * use the timestamp as the point at which to modify the |
2265 | * executable code. |
2266 | * The old instruction is recorded so that the event can be |
2267 | * processed forwards or backwards. |
2268 | */ |
2269 | perf_event_text_poke(text_poke_addr(&tp[i]), old, len, new, len); |
2270 | } |
2271 | |
2272 | if (do_sync) { |
2273 | /* |
2274 | * According to Intel, this core syncing is very likely |
2275 | * not necessary and we'd be safe even without it. But |
2276 | * better safe than sorry (plus there's not only Intel). |
2277 | */ |
2278 | text_poke_sync(); |
2279 | } |
2280 | |
2281 | /* |
2282 | * Third step: replace the first byte (int3) by the first byte of |
2283 | * replacing opcode. |
2284 | */ |
2285 | for (do_sync = 0, i = 0; i < nr_entries; i++) { |
2286 | u8 byte = tp[i].text[0]; |
2287 | |
2288 | if (tp[i].len == 6) |
2289 | byte = 0x0f; |
2290 | |
2291 | if (byte == INT3_INSN_OPCODE) |
2292 | continue; |
2293 | |
2294 | text_poke(text_poke_addr(&tp[i]), &byte, INT3_INSN_SIZE); |
2295 | do_sync++; |
2296 | } |
2297 | |
2298 | if (do_sync) |
2299 | text_poke_sync(); |
2300 | |
2301 | /* |
2302 | * Remove and wait for refs to be zero. |
2303 | */ |
2304 | if (!atomic_dec_and_test(&bp_desc.refs)) |
2305 | atomic_cond_read_acquire(&bp_desc.refs, !VAL); |
2306 | } |
2307 | |
2308 | static void text_poke_loc_init(struct text_poke_loc *tp, void *addr, |
2309 | const void *opcode, size_t len, const void *emulate) |
2310 | { |
2311 | struct insn insn; |
2312 | int ret, i = 0; |
2313 | |
2314 | if (len == 6) |
2315 | i = 1; |
2316 | memcpy((void *)tp->text, opcode+i, len-i); |
2317 | if (!emulate) |
2318 | emulate = opcode; |
2319 | |
2320 | ret = insn_decode_kernel(&insn, emulate); |
2321 | BUG_ON(ret < 0); |
2322 | |
2323 | tp->rel_addr = addr - (void *)_stext; |
2324 | tp->len = len; |
2325 | tp->opcode = insn.opcode.bytes[0]; |
2326 | |
2327 | if (is_jcc32(&insn)) { |
2328 | /* |
2329 | * Map Jcc.d32 onto Jcc.d8 and use len to distinguish. |
2330 | */ |
2331 | tp->opcode = insn.opcode.bytes[1] - 0x10; |
2332 | } |
2333 | |
2334 | switch (tp->opcode) { |
2335 | case RET_INSN_OPCODE: |
2336 | case JMP32_INSN_OPCODE: |
2337 | case JMP8_INSN_OPCODE: |
2338 | /* |
2339 | * Control flow instructions without implied execution of the |
2340 | * next instruction can be padded with INT3. |
2341 | */ |
2342 | for (i = insn.length; i < len; i++) |
2343 | BUG_ON(tp->text[i] != INT3_INSN_OPCODE); |
2344 | break; |
2345 | |
2346 | default: |
2347 | BUG_ON(len != insn.length); |
2348 | } |
2349 | |
2350 | switch (tp->opcode) { |
2351 | case INT3_INSN_OPCODE: |
2352 | case RET_INSN_OPCODE: |
2353 | break; |
2354 | |
2355 | case CALL_INSN_OPCODE: |
2356 | case JMP32_INSN_OPCODE: |
2357 | case JMP8_INSN_OPCODE: |
2358 | case 0x70 ... 0x7f: /* Jcc */ |
2359 | tp->disp = insn.immediate.value; |
2360 | break; |
2361 | |
2362 | default: /* assume NOP */ |
2363 | switch (len) { |
2364 | case 2: /* NOP2 -- emulate as JMP8+0 */ |
2365 | BUG_ON(memcmp(emulate, x86_nops[len], len)); |
2366 | tp->opcode = JMP8_INSN_OPCODE; |
2367 | tp->disp = 0; |
2368 | break; |
2369 | |
2370 | case 5: /* NOP5 -- emulate as JMP32+0 */ |
2371 | BUG_ON(memcmp(emulate, x86_nops[len], len)); |
2372 | tp->opcode = JMP32_INSN_OPCODE; |
2373 | tp->disp = 0; |
2374 | break; |
2375 | |
2376 | default: /* unknown instruction */ |
2377 | BUG(); |
2378 | } |
2379 | break; |
2380 | } |
2381 | } |
2382 | |
2383 | /* |
2384 | * We hard rely on the tp_vec being ordered; ensure this is so by flushing |
2385 | * early if needed. |
2386 | */ |
2387 | static bool tp_order_fail(void *addr) |
2388 | { |
2389 | struct text_poke_loc *tp; |
2390 | |
2391 | if (!tp_vec_nr) |
2392 | return false; |
2393 | |
2394 | if (!addr) /* force */ |
2395 | return true; |
2396 | |
2397 | tp = &tp_vec[tp_vec_nr - 1]; |
2398 | if ((unsigned long)text_poke_addr(tp) > (unsigned long)addr) |
2399 | return true; |
2400 | |
2401 | return false; |
2402 | } |
2403 | |
2404 | static void text_poke_flush(void *addr) |
2405 | { |
2406 | if (tp_vec_nr == TP_VEC_MAX || tp_order_fail(addr)) { |
2407 | text_poke_bp_batch(tp_vec, tp_vec_nr); |
2408 | tp_vec_nr = 0; |
2409 | } |
2410 | } |
2411 | |
2412 | void text_poke_finish(void) |
2413 | { |
2414 | text_poke_flush(NULL); |
2415 | } |
2416 | |
2417 | void __ref text_poke_queue(void *addr, const void *opcode, size_t len, const void *emulate) |
2418 | { |
2419 | struct text_poke_loc *tp; |
2420 | |
2421 | text_poke_flush(addr); |
2422 | |
2423 | tp = &tp_vec[tp_vec_nr++]; |
2424 | text_poke_loc_init(tp, addr, opcode, len, emulate); |
2425 | } |
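
/*
 * Illustrative sketch (not part of the upstream file): batching two patch
 * sites and flushing them with a single int3 round trip. The sites are
 * hypothetical 5-byte NOPs; callers must hold text_mutex, as the flush ends
 * up in text_poke_bp_batch().
 */
static void __maybe_unused text_poke_queue_usage_sketch(void *site1, void *site2)
{
	lockdep_assert_held(&text_mutex);

	text_poke_queue(site1, x86_nops[5], 5, NULL);
	text_poke_queue(site2, x86_nops[5], 5, NULL);
	text_poke_finish();
}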
2426 | |
2427 | /** |
2428 | * text_poke_bp() -- update instructions on live kernel on SMP |
2429 | * @addr: address to patch |
2430 | * @opcode: opcode of new instruction |
2431 | * @len: length to copy |
2432 | * @emulate: instruction to be emulated |
2433 | * |
2434 | * Update a single instruction with the vector on the stack, avoiding |
2435 | * dynamically allocated memory. This function should be used when it is |
2436 | * not possible to allocate memory. |
2437 | */ |
2438 | void __ref text_poke_bp(void *addr, const void *opcode, size_t len, const void *emulate) |
2439 | { |
2440 | struct text_poke_loc tp; |
2441 | |
2442 | text_poke_loc_init(&tp, addr, opcode, len, emulate); |
2443 | text_poke_bp_batch(&tp, 1); |
2444 | } |
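
/*
 * Illustrative sketch (not part of the upstream file): turning a 5-byte NOP
 * at a hypothetical @site into a direct CALL to @target. The displacement is
 * relative to the end of the CALL instruction; text_mutex must be held, as
 * text_poke_bp_batch() asserts it.
 */
static void __maybe_unused text_poke_bp_usage_sketch(void *site, void *target)
{
	u8 insn[CALL_INSN_SIZE];
	s32 disp = (long)target - ((long)site + CALL_INSN_SIZE);

	insn[0] = CALL_INSN_OPCODE;
	memcpy(insn + 1, &disp, sizeof(disp));

	text_poke_bp(site, insn, CALL_INSN_SIZE, NULL);
}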
2445 | |