/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 *
 * Pentium III FXSR, SSE support
 * Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * Handle hardware traps and faults.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/context_tracking.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/kmsan.h>
#include <linux/spinlock.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/kgdb.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/uprobes.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kexec.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/bug.h>
#include <linux/nmi.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/io.h>
#include <linux/hardirq.h>
#include <linux/atomic.h>
#include <linux/iommu.h>

#include <asm/stacktrace.h>
#include <asm/processor.h>
#include <asm/debugreg.h>
#include <asm/realmode.h>
#include <asm/text-patching.h>
#include <asm/ftrace.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/fpu/api.h>
#include <asm/cpu.h>
#include <asm/cpu_entry_area.h>
#include <asm/mce.h>
#include <asm/fixmap.h>
#include <asm/mach_traps.h>
#include <asm/alternative.h>
#include <asm/fpu/xstate.h>
#include <asm/vm86.h>
#include <asm/umip.h>
#include <asm/insn.h>
#include <asm/insn-eval.h>
#include <asm/vdso.h>
#include <asm/tdx.h>
#include <asm/cfi.h>

#ifdef CONFIG_X86_64
#include <asm/x86_init.h>
#else
#include <asm/processor-flags.h>
#include <asm/setup.h>
#endif

#include <asm/proto.h>

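/*
 * Bitmap of IDT vectors claimed as system vectors (exceptions and
 * kernel-managed interrupts), and hence unavailable for device IRQs.
 */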
DECLARE_BITMAP(system_vectors, NR_VECTORS);

__always_inline int is_valid_bugaddr(unsigned long addr)
{
	if (addr < TASK_SIZE_MAX)
		return 0;

	/*
	 * We got #UD; if the text weren't readable we'd have gotten
	 * a different exception.
	 */
	return *(unsigned short *)addr == INSN_UD2;
}

static nokprobe_inline int
do_trap_no_signal(struct task_struct *tsk, int trapnr, const char *str,
		  struct pt_regs *regs, long error_code)
{
	if (v8086_mode(regs)) {
		/*
		 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
		 * On nmi (interrupt 2), do_trap should not be called.
		 */
		if (trapnr < X86_TRAP_UD) {
			if (!handle_vm86_trap((struct kernel_vm86_regs *) regs,
					      error_code, trapnr))
				return 0;
		}
	} else if (!user_mode(regs)) {
		if (fixup_exception(regs, trapnr, error_code, 0))
			return 0;

		tsk->thread.error_code = error_code;
		tsk->thread.trap_nr = trapnr;
		die(str, regs, error_code);
	} else {
		if (fixup_vdso_exception(regs, trapnr, error_code, 0))
			return 0;
	}

	/*
	 * We want error_code and trap_nr set for userspace faults and
	 * kernelspace faults which result in die(), but not
	 * kernelspace faults which are fixed up. die() gives the
	 * process no chance to handle the signal and notice the
	 * kernel fault information, so that won't result in polluting
	 * the information about previously queued, but not yet
	 * delivered, faults. See also exc_general_protection below.
	 */
	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = trapnr;

	return -1;
}

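/*
 * Rate-limited diagnostic for signals the task has no handler for, so
 * unexpected user-space faults leave a breadcrumb in the kernel log.
 */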
static void show_signal(struct task_struct *tsk, int signr,
			const char *type, const char *desc,
			struct pt_regs *regs, long error_code)
{
	if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
	    printk_ratelimit()) {
		pr_info("%s[%d] %s%s ip:%lx sp:%lx error:%lx",
			tsk->comm, task_pid_nr(tsk), type, desc,
			regs->ip, regs->sp, error_code);
		print_vma_addr(KERN_CONT " in ", regs->ip);
		pr_cont("\n");
	}
}

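/*
 * Common trap delivery: try the no-signal paths (vm86, kernel fixups,
 * vDSO fixups) first; otherwise send the signal, with a filled-in
 * siginfo when a si_code was supplied.
 */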
static void
do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
	long error_code, int sicode, void __user *addr)
{
	struct task_struct *tsk = current;

	if (!do_trap_no_signal(tsk, trapnr, str, regs, error_code))
		return;

	show_signal(tsk, signr, "trap ", str, regs, error_code);

	if (!sicode)
		force_sig(signr);
	else
		force_sig_fault(signr, sicode, addr);
}
NOKPROBE_SYMBOL(do_trap);

static void do_error_trap(struct pt_regs *regs, long error_code, char *str,
	unsigned long trapnr, int signr, int sicode, void __user *addr)
{
	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");

	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) !=
			NOTIFY_STOP) {
		cond_local_irq_enable(regs);
		do_trap(trapnr, signr, str, regs, error_code, sicode, addr);
		cond_local_irq_disable(regs);
	}
}

/*
 * POSIX requires the address of the faulting instruction to be provided for
 * SIGILL (#UD) and SIGFPE (#DE) in the si_addr member of siginfo_t.
 *
 * This address is usually regs->ip, but when an uprobe moved the code out
 * of line then regs->ip points to the XOL code which would confuse
 * anything which analyzes the fault address vs. the unmodified binary. If
 * a trap happened in XOL code then uprobe maps regs->ip back to the
 * original instruction address.
 */
static __always_inline void __user *error_get_trap_addr(struct pt_regs *regs)
{
	return (void __user *)uprobe_get_trap_addr(regs);
}

DEFINE_IDTENTRY(exc_divide_error)
{
	do_error_trap(regs, 0, "divide error", X86_TRAP_DE, SIGFPE,
		      FPE_INTDIV, error_get_trap_addr(regs));
}

DEFINE_IDTENTRY(exc_overflow)
{
	do_error_trap(regs, 0, "overflow", X86_TRAP_OF, SIGSEGV, 0, NULL);
}

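/*
 * Non-static when CONFIG_X86_F00F_BUG is enabled so that the Pentium
 * F00F workaround in the page-fault code can reroute the erratum's
 * spurious #PF here and deliver it as an invalid opcode.
 */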
#ifdef CONFIG_X86_F00F_BUG
void handle_invalid_op(struct pt_regs *regs)
#else
static inline void handle_invalid_op(struct pt_regs *regs)
#endif
{
	do_error_trap(regs, 0, "invalid opcode", X86_TRAP_UD, SIGILL,
		      ILL_ILLOPN, error_get_trap_addr(regs));
}

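/*
 * Decode a #UD at regs->ip as a potential WARN/BUG or CFI failure.
 * Returns true when the trap was consumed as a warning, in which case
 * regs->ip has been advanced past the UD2 so execution can continue.
 */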
static noinstr bool handle_bug(struct pt_regs *regs)
{
	bool handled = false;

	/*
	 * Normally @regs are unpoisoned by irqentry_enter(), but handle_bug()
	 * is a rare case that uses @regs without passing them to
	 * irqentry_enter().
	 */
	kmsan_unpoison_entry_regs(regs);
	if (!is_valid_bugaddr(regs->ip))
		return handled;

	/*
	 * All lies, just get the WARN/BUG out.
	 */
	instrumentation_begin();
	/*
	 * Since we're emulating a CALL with exceptions, restore the interrupt
	 * state to what it was at the exception site.
	 */
	if (regs->flags & X86_EFLAGS_IF)
		raw_local_irq_enable();
	if (report_bug(regs->ip, regs) == BUG_TRAP_TYPE_WARN ||
	    handle_cfi_failure(regs) == BUG_TRAP_TYPE_WARN) {
		regs->ip += LEN_UD2;
		handled = true;
	}
	if (regs->flags & X86_EFLAGS_IF)
		raw_local_irq_disable();
	instrumentation_end();

	return handled;
}

DEFINE_IDTENTRY_RAW(exc_invalid_op)
{
	irqentry_state_t state;

	/*
	 * We use UD2 as a short encoding for 'CALL __WARN', as such
	 * handle it before exception entry to avoid recursive WARN
	 * in case exception entry is the one triggering WARNs.
	 */
	if (!user_mode(regs) && handle_bug(regs))
		return;

	state = irqentry_enter(regs);
	instrumentation_begin();
	handle_invalid_op(regs);
	instrumentation_end();
	irqentry_exit(regs, state);
}

DEFINE_IDTENTRY(exc_coproc_segment_overrun)
{
	do_error_trap(regs, 0, "coprocessor segment overrun",
		      X86_TRAP_OLD_MF, SIGFPE, 0, NULL);
}

DEFINE_IDTENTRY_ERRORCODE(exc_invalid_tss)
{
	do_error_trap(regs, error_code, "invalid TSS", X86_TRAP_TS, SIGSEGV,
		      0, NULL);
}

DEFINE_IDTENTRY_ERRORCODE(exc_segment_not_present)
{
	do_error_trap(regs, error_code, "segment not present", X86_TRAP_NP,
		      SIGBUS, 0, NULL);
}

DEFINE_IDTENTRY_ERRORCODE(exc_stack_segment)
{
	do_error_trap(regs, error_code, "stack segment", X86_TRAP_SS, SIGBUS,
		      0, NULL);
}

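/*
 * Legacy alignment checking only applies at CPL 3, so a kernel-mode #AC
 * can only come from split-lock detection. A user-mode #AC is either a
 * split lock (handled and rate-limited) or a genuine unaligned access
 * with EFLAGS.AC-based checking enabled.
 */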
DEFINE_IDTENTRY_ERRORCODE(exc_alignment_check)
{
	char *str = "alignment check";

	if (notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_AC, SIGBUS) == NOTIFY_STOP)
		return;

	if (!user_mode(regs))
		die("Split lock detected\n", regs, error_code);

	local_irq_enable();

	if (handle_user_split_lock(regs, error_code))
		goto out;

	do_trap(X86_TRAP_AC, SIGBUS, "alignment check", regs,
		error_code, BUS_ADRALN, NULL);

out:
	local_irq_disable();
}

#ifdef CONFIG_VMAP_STACK
__visible void __noreturn handle_stack_overflow(struct pt_regs *regs,
						unsigned long fault_address,
						struct stack_info *info)
{
	const char *name = stack_type_name(info->type);

	printk(KERN_EMERG "BUG: %s stack guard page was hit at %p (stack is %p..%p)\n",
	       name, (void *)fault_address, info->begin, info->end);

	die("stack guard page", regs, 0);

	/* Be absolutely certain we don't return. */
	panic("%s stack guard hit", name);
}
#endif

/*
 * Runs on an IST stack for x86_64 and on a special task stack for x86_32.
 *
 * On x86_64, this is more or less a normal kernel entry. Notwithstanding the
 * SDM's warnings about double faults being unrecoverable, returning works as
 * expected. Presumably what the SDM actually means is that the CPU may get
 * the register state wrong on entry, so returning could be a bad idea.
 *
 * Various CPU engineers have promised that double faults due to an IRET fault
 * while the stack is read-only are, in fact, recoverable.
 *
 * On x86_32, this is entered through a task gate, and regs are synthesized
 * from the TSS. Returning is, in principle, okay, but changes to regs will
 * be lost. If, for some reason, we need to return to a context with modified
 * regs, the shim code could be adjusted to synchronize the registers.
 *
 * The 32bit #DF shim provides CR2 already as an argument. On 64bit it needs
 * to be read before doing anything else.
 */
DEFINE_IDTENTRY_DF(exc_double_fault)
{
	static const char str[] = "double fault";
	struct task_struct *tsk = current;

#ifdef CONFIG_VMAP_STACK
	unsigned long address = read_cr2();
	struct stack_info info;
#endif

#ifdef CONFIG_X86_ESPFIX64
	extern unsigned char native_irq_return_iret[];

	/*
	 * If IRET takes a non-IST fault on the espfix64 stack, then we
	 * end up promoting it to a doublefault. In that case, take
	 * advantage of the fact that we're not using the normal (TSS.sp0)
	 * stack right now. We can write a fake #GP(0) frame at TSS.sp0
	 * and then modify our own IRET frame so that, when we return,
	 * we land directly at the #GP(0) vector with the stack already
	 * set up according to its expectations.
	 *
	 * The net result is that our #GP handler will think that we
	 * entered from usermode with the bad user context.
	 *
	 * No need for nmi_enter() here because we don't use RCU.
	 */
	if (((long)regs->sp >> P4D_SHIFT) == ESPFIX_PGD_ENTRY &&
	    regs->cs == __KERNEL_CS &&
	    regs->ip == (unsigned long)native_irq_return_iret)
	{
		struct pt_regs *gpregs = (struct pt_regs *)this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1;
		unsigned long *p = (unsigned long *)regs->sp;

		/*
		 * regs->sp points to the failing IRET frame on the
		 * ESPFIX64 stack. Copy it to the entry stack. This fills
		 * in gpregs->ss through gpregs->ip.
		 */
		gpregs->ip = p[0];
		gpregs->cs = p[1];
		gpregs->flags = p[2];
		gpregs->sp = p[3];
		gpregs->ss = p[4];
		gpregs->orig_ax = 0; /* Missing (lost) #GP error code */

		/*
		 * Adjust our frame so that we return straight to the #GP
		 * vector with the expected RSP value. This is safe because
		 * we won't enable interrupts or schedule before we invoke
		 * general_protection, so nothing will clobber the stack
		 * frame we just set up.
		 *
		 * We will enter general_protection with kernel GSBASE,
		 * which is what the stub expects, given that the faulting
		 * RIP will be the IRET instruction.
		 */
		regs->ip = (unsigned long)asm_exc_general_protection;
		regs->sp = (unsigned long)&gpregs->orig_ax;

		return;
	}
#endif

	irqentry_nmi_enter(regs);
	instrumentation_begin();
	notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV);

	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = X86_TRAP_DF;

#ifdef CONFIG_VMAP_STACK
	/*
	 * If we overflow the stack into a guard page, the CPU will fail
	 * to deliver #PF and will send #DF instead. Similarly, if we
	 * take any non-IST exception while too close to the bottom of
	 * the stack, the processor will get a page fault while
	 * delivering the exception and will generate a double fault.
	 *
	 * According to the SDM (footnote in 6.15 under "Interrupt 14 -
	 * Page-Fault Exception (#PF)"):
	 *
	 * Processors update CR2 whenever a page fault is detected. If a
	 * second page fault occurs while an earlier page fault is being
	 * delivered, the faulting linear address of the second fault will
	 * overwrite the contents of CR2 (replacing the previous
	 * address). These updates to CR2 occur even if the page fault
	 * results in a double fault or occurs during the delivery of a
	 * double fault.
	 *
	 * The logic below has a small possibility of incorrectly diagnosing
	 * some errors as stack overflows. For example, if the IDT or GDT
	 * gets corrupted such that #GP delivery fails due to a bad descriptor
	 * causing #GP and we hit this condition while CR2 coincidentally
	 * points to the stack guard page, we'll think we overflowed the
	 * stack. Given that we're going to panic one way or another
	 * if this happens, this isn't necessarily worth fixing.
	 *
	 * If necessary, we could improve the test by only diagnosing
	 * a stack overflow if the saved RSP points within 47 bytes of
	 * the bottom of the stack: if RSP == tsk_stack + 48 and we
	 * take an exception, the stack is already aligned and there
	 * will be enough room for SS, RSP, RFLAGS, CS, RIP, and a
	 * possible error code, so a stack overflow would *not* double
	 * fault. With any less space left, exception delivery could
	 * fail, and, as a practical matter, we've overflowed the
	 * stack even if the actual trigger for the double fault was
	 * something else.
	 */
	if (get_stack_guard_info((void *)address, &info))
		handle_stack_overflow(regs, address, &info);
#endif

	pr_emerg("PANIC: double fault, error_code: 0x%lx\n", error_code);
	die("double fault", regs, error_code);
	panic("Machine halted.");
	instrumentation_end();
}

DEFINE_IDTENTRY(exc_bounds)
{
	if (notify_die(DIE_TRAP, "bounds", regs, 0,
			X86_TRAP_BR, SIGSEGV) == NOTIFY_STOP)
		return;
	cond_local_irq_enable(regs);

	if (!user_mode(regs))
		die("bounds", regs, 0);

	do_trap(X86_TRAP_BR, SIGSEGV, "bounds", regs, 0, 0, NULL);

	cond_local_irq_disable(regs);
}

enum kernel_gp_hint {
	GP_NO_HINT,
	GP_NON_CANONICAL,
	GP_CANONICAL
};

/*
 * When an uncaught #GP occurs, try to determine the memory address accessed by
 * the instruction and return that address to the caller. Also, try to figure
 * out whether any part of the access to that address was non-canonical.
 */
static enum kernel_gp_hint get_kernel_gp_address(struct pt_regs *regs,
						 unsigned long *addr)
{
	u8 insn_buf[MAX_INSN_SIZE];
	struct insn insn;
	int ret;

	if (copy_from_kernel_nofault(insn_buf, (void *)regs->ip,
			MAX_INSN_SIZE))
		return GP_NO_HINT;

	ret = insn_decode_kernel(&insn, insn_buf);
	if (ret < 0)
		return GP_NO_HINT;

	*addr = (unsigned long)insn_get_addr_ref(&insn, regs);
	if (*addr == -1UL)
		return GP_NO_HINT;

#ifdef CONFIG_X86_64
	/*
	 * Check that:
	 *  - the operand is not in the kernel half
	 *  - the last byte of the operand is not in the user canonical half
	 */
	if (*addr < ~__VIRTUAL_MASK &&
	    *addr + insn.opnd_bytes - 1 > __VIRTUAL_MASK)
		return GP_NON_CANONICAL;
#endif

	return GP_CANONICAL;
}

#define GPFSTR "general protection fault"

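/*
 * With iopl emulation (iopl_emul == 3) the task was promised I/O port
 * access but not the real EFLAGS.IOPL, so CLI (0xfa) and STI (0xfb)
 * still #GP. Treat them as NOPs: warn once per task and skip the
 * one-byte opcode.
 */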
static bool fixup_iopl_exception(struct pt_regs *regs)
{
	struct thread_struct *t = &current->thread;
	unsigned char byte;
	unsigned long ip;

	if (!IS_ENABLED(CONFIG_X86_IOPL_IOPERM) || t->iopl_emul != 3)
		return false;

	if (insn_get_effective_ip(regs, &ip))
		return false;

	if (get_user(byte, (const char __user *)ip))
		return false;

	if (byte != 0xfa && byte != 0xfb)
		return false;

	if (!t->iopl_warn && printk_ratelimit()) {
		pr_err("%s[%d] attempts to use CLI/STI, pretending it's a NOP, ip:%lx",
		       current->comm, task_pid_nr(current), ip);
		print_vma_addr(KERN_CONT " in ", ip);
		pr_cont("\n");
		t->iopl_warn = 1;
	}

	regs->ip += 1;
	return true;
}

/*
 * The unprivileged ENQCMD instruction generates #GPs if the
 * IA32_PASID MSR has not been populated. If possible, populate
 * the MSR from a PASID previously allocated to the mm.
 */
static bool try_fixup_enqcmd_gp(void)
{
#ifdef CONFIG_IOMMU_SVA
	u32 pasid;

	/*
	 * MSR_IA32_PASID is managed using XSAVE. Directly
	 * writing to the MSR is only possible when fpregs
	 * are valid and the fpstate is not. This is
	 * guaranteed when handling a userspace exception
	 * before interrupts are re-enabled.
	 */
	lockdep_assert_irqs_disabled();

	/*
	 * Hardware without ENQCMD will not generate
	 * #GPs that can be fixed up here.
	 */
	if (!cpu_feature_enabled(X86_FEATURE_ENQCMD))
		return false;

	/*
	 * If the mm has not been allocated a
	 * PASID, the #GP cannot be fixed up.
	 */
	if (!mm_valid_pasid(current->mm))
		return false;

	pasid = current->mm->pasid;

	/*
	 * Did this thread already have its PASID activated?
	 * If so, the #GP must be from something else.
	 */
	if (current->pasid_activated)
		return false;

	wrmsrl(MSR_IA32_PASID, pasid | MSR_IA32_PASID_VALID);
	current->pasid_activated = 1;

	return true;
#else
	return false;
#endif
}

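/*
 * Last-ditch kernel-mode #GP/#VE handling before dying: exception table
 * fixups, then an active kprobe's fault handler, then the die notifier
 * chain. Returns true if any of them consumed the fault.
 */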
static bool gp_try_fixup_and_notify(struct pt_regs *regs, int trapnr,
				    unsigned long error_code, const char *str,
				    unsigned long address)
{
	if (fixup_exception(regs, trapnr, error_code, address))
		return true;

	current->thread.error_code = error_code;
	current->thread.trap_nr = trapnr;

	/*
	 * To be potentially processing a kprobe fault and to trust the result
	 * from kprobe_running(), we have to be non-preemptible.
	 */
	if (!preemptible() && kprobe_running() &&
	    kprobe_fault_handler(regs, trapnr))
		return true;

	return notify_die(DIE_GPF, str, regs, error_code, trapnr, SIGSEGV) == NOTIFY_STOP;
}

static void gp_user_force_sig_segv(struct pt_regs *regs, int trapnr,
				   unsigned long error_code, const char *str)
{
	current->thread.error_code = error_code;
	current->thread.trap_nr = trapnr;
	show_signal(current, SIGSEGV, "", str, regs, error_code);
	force_sig(SIGSEGV);
}

DEFINE_IDTENTRY_ERRORCODE(exc_general_protection)
{
	char desc[sizeof(GPFSTR) + 50 + 2*sizeof(unsigned long) + 1] = GPFSTR;
	enum kernel_gp_hint hint = GP_NO_HINT;
	unsigned long gp_addr;

	if (user_mode(regs) && try_fixup_enqcmd_gp())
		return;

	cond_local_irq_enable(regs);

	if (static_cpu_has(X86_FEATURE_UMIP)) {
		if (user_mode(regs) && fixup_umip_exception(regs))
			goto exit;
	}

	if (v8086_mode(regs)) {
		local_irq_enable();
		handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
		local_irq_disable();
		return;
	}

	if (user_mode(regs)) {
		if (fixup_iopl_exception(regs))
			goto exit;

		if (fixup_vdso_exception(regs, X86_TRAP_GP, error_code, 0))
			goto exit;

		gp_user_force_sig_segv(regs, X86_TRAP_GP, error_code, desc);
		goto exit;
	}

	if (gp_try_fixup_and_notify(regs, X86_TRAP_GP, error_code, desc, 0))
		goto exit;

	if (error_code)
		snprintf(desc, sizeof(desc), "segment-related " GPFSTR);
	else
		hint = get_kernel_gp_address(regs, &gp_addr);

	if (hint != GP_NO_HINT)
		snprintf(desc, sizeof(desc), GPFSTR ", %s 0x%lx",
			 (hint == GP_NON_CANONICAL) ? "probably for non-canonical address"
						    : "maybe for address",
			 gp_addr);

	/*
	 * KASAN is interested only in the non-canonical case, clear it
	 * otherwise.
	 */
	if (hint != GP_NON_CANONICAL)
		gp_addr = 0;

	die_addr(desc, regs, error_code, gp_addr);

exit:
	cond_local_irq_disable(regs);
}

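/*
 * #BP consumers, in priority order: the kgdb low-level trap hook,
 * kprobes, then the generic die notifier chain. Returns true if the
 * breakpoint was consumed in-kernel and no SIGTRAP should be sent.
 */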
static bool do_int3(struct pt_regs *regs)
{
	int res;

#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
	if (kgdb_ll_trap(DIE_INT3, "int3", regs, 0, X86_TRAP_BP,
			 SIGTRAP) == NOTIFY_STOP)
		return true;
#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */

#ifdef CONFIG_KPROBES
	if (kprobe_int3_handler(regs))
		return true;
#endif
	res = notify_die(DIE_INT3, "int3", regs, 0, X86_TRAP_BP, SIGTRAP);

	return res == NOTIFY_STOP;
}
NOKPROBE_SYMBOL(do_int3);

static void do_int3_user(struct pt_regs *regs)
{
	if (do_int3(regs))
		return;

	cond_local_irq_enable(regs);
	do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, 0, 0, NULL);
	cond_local_irq_disable(regs);
}

DEFINE_IDTENTRY_RAW(exc_int3)
{
	/*
	 * poke_int3_handler() is completely self-contained code; it does (and
	 * must) *NOT* call out to anything, lest it hits upon yet another
	 * INT3.
	 */
	if (poke_int3_handler(regs))
		return;

	/*
	 * irqentry_enter_from_user_mode() uses static_branch_{,un}likely()
	 * and therefore can trigger INT3, hence poke_int3_handler() must
	 * be done before. If the entry came from kernel mode, then use
	 * nmi_enter() because the INT3 could have been hit in any context
	 * including NMI.
	 */
	if (user_mode(regs)) {
		irqentry_enter_from_user_mode(regs);
		instrumentation_begin();
		do_int3_user(regs);
		instrumentation_end();
		irqentry_exit_to_user_mode(regs);
	} else {
		irqentry_state_t irq_state = irqentry_nmi_enter(regs);

		instrumentation_begin();
		if (!do_int3(regs))
			die("int3", regs, 0);
		instrumentation_end();
		irqentry_nmi_exit(regs, irq_state);
	}
}

#ifdef CONFIG_X86_64
/*
 * Help handler running on a per-cpu (IST or entry trampoline) stack
 * to switch to the normal thread stack if the interrupted code was in
 * user mode. The actual stack switch is done in entry_64.S
 */
asmlinkage __visible noinstr struct pt_regs *sync_regs(struct pt_regs *eregs)
{
	struct pt_regs *regs = (struct pt_regs *)this_cpu_read(pcpu_hot.top_of_stack) - 1;
	if (regs != eregs)
		*regs = *eregs;
	return regs;
}

#ifdef CONFIG_AMD_MEM_ENCRYPT
asmlinkage __visible noinstr struct pt_regs *vc_switch_off_ist(struct pt_regs *regs)
{
	unsigned long sp, *stack;
	struct stack_info info;
	struct pt_regs *regs_ret;

	/*
	 * In the SYSCALL entry path the RSP value comes from user-space - don't
	 * trust it and switch to the current kernel stack
	 */
	if (ip_within_syscall_gap(regs)) {
		sp = this_cpu_read(pcpu_hot.top_of_stack);
		goto sync;
	}

	/*
	 * From here on the RSP value is trusted. Now check whether entry
	 * happened from a safe stack. Not safe are the entry or unknown stacks,
	 * use the fall-back stack instead in this case.
	 */
	sp = regs->sp;
	stack = (unsigned long *)sp;

	if (!get_stack_info_noinstr(stack, current, &info) || info.type == STACK_TYPE_ENTRY ||
	    info.type > STACK_TYPE_EXCEPTION_LAST)
		sp = __this_cpu_ist_top_va(VC2);

sync:
	/*
	 * Found a safe stack - switch to it as if the entry didn't happen via
	 * IST stack. The code below only copies pt_regs, the real switch happens
	 * in assembly code.
	 */
	sp = ALIGN_DOWN(sp, 8) - sizeof(*regs_ret);

	regs_ret = (struct pt_regs *)sp;
	*regs_ret = *regs;

	return regs_ret;
}
#endif

asmlinkage __visible noinstr struct pt_regs *fixup_bad_iret(struct pt_regs *bad_regs)
{
	struct pt_regs tmp, *new_stack;

	/*
	 * This is called from entry_64.S early in handling a fault
	 * caused by a bad iret to user mode. To handle the fault
	 * correctly, we want to move our stack frame to where it would
	 * be had we entered directly on the entry stack (rather than
	 * just below the IRET frame) and we want to pretend that the
	 * exception came from the IRET target.
	 */
	new_stack = (struct pt_regs *)__this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1;

	/* Copy the IRET target to the temporary storage. */
	__memcpy(&tmp.ip, (void *)bad_regs->sp, 5*8);

	/* Copy the remainder of the stack from the current stack. */
	__memcpy(&tmp, bad_regs, offsetof(struct pt_regs, ip));

	/* Update the entry stack */
	__memcpy(new_stack, &tmp, sizeof(tmp));

	BUG_ON(!user_mode(new_stack));
	return new_stack;
}
#endif

static bool is_sysenter_singlestep(struct pt_regs *regs)
{
	/*
	 * We don't try for precision here. If we're anywhere in the region of
	 * code that can be single-stepped in the SYSENTER entry path, then
	 * assume that this is a useless single-step trap due to SYSENTER
	 * being invoked with TF set. (We don't know in advance exactly
	 * which instructions will be hit because BTF could plausibly
	 * be set.)
	 */
#ifdef CONFIG_X86_32
	return (regs->ip - (unsigned long)__begin_SYSENTER_singlestep_region) <
		(unsigned long)__end_SYSENTER_singlestep_region -
		(unsigned long)__begin_SYSENTER_singlestep_region;
#elif defined(CONFIG_IA32_EMULATION)
	return (regs->ip - (unsigned long)entry_SYSENTER_compat) <
		(unsigned long)__end_entry_SYSENTER_compat -
		(unsigned long)entry_SYSENTER_compat;
#else
	return false;
#endif
}

static __always_inline unsigned long debug_read_clear_dr6(void)
{
	unsigned long dr6;

	/*
	 * The Intel SDM says:
	 *
	 *   Certain debug exceptions may clear bits 0-3. The remaining
	 *   contents of the DR6 register are never cleared by the
	 *   processor. To avoid confusion in identifying debug
	 *   exceptions, debug handlers should clear the register before
	 *   returning to the interrupted task.
	 *
	 * Keep it simple: clear DR6 immediately.
	 */
	get_debugreg(dr6, 6);
	set_debugreg(DR6_RESERVED, 6);
	dr6 ^= DR6_RESERVED; /* Flip to positive polarity */

	return dr6;
}

/*
 * Our handling of the processor debug registers is non-trivial.
 * We do not clear them on entry and exit from the kernel. Therefore
 * it is possible to get a watchpoint trap here from inside the kernel.
 * However, the code in ./ptrace.c has ensured that the user can
 * only set watchpoints on userspace addresses. Therefore the in-kernel
 * watchpoint trap can only occur in code which is reading/writing
 * from user space. Such code must not hold kernel locks (since it
 * can equally take a page fault), therefore it is safe to call
 * force_sig_info even though that claims and releases locks.
 *
 * Code in ./signal.c ensures that the debug control register
 * is restored before we deliver any signal, and therefore that
 * user code runs with the correct debug control register even though
 * we clear it here.
 *
 * Being careful here means that we don't have to be as careful in a
 * lot of more complicated places (task switching can be a bit lazy
 * about restoring all the debug state, and ptrace doesn't have to
 * find every occurrence of the TF bit that could be saved away even
 * by user code)
 *
 * May run on IST stack.
 */

static bool notify_debug(struct pt_regs *regs, unsigned long *dr6)
{
	/*
	 * Notifiers will clear bits in @dr6 to indicate the event has been
	 * consumed - hw_breakpoint_handler(), single_stop_cont().
	 *
	 * Notifiers will set bits in @virtual_dr6 to indicate the desire
	 * for signals - ptrace_triggered(), kgdb_hw_overflow_handler().
	 */
	if (notify_die(DIE_DEBUG, "debug", regs, (long)dr6, 0, SIGTRAP) == NOTIFY_STOP)
		return true;

	return false;
}

static __always_inline void exc_debug_kernel(struct pt_regs *regs,
					     unsigned long dr6)
{
	/*
	 * Disable breakpoints during exception handling; recursive exceptions
	 * are exceedingly 'fun'.
	 *
	 * Since this function is NOKPROBE, and that also applies to
	 * HW_BREAKPOINT_X, we can't hit a breakpoint before this (XXX except a
	 * HW_BREAKPOINT_W on our stack)
	 *
	 * Entry text is excluded for HW_BP_X, and cpu_entry_area, which
	 * includes the entry stack, is excluded for everything.
	 */
	unsigned long dr7 = local_db_save();
	irqentry_state_t irq_state = irqentry_nmi_enter(regs);
	instrumentation_begin();

	/*
	 * If something gets miswired and we end up here for a user mode
	 * #DB, we will malfunction.
	 */
	WARN_ON_ONCE(user_mode(regs));

	if (test_thread_flag(TIF_BLOCKSTEP)) {
		/*
		 * The SDM says "The processor clears the BTF flag when it
		 * generates a debug exception." PTRACE_BLOCKSTEP requested
		 * it for userspace, but we just took a kernel #DB, so re-set
		 * BTF.
		 */
		unsigned long debugctl;

		rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
		debugctl |= DEBUGCTLMSR_BTF;
		wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
	}

	/*
	 * Catch SYSENTER with TF set and clear DR_STEP. If this hit a
	 * watchpoint at the same time then that will still be handled.
	 */
	if ((dr6 & DR_STEP) && is_sysenter_singlestep(regs))
		dr6 &= ~DR_STEP;

	/*
	 * The kernel doesn't use INT1
	 */
	if (!dr6)
		goto out;

	if (notify_debug(regs, &dr6))
		goto out;

	/*
	 * The kernel doesn't use TF single-step outside of:
	 *
	 *  - Kprobes, consumed through kprobe_debug_handler()
	 *  - KGDB, consumed through notify_debug()
	 *
	 * So if we get here with DR_STEP set, something is wonky.
	 *
	 * A known way to trigger this is through QEMU's GDB stub,
	 * which leaks #DB into the guest and causes IST recursion.
	 */
	if (WARN_ON_ONCE(dr6 & DR_STEP))
		regs->flags &= ~X86_EFLAGS_TF;
out:
	instrumentation_end();
	irqentry_nmi_exit(regs, irq_state);

	local_db_restore(dr7);
}

static __always_inline void exc_debug_user(struct pt_regs *regs,
					   unsigned long dr6)
{
	bool icebp;

	/*
	 * If something gets miswired and we end up here for a kernel mode
	 * #DB, we will malfunction.
	 */
	WARN_ON_ONCE(!user_mode(regs));

	/*
	 * NB: We can't easily clear DR7 here because
	 * irqentry_exit_to_user_mode() can invoke ptrace, schedule, access
	 * user memory, etc. This means that a recursive #DB is possible. If
	 * this happens, that #DB will hit exc_debug_kernel() and clear DR7.
	 * Since we're not on the IST stack right now, everything will be
	 * fine.
	 */

	irqentry_enter_from_user_mode(regs);
	instrumentation_begin();

	/*
	 * Start the virtual/ptrace DR6 value with just the DR_STEP mask
	 * of the real DR6. ptrace_triggered() will set the DR_TRAPn bits.
	 *
	 * Userspace expects DR_STEP to be visible in ptrace_get_debugreg(6)
	 * even if it is not the result of PTRACE_SINGLESTEP.
	 */
	current->thread.virtual_dr6 = (dr6 & DR_STEP);

	/*
	 * The SDM says "The processor clears the BTF flag when it
	 * generates a debug exception." Clear TIF_BLOCKSTEP to keep
	 * TIF_BLOCKSTEP in sync with the hardware BTF flag.
	 */
	clear_thread_flag(TIF_BLOCKSTEP);

	/*
	 * If dr6 has no reason to give us about the origin of this trap,
	 * then it's very likely the result of an icebp/int01 trap.
	 * User wants a sigtrap for that.
	 */
	icebp = !dr6;

	if (notify_debug(regs, &dr6))
		goto out;

	/* It's safe to allow irq's after DR6 has been saved */
	local_irq_enable();

	if (v8086_mode(regs)) {
		handle_vm86_trap((struct kernel_vm86_regs *)regs, 0, X86_TRAP_DB);
		goto out_irq;
	}

	/* #DB for bus lock can only be triggered from userspace. */
	if (dr6 & DR_BUS_LOCK)
		handle_bus_lock(regs);

	/* Add the virtual_dr6 bits for signals. */
	dr6 |= current->thread.virtual_dr6;
	if (dr6 & (DR_STEP | DR_TRAP_BITS) || icebp)
		send_sigtrap(regs, 0, get_si_code(dr6));

out_irq:
	local_irq_disable();
out:
	instrumentation_end();
	irqentry_exit_to_user_mode(regs);
}

#ifdef CONFIG_X86_64
/* IST stack entry */
DEFINE_IDTENTRY_DEBUG(exc_debug)
{
	exc_debug_kernel(regs, debug_read_clear_dr6());
}

/* User entry, runs on regular task stack */
DEFINE_IDTENTRY_DEBUG_USER(exc_debug)
{
	exc_debug_user(regs, debug_read_clear_dr6());
}
#else
/* 32 bit does not have separate entry points. */
DEFINE_IDTENTRY_RAW(exc_debug)
{
	unsigned long dr6 = debug_read_clear_dr6();

	if (user_mode(regs))
		exc_debug_user(regs, dr6);
	else
		exc_debug_kernel(regs, dr6);
}
#endif

/*
 * Note that we play around with the 'TS' bit in an attempt to get
 * the correct behaviour even in the presence of the asynchronous
 * IRQ13 behaviour.
 */
static void math_error(struct pt_regs *regs, int trapnr)
{
	struct task_struct *task = current;
	struct fpu *fpu = &task->thread.fpu;
	int si_code;
	char *str = (trapnr == X86_TRAP_MF) ? "fpu exception" :
						"simd exception";

	cond_local_irq_enable(regs);

	if (!user_mode(regs)) {
		if (fixup_exception(regs, trapnr, 0, 0))
			goto exit;

		task->thread.error_code = 0;
		task->thread.trap_nr = trapnr;

		if (notify_die(DIE_TRAP, str, regs, 0, trapnr,
			       SIGFPE) != NOTIFY_STOP)
			die(str, regs, 0);
		goto exit;
	}

	/*
	 * Synchronize the FPU register state to the memory register state
	 * if necessary. This allows the exception handler to inspect it.
	 */
	fpu_sync_fpstate(fpu);

	task->thread.trap_nr = trapnr;
	task->thread.error_code = 0;

	si_code = fpu__exception_code(fpu, trapnr);
	/* Retry when we get spurious exceptions: */
	if (!si_code)
		goto exit;

	if (fixup_vdso_exception(regs, trapnr, 0, 0))
		goto exit;

	force_sig_fault(SIGFPE, si_code,
			(void __user *)uprobe_get_trap_addr(regs));
exit:
	cond_local_irq_disable(regs);
}

DEFINE_IDTENTRY(exc_coprocessor_error)
{
	math_error(regs, X86_TRAP_MF);
}

DEFINE_IDTENTRY(exc_simd_coprocessor_error)
{
	if (IS_ENABLED(CONFIG_X86_INVD_BUG)) {
		/* AMD 486 bug: INVD in CPL 0 raises #XF instead of #GP */
		if (!static_cpu_has(X86_FEATURE_XMM)) {
			__exc_general_protection(regs, 0);
			return;
		}
	}
	math_error(regs, X86_TRAP_XF);
}

DEFINE_IDTENTRY(exc_spurious_interrupt_bug)
{
	/*
	 * This addresses a Pentium Pro Erratum:
	 *
	 * PROBLEM: If the APIC subsystem is configured in mixed mode with
	 * Virtual Wire mode implemented through the local APIC, an
	 * interrupt vector of 0Fh (Intel reserved encoding) may be
	 * generated by the local APIC (Int 15). This vector may be
	 * generated upon receipt of a spurious interrupt (an interrupt
	 * which is removed before the system receives the INTA sequence)
	 * instead of the programmed 8259 spurious interrupt vector.
	 *
	 * IMPLICATION: The spurious interrupt vector programmed in the
	 * 8259 is normally handled by an operating system's spurious
	 * interrupt handler. However, a vector of 0Fh is unknown to some
	 * operating systems, which would crash if this erratum occurred.
	 *
	 * In theory this could be limited to 32bit, but the handler is not
	 * hurting and who knows which other CPUs suffer from this.
	 */
}

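/*
 * First-use trap for XSTATE features that are disabled via XFD (e.g.
 * AMX): a non-zero MSR_IA32_XFD_ERR at #NM time means user space touched
 * such a feature, so try to allocate the larger fpstate and enable it.
 */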
static bool handle_xfd_event(struct pt_regs *regs)
{
	u64 xfd_err;
	int err;

	if (!IS_ENABLED(CONFIG_X86_64) || !cpu_feature_enabled(X86_FEATURE_XFD))
		return false;

	rdmsrl(MSR_IA32_XFD_ERR, xfd_err);
	if (!xfd_err)
		return false;

	wrmsrl(MSR_IA32_XFD_ERR, 0);

	/* Die if that happens in kernel space */
	if (WARN_ON(!user_mode(regs)))
		return false;

	local_irq_enable();

	err = xfd_enable_feature(xfd_err);

	switch (err) {
	case -EPERM:
		force_sig_fault(SIGILL, ILL_ILLOPC, error_get_trap_addr(regs));
		break;
	case -EFAULT:
		force_sig(SIGSEGV);
		break;
	}

	local_irq_disable();
	return true;
}

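/*
 * #NM: give XFD first-use handling a shot, then x87 math emulation on
 * FPU-less hardware; a stale CR0.TS is warned about and cleared, since
 * the kernel otherwise never expects this trap.
 */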
DEFINE_IDTENTRY(exc_device_not_available)
{
	unsigned long cr0 = read_cr0();

	if (handle_xfd_event(regs))
		return;

#ifdef CONFIG_MATH_EMULATION
	if (!boot_cpu_has(X86_FEATURE_FPU) && (cr0 & X86_CR0_EM)) {
		struct math_emu_info info = { };

		cond_local_irq_enable(regs);

		info.regs = regs;
		math_emulate(&info);

		cond_local_irq_disable(regs);
		return;
	}
#endif

	/* This should not happen. */
	if (WARN(cr0 & X86_CR0_TS, "CR0.TS was set")) {
		/* Try to fix it up and carry on. */
		write_cr0(cr0 & ~X86_CR0_TS);
	} else {
		/*
		 * Something terrible happened, and we're better off trying
		 * to kill the task than getting stuck in a never-ending
		 * loop of #NM faults.
		 */
		die("unexpected #NM exception", regs, 0);
	}
}

#ifdef CONFIG_INTEL_TDX_GUEST

#define VE_FAULT_STR "VE fault"

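/*
 * Unhandled #VE mirrors the #GP flow: SIGSEGV for user mode; exception
 * table fixups and die notifiers, then die_addr(), for kernel mode.
 */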
static void ve_raise_fault(struct pt_regs *regs, long error_code,
			   unsigned long address)
{
	if (user_mode(regs)) {
		gp_user_force_sig_segv(regs, X86_TRAP_VE, error_code, VE_FAULT_STR);
		return;
	}

	if (gp_try_fixup_and_notify(regs, X86_TRAP_VE, error_code,
				    VE_FAULT_STR, address)) {
		return;
	}

	die_addr(VE_FAULT_STR, regs, error_code, address);
}

/*
 * Virtualization Exceptions (#VE) are delivered to TDX guests due to
 * specific guest actions, which may happen in either user space or the
 * kernel:
 *
 *  * Specific instructions (WBINVD, for example)
 *  * Specific MSR accesses
 *  * Specific CPUID leaf accesses
 *  * Access to specific guest physical addresses
 *
 * In the settings that Linux will run in, virtualization exceptions are
 * never generated on accesses to normal, TD-private memory that has been
 * accepted (by BIOS or with tdx_enc_status_changed()).
 *
 * Syscall entry code has a critical window where the kernel stack is not
 * yet set up. Any exception in this window leads to hard-to-debug issues
 * and can be exploited for privilege escalation. Exceptions in the NMI
 * entry code also cause issues. Returning from the exception handler with
 * IRET will re-enable NMIs and nested NMI will corrupt the NMI stack.
 *
 * For these reasons, the kernel avoids #VEs during the syscall gap and
 * the NMI entry code. Entry code paths do not access TD-shared memory,
 * MMIO regions, use #VE triggering MSRs, instructions, or CPUID leaves
 * that might generate #VE. The VMM can remove memory from the TD at any
 * point, but access to unaccepted (or missing) private memory leads to VM
 * termination, not to #VE.
 *
 * Similarly to page faults and breakpoints, #VEs are allowed in NMI
 * handlers once the kernel is ready to deal with nested NMIs.
 *
 * During #VE delivery, all interrupts, including NMIs, are blocked until
 * TDGETVEINFO is called. It prevents #VE nesting until the kernel reads
 * the VE info.
 *
 * If a guest kernel action which would normally cause a #VE occurs in
 * the interrupt-disabled region before TDGETVEINFO, a #DF (fault
 * exception) is delivered to the guest which will result in an oops.
 *
 * The entry code has been audited carefully to follow these expectations,
 * and changes to it have to be audited for correctness vs. this aspect.
 * Similarly to #PF, a #VE in these places will expose the kernel to
 * privilege escalation or may lead to random crashes.
 */
DEFINE_IDTENTRY(exc_virtualization_exception)
{
	struct ve_info ve;

	/*
	 * NMIs/Machine-checks/Interrupts will be in a disabled state
	 * till TDGETVEINFO TDCALL is executed. This ensures that VE
	 * info cannot be overwritten by a nested #VE.
	 */
	tdx_get_ve_info(&ve);

	cond_local_irq_enable(regs);

	/*
	 * If tdx_handle_virt_exception() could not process
	 * it successfully, treat it as #GP(0) and handle it.
	 */
	if (!tdx_handle_virt_exception(regs, &ve))
		ve_raise_fault(regs, 0, ve.gla);

	cond_local_irq_disable(regs);
}

#endif

#ifdef CONFIG_X86_32
DEFINE_IDTENTRY_SW(iret_error)
{
	local_irq_enable();
	if (notify_die(DIE_TRAP, "iret exception", regs, 0,
			X86_TRAP_IRET, SIGILL) != NOTIFY_STOP) {
		do_trap(X86_TRAP_IRET, SIGILL, "iret exception", regs, 0,
			ILL_BADSTK, (void __user *)NULL);
	}
	local_irq_disable();
}
#endif

void __init trap_init(void)
{
	/* Init cpu_entry_area before IST entries are set up */
	setup_cpu_entry_areas();

	/* Init GHCB memory pages when running as an SEV-ES guest */
	sev_es_init_vc_handling();

	/* Initialize TSS before setting up traps so ISTs work */
	cpu_init_exception_handling();
	/* Setup traps as cpu_init() might #GP */
	idt_setup_traps();
	cpu_init();
}