// SPDX-License-Identifier: GPL-2.0
/*
 * This is for all the tests related to logic bugs (e.g. bad dereferences,
 * bad alignment, bad loops, bad locking, bad scheduling, deep stacks, and
 * lockups) along with other things that don't fit well into existing LKDTM
 * test source files.
 */
#include "lkdtm.h"
#include <linux/cpu.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
#include <linux/uaccess.h>

#if IS_ENABLED(CONFIG_X86_32) && !IS_ENABLED(CONFIG_UML)
#include <asm/desc.h>
#endif

struct lkdtm_list {
	struct list_head node;
};

/*
 * Make sure our attempts to overrun the kernel stack don't trigger
 * a compiler warning when CONFIG_FRAME_WARN is set. Then make sure we
 * recurse past the end of THREAD_SIZE by default.
 */
#if defined(CONFIG_FRAME_WARN) && (CONFIG_FRAME_WARN > 0)
#define REC_STACK_SIZE (_AC(CONFIG_FRAME_WARN, UL) / 2)
#else
#define REC_STACK_SIZE (THREAD_SIZE / 8UL)
#endif
#define REC_NUM_DEFAULT ((THREAD_SIZE / REC_STACK_SIZE) * 2)

static int recur_count = REC_NUM_DEFAULT;

static DEFINE_SPINLOCK(lock_me_up);

/*
 * Make sure compiler does not optimize this function or stack frame away:
 * - function marked noinline
 * - stack variables are marked volatile
 * - stack variables are written (memset()) and read (buf[..] passed as arg)
 * - function may have external effects (memzero_explicit())
 * - no tail recursion possible
 */
static int noinline recursive_loop(int remaining)
{
	volatile char buf[REC_STACK_SIZE];
	volatile int ret;

	memset((void *)buf, remaining & 0xFF, sizeof(buf));
	if (!remaining)
		ret = 0;
	else
		ret = recursive_loop((int)buf[remaining % sizeof(buf)] - 1);
	memzero_explicit((void *)buf, sizeof(buf));
	return ret;
}

/* If the depth is negative, use the default; otherwise keep the parameter. */
void __init lkdtm_bugs_init(int *recur_param)
{
	if (*recur_param < 0)
		*recur_param = recur_count;
	else
		recur_count = *recur_param;
}

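/* Crash the kernel immediately with a plain panic(). */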
static void lkdtm_PANIC(void)
{
	panic("dumptest");
}

static int panic_stop_irqoff_fn(void *arg)
{
	atomic_t *v = arg;

	/*
	 * As stop_machine() disables interrupts, all CPUs within this
	 * function have interrupts disabled and cannot take a regular IPI.
	 *
	 * The last CPU which enters here triggers the panic. Since no CPU
	 * can take a regular IPI at that point, we'll only be able to stop
	 * secondaries if smp_send_stop() or crash_smp_send_stop() uses an
	 * NMI.
	 */
	if (atomic_inc_return(v) == num_online_cpus())
		panic("panic stop irqoff test");

	for (;;)
		cpu_relax();
}

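/*
 * Panic while all online CPUs spin with interrupts disabled, exercising
 * the NMI-based CPU stopping paths during panic.
 */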
static void lkdtm_PANIC_STOP_IRQOFF(void)
{
	atomic_t v = ATOMIC_INIT(0);

	stop_machine(panic_stop_irqoff_fn, &v, cpu_online_mask);
}

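/* Trigger a BUG(), which should oops and kill the calling task. */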
static void lkdtm_BUG(void)
{
	BUG();
}

static int warn_counter;

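/* Trigger a WARN_ON(), which reports a backtrace but lets the system continue. */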
static void lkdtm_WARNING(void)
{
	WARN_ON(++warn_counter);
}

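/* Trigger a WARN() that includes a formatted message. */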
static void lkdtm_WARNING_MESSAGE(void)
{
	WARN(1, "Warning message trigger count: %d\n", ++warn_counter);
}

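/* Write through a NULL pointer to trigger a fatal exception. */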
static void lkdtm_EXCEPTION(void)
{
	*((volatile int *) 0) = 0;
}

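/* Spin forever, consuming the calling CPU. */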
static void lkdtm_LOOP(void)
{
	for (;;)
		;
}

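/*
 * Recurse with large stack frames until the kernel stack is exhausted;
 * with CONFIG_VMAP_STACK this should fault on a stack guard page.
 */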
static void lkdtm_EXHAUST_STACK(void)
{
	pr_info("Calling function with %lu frame size to depth %d ...\n",
		REC_STACK_SIZE, recur_count);
	recursive_loop(recur_count);
	pr_info("FAIL: survived without exhausting stack?!\n");
}

static noinline void __lkdtm_CORRUPT_STACK(void *stack)
{
	memset(stack, '\xff', 64);
}

/* This should trip the stack canary, not corrupt the return address. */
static noinline void lkdtm_CORRUPT_STACK(void)
{
	/* Use default char array length that triggers stack protection. */
	char data[8] __aligned(sizeof(void *));

	pr_info("Corrupting stack containing char array ...\n");
	__lkdtm_CORRUPT_STACK((void *)&data);
}

/* Same as above but will only get a canary with -fstack-protector-strong */
static noinline void lkdtm_CORRUPT_STACK_STRONG(void)
{
	union {
		unsigned short shorts[4];
		unsigned long *ptr;
	} data __aligned(sizeof(void *));

	pr_info("Corrupting stack containing union ...\n");
	__lkdtm_CORRUPT_STACK((void *)&data);
}

static pid_t stack_pid;
static unsigned long stack_addr;

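/*
 * Report the address offset of a stack variable across calls from
 * different tasks, so the effect of kernel stack offset randomization
 * (if enabled) can be observed.
 */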
static void lkdtm_REPORT_STACK(void)
{
	volatile uintptr_t magic;
	pid_t pid = task_pid_nr(current);

	if (pid != stack_pid) {
		pr_info("Starting stack offset tracking for pid %d\n", pid);
		stack_pid = pid;
		stack_addr = (uintptr_t)&magic;
	}

	pr_info("Stack offset: %d\n", (int)(stack_addr - (uintptr_t)&magic));
}

static pid_t stack_canary_pid;
static unsigned long stack_canary;
static unsigned long stack_canary_offset;

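/*
 * Scan near the given stack address for the current task's canary and
 * compare it across two different pids; identical values suggest a
 * global rather than a per-task canary.
 */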
static noinline void __lkdtm_REPORT_STACK_CANARY(void *stack)
{
	int i = 0;
	pid_t pid = task_pid_nr(current);
	unsigned long *canary = (unsigned long *)stack;
	unsigned long current_offset = 0, init_offset = 0;

	/* Do our best to find the canary in a 16 word window ... */
	for (i = 1; i < 16; i++) {
		canary = (unsigned long *)stack + i;
#ifdef CONFIG_STACKPROTECTOR
		if (*canary == current->stack_canary)
			current_offset = i;
		if (*canary == init_task.stack_canary)
			init_offset = i;
#endif
	}

	if (current_offset == 0) {
		/*
		 * If the canary doesn't match what's in the task_struct,
		 * we're either using a global canary or the stack frame
		 * layout changed.
		 */
		if (init_offset != 0) {
			pr_err("FAIL: global stack canary found at offset %ld (canary for pid %d matches init_task's)!\n",
			       init_offset, pid);
		} else {
			pr_warn("FAIL: did not correctly locate stack canary :(\n");
			pr_expected_config(CONFIG_STACKPROTECTOR);
		}

		return;
	} else if (init_offset != 0) {
		pr_warn("WARNING: found both current and init_task canaries nearby?!\n");
	}

	canary = (unsigned long *)stack + current_offset;
	if (stack_canary_pid == 0) {
		stack_canary = *canary;
		stack_canary_pid = pid;
		stack_canary_offset = current_offset;
		pr_info("Recorded stack canary for pid %d at offset %ld\n",
			stack_canary_pid, stack_canary_offset);
	} else if (pid == stack_canary_pid) {
		pr_warn("ERROR: saw pid %d again -- please use a new pid\n", pid);
	} else {
		if (current_offset != stack_canary_offset) {
			pr_warn("ERROR: canary offset changed from %ld to %ld!?\n",
				stack_canary_offset, current_offset);
			return;
		}

		if (*canary == stack_canary) {
			pr_warn("FAIL: canary identical for pid %d and pid %d at offset %ld!\n",
				stack_canary_pid, pid, current_offset);
		} else {
			pr_info("ok: stack canaries differ between pid %d and pid %d at offset %ld.\n",
				stack_canary_pid, pid, current_offset);
			/* Reset the test. */
			stack_canary_pid = 0;
		}
	}
}

static void lkdtm_REPORT_STACK_CANARY(void)
{
	/* Use default char array length that triggers stack protection. */
	char data[8] __aligned(sizeof(void *)) = { };

	__lkdtm_REPORT_STACK_CANARY((void *)&data);
}

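/*
 * Perform an unaligned 32-bit write; architectures without efficient
 * unaligned access support are expected to fault here.
 */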
static void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void)
{
	static u8 data[5] __attribute__((aligned(4))) = {1, 2, 3, 4, 5};
	u32 *p;
	u32 val = 0x12345678;

	p = (u32 *)(data + 1);
	if (*p == 0)
		val = 0x87654321;
	*p = val;

	if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
		pr_err("XFAIL: arch has CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS\n");
}

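/* Spin with preemption disabled to trip the soft lockup detector. */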
static void lkdtm_SOFTLOCKUP(void)
{
	preempt_disable();
	for (;;)
		cpu_relax();
}

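/* Spin with interrupts disabled to trip the hard lockup detector. */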
static void lkdtm_HARDLOCKUP(void)
{
	local_irq_disable();
	for (;;)
		cpu_relax();
}

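/*
 * Take a spinlock and exit without releasing it; a second call will
 * deadlock, spinning on the still-held lock.
 */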
static void lkdtm_SPINLOCKUP(void)
{
	/* Must be called twice to trigger. */
	spin_lock(&lock_me_up);
	/* Let sparse know we intended to exit holding the lock. */
	__release(&lock_me_up);
}

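/* Sleep uninterruptibly with no waker, to trip the hung task detector. */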
static void __noreturn lkdtm_HUNG_TASK(void)
{
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule();
	BUG();
}

static volatile unsigned int huge = INT_MAX - 2;
static volatile unsigned int ignored;

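/*
 * Walk an int past INT_MAX: signed overflow is undefined behavior and
 * should trap when signed-overflow instrumentation is enabled.
 */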
static void lkdtm_OVERFLOW_SIGNED(void)
{
	int value;

	value = huge;
	pr_info("Normal signed addition ...\n");
	value += 1;
	ignored = value;

	pr_info("Overflowing signed addition ...\n");
	value += 4;
	ignored = value;
}

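/*
 * As above but with unsigned arithmetic, for builds carrying
 * unsigned-wrap instrumentation.
 */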
static void lkdtm_OVERFLOW_UNSIGNED(void)
{
	unsigned int value;

	value = huge;
	pr_info("Normal unsigned addition ...\n");
	value += 1;
	ignored = value;

	pr_info("Overflowing unsigned addition ...\n");
	value += 4;
	ignored = value;
}

/* Intentionally using unannotated flex array definition. */
struct array_bounds_flex_array {
	int one;
	int two;
	char data[];
};

struct array_bounds {
	int one;
	int two;
	char data[8];
	int three;
};

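/*
 * Write one element past the end of a fixed-size array, which
 * CONFIG_UBSAN_BOUNDS should catch; the unannotated flexible array is
 * expected to stay uninstrumented.
 */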
static void lkdtm_ARRAY_BOUNDS(void)
{
	struct array_bounds_flex_array *not_checked;
	struct array_bounds *checked;
	volatile int i;

	not_checked = kmalloc(sizeof(*not_checked) * 2, GFP_KERNEL);
	checked = kmalloc(sizeof(*checked) * 2, GFP_KERNEL);
	if (!not_checked || !checked) {
		kfree(not_checked);
		kfree(checked);
		return;
	}

	pr_info("Array access within bounds ...\n");
	/* For both, touch all bytes in the actual member size. */
	for (i = 0; i < sizeof(checked->data); i++)
		checked->data[i] = 'A';
	/*
	 * For the uninstrumented flex array member, also touch 1 byte
	 * beyond to verify it is correctly uninstrumented.
	 */
	for (i = 0; i < 2; i++)
		not_checked->data[i] = 'A';

	pr_info("Array access beyond bounds ...\n");
	for (i = 0; i < sizeof(checked->data) + 1; i++)
		checked->data[i] = 'B';

	kfree(not_checked);
	kfree(checked);
	pr_err("FAIL: survived array bounds overflow!\n");
	if (IS_ENABLED(CONFIG_UBSAN_BOUNDS))
		pr_expected_config(CONFIG_UBSAN_TRAP);
	else
		pr_expected_config(CONFIG_UBSAN_BOUNDS);
}

struct lkdtm_annotated {
	unsigned long flags;
	int count;
	int array[] __counted_by(count);
};

static volatile int fam_count = 4;

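/*
 * Index a __counted_by() flexible array member beyond its recorded
 * count; CONFIG_UBSAN_BOUNDS should catch this when the compiler
 * supports the attribute.
 */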
static void lkdtm_FAM_BOUNDS(void)
{
	struct lkdtm_annotated *inst;

	inst = kzalloc(struct_size(inst, array, fam_count + 1), GFP_KERNEL);
	if (!inst) {
		pr_err("FAIL: could not allocate test struct!\n");
		return;
	}

	inst->count = fam_count;
	pr_info("Array access within bounds ...\n");
	inst->array[1] = fam_count;
	ignored = inst->array[1];

	pr_info("Array access beyond bounds ...\n");
	inst->array[fam_count] = fam_count;
	ignored = inst->array[fam_count];

	kfree(inst);

	pr_err("FAIL: survived access of invalid flexible array member index!\n");

	if (!__has_attribute(__counted_by__))
		pr_warn("This is expected since this %s was built with a compiler that does not support __counted_by\n",
			lkdtm_kernel_info);
	else if (IS_ENABLED(CONFIG_UBSAN_BOUNDS))
		pr_expected_config(CONFIG_UBSAN_TRAP);
	else
		pr_expected_config(CONFIG_UBSAN_BOUNDS);
}

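/*
 * Use list_add() through a corrupted "next" pointer as a "write what
 * where" primitive; CONFIG_LIST_HARDENED should detect and reject it.
 */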
static void lkdtm_CORRUPT_LIST_ADD(void)
{
	/*
	 * Initially, an empty list via LIST_HEAD:
	 *	test_head.next = &test_head
	 *	test_head.prev = &test_head
	 */
	LIST_HEAD(test_head);
	struct lkdtm_list good, bad;
	void *target[2] = { };
	void *redirection = &target;

	pr_info("attempting good list addition\n");

	/*
	 * Adding to the list performs these actions:
	 *	test_head.next->prev = &good.node
	 *	good.node.next = test_head.next
	 *	good.node.prev = &test_head
	 *	test_head.next = &good.node
	 */
	list_add(&good.node, &test_head);

	pr_info("attempting corrupted list addition\n");
	/*
	 * In simulating this "write what where" primitive, the "what" is
	 * the address of &bad.node, and the "where" is the address held
	 * by "redirection".
	 */
	test_head.next = redirection;
	list_add(&bad.node, &test_head);

	if (target[0] == NULL && target[1] == NULL)
		pr_err("Overwrite did not happen, but no BUG?!\n");
	else {
		pr_err("list_add() corruption not detected!\n");
		pr_expected_config(CONFIG_LIST_HARDENED);
	}
}

static void lkdtm_CORRUPT_LIST_DEL(void)
{
	LIST_HEAD(test_head);
	struct lkdtm_list item;
	void *target[2] = { };
	void *redirection = &target;

	list_add(&item.node, &test_head);

	pr_info("attempting good list removal\n");
	list_del(&item.node);

	pr_info("attempting corrupted list removal\n");
	list_add(&item.node, &test_head);

	/* As with the list_add() test above, this corrupts "next". */
	item.node.next = redirection;
	list_del(&item.node);

	if (target[0] == NULL && target[1] == NULL)
		pr_err("Overwrite did not happen, but no BUG?!\n");
	else {
		pr_err("list_del() corruption not detected!\n");
		pr_expected_config(CONFIG_LIST_HARDENED);
	}
}

/* Test that VMAP_STACK is actually allocating with a leading guard page */
static void lkdtm_STACK_GUARD_PAGE_LEADING(void)
{
	const unsigned char *stack = task_stack_page(current);
	const unsigned char *ptr = stack - 1;
	volatile unsigned char byte;

	pr_info("attempting bad read from page below current stack\n");

	byte = *ptr;

	pr_err("FAIL: accessed page before stack! (byte: %x)\n", byte);
}

/* Test that VMAP_STACK is actually allocating with a trailing guard page */
static void lkdtm_STACK_GUARD_PAGE_TRAILING(void)
{
	const unsigned char *stack = task_stack_page(current);
	const unsigned char *ptr = stack + THREAD_SIZE;
	volatile unsigned char byte;

	pr_info("attempting bad read from page above current stack\n");

	byte = *ptr;

	pr_err("FAIL: accessed page after stack! (byte: %x)\n", byte);
}

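/*
 * Attempt to clear the SMEP bit in CR4, first via the normal pinned
 * write and then via a call gadget into the raw CR4 write, to verify
 * CR4 pinning.
 */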
static void lkdtm_UNSET_SMEP(void)
{
#if IS_ENABLED(CONFIG_X86_64) && !IS_ENABLED(CONFIG_UML)
#define MOV_CR4_DEPTH	64
	void (*direct_write_cr4)(unsigned long val);
	unsigned char *insn;
	unsigned long cr4;
	int i;

	cr4 = native_read_cr4();

	if ((cr4 & X86_CR4_SMEP) != X86_CR4_SMEP) {
		pr_err("FAIL: SMEP not in use\n");
		return;
	}
	cr4 &= ~(X86_CR4_SMEP);

	pr_info("trying to clear SMEP normally\n");
	native_write_cr4(cr4);
	if (cr4 == native_read_cr4()) {
		pr_err("FAIL: pinning SMEP failed!\n");
		cr4 |= X86_CR4_SMEP;
		pr_info("restoring SMEP\n");
		native_write_cr4(cr4);
		return;
	}
	pr_info("ok: SMEP did not get cleared\n");

	/*
	 * To test the post-write pinning verification we need to call
	 * directly into the middle of native_write_cr4() where the
	 * cr4 write happens, skipping any pinning. This searches for
	 * the cr4 writing instruction.
	 */
	insn = (unsigned char *)native_write_cr4;
	OPTIMIZER_HIDE_VAR(insn);
	for (i = 0; i < MOV_CR4_DEPTH; i++) {
		/* mov %rdi, %cr4 */
		if (insn[i] == 0x0f && insn[i+1] == 0x22 && insn[i+2] == 0xe7)
			break;
		/* mov %rdi,%rax; mov %rax, %cr4 */
		if (insn[i] == 0x48 && insn[i+1] == 0x89 &&
		    insn[i+2] == 0xf8 && insn[i+3] == 0x0f &&
		    insn[i+4] == 0x22 && insn[i+5] == 0xe0)
			break;
	}
	if (i >= MOV_CR4_DEPTH) {
		pr_info("ok: cannot locate cr4 writing call gadget\n");
		return;
	}
	direct_write_cr4 = (void *)(insn + i);

	pr_info("trying to clear SMEP with call gadget\n");
	direct_write_cr4(cr4);
	if (native_read_cr4() & X86_CR4_SMEP) {
		pr_info("ok: SMEP removal was reverted\n");
	} else {
		pr_err("FAIL: cleared SMEP not detected!\n");
		cr4 |= X86_CR4_SMEP;
		pr_info("restoring SMEP\n");
		native_write_cr4(cr4);
	}
#else
	pr_err("XFAIL: this test is x86_64-only\n");
#endif
}

static void lkdtm_DOUBLE_FAULT(void)
{
#if IS_ENABLED(CONFIG_X86_32) && !IS_ENABLED(CONFIG_UML)
	/*
	 * Trigger #DF by setting the stack limit to zero. This clobbers
	 * a GDT TLS slot, which is okay because the current task will die
	 * anyway due to the double fault.
	 */
	struct desc_struct d = {
		.type = 3,	/* expand-up, writable, accessed data */
		.p = 1,		/* present */
		.d = 1,		/* 32-bit */
		.g = 0,		/* limit in bytes */
		.s = 1,		/* not system */
	};

	local_irq_disable();
	write_gdt_entry(get_cpu_gdt_rw(smp_processor_id()),
			GDT_ENTRY_TLS_MIN, &d, DESCTYPE_S);

	/*
	 * Put our zero-limit segment in SS and then trigger a fault. The
	 * 4-byte access to (%esp) will fault with #SS, and the attempt to
	 * deliver the fault will recursively cause #SS and result in #DF.
	 * This whole process happens while NMIs and MCEs are blocked by the
	 * MOV SS window. This is nice because an NMI with an invalid SS
	 * would also double-fault, resulting in the NMI or MCE being lost.
	 */
	asm volatile ("movw %0, %%ss; addl $0, (%%esp)" ::
		      "r" ((unsigned short)(GDT_ENTRY_TLS_MIN << 3)));

	pr_err("FAIL: tried to double fault but didn't die\n");
#else
	pr_err("XFAIL: this test is ia32-only\n");
#endif
}

#ifdef CONFIG_ARM64
static noinline void change_pac_parameters(void)
{
	if (IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL)) {
		/* Reset the keys of current task */
		ptrauth_thread_init_kernel(current);
		ptrauth_thread_switch_kernel(current);
	}
}
#endif

static noinline void lkdtm_CORRUPT_PAC(void)
{
#ifdef CONFIG_ARM64
#define CORRUPT_PAC_ITERATE	10
	int i;

	if (!IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL))
		pr_err("FAIL: kernel not built with CONFIG_ARM64_PTR_AUTH_KERNEL\n");

	if (!system_supports_address_auth()) {
		pr_err("FAIL: CPU lacks pointer authentication feature\n");
		return;
	}

	pr_info("changing PAC parameters to force function return failure...\n");
	/*
	 * The PAC is a hash computed from the input keys, the return
	 * address, and the stack pointer. Since the PAC has relatively few
	 * bits, collisions are possible, so iterate a few times to reduce
	 * the chance of the test surviving a collision by accident.
	 */
	for (i = 0; i < CORRUPT_PAC_ITERATE; i++)
		change_pac_parameters();

	pr_err("FAIL: survived PAC changes! Kernel may be unstable from here\n");
#else
	pr_err("XFAIL: this test is arm64-only\n");
#endif
}

static struct crashtype crashtypes[] = {
	CRASHTYPE(PANIC),
	CRASHTYPE(PANIC_STOP_IRQOFF),
	CRASHTYPE(BUG),
	CRASHTYPE(WARNING),
	CRASHTYPE(WARNING_MESSAGE),
	CRASHTYPE(EXCEPTION),
	CRASHTYPE(LOOP),
	CRASHTYPE(EXHAUST_STACK),
	CRASHTYPE(CORRUPT_STACK),
	CRASHTYPE(CORRUPT_STACK_STRONG),
	CRASHTYPE(REPORT_STACK),
	CRASHTYPE(REPORT_STACK_CANARY),
	CRASHTYPE(UNALIGNED_LOAD_STORE_WRITE),
	CRASHTYPE(SOFTLOCKUP),
	CRASHTYPE(HARDLOCKUP),
	CRASHTYPE(SPINLOCKUP),
	CRASHTYPE(HUNG_TASK),
	CRASHTYPE(OVERFLOW_SIGNED),
	CRASHTYPE(OVERFLOW_UNSIGNED),
	CRASHTYPE(ARRAY_BOUNDS),
	CRASHTYPE(FAM_BOUNDS),
	CRASHTYPE(CORRUPT_LIST_ADD),
	CRASHTYPE(CORRUPT_LIST_DEL),
	CRASHTYPE(STACK_GUARD_PAGE_LEADING),
	CRASHTYPE(STACK_GUARD_PAGE_TRAILING),
	CRASHTYPE(UNSET_SMEP),
	CRASHTYPE(DOUBLE_FAULT),
	CRASHTYPE(CORRUPT_PAC),
};

struct crashtype_category bugs_crashtypes = {
	.crashtypes = crashtypes,
	.len	    = ARRAY_SIZE(crashtypes),
};