// SPDX-License-Identifier: GPL-2.0
/* Include in trace.c */

#include <uapi/linux/sched/types.h>
#include <linux/stringify.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/slab.h>

static inline int trace_valid_entry(struct trace_entry *entry)
{
	switch (entry->type) {
	case TRACE_FN:
	case TRACE_CTX:
	case TRACE_WAKE:
	case TRACE_STACK:
	case TRACE_PRINT:
	case TRACE_BRANCH:
	case TRACE_GRAPH_ENT:
	case TRACE_GRAPH_RET:
		return 1;
	}
	return 0;
}

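/*
 * Consume every event queued on one CPU of the given array_buffer and
 * make sure each entry has a type this selftest knows about.  An
 * unknown type, or looping longer than the buffer could ever hold,
 * means the ring buffer has been corrupted.
 */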
static int trace_test_buffer_cpu(struct array_buffer *buf, int cpu)
{
	struct ring_buffer_event *event;
	struct trace_entry *entry;
	unsigned int loops = 0;

	while ((event = ring_buffer_consume(buf->buffer, cpu, NULL, NULL))) {
		entry = ring_buffer_event_data(event);

		/*
		 * The ring buffer holds at most trace_buf_size entries;
		 * if we loop more than that, there's something wrong
		 * with the ring buffer.
		 */
		if (loops++ > trace_buf_size) {
			printk(KERN_CONT ".. bad ring buffer ");
			goto failed;
		}
		if (!trace_valid_entry(entry)) {
			printk(KERN_CONT ".. invalid entry %d ",
				entry->type);
			goto failed;
		}
	}
	return 0;

 failed:
	/* disable tracing */
	tracing_disabled = 1;
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int __maybe_unused trace_test_buffer(struct array_buffer *buf, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	local_irq_save(flags);
	arch_spin_lock(&buf->tr->max_lock);

	cnt = ring_buffer_entries(buf->buffer);

	/*
	 * The trace_test_buffer_cpu runs a while loop to consume all data.
	 * If the calling tracer is broken, and is constantly filling
	 * the buffer, this will run forever, and hard lock the box.
	 * We disable the ring buffer while we do this test to prevent
	 * a hard lock up.
	 */
	tracing_off();
	for_each_possible_cpu(cpu) {
		ret = trace_test_buffer_cpu(buf, cpu);
		if (ret)
			break;
	}
	tracing_on();
	arch_spin_unlock(&buf->tr->max_lock);
	local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}

static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
	printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
		trace->name, init_ret);
}
#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

static int trace_selftest_test_probe1_cnt;
static void trace_selftest_test_probe1_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct ftrace_regs *fregs)
{
	trace_selftest_test_probe1_cnt++;
}

static int trace_selftest_test_probe2_cnt;
static void trace_selftest_test_probe2_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct ftrace_regs *fregs)
{
	trace_selftest_test_probe2_cnt++;
}

static int trace_selftest_test_probe3_cnt;
static void trace_selftest_test_probe3_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct ftrace_regs *fregs)
{
	trace_selftest_test_probe3_cnt++;
}

static int trace_selftest_test_global_cnt;
static void trace_selftest_test_global_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct ftrace_regs *fregs)
{
	trace_selftest_test_global_cnt++;
}

static int trace_selftest_test_dyn_cnt;
static void trace_selftest_test_dyn_func(unsigned long ip,
					 unsigned long pip,
					 struct ftrace_ops *op,
					 struct ftrace_regs *fregs)
{
	trace_selftest_test_dyn_cnt++;
}

static struct ftrace_ops test_probe1 = {
	.func = trace_selftest_test_probe1_func,
};

static struct ftrace_ops test_probe2 = {
	.func = trace_selftest_test_probe2_func,
};

static struct ftrace_ops test_probe3 = {
	.func = trace_selftest_test_probe3_func,
};

static void print_counts(void)
{
	printk("(%d %d %d %d %d) ",
	       trace_selftest_test_probe1_cnt,
	       trace_selftest_test_probe2_cnt,
	       trace_selftest_test_probe3_cnt,
	       trace_selftest_test_global_cnt,
	       trace_selftest_test_dyn_cnt);
}

static void reset_counts(void)
{
	trace_selftest_test_probe1_cnt = 0;
	trace_selftest_test_probe2_cnt = 0;
	trace_selftest_test_probe3_cnt = 0;
	trace_selftest_test_global_cnt = 0;
	trace_selftest_test_dyn_cnt = 0;
}

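/*
 * Register three filtered ftrace_ops (probe1 on function 1, probe2 on
 * function 2, probe3 on both), optionally the trace_array's own ops and
 * a dynamically allocated ops that traces everything, then call the two
 * test functions and verify each counter advanced exactly as expected,
 * both before and after the filters are modified and the ops removed.
 */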
static int trace_selftest_ops(struct trace_array *tr, int cnt)
{
	int save_ftrace_enabled = ftrace_enabled;
	struct ftrace_ops *dyn_ops;
	char *func1_name;
	char *func2_name;
	int len1;
	int len2;
	int ret = -1;

	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace ops #%d: ", cnt);

	ftrace_enabled = 1;
	reset_counts();

	/* Handle PPC64 '.' name */
	func1_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	func2_name = "*" __stringify(DYN_FTRACE_TEST_NAME2);
	len1 = strlen(func1_name);
	len2 = strlen(func2_name);

	/*
	 * Probe 1 will trace function 1.
	 * Probe 2 will trace function 2.
	 * Probe 3 will trace functions 1 and 2.
	 */
	ftrace_set_filter(&test_probe1, func1_name, len1, 1);
	ftrace_set_filter(&test_probe2, func2_name, len2, 1);
	ftrace_set_filter(&test_probe3, func1_name, len1, 1);
	ftrace_set_filter(&test_probe3, func2_name, len2, 0);

	register_ftrace_function(&test_probe1);
	register_ftrace_function(&test_probe2);
	register_ftrace_function(&test_probe3);
	/* First time we are running with main function */
	if (cnt > 1) {
		ftrace_init_array_ops(tr, trace_selftest_test_global_func);
		register_ftrace_function(tr->ops);
	}

	DYN_FTRACE_TEST_NAME();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 1)
		goto out;
	if (trace_selftest_test_probe2_cnt != 0)
		goto out;
	if (trace_selftest_test_probe3_cnt != 1)
		goto out;
	if (cnt > 1) {
		if (trace_selftest_test_global_cnt == 0)
			goto out;
	}

	DYN_FTRACE_TEST_NAME2();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 1)
		goto out;
	if (trace_selftest_test_probe2_cnt != 1)
		goto out;
	if (trace_selftest_test_probe3_cnt != 2)
		goto out;

	/* Add a dynamic probe */
	dyn_ops = kzalloc(sizeof(*dyn_ops), GFP_KERNEL);
	if (!dyn_ops) {
		printk("MEMORY ERROR ");
		goto out;
	}

	dyn_ops->func = trace_selftest_test_dyn_func;

	register_ftrace_function(dyn_ops);

	trace_selftest_test_global_cnt = 0;

	DYN_FTRACE_TEST_NAME();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 1)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 3)
		goto out_free;
	if (cnt > 1) {
		if (trace_selftest_test_global_cnt == 0)
			goto out_free;
	}
	if (trace_selftest_test_dyn_cnt == 0)
		goto out_free;

	DYN_FTRACE_TEST_NAME2();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 4)
		goto out_free;

	/* Remove trace function from probe 3 */
	func1_name = "!" __stringify(DYN_FTRACE_TEST_NAME);
	len1 = strlen(func1_name);

	ftrace_set_filter(&test_probe3, func1_name, len1, 0);

	DYN_FTRACE_TEST_NAME();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 3)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 4)
		goto out_free;
	if (cnt > 1) {
		if (trace_selftest_test_global_cnt == 0)
			goto out_free;
	}
	if (trace_selftest_test_dyn_cnt == 0)
		goto out_free;

	DYN_FTRACE_TEST_NAME2();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 3)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 3)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 5)
		goto out_free;

	ret = 0;
 out_free:
	unregister_ftrace_function(dyn_ops);
	kfree(dyn_ops);

 out:
	/* Purposely unregister in the same order */
	unregister_ftrace_function(&test_probe1);
	unregister_ftrace_function(&test_probe2);
	unregister_ftrace_function(&test_probe3);
	if (cnt > 1)
		unregister_ftrace_function(tr->ops);
	ftrace_reset_array_ops(tr);

	/* Make sure everything is off */
	reset_counts();
	DYN_FTRACE_TEST_NAME();
	DYN_FTRACE_TEST_NAME();

	if (trace_selftest_test_probe1_cnt ||
	    trace_selftest_test_probe2_cnt ||
	    trace_selftest_test_probe3_cnt ||
	    trace_selftest_test_global_cnt ||
	    trace_selftest_test_dyn_cnt)
		ret = -1;

	ftrace_enabled = save_ftrace_enabled;

	return ret;
}

/* Test dynamic code modification and ftrace filters */
static int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
						  struct trace_array *tr,
						  int (*func)(void))
{
	int save_ftrace_enabled = ftrace_enabled;
	unsigned long count;
	char *func_name;
	int ret;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;

	/* passed in by parameter to keep gcc from optimizing it away */
	func();

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them.
	 */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);

	/* filter only on our function */
	ftrace_set_global_filter(func_name, strlen(func_name), 1);

	/* enable tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(&tr->array_buffer, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(&tr->array_buffer, &count);

	ftrace_enabled = 1;
	tracing_start();

	/* we should only have one item */
	if (!ret && count != 1) {
		trace->reset(tr);
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}

	/* Test the ops with global tracing running */
	ret = trace_selftest_ops(tr, 1);
	trace->reset(tr);

 out:
	ftrace_enabled = save_ftrace_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_global_filter(NULL, 0, 1);

	/* Test the ops with global tracing off */
	if (!ret)
		ret = trace_selftest_ops(tr, 2);

	return ret;
}

static int trace_selftest_recursion_cnt;
static void trace_selftest_test_recursion_func(unsigned long ip,
					       unsigned long pip,
					       struct ftrace_ops *op,
					       struct ftrace_regs *fregs)
{
	/*
	 * This function is registered without the recursion safe flag.
	 * The ftrace infrastructure should provide the recursion
	 * protection. If not, this will crash the kernel!
	 */
	if (trace_selftest_recursion_cnt++ > 10)
		return;
	DYN_FTRACE_TEST_NAME();
}

static void trace_selftest_test_recursion_safe_func(unsigned long ip,
						    unsigned long pip,
						    struct ftrace_ops *op,
						    struct ftrace_regs *fregs)
{
	/*
	 * We said we would provide our own recursion. By calling
	 * this function again, we should recurse back into this function
	 * and count again. But this only happens if the arch supports
	 * all of ftrace features and nothing else is using the function
	 * tracing utility.
	 */
	if (trace_selftest_recursion_cnt++)
		return;
	DYN_FTRACE_TEST_NAME();
}

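/*
 * test_rec_probe sets FTRACE_OPS_FL_RECURSION, asking ftrace itself to
 * guard against recursion, while test_recsafe_probe leaves the flag
 * clear and handles (and expects) one level of recursion on its own.
 */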
static struct ftrace_ops test_rec_probe = {
	.func = trace_selftest_test_recursion_func,
	.flags = FTRACE_OPS_FL_RECURSION,
};

static struct ftrace_ops test_recsafe_probe = {
	.func = trace_selftest_test_recursion_safe_func,
};

static int
trace_selftest_function_recursion(void)
{
	int save_ftrace_enabled = ftrace_enabled;
	char *func_name;
	int len;
	int ret;

	/* The previous test PASSED */
	pr_cont("PASSED\n");
	pr_info("Testing ftrace recursion: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;

	/* Handle PPC64 '.' name */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	len = strlen(func_name);

	ret = ftrace_set_filter(&test_rec_probe, func_name, len, 1);
	if (ret) {
		pr_cont("*Could not set filter* ");
		goto out;
	}

	ret = register_ftrace_function(&test_rec_probe);
	if (ret) {
		pr_cont("*could not register callback* ");
		goto out;
	}

	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_function(&test_rec_probe);

	ret = -1;
	/*
	 * Recursion allows for transitions between contexts,
	 * and may call the callback twice.
	 */
	if (trace_selftest_recursion_cnt != 1 &&
	    trace_selftest_recursion_cnt != 2) {
		pr_cont("*callback not called once (or twice) (%d)* ",
			trace_selftest_recursion_cnt);
		goto out;
	}

	trace_selftest_recursion_cnt = 1;

	pr_cont("PASSED\n");
	pr_info("Testing ftrace recursion safe: ");

	ret = ftrace_set_filter(&test_recsafe_probe, func_name, len, 1);
	if (ret) {
		pr_cont("*Could not set filter* ");
		goto out;
	}

	ret = register_ftrace_function(&test_recsafe_probe);
	if (ret) {
		pr_cont("*could not register callback* ");
		goto out;
	}

	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_function(&test_recsafe_probe);

	ret = -1;
	if (trace_selftest_recursion_cnt != 2) {
		pr_cont("*callback not called expected 2 times (%d)* ",
			trace_selftest_recursion_cnt);
		goto out;
	}

	ret = 0;
 out:
	ftrace_enabled = save_ftrace_enabled;

	return ret;
}
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
# define trace_selftest_function_recursion() ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */

static enum {
	TRACE_SELFTEST_REGS_START,
	TRACE_SELFTEST_REGS_FOUND,
	TRACE_SELFTEST_REGS_NOT_FOUND,
} trace_selftest_regs_stat;

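/*
 * Record whether the SAVE_REGS callback actually received a pt_regs
 * pointer; the result is checked against what the architecture claims
 * to support.
 */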
static void trace_selftest_test_regs_func(unsigned long ip,
					  unsigned long pip,
					  struct ftrace_ops *op,
					  struct ftrace_regs *fregs)
{
	struct pt_regs *regs = ftrace_get_regs(fregs);

	if (regs)
		trace_selftest_regs_stat = TRACE_SELFTEST_REGS_FOUND;
	else
		trace_selftest_regs_stat = TRACE_SELFTEST_REGS_NOT_FOUND;
}

static struct ftrace_ops test_regs_probe = {
	.func = trace_selftest_test_regs_func,
	.flags = FTRACE_OPS_FL_SAVE_REGS,
};

static int
trace_selftest_function_regs(void)
{
	int save_ftrace_enabled = ftrace_enabled;
	char *func_name;
	int len;
	int ret;
	int supported = 0;

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	supported = 1;
#endif

	/* The previous test PASSED */
	pr_cont("PASSED\n");
	pr_info("Testing ftrace regs%s: ",
		!supported ? "(no arch support)" : "");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;

	/* Handle PPC64 '.' name */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	len = strlen(func_name);

	ret = ftrace_set_filter(&test_regs_probe, func_name, len, 1);
	/*
	 * If DYNAMIC_FTRACE is not set, then we just trace all functions.
	 * This test really doesn't care.
	 */
	if (ret && ret != -ENODEV) {
		pr_cont("*Could not set filter* ");
		goto out;
	}

	ret = register_ftrace_function(&test_regs_probe);
	/*
	 * Now if the arch does not support passing regs, then this should
	 * have failed.
	 */
	if (!supported) {
		if (!ret) {
			pr_cont("*registered save-regs without arch support* ");
			goto out;
		}
		test_regs_probe.flags |= FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED;
		ret = register_ftrace_function(&test_regs_probe);
	}
	if (ret) {
		pr_cont("*could not register callback* ");
		goto out;
	}

	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_function(&test_regs_probe);

	ret = -1;

	switch (trace_selftest_regs_stat) {
	case TRACE_SELFTEST_REGS_START:
		pr_cont("*callback never called* ");
		goto out;

	case TRACE_SELFTEST_REGS_FOUND:
		if (supported)
			break;
		pr_cont("*callback received regs without arch support* ");
		goto out;

	case TRACE_SELFTEST_REGS_NOT_FOUND:
		if (!supported)
			break;
		pr_cont("*callback received NULL regs* ");
		goto out;
	}

	ret = 0;
 out:
	ftrace_enabled = save_ftrace_enabled;

	return ret;
}

/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
__init int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	int save_ftrace_enabled = ftrace_enabled;
	unsigned long count;
	int ret;

#ifdef CONFIG_DYNAMIC_FTRACE
	if (ftrace_filter_param) {
		printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
		return 0;
	}
#endif

	/* make sure msleep has been recorded */
	msleep(1);

	/* start the tracing */
	ftrace_enabled = 1;

	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(&tr->array_buffer, &count);

	ftrace_enabled = 1;
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);
	if (ret)
		goto out;

	ret = trace_selftest_function_recursion();
	if (ret)
		goto out;

	ret = trace_selftest_function_regs();
 out:
	ftrace_enabled = save_ftrace_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
#endif /* CONFIG_FUNCTION_TRACER */


#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Maximum number of functions to trace before diagnosing a hang */
#define GRAPH_MAX_FUNC_TEST	100000000

static unsigned int graph_hang_thresh;

/* Wrap the real function entry probe to avoid possible hanging */
static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
{
	/* This is harmlessly racy, we want to approximately detect a hang */
	if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
		ftrace_graph_stop();
		printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
		if (ftrace_dump_on_oops) {
			ftrace_dump(DUMP_ALL);
			/* ftrace_dump() disables tracing */
			tracing_on();
		}
		return 0;
	}

	return trace_graph_entry(trace);
}

static struct fgraph_ops fgraph_ops __initdata = {
	.entryfunc = &trace_graph_entry_watchdog,
	.retfunc = &trace_graph_return,
};

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
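/* ftrace_ops used to attach a direct trampoline alongside the graph tracer */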
static struct ftrace_ops direct;
#endif

/*
 * Pretty much the same as the function tracer selftest, from which
 * this one has been borrowed.
 */
__init int
trace_selftest_startup_function_graph(struct tracer *trace,
				      struct trace_array *tr)
{
	int ret;
	unsigned long count;
	char *func_name __maybe_unused;

#ifdef CONFIG_DYNAMIC_FTRACE
	if (ftrace_filter_param) {
		printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
		return 0;
	}
#endif

	/*
	 * Simulate the init() callback but we attach a watchdog callback
	 * to detect and recover from possible hangs
	 */
	tracing_reset_online_cpus(&tr->array_buffer);
	set_graph_array(tr);
	ret = register_ftrace_graph(&fgraph_ops);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}
	tracing_start_cmdline_record();

	/* Sleep for a 1/10 of a second */
	msleep(100);

	/* Have we just recovered from a hang? */
	if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
		disable_tracing_selftest("recovering from a hang");
		ret = -1;
		goto out;
	}

	tracing_stop();

	/* check the trace buffer */
	ret = trace_test_buffer(&tr->array_buffer, &count);

	/* Need to also simulate the tr->reset to remove this fgraph_ops */
	tracing_stop_cmdline_record();
	unregister_ftrace_graph(&fgraph_ops);

	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
	/*
	 * These tests can take some time to run. Make sure on non PREEMPT
	 * kernels, we do not trigger the softlockup detector.
	 */
	cond_resched();

	tracing_reset_online_cpus(&tr->array_buffer);
	set_graph_array(tr);

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them.
	 */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	ftrace_set_global_filter(func_name, strlen(func_name), 1);

	/*
	 * Register direct function together with graph tracer
	 * and make sure we get graph trace.
	 */
	ftrace_set_filter_ip(&direct, (unsigned long)DYN_FTRACE_TEST_NAME, 0, 0);
	ret = register_ftrace_direct(&direct,
				     (unsigned long)ftrace_stub_direct_tramp);
	if (ret)
		goto out;

	cond_resched();

	ret = register_ftrace_graph(&fgraph_ops);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	DYN_FTRACE_TEST_NAME();

	count = 0;

	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(&tr->array_buffer, &count);

	unregister_ftrace_graph(&fgraph_ops);

	ret = unregister_ftrace_direct(&direct,
				       (unsigned long)ftrace_stub_direct_tramp,
				       true);
	if (ret)
		goto out;

	cond_resched();

	tracing_start();

	if (!ret && !count) {
		ret = -1;
		goto out;
	}

	/* Enable tracing on all functions again */
	ftrace_set_global_filter(NULL, 0, 1);
#endif

	/* Don't test dynamic tracing, the function tracer already did */
 out:
	/* Stop it if we failed */
	if (ret)
		ftrace_graph_stop();

	return ret;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */


#ifdef CONFIG_IRQSOFF_TRACER
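/*
 * Start the irqsoff tracer, keep interrupts disabled for roughly 100us,
 * and then check that both the live buffer and the max-latency snapshot
 * recorded something.
 */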
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tr->max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tr->max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallel max irqs-off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->array_buffer, NULL);
	if (!ret)
		ret = trace_test_buffer(&tr->max_buffer, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tr->max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tr->max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tr->max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallel max preempt-off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->array_buffer, NULL);
	if (!ret)
		ret = trace_test_buffer(&tr->max_buffer, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tr->max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tr->max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out_no_start;
	}

	/* reset the max latency */
	tr->max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallel max irqs/preempt-off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->array_buffer, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&tr->max_buffer, &count);
	if (ret)
		goto out;

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tr->max_latency = 0;
	tracing_start();
	trace->start(tr);

	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->array_buffer, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&tr->max_buffer, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

 out:
	tracing_start();
 out_no_start:
	trace->reset(tr);
	tr->max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_NOP_TRACER
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
	/* What could possibly go wrong? */
	return 0;
}
#endif

#ifdef CONFIG_SCHED_TRACER

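/*
 * The wakeup test spawns a SCHED_DEADLINE kthread, lets it go to sleep,
 * and then wakes it while the wakeup tracer is active, so that the
 * wakeup latency of the highest-priority task ends up in the
 * max-latency snapshot buffer.
 */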
struct wakeup_test_data {
	struct completion is_ready;
	int go;
};

static int trace_wakeup_test_thread(void *data)
{
	/* Make this a -deadline thread */
	static const struct sched_attr attr = {
		.sched_policy = SCHED_DEADLINE,
		.sched_runtime = 100000ULL,
		.sched_deadline = 10000000ULL,
		.sched_period = 10000000ULL
	};
	struct wakeup_test_data *x = data;

	sched_setattr(current, &attr);

	/* Let the test know the new scheduling policy is in effect */
	complete(&x->is_ready);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	while (!x->go) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}

	complete(&x->is_ready);

	set_current_state(TASK_INTERRUPTIBLE);

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}

	__set_current_state(TASK_RUNNING);

	return 0;
}

int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tr->max_latency;
	struct task_struct *p;
	struct wakeup_test_data data;
	unsigned long count;
	int ret;

	memset(&data, 0, sizeof(data));

	init_completion(&data.is_ready);

	/* create a -deadline thread */
	p = kthread_run(trace_wakeup_test_thread, &data, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at -deadline policy */
	wait_for_completion(&data.is_ready);

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tr->max_latency = 0;

	while (p->on_rq) {
		/*
		 * Sleep to make sure the -deadline thread is asleep too.
		 * On virtual machines we can't rely on timings,
		 * but we want to make sure this test still works.
		 */
		msleep(100);
	}

	init_completion(&data.is_ready);

	data.go = 1;
	/* memory barrier is in the wake_up_process() */

	wake_up_process(p);

	/* Wait for the task to wake up */
	wait_for_completion(&data.is_ready);

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->array_buffer, NULL);
	if (!ret)
		ret = trace_test_buffer(&tr->max_buffer, &count);

	trace->reset(tr);
	tracing_start();

	tr->max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_BRANCH_TRACER
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(&tr->array_buffer, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_BRANCH_TRACER */