// SPDX-License-Identifier: GPL-2.0
/* Include in trace.c */

#include <uapi/linux/sched/types.h>
#include <linux/stringify.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/slab.h>

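/* Entry types the selftests may legitimately find in the trace buffer. */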
static inline int trace_valid_entry(struct trace_entry *entry)
{
	switch (entry->type) {
	case TRACE_FN:
	case TRACE_CTX:
	case TRACE_WAKE:
	case TRACE_STACK:
	case TRACE_PRINT:
	case TRACE_BRANCH:
	case TRACE_GRAPH_ENT:
	case TRACE_GRAPH_RET:
		return 1;
	}
	return 0;
}

static int trace_test_buffer_cpu(struct trace_buffer *buf, int cpu)
{
	struct ring_buffer_event *event;
	struct trace_entry *entry;
	unsigned int loops = 0;

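	/* ring_buffer_consume() is a consuming read: each call removes the event it returns */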
	while ((event = ring_buffer_consume(buf->buffer, cpu, NULL, NULL))) {
		entry = ring_buffer_event_data(event);

		/*
		 * The ring buffer holds at most trace_buf_size entries;
		 * if we loop more times than that, something is wrong
		 * with the ring buffer.
		 */
		if (loops++ > trace_buf_size) {
			printk(KERN_CONT ".. bad ring buffer ");
			goto failed;
		}
		if (!trace_valid_entry(entry)) {
			printk(KERN_CONT ".. invalid entry %d ",
				entry->type);
			goto failed;
		}
	}
	return 0;

 failed:
	/* disable tracing */
	tracing_disabled = 1;
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int __maybe_unused trace_test_buffer(struct trace_buffer *buf, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	local_irq_save(flags);
	arch_spin_lock(&buf->tr->max_lock);

	cnt = ring_buffer_entries(buf->buffer);

	/*
	 * trace_test_buffer_cpu() runs a while loop to consume all data.
	 * If the calling tracer is broken, and is constantly filling
	 * the buffer, this will run forever, and hard lock the box.
	 * We disable the ring buffer while we do this test to prevent
	 * a hard lockup.
	 */
	tracing_off();
	for_each_possible_cpu(cpu) {
		ret = trace_test_buffer_cpu(buf, cpu);
		if (ret)
			break;
	}
	tracing_on();
	arch_spin_unlock(&buf->tr->max_lock);
	local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}

static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
	printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
	       trace->name, init_ret);
}

#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

static int trace_selftest_test_probe1_cnt;
static void trace_selftest_test_probe1_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_probe1_cnt++;
}

static int trace_selftest_test_probe2_cnt;
static void trace_selftest_test_probe2_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_probe2_cnt++;
}

static int trace_selftest_test_probe3_cnt;
static void trace_selftest_test_probe3_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_probe3_cnt++;
}

static int trace_selftest_test_global_cnt;
static void trace_selftest_test_global_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_global_cnt++;
}

static int trace_selftest_test_dyn_cnt;
static void trace_selftest_test_dyn_func(unsigned long ip,
					 unsigned long pip,
					 struct ftrace_ops *op,
					 struct pt_regs *pt_regs)
{
	trace_selftest_test_dyn_cnt++;
}

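/*
 * RECURSION_SAFE tells the ftrace core that these callbacks provide their
 * own recursion protection (here they trivially cannot recurse), so ftrace
 * does not need to add its own protection around them.
 */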
static struct ftrace_ops test_probe1 = {
	.func = trace_selftest_test_probe1_func,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct ftrace_ops test_probe2 = {
	.func = trace_selftest_test_probe2_func,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct ftrace_ops test_probe3 = {
	.func = trace_selftest_test_probe3_func,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};

static void print_counts(void)
{
	printk("(%d %d %d %d %d) ",
	       trace_selftest_test_probe1_cnt,
	       trace_selftest_test_probe2_cnt,
	       trace_selftest_test_probe3_cnt,
	       trace_selftest_test_global_cnt,
	       trace_selftest_test_dyn_cnt);
}

static void reset_counts(void)
{
	trace_selftest_test_probe1_cnt = 0;
	trace_selftest_test_probe2_cnt = 0;
	trace_selftest_test_probe3_cnt = 0;
	trace_selftest_test_global_cnt = 0;
	trace_selftest_test_dyn_cnt = 0;
}

static int trace_selftest_ops(struct trace_array *tr, int cnt)
{
	int save_ftrace_enabled = ftrace_enabled;
	struct ftrace_ops *dyn_ops;
	char *func1_name;
	char *func2_name;
	int len1;
	int len2;
	int ret = -1;

	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace ops #%d: ", cnt);

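	/*
	 * cnt tells us which pass this is: on pass 1 the function tracer
	 * itself is still registered; on pass 2 (cnt > 1) we register our
	 * own per-array ops below.
	 */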
	ftrace_enabled = 1;
	reset_counts();

	/* Handle PPC64 '.' name */
	func1_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	func2_name = "*" __stringify(DYN_FTRACE_TEST_NAME2);
	len1 = strlen(func1_name);
	len2 = strlen(func2_name);

	/*
	 * Probe 1 will trace function 1.
	 * Probe 2 will trace function 2.
	 * Probe 3 will trace functions 1 and 2.
	 */
	ftrace_set_filter(&test_probe1, func1_name, len1, 1);
	ftrace_set_filter(&test_probe2, func2_name, len2, 1);
	ftrace_set_filter(&test_probe3, func1_name, len1, 1);
	ftrace_set_filter(&test_probe3, func2_name, len2, 0);

	register_ftrace_function(&test_probe1);
	register_ftrace_function(&test_probe2);
	register_ftrace_function(&test_probe3);
	/* On the first run the main function tracer is already registered */
	if (cnt > 1) {
		ftrace_init_array_ops(tr, trace_selftest_test_global_func);
		register_ftrace_function(tr->ops);
	}

	DYN_FTRACE_TEST_NAME();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 1)
		goto out;
	if (trace_selftest_test_probe2_cnt != 0)
		goto out;
	if (trace_selftest_test_probe3_cnt != 1)
		goto out;
	if (cnt > 1) {
		if (trace_selftest_test_global_cnt == 0)
			goto out;
	}

	DYN_FTRACE_TEST_NAME2();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 1)
		goto out;
	if (trace_selftest_test_probe2_cnt != 1)
		goto out;
	if (trace_selftest_test_probe3_cnt != 2)
		goto out;

	/* Add a dynamic probe */
	dyn_ops = kzalloc(sizeof(*dyn_ops), GFP_KERNEL);
	if (!dyn_ops) {
		printk("MEMORY ERROR ");
		goto out;
	}

	dyn_ops->func = trace_selftest_test_dyn_func;

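	/* No filter was set on dyn_ops, so it hooks every traceable function */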
	register_ftrace_function(dyn_ops);

	trace_selftest_test_global_cnt = 0;

	DYN_FTRACE_TEST_NAME();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 1)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 3)
		goto out_free;
	if (cnt > 1) {
		if (trace_selftest_test_global_cnt == 0)
			goto out_free;
	}
	if (trace_selftest_test_dyn_cnt == 0)
		goto out_free;

	DYN_FTRACE_TEST_NAME2();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 4)
		goto out_free;

	ret = 0;
 out_free:
	unregister_ftrace_function(dyn_ops);
	kfree(dyn_ops);

 out:
	/* Purposely unregister in the same order */
	unregister_ftrace_function(&test_probe1);
	unregister_ftrace_function(&test_probe2);
	unregister_ftrace_function(&test_probe3);
	if (cnt > 1)
		unregister_ftrace_function(tr->ops);
	ftrace_reset_array_ops(tr);

	/* Make sure everything is off */
	reset_counts();
	DYN_FTRACE_TEST_NAME();
	DYN_FTRACE_TEST_NAME();

	if (trace_selftest_test_probe1_cnt ||
	    trace_selftest_test_probe2_cnt ||
	    trace_selftest_test_probe3_cnt ||
	    trace_selftest_test_global_cnt ||
	    trace_selftest_test_dyn_cnt)
		ret = -1;

	ftrace_enabled = save_ftrace_enabled;

	return ret;
}

/* Test dynamic code modification and ftrace filters */
static int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
						  struct trace_array *tr,
						  int (*func)(void))
{
	int save_ftrace_enabled = ftrace_enabled;
	unsigned long count;
	char *func_name;
	int ret;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;

	/* passed in as a parameter to keep gcc from optimizing it away */
	func();

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them.
	 */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);

	/* filter only on our function */
	ftrace_set_global_filter(func_name, strlen(func_name), 1);

	/* enable tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(&tr->trace_buffer, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(&tr->trace_buffer, &count);

	ftrace_enabled = 1;
	tracing_start();

	/* we should only have one item */
	if (!ret && count != 1) {
		trace->reset(tr);
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}

	/* Test the ops with global tracing running */
	ret = trace_selftest_ops(tr, 1);
	trace->reset(tr);

 out:
	ftrace_enabled = save_ftrace_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_global_filter(NULL, 0, 1);

	/* Test the ops with global tracing off */
	if (!ret)
		ret = trace_selftest_ops(tr, 2);

	return ret;
}

static int trace_selftest_recursion_cnt;
static void trace_selftest_test_recursion_func(unsigned long ip,
					       unsigned long pip,
					       struct ftrace_ops *op,
					       struct pt_regs *pt_regs)
{
	/*
	 * This function is registered without the recursion safe flag.
	 * The ftrace infrastructure should provide the recursion
	 * protection. If not, this will crash the kernel!
	 */
	if (trace_selftest_recursion_cnt++ > 10)
		return;
	DYN_FTRACE_TEST_NAME();
}

static void trace_selftest_test_recursion_safe_func(unsigned long ip,
						    unsigned long pip,
						    struct ftrace_ops *op,
						    struct pt_regs *pt_regs)
{
	/*
	 * We said we would provide our own recursion protection. By
	 * calling this function again, we should recurse back into this
	 * function and count again. But this only happens if the arch
	 * supports all ftrace features and nothing else is using the
	 * function tracing utility.
	 */
	if (trace_selftest_recursion_cnt++)
		return;
	DYN_FTRACE_TEST_NAME();
}

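/* Deliberately NOT marked RECURSION_SAFE: ftrace itself must stop the recursion */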
static struct ftrace_ops test_rec_probe = {
	.func = trace_selftest_test_recursion_func,
};

static struct ftrace_ops test_recsafe_probe = {
	.func = trace_selftest_test_recursion_safe_func,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};

static int
trace_selftest_function_recursion(void)
{
	int save_ftrace_enabled = ftrace_enabled;
	char *func_name;
	int len;
	int ret;

	/* The previous test PASSED */
	pr_cont("PASSED\n");
	pr_info("Testing ftrace recursion: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;

	/* Handle PPC64 '.' name */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	len = strlen(func_name);

	ret = ftrace_set_filter(&test_rec_probe, func_name, len, 1);
	if (ret) {
		pr_cont("*Could not set filter* ");
		goto out;
	}

	ret = register_ftrace_function(&test_rec_probe);
	if (ret) {
		pr_cont("*could not register callback* ");
		goto out;
	}

	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_function(&test_rec_probe);

	ret = -1;
	if (trace_selftest_recursion_cnt != 1) {
		pr_cont("*callback not called once (%d)* ",
			trace_selftest_recursion_cnt);
		goto out;
	}

	trace_selftest_recursion_cnt = 1;

	pr_cont("PASSED\n");
	pr_info("Testing ftrace recursion safe: ");

	ret = ftrace_set_filter(&test_recsafe_probe, func_name, len, 1);
	if (ret) {
		pr_cont("*Could not set filter* ");
		goto out;
	}

	ret = register_ftrace_function(&test_recsafe_probe);
	if (ret) {
		pr_cont("*could not register callback* ");
		goto out;
	}

	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_function(&test_recsafe_probe);

	ret = -1;
	if (trace_selftest_recursion_cnt != 2) {
		pr_cont("*callback not called expected 2 times (%d)* ",
			trace_selftest_recursion_cnt);
		goto out;
	}

	ret = 0;
out:
	ftrace_enabled = save_ftrace_enabled;

	return ret;
}
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
# define trace_selftest_function_recursion() ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */

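/* Records whether the SAVE_REGS callback actually received a pt_regs pointer */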
static enum {
	TRACE_SELFTEST_REGS_START,
	TRACE_SELFTEST_REGS_FOUND,
	TRACE_SELFTEST_REGS_NOT_FOUND,
} trace_selftest_regs_stat;

static void trace_selftest_test_regs_func(unsigned long ip,
					  unsigned long pip,
					  struct ftrace_ops *op,
					  struct pt_regs *pt_regs)
{
	if (pt_regs)
		trace_selftest_regs_stat = TRACE_SELFTEST_REGS_FOUND;
	else
		trace_selftest_regs_stat = TRACE_SELFTEST_REGS_NOT_FOUND;
}

static struct ftrace_ops test_regs_probe = {
	.func = trace_selftest_test_regs_func,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_SAVE_REGS,
};

static int
trace_selftest_function_regs(void)
{
	int save_ftrace_enabled = ftrace_enabled;
	char *func_name;
	int len;
	int ret;
	int supported = 0;

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	supported = 1;
#endif
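	/*
	 * Only archs with DYNAMIC_FTRACE_WITH_REGS can honor SAVE_REGS
	 * unconditionally; the registration below must fail elsewhere.
	 */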

	/* The previous test PASSED */
	pr_cont("PASSED\n");
	pr_info("Testing ftrace regs%s: ",
		!supported ? "(no arch support)" : "");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;

	/* Handle PPC64 '.' name */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	len = strlen(func_name);

	ret = ftrace_set_filter(&test_regs_probe, func_name, len, 1);
	/*
	 * If DYNAMIC_FTRACE is not set, then we just trace all functions.
	 * This test really doesn't care.
	 */
	if (ret && ret != -ENODEV) {
		pr_cont("*Could not set filter* ");
		goto out;
	}

	ret = register_ftrace_function(&test_regs_probe);
	/*
	 * Now if the arch does not support passing regs, then this should
	 * have failed.
	 */
	if (!supported) {
		if (!ret) {
			pr_cont("*registered save-regs without arch support* ");
			goto out;
		}
		test_regs_probe.flags |= FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED;
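		/* With _IF_SUPPORTED set, registration succeeds but regs may be NULL */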
		ret = register_ftrace_function(&test_regs_probe);
	}
	if (ret) {
		pr_cont("*could not register callback* ");
		goto out;
	}

	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_function(&test_regs_probe);

	ret = -1;

	switch (trace_selftest_regs_stat) {
	case TRACE_SELFTEST_REGS_START:
		pr_cont("*callback never called* ");
		goto out;

	case TRACE_SELFTEST_REGS_FOUND:
		if (supported)
			break;
		pr_cont("*callback received regs without arch support* ");
		goto out;

	case TRACE_SELFTEST_REGS_NOT_FOUND:
		if (!supported)
			break;
		pr_cont("*callback received NULL regs* ");
		goto out;
	}

	ret = 0;
out:
	ftrace_enabled = save_ftrace_enabled;

	return ret;
}

/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
__init int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	int save_ftrace_enabled = ftrace_enabled;
	unsigned long count;
	int ret;

#ifdef CONFIG_DYNAMIC_FTRACE
	if (ftrace_filter_param) {
		printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
		return 0;
	}
#endif

	/* make sure msleep has been recorded */
	msleep(1);

	/* start the tracing */
	ftrace_enabled = 1;

	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(&tr->trace_buffer, &count);

	ftrace_enabled = 1;
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);
	if (ret)
		goto out;

	ret = trace_selftest_function_recursion();
	if (ret)
		goto out;

	ret = trace_selftest_function_regs();
 out:
	ftrace_enabled = save_ftrace_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Maximum number of functions to trace before diagnosing a hang */
#define GRAPH_MAX_FUNC_TEST	100000000

static unsigned int graph_hang_thresh;

/* Wrap the real function entry probe to avoid possible hanging */
static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
{
	/* This is harmlessly racy; we only need to roughly detect a hang */
	if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
		ftrace_graph_stop();
		printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
		if (ftrace_dump_on_oops) {
			ftrace_dump(DUMP_ALL);
			/* ftrace_dump() disables tracing */
			tracing_on();
		}
		return 0;
	}

	return trace_graph_entry(trace);
}

static struct fgraph_ops fgraph_ops __initdata = {
	.entryfunc = &trace_graph_entry_watchdog,
	.retfunc = &trace_graph_return,
};

/*
 * Pretty much the same as the function tracer selftest, from which
 * this one has been borrowed.
 */
__init int
trace_selftest_startup_function_graph(struct tracer *trace,
				      struct trace_array *tr)
{
	int ret;
	unsigned long count;

#ifdef CONFIG_DYNAMIC_FTRACE
	if (ftrace_filter_param) {
		printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
		return 0;
	}
#endif

	/*
	 * Simulate the init() callback, but attach a watchdog callback
	 * to detect and recover from possible hangs.
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);
	set_graph_array(tr);
	ret = register_ftrace_graph(&fgraph_ops);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}
	tracing_start_cmdline_record();

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* Have we just recovered from a hang? */
	if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
		tracing_selftest_disabled = true;
		ret = -1;
		goto out;
	}

	tracing_stop();

	/* check the trace buffer */
	ret = trace_test_buffer(&tr->trace_buffer, &count);

	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* Don't test dynamic tracing, the function tracer already did */

out:
	/* Stop it if we failed */
	if (ret)
		ftrace_graph_stop();

	return ret;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_IRQSOFF_TRACER
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tr->max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tr->max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallel max irqs-off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->trace_buffer, NULL);
	if (!ret)
		ret = trace_test_buffer(&tr->max_buffer, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tr->max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tr->max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tr->max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallel max preempt-off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->trace_buffer, NULL);
	if (!ret)
		ret = trace_test_buffer(&tr->max_buffer, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tr->max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tr->max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out_no_start;
	}

	/* reset the max latency */
	tr->max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallel max irqs/preempt-off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->trace_buffer, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&tr->max_buffer, &count);
	if (ret)
		goto out;

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tr->max_latency = 0;
	tracing_start();
	trace->start(tr);

	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->trace_buffer, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&tr->max_buffer, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

out:
	tracing_start();
out_no_start:
	trace->reset(tr);
	tr->max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_NOP_TRACER
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
	/* What could possibly go wrong? */
	return 0;
}
#endif

#ifdef CONFIG_SCHED_TRACER

struct wakeup_test_data {
	struct completion is_ready;
	int go;
};

static int trace_wakeup_test_thread(void *data)
{
	/* Make this a -deadline thread */
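	/* 100us of runtime every 10ms period: a tiny-utilization deadline task */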
	static const struct sched_attr attr = {
		.sched_policy = SCHED_DEADLINE,
		.sched_runtime = 100000ULL,
		.sched_deadline = 10000000ULL,
		.sched_period = 10000000ULL
	};
	struct wakeup_test_data *x = data;

	sched_setattr(current, &attr);

	/* Let the test know we are now running with the new policy */
	complete(&x->is_ready);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	while (!x->go) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}

	complete(&x->is_ready);

	set_current_state(TASK_INTERRUPTIBLE);

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}

	__set_current_state(TASK_RUNNING);

	return 0;
}

int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tr->max_latency;
	struct task_struct *p;
	struct wakeup_test_data data;
	unsigned long count;
	int ret;

	memset(&data, 0, sizeof(data));

	init_completion(&data.is_ready);

	/* create a -deadline thread */
	p = kthread_run(trace_wakeup_test_thread, &data, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at -deadline policy */
	wait_for_completion(&data.is_ready);

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tr->max_latency = 0;

	while (p->on_rq) {
		/*
		 * Sleep to make sure the -deadline thread is asleep too.
		 * On virtual machines we can't rely on timings,
		 * but we want to make sure this test still works.
		 */
		msleep(100);
	}

	init_completion(&data.is_ready);

	data.go = 1;
	/* memory barrier is in the wake_up_process() */
	wake_up_process(p);

	/* Wait for the task to wake up */
	wait_for_completion(&data.is_ready);

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->trace_buffer, NULL);
	if (!ret)
		ret = trace_test_buffer(&tr->max_buffer, &count);

	trace->reset(tr);
	tracing_start();

	tr->max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_BRANCH_TRACER
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(&tr->trace_buffer, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_BRANCH_TRACER */