// SPDX-License-Identifier: GPL-2.0
/*
 * trace task wakeup timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/sched/rt.h>
#include <linux/sched/deadline.h>
#include <trace/events/sched.h>
#include "trace.h"

static struct trace_array	*wakeup_trace;
static int __read_mostly	tracer_enabled;

static struct task_struct	*wakeup_task;
static int			wakeup_cpu;
static int			wakeup_current_cpu;
static unsigned			wakeup_prio = -1;
static int			wakeup_rt;
static int			wakeup_dl;
static int			tracing_dl;

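/*
 * A raw arch_spinlock_t is used rather than a normal spinlock so that
 * taking the lock cannot recurse into the lockdep and tracing code this
 * tracer itself hooks; it is only ever taken with interrupts disabled.
 */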
static arch_spinlock_t wakeup_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static void wakeup_reset(struct trace_array *tr);
static void __wakeup_reset(struct trace_array *tr);
static int start_func_tracer(struct trace_array *tr, int graph);
static void stop_func_tracer(struct trace_array *tr, int graph);

static int save_flags;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
# define is_graph(tr) ((tr)->trace_flags & TRACE_ITER_DISPLAY_GRAPH)
#else
# define is_graph(tr) false
#endif

#ifdef CONFIG_FUNCTION_TRACER

static bool function_enabled;

/*
 * Prologue for the wakeup function tracers.
 *
 * Returns 1 if it is OK to continue, in which case preemption has
 * been disabled and data->disabled has been incremented.
 * Returns 0 if the trace is to be ignored, in which case preemption
 * is left enabled and data->disabled is unchanged.
 *
 * Note, this function is also used outside this ifdef but
 * inside the #ifdef of the function graph tracer below.
 * This is OK, since the function graph tracer is
 * dependent on the function tracer.
 */
static int
func_prolog_preempt_disable(struct trace_array *tr,
			    struct trace_array_cpu **data,
			    int *pc)
{
	long disabled;
	int cpu;

	if (likely(!wakeup_task))
		return 0;

	*pc = preempt_count();
	preempt_disable_notrace();

	cpu = raw_smp_processor_id();
	if (cpu != wakeup_current_cpu)
		goto out_enable;

	*data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&(*data)->disabled);
	if (unlikely(disabled != 1))
		goto out;

	return 1;

out:
	atomic_dec(&(*data)->disabled);

out_enable:
	preempt_enable_notrace();
	return 0;
}
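
/*
 * The expected call pattern for the prologue (see wakeup_tracer_call()
 * and the graph callbacks below) is:
 *
 *	if (!func_prolog_preempt_disable(tr, &data, &pc))
 *		return;
 *	... record the event ...
 *	atomic_dec(&data->disabled);
 *	preempt_enable_notrace();
 */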

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

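/*
 * Invoked when the "display-graph" tracer option is flipped in tracefs
 * (e.g. "echo 1 > options/display-graph"); restarts the function tracer
 * in the requested mode and resets the max latency.
 */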
static int wakeup_display_graph(struct trace_array *tr, int set)
{
	if (!(is_graph(tr) ^ set))
		return 0;

	stop_func_tracer(tr, !set);

	wakeup_reset(wakeup_trace);
	tr->max_latency = 0;

	return start_func_tracer(tr, set);
}

static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int pc, ret = 0;

	if (ftrace_graph_ignore_func(trace))
		return 0;
	/*
	 * Do not trace a function if it's filtered by set_graph_notrace.
	 * Make the index of ret stack negative to indicate that it should
	 * ignore further functions. But it needs its own ret stack entry
	 * to recover the original index in order to continue tracing after
	 * returning from the function.
	 */
	if (ftrace_graph_notrace_addr(trace->func))
		return 1;

	if (!func_prolog_preempt_disable(tr, &data, &pc))
		return 0;

	local_save_flags(flags);
	ret = __trace_graph_entry(tr, trace, flags, pc);
	atomic_dec(&data->disabled);
	preempt_enable_notrace();

	return ret;
}

static void wakeup_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int pc;

	ftrace_graph_addr_finish(trace);

	if (!func_prolog_preempt_disable(tr, &data, &pc))
		return;

	local_save_flags(flags);
	__trace_graph_return(tr, trace, flags, pc);
	atomic_dec(&data->disabled);

	preempt_enable_notrace();
}

static struct fgraph_ops fgraph_wakeup_ops = {
	.entryfunc = &wakeup_graph_entry,
	.retfunc = &wakeup_graph_return,
};

static void wakeup_trace_open(struct trace_iterator *iter)
{
	if (is_graph(iter->tr))
		graph_trace_open(iter);
}

static void wakeup_trace_close(struct trace_iterator *iter)
{
	if (iter->private)
		graph_trace_close(iter);
}

#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_PROC | \
			    TRACE_GRAPH_PRINT_CPU | \
			    TRACE_GRAPH_PRINT_REL_TIME | \
			    TRACE_GRAPH_PRINT_DURATION | \
			    TRACE_GRAPH_PRINT_OVERHEAD | \
			    TRACE_GRAPH_PRINT_IRQS)

static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
{
	/*
	 * In graph mode call the graph tracer output function,
	 * otherwise go with the TRACE_FN event handler
	 */
	if (is_graph(iter->tr))
		return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);

	return TRACE_TYPE_UNHANDLED;
}

static void wakeup_print_header(struct seq_file *s)
{
	if (is_graph(wakeup_trace))
		print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
	else
		trace_default_header(s);
}
#endif /* else CONFIG_FUNCTION_GRAPH_TRACER */

/*
 * wakeup uses its own tracer function to keep the overhead down:
 */
static void
wakeup_tracer_call(unsigned long ip, unsigned long parent_ip,
		   struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int pc;

	if (!func_prolog_preempt_disable(tr, &data, &pc))
		return;

	local_irq_save(flags);
	trace_function(tr, ip, parent_ip, flags, pc);
	local_irq_restore(flags);

	atomic_dec(&data->disabled);
	preempt_enable_notrace();
}

static int register_wakeup_function(struct trace_array *tr, int graph, int set)
{
	int ret;

	/* 'set' is set if TRACE_ITER_FUNCTION is about to be set */
	if (function_enabled || (!set && !(tr->trace_flags & TRACE_ITER_FUNCTION)))
		return 0;

	if (graph)
		ret = register_ftrace_graph(&fgraph_wakeup_ops);
	else
		ret = register_ftrace_function(tr->ops);

	if (!ret)
		function_enabled = true;

	return ret;
}

static void unregister_wakeup_function(struct trace_array *tr, int graph)
{
	if (!function_enabled)
		return;

	if (graph)
		unregister_ftrace_graph(&fgraph_wakeup_ops);
	else
		unregister_ftrace_function(tr->ops);

	function_enabled = false;
}

static int wakeup_function_set(struct trace_array *tr, u32 mask, int set)
{
	if (!(mask & TRACE_ITER_FUNCTION))
		return 0;

	if (set)
		register_wakeup_function(tr, is_graph(tr), 1);
	else
		unregister_wakeup_function(tr, is_graph(tr));
	return 1;
}
#else /* CONFIG_FUNCTION_TRACER */
static int register_wakeup_function(struct trace_array *tr, int graph, int set)
{
	return 0;
}
static void unregister_wakeup_function(struct trace_array *tr, int graph) { }
static int wakeup_function_set(struct trace_array *tr, u32 mask, int set)
{
	return 0;
}
#endif /* else CONFIG_FUNCTION_TRACER */

#ifndef CONFIG_FUNCTION_GRAPH_TRACER
static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
{
	return TRACE_TYPE_UNHANDLED;
}

static void wakeup_trace_open(struct trace_iterator *iter) { }
static void wakeup_trace_close(struct trace_iterator *iter) { }

static void wakeup_print_header(struct seq_file *s)
{
	trace_default_header(s);
}
#endif /* !CONFIG_FUNCTION_GRAPH_TRACER */

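/*
 * Emit either a graph event or a plain function event, depending on
 * whether the display-graph option is set.
 */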
static void
__trace_function(struct trace_array *tr,
		 unsigned long ip, unsigned long parent_ip,
		 unsigned long flags, int pc)
{
	if (is_graph(tr))
		trace_graph_function(tr, ip, parent_ip, flags, pc);
	else
		trace_function(tr, ip, parent_ip, flags, pc);
}

static int wakeup_flag_changed(struct trace_array *tr, u32 mask, int set)
{
	struct tracer *tracer = tr->current_trace;

	if (wakeup_function_set(tr, mask, set))
		return 0;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (mask & TRACE_ITER_DISPLAY_GRAPH)
		return wakeup_display_graph(tr, set);
#endif

	return trace_keep_overwrite(tracer, mask, set);
}

static int start_func_tracer(struct trace_array *tr, int graph)
{
	int ret;

	ret = register_wakeup_function(tr, graph, 0);

	if (!ret && tracing_is_enabled())
		tracer_enabled = 1;
	else
		tracer_enabled = 0;

	return ret;
}

static void stop_func_tracer(struct trace_array *tr, int graph)
{
	tracer_enabled = 0;

	unregister_wakeup_function(tr, graph);
}

/*
 * Should this new latency be reported/recorded?
 */
static bool report_latency(struct trace_array *tr, u64 delta)
{
	if (tracing_thresh) {
		if (delta < tracing_thresh)
			return false;
	} else {
		if (delta <= tr->max_latency)
			return false;
	}
	return true;
}
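
/*
 * Example: with tracing_thresh at its default of zero, a latency is
 * recorded only when it exceeds the current max_latency; after e.g.
 * "echo 100 > tracing_thresh" (tracefs file, in usecs), every latency
 * of at least the threshold is recorded instead.
 */

/*
 * The function tracing callbacks only trace on wakeup_current_cpu (see
 * func_prolog_preempt_disable() above), so keep it up to date if the
 * woken task migrates before it gets to run.
 */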

static void
probe_wakeup_migrate_task(void *ignore, struct task_struct *task, int cpu)
{
	if (task != wakeup_task)
		return;

	wakeup_current_cpu = cpu;
}

static void
tracing_sched_switch_trace(struct trace_array *tr,
			   struct task_struct *prev,
			   struct task_struct *next,
			   unsigned long flags, int pc)
{
	struct trace_event_call *call = &event_context_switch;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ring_buffer_event *event;
	struct ctx_switch_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_CTX,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->prev_pid = prev->pid;
	entry->prev_prio = prev->prio;
	entry->prev_state = task_state_index(prev);
	entry->next_pid = next->pid;
	entry->next_prio = next->prio;
	entry->next_state = task_state_index(next);
	entry->next_cpu = task_cpu(next);

	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit(tr, buffer, event, flags, pc);
}

static void
tracing_sched_wakeup_trace(struct trace_array *tr,
			   struct task_struct *wakee,
			   struct task_struct *curr,
			   unsigned long flags, int pc)
{
	struct trace_event_call *call = &event_wakeup;
	struct ring_buffer_event *event;
	struct ctx_switch_entry *entry;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;

	event = trace_buffer_lock_reserve(buffer, TRACE_WAKE,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->prev_pid = curr->pid;
	entry->prev_prio = curr->prio;
	entry->prev_state = task_state_index(curr);
	entry->next_pid = wakee->pid;
	entry->next_prio = wakee->prio;
	entry->next_state = task_state_index(wakee);
	entry->next_cpu = task_cpu(wakee);

	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit(tr, buffer, event, flags, pc);
}
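
/*
 * Note that both TRACE_CTX and TRACE_WAKE events above reuse
 * struct ctx_switch_entry; for wakeups, "prev" is the waking (current)
 * task and "next" is the wakee.
 */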

static void notrace
probe_wakeup_sched_switch(void *ignore, bool preempt,
			  struct task_struct *prev, struct task_struct *next)
{
	struct trace_array_cpu *data;
	u64 T0, T1, delta;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	tracing_record_cmdline(prev);

	if (unlikely(!tracer_enabled))
		return;

	/*
	 * When we start a new trace, we set wakeup_task to NULL
	 * and then set tracer_enabled = 1. We want to make sure
	 * that another CPU does not see tracer_enabled = 1 together
	 * with a stale wakeup_task from an older trace, which might
	 * happen to be the same as next.
	 */
	smp_rmb();

	if (next != wakeup_task)
		return;

	pc = preempt_count();

	/* disable local data, not wakeup_cpu data */
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
	if (likely(disabled != 1))
		goto out;

	local_irq_save(flags);
	arch_spin_lock(&wakeup_lock);

	/* We could race with grabbing wakeup_lock */
	if (unlikely(!tracer_enabled || next != wakeup_task))
		goto out_unlock;

	/* The task we are waiting for is waking up */
	data = per_cpu_ptr(wakeup_trace->trace_buffer.data, wakeup_cpu);

	__trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc);
	tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc);
	__trace_stack(wakeup_trace, flags, 0, pc);

	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
	delta = T1 - T0;

	if (!report_latency(wakeup_trace, delta))
		goto out_unlock;

	if (likely(!is_tracing_stopped())) {
		wakeup_trace->max_latency = delta;
		update_max_tr(wakeup_trace, wakeup_task, wakeup_cpu, NULL);
	}

out_unlock:
	__wakeup_reset(wakeup_trace);
	arch_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);
out:
	atomic_dec(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
}

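/* Must be called with wakeup_lock held and interrupts disabled. */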
static void __wakeup_reset(struct trace_array *tr)
{
	wakeup_cpu = -1;
	wakeup_prio = -1;
	tracing_dl = 0;

	if (wakeup_task)
		put_task_struct(wakeup_task);

	wakeup_task = NULL;
}

static void wakeup_reset(struct trace_array *tr)
{
	unsigned long flags;

	tracing_reset_online_cpus(&tr->trace_buffer);

	local_irq_save(flags);
	arch_spin_lock(&wakeup_lock);
	__wakeup_reset(tr);
	arch_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);
}

static void
probe_wakeup(void *ignore, struct task_struct *p)
{
	struct trace_array_cpu *data;
	int cpu = smp_processor_id();
	unsigned long flags;
	long disabled;
	int pc;

	if (likely(!tracer_enabled))
		return;

	tracing_record_cmdline(p);
	tracing_record_cmdline(current);

	/*
	 * The semantics are:
	 *  - the wakeup tracer handles all tasks in the system,
	 *    independently of their scheduling class;
	 *  - the wakeup_rt tracer handles tasks belonging to the sched_dl
	 *    and sched_rt classes;
	 *  - wakeup_dl handles tasks belonging to the sched_dl class only.
	 */
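	/*
	 * E.g. the plain wakeup tracer only follows a wakee whose priority
	 * beats both the highest priority already being traced
	 * (wakeup_prio) and the waker's; lower prio values mean higher
	 * priority.
	 */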
	if (tracing_dl || (wakeup_dl && !dl_task(p)) ||
	    (wakeup_rt && !dl_task(p) && !rt_task(p)) ||
	    (!dl_task(p) && (p->prio >= wakeup_prio || p->prio >= current->prio)))
		return;

	pc = preempt_count();
	disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
	if (unlikely(disabled != 1))
		goto out;

	/* interrupts should be off from try_to_wake_up */
	arch_spin_lock(&wakeup_lock);

	/* check for races. */
	if (!tracer_enabled || tracing_dl ||
	    (!dl_task(p) && p->prio >= wakeup_prio))
		goto out_locked;

	/* reset the trace */
	__wakeup_reset(wakeup_trace);

	wakeup_cpu = task_cpu(p);
	wakeup_current_cpu = wakeup_cpu;
	wakeup_prio = p->prio;

	/*
	 * Once you start tracing a -deadline task, don't bother tracing
	 * another task until the first one wakes up.
	 */
	if (dl_task(p))
		tracing_dl = 1;
	else
		tracing_dl = 0;

	wakeup_task = p;
	get_task_struct(wakeup_task);

	local_save_flags(flags);

	data = per_cpu_ptr(wakeup_trace->trace_buffer.data, wakeup_cpu);
	data->preempt_timestamp = ftrace_now(cpu);
	tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc);
	__trace_stack(wakeup_trace, flags, 0, pc);

	/*
	 * We must be careful in using CALLER_ADDR2. But since wake_up
	 * is not called by an assembly function (whereas schedule is)
	 * it should be safe to use it here.
	 */
	__trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);

out_locked:
	arch_spin_unlock(&wakeup_lock);
out:
	atomic_dec(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
}

static void start_wakeup_tracer(struct trace_array *tr)
{
	int ret;

	ret = register_trace_sched_wakeup(probe_wakeup, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup\n");
		return;
	}

	ret = register_trace_sched_wakeup_new(probe_wakeup, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup_new\n");
		goto fail_deprobe;
	}

	ret = register_trace_sched_switch(probe_wakeup_sched_switch, NULL);
	if (ret) {
		pr_info("sched trace: Couldn't activate tracepoint"
			" probe to kernel_sched_switch\n");
		goto fail_deprobe_wake_new;
	}

	ret = register_trace_sched_migrate_task(probe_wakeup_migrate_task, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_migrate_task\n");
		goto fail_deprobe_sched_switch;
	}

	wakeup_reset(tr);

	/*
	 * Don't let the tracer_enabled = 1 show up before
	 * the wakeup_task is reset. This may be overkill since
	 * wakeup_reset does a spin_unlock after setting the
	 * wakeup_task to NULL, but I want to be safe.
	 * This is a slow path anyway.
	 */
	smp_wmb();

	if (start_func_tracer(tr, is_graph(tr)))
		printk(KERN_ERR "failed to start wakeup tracer\n");

	return;
fail_deprobe_sched_switch:
	unregister_trace_sched_switch(probe_wakeup_sched_switch, NULL);
fail_deprobe_wake_new:
	unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
fail_deprobe:
	unregister_trace_sched_wakeup(probe_wakeup, NULL);
}

static void stop_wakeup_tracer(struct trace_array *tr)
{
	tracer_enabled = 0;
	stop_func_tracer(tr, is_graph(tr));
	unregister_trace_sched_switch(probe_wakeup_sched_switch, NULL);
	unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
	unregister_trace_sched_wakeup(probe_wakeup, NULL);
	unregister_trace_sched_migrate_task(probe_wakeup_migrate_task, NULL);
}

static bool wakeup_busy;

static int __wakeup_tracer_init(struct trace_array *tr)
{
	save_flags = tr->trace_flags;

	/* non overwrite screws up the latency tracers */
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);

	tr->max_latency = 0;
	wakeup_trace = tr;
	ftrace_init_array_ops(tr, wakeup_tracer_call);
	start_wakeup_tracer(tr);

	wakeup_busy = true;
	return 0;
}

static int wakeup_tracer_init(struct trace_array *tr)
{
	if (wakeup_busy)
		return -EBUSY;

	wakeup_dl = 0;
	wakeup_rt = 0;
	return __wakeup_tracer_init(tr);
}

static int wakeup_rt_tracer_init(struct trace_array *tr)
{
	if (wakeup_busy)
		return -EBUSY;

	wakeup_dl = 0;
	wakeup_rt = 1;
	return __wakeup_tracer_init(tr);
}

static int wakeup_dl_tracer_init(struct trace_array *tr)
{
	if (wakeup_busy)
		return -EBUSY;

	wakeup_dl = 1;
	wakeup_rt = 0;
	return __wakeup_tracer_init(tr);
}

static void wakeup_tracer_reset(struct trace_array *tr)
{
	int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
	int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;

	stop_wakeup_tracer(tr);
	/* make sure we put back any tasks we are tracing */
	wakeup_reset(tr);

	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
	ftrace_reset_array_ops(tr);
	wakeup_busy = false;
}

static void wakeup_tracer_start(struct trace_array *tr)
{
	wakeup_reset(tr);
	tracer_enabled = 1;
}

static void wakeup_tracer_stop(struct trace_array *tr)
{
	tracer_enabled = 0;
}

static struct tracer wakeup_tracer __read_mostly =
{
	.name		= "wakeup",
	.init		= wakeup_tracer_init,
	.reset		= wakeup_tracer_reset,
	.start		= wakeup_tracer_start,
	.stop		= wakeup_tracer_stop,
	.print_max	= true,
	.print_header	= wakeup_print_header,
	.print_line	= wakeup_print_line,
	.flag_changed	= wakeup_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_wakeup,
#endif
	.open		= wakeup_trace_open,
	.close		= wakeup_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};

static struct tracer wakeup_rt_tracer __read_mostly =
{
	.name		= "wakeup_rt",
	.init		= wakeup_rt_tracer_init,
	.reset		= wakeup_tracer_reset,
	.start		= wakeup_tracer_start,
	.stop		= wakeup_tracer_stop,
	.print_max	= true,
	.print_header	= wakeup_print_header,
	.print_line	= wakeup_print_line,
	.flag_changed	= wakeup_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_wakeup,
#endif
	.open		= wakeup_trace_open,
	.close		= wakeup_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};

static struct tracer wakeup_dl_tracer __read_mostly =
{
	.name		= "wakeup_dl",
	.init		= wakeup_dl_tracer_init,
	.reset		= wakeup_tracer_reset,
	.start		= wakeup_tracer_start,
	.stop		= wakeup_tracer_stop,
	.print_max	= true,
	.print_header	= wakeup_print_header,
	.print_line	= wakeup_print_line,
	.flag_changed	= wakeup_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_wakeup,
#endif
	.open		= wakeup_trace_open,
	.close		= wakeup_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};
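
/*
 * Example usage from user space via tracefs (see
 * Documentation/trace/ftrace.rst):
 *
 *	# echo 0 > tracing_max_latency
 *	# echo wakeup_rt > current_tracer
 *	# echo 1 > tracing_on
 *	... run a workload ...
 *	# echo 0 > tracing_on
 *	# cat tracing_max_latency
 *	# cat trace
 */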

__init static int init_wakeup_tracer(void)
{
	int ret;

	ret = register_tracer(&wakeup_tracer);
	if (ret)
		return ret;

	ret = register_tracer(&wakeup_rt_tracer);
	if (ret)
		return ret;

	ret = register_tracer(&wakeup_dl_tracer);
	if (ret)
		return ret;

	return 0;
}
core_initcall(init_wakeup_tracer);