// SPDX-License-Identifier: GPL-2.0
/*
 * trace irqs off critical timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * From code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/kprobes.h>

#include "trace.h"

#include <trace/events/preemptirq.h>

#if defined(CONFIG_IRQSOFF_TRACER) || defined(CONFIG_PREEMPT_TRACER)
static struct trace_array *irqsoff_trace __read_mostly;
static int tracer_enabled __read_mostly;

static DEFINE_PER_CPU(int, tracing_cpu);

static DEFINE_RAW_SPINLOCK(max_trace_lock);

enum {
        TRACER_IRQS_OFF         = (1 << 1),
        TRACER_PREEMPT_OFF      = (1 << 2),
};

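/*
 * trace_type selects which critical sections this tracer instance reacts
 * to: irqsoff sets TRACER_IRQS_OFF, preemptoff sets TRACER_PREEMPT_OFF,
 * and preemptirqsoff sets both bits.
 */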
static int trace_type __read_mostly;

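/*
 * save_flags snapshots the trace flags at init time so that
 * __irqsoff_tracer_reset() can restore the bits this tracer forces on
 * (latency format, overwrite, pause-on-trace).
 */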
static int save_flags;

static void stop_irqsoff_tracer(struct trace_array *tr, int graph);
static int start_irqsoff_tracer(struct trace_array *tr, int graph);

#ifdef CONFIG_PREEMPT_TRACER
static inline int
preempt_trace(int pc)
{
        return ((trace_type & TRACER_PREEMPT_OFF) && pc);
}
#else
# define preempt_trace(pc) (0)
#endif

#ifdef CONFIG_IRQSOFF_TRACER
static inline int
irq_trace(void)
{
        return ((trace_type & TRACER_IRQS_OFF) &&
                irqs_disabled());
}
#else
# define irq_trace() (0)
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int irqsoff_display_graph(struct trace_array *tr, int set);
# define is_graph(tr) ((tr)->trace_flags & TRACE_ITER_DISPLAY_GRAPH)
#else
static inline int irqsoff_display_graph(struct trace_array *tr, int set)
{
        return -EINVAL;
}
# define is_graph(tr) false
#endif

/*
 * Sequence count - we record it when starting a measurement and
 * skip the latency if the sequence has changed - some other section
 * did a maximum and could disturb our measurement with serial console
 * printouts, etc. Truly coinciding maximum latencies should be rare
 * and what happens together happens separately as well, so this doesn't
 * decrease the validity of the maximum found:
 */
static __cacheline_aligned_in_smp unsigned long max_sequence;

#ifdef CONFIG_FUNCTION_TRACER
/*
 * Prologue for the preempt and irqs off function tracers.
 *
 * Returns 1 if it is OK to continue, and data->disabled is
 *           incremented.
 *         0 if the trace is to be ignored, and data->disabled
 *           is kept the same.
 *
 * Note, this function is also used outside this ifdef but
 * inside the #ifdef of the function graph tracer below.
 * This is OK, since the function graph tracer is
 * dependent on the function tracer.
 */
static int func_prolog_dec(struct trace_array *tr,
                           struct trace_array_cpu **data,
                           unsigned long *flags)
{
        long disabled;
        int cpu;

        /*
         * Does not matter if we preempt. We test the flags
         * afterward, to see if irqs are disabled or not.
         * If we preempt and get a false positive, the flags
         * test will fail.
         */
        cpu = raw_smp_processor_id();
        if (likely(!per_cpu(tracing_cpu, cpu)))
                return 0;

        local_save_flags(*flags);
        /*
         * Slight chance to get a false positive on tracing_cpu,
         * although I'm starting to think there isn't a chance.
         * Leave this for now just to be paranoid.
         */
        if (!irqs_disabled_flags(*flags) && !preempt_count())
                return 0;

        *data = per_cpu_ptr(tr->array_buffer.data, cpu);
        disabled = atomic_inc_return(&(*data)->disabled);

        if (likely(disabled == 1))
                return 1;

        atomic_dec(&(*data)->disabled);

        return 0;
}

/*
 * irqsoff uses its own tracer function to keep the overhead down:
 */
static void
irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip,
                    struct ftrace_ops *op, struct ftrace_regs *fregs)
{
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        unsigned int trace_ctx;

        if (!func_prolog_dec(tr, &data, &flags))
                return;

        trace_ctx = tracing_gen_ctx_flags(flags);

        trace_function(tr, ip, parent_ip, trace_ctx);

        atomic_dec(&data->disabled);
}
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
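/*
 * Toggling the display-graph flag switches between the function and
 * function-graph callbacks, so stop the tracer, clear the per-CPU state
 * and the recorded max latency, and restart it in the requested mode.
 */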
static int irqsoff_display_graph(struct trace_array *tr, int set)
{
        int cpu;

        if (!(is_graph(tr) ^ set))
                return 0;

        stop_irqsoff_tracer(irqsoff_trace, !set);

        for_each_possible_cpu(cpu)
                per_cpu(tracing_cpu, cpu) = 0;

        tr->max_latency = 0;
        tracing_reset_online_cpus(&irqsoff_trace->array_buffer);

        return start_irqsoff_tracer(irqsoff_trace, set);
}

static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        unsigned int trace_ctx;
        int ret;

        if (ftrace_graph_ignore_func(trace))
                return 0;
        /*
         * Do not trace a function if it's filtered by set_graph_notrace.
         * Make the index of ret stack negative to indicate that it should
         * ignore further functions. But it needs its own ret stack entry
         * to recover the original index in order to continue tracing after
         * returning from the function.
         */
        if (ftrace_graph_notrace_addr(trace->func))
                return 1;

        if (!func_prolog_dec(tr, &data, &flags))
                return 0;

        trace_ctx = tracing_gen_ctx_flags(flags);
        ret = __trace_graph_entry(tr, trace, trace_ctx);
        atomic_dec(&data->disabled);

        return ret;
}

static void irqsoff_graph_return(struct ftrace_graph_ret *trace)
{
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        unsigned int trace_ctx;

        ftrace_graph_addr_finish(trace);

        if (!func_prolog_dec(tr, &data, &flags))
                return;

        trace_ctx = tracing_gen_ctx_flags(flags);
        __trace_graph_return(tr, trace, trace_ctx);
        atomic_dec(&data->disabled);
}

static struct fgraph_ops fgraph_ops = {
        .entryfunc      = &irqsoff_graph_entry,
        .retfunc        = &irqsoff_graph_return,
};

static void irqsoff_trace_open(struct trace_iterator *iter)
{
        if (is_graph(iter->tr))
                graph_trace_open(iter);
        else
                iter->private = NULL;
}

static void irqsoff_trace_close(struct trace_iterator *iter)
{
        if (iter->private)
                graph_trace_close(iter);
}

#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_CPU | \
                            TRACE_GRAPH_PRINT_PROC | \
                            TRACE_GRAPH_PRINT_REL_TIME | \
                            TRACE_GRAPH_PRINT_DURATION)

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
        /*
         * In graph mode call the graph tracer output function,
         * otherwise go with the TRACE_FN event handler
         */
        if (is_graph(iter->tr))
                return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);

        return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_print_header(struct seq_file *s)
{
        struct trace_array *tr = irqsoff_trace;

        if (is_graph(tr))
                print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
        else
                trace_default_header(s);
}

static void
__trace_function(struct trace_array *tr,
                 unsigned long ip, unsigned long parent_ip,
                 unsigned int trace_ctx)
{
        if (is_graph(tr))
                trace_graph_function(tr, ip, parent_ip, trace_ctx);
        else
                trace_function(tr, ip, parent_ip, trace_ctx);
}

#else
#define __trace_function trace_function

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
        return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_trace_open(struct trace_iterator *iter) { }
static void irqsoff_trace_close(struct trace_iterator *iter) { }

#ifdef CONFIG_FUNCTION_TRACER
static void irqsoff_print_header(struct seq_file *s)
{
        trace_default_header(s);
}
#else
static void irqsoff_print_header(struct seq_file *s)
{
        trace_latency_header(s);
}
#endif /* CONFIG_FUNCTION_TRACER */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

/*
 * Should this new latency be reported/recorded?
 */
static bool report_latency(struct trace_array *tr, u64 delta)
{
        if (tracing_thresh) {
                if (delta < tracing_thresh)
                        return false;
        } else {
                if (delta <= tr->max_latency)
                        return false;
        }
        return true;
}

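/*
 * Called when a critical section ends: compute how long irqs/preemption
 * were disabled and, if this is a new maximum (or exceeds tracing_thresh),
 * record it via update_max_tr_single() under max_trace_lock.
 */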
static void
check_critical_timing(struct trace_array *tr,
                      struct trace_array_cpu *data,
                      unsigned long parent_ip,
                      int cpu)
{
        u64 T0, T1, delta;
        unsigned long flags;
        unsigned int trace_ctx;

        T0 = data->preempt_timestamp;
        T1 = ftrace_now(cpu);
        delta = T1 - T0;

        trace_ctx = tracing_gen_ctx();

        if (!report_latency(tr, delta))
                goto out;

        raw_spin_lock_irqsave(&max_trace_lock, flags);

        /* check if we are still the max latency */
        if (!report_latency(tr, delta))
                goto out_unlock;

        __trace_function(tr, CALLER_ADDR0, parent_ip, trace_ctx);
        /* Skip 5 functions to get to the irq/preempt enable function */
        __trace_stack(tr, trace_ctx, 5);

        if (data->critical_sequence != max_sequence)
                goto out_unlock;

        data->critical_end = parent_ip;

        if (likely(!is_tracing_stopped())) {
                tr->max_latency = delta;
                update_max_tr_single(tr, current, cpu);
        }

        max_sequence++;

out_unlock:
        raw_spin_unlock_irqrestore(&max_trace_lock, flags);

out:
        data->critical_sequence = max_sequence;
        data->preempt_timestamp = ftrace_now(cpu);
        __trace_function(tr, CALLER_ADDR0, parent_ip, trace_ctx);
}

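/*
 * Mark the start of an irqs/preempt-off critical section on this CPU:
 * record the timestamp, sequence number and start address, and set
 * tracing_cpu so the function callbacks know to trace.
 */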
static nokprobe_inline void
start_critical_timing(unsigned long ip, unsigned long parent_ip)
{
        int cpu;
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;

        if (!tracer_enabled || !tracing_is_enabled())
                return;

        cpu = raw_smp_processor_id();

        if (per_cpu(tracing_cpu, cpu))
                return;

        data = per_cpu_ptr(tr->array_buffer.data, cpu);

        if (unlikely(!data) || atomic_read(&data->disabled))
                return;

        atomic_inc(&data->disabled);

        data->critical_sequence = max_sequence;
        data->preempt_timestamp = ftrace_now(cpu);
        data->critical_start = parent_ip ? : ip;

        __trace_function(tr, ip, parent_ip, tracing_gen_ctx());

        per_cpu(tracing_cpu, cpu) = 1;

        atomic_dec(&data->disabled);
}

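/*
 * Mark the end of the critical section: clear tracing_cpu, emit the final
 * function entry and let check_critical_timing() decide whether this was
 * a new maximum latency.
 */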
static nokprobe_inline void
stop_critical_timing(unsigned long ip, unsigned long parent_ip)
{
        int cpu;
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned int trace_ctx;

        cpu = raw_smp_processor_id();
        /* Always clear the tracing cpu on stopping the trace */
        if (unlikely(per_cpu(tracing_cpu, cpu)))
                per_cpu(tracing_cpu, cpu) = 0;
        else
                return;

        if (!tracer_enabled || !tracing_is_enabled())
                return;

        data = per_cpu_ptr(tr->array_buffer.data, cpu);

        if (unlikely(!data) ||
            !data->critical_start || atomic_read(&data->disabled))
                return;

        atomic_inc(&data->disabled);

        trace_ctx = tracing_gen_ctx();
        __trace_function(tr, ip, parent_ip, trace_ctx);
        check_critical_timing(tr, data, parent_ip ? : ip, cpu);
        data->critical_start = 0;
        atomic_dec(&data->disabled);
}

/* start and stop critical timings, used around periods of stoppage (in idle) */
void start_critical_timings(void)
{
        if (preempt_trace(preempt_count()) || irq_trace())
                start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(start_critical_timings);
NOKPROBE_SYMBOL(start_critical_timings);

void stop_critical_timings(void)
{
        if (preempt_trace(preempt_count()) || irq_trace())
                stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(stop_critical_timings);
NOKPROBE_SYMBOL(stop_critical_timings);

#ifdef CONFIG_FUNCTION_TRACER
static bool function_enabled;

static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
{
        int ret;

        /* 'set' is set if TRACE_ITER_FUNCTION is about to be set */
        if (function_enabled || (!set && !(tr->trace_flags & TRACE_ITER_FUNCTION)))
                return 0;

        if (graph)
                ret = register_ftrace_graph(&fgraph_ops);
        else
                ret = register_ftrace_function(tr->ops);

        if (!ret)
                function_enabled = true;

        return ret;
}

static void unregister_irqsoff_function(struct trace_array *tr, int graph)
{
        if (!function_enabled)
                return;

        if (graph)
                unregister_ftrace_graph(&fgraph_ops);
        else
                unregister_ftrace_function(tr->ops);

        function_enabled = false;
}

static int irqsoff_function_set(struct trace_array *tr, u32 mask, int set)
{
        if (!(mask & TRACE_ITER_FUNCTION))
                return 0;

        if (set)
                register_irqsoff_function(tr, is_graph(tr), 1);
        else
                unregister_irqsoff_function(tr, is_graph(tr));
        return 1;
}
#else
static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
{
        return 0;
}
static void unregister_irqsoff_function(struct trace_array *tr, int graph) { }
static inline int irqsoff_function_set(struct trace_array *tr, u32 mask, int set)
{
        return 0;
}
#endif /* CONFIG_FUNCTION_TRACER */

static int irqsoff_flag_changed(struct trace_array *tr, u32 mask, int set)
{
        struct tracer *tracer = tr->current_trace;

        if (irqsoff_function_set(tr, mask, set))
                return 0;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        if (mask & TRACE_ITER_DISPLAY_GRAPH)
                return irqsoff_display_graph(tr, set);
#endif

        return trace_keep_overwrite(tracer, mask, set);
}

static int start_irqsoff_tracer(struct trace_array *tr, int graph)
{
        int ret;

        ret = register_irqsoff_function(tr, graph, 0);

        if (!ret && tracing_is_enabled())
                tracer_enabled = 1;
        else
                tracer_enabled = 0;

        return ret;
}

static void stop_irqsoff_tracer(struct trace_array *tr, int graph)
{
        tracer_enabled = 0;

        unregister_irqsoff_function(tr, graph);
}

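/*
 * Only one of the irqsoff/preemptoff/preemptirqsoff tracers may be active
 * at a time; irqsoff_busy enforces that in __irqsoff_tracer_init().
 */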
static bool irqsoff_busy;

static int __irqsoff_tracer_init(struct trace_array *tr)
{
        if (irqsoff_busy)
                return -EBUSY;

        save_flags = tr->trace_flags;

        /* non overwrite screws up the latency tracers */
        set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
        set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);
        /* without pause, we will produce garbage if another latency occurs */
        set_tracer_flag(tr, TRACE_ITER_PAUSE_ON_TRACE, 1);

        tr->max_latency = 0;
        irqsoff_trace = tr;
        /* make sure that the tracer is visible */
        smp_wmb();

        ftrace_init_array_ops(tr, irqsoff_tracer_call);

        /* Only toplevel instance supports graph tracing */
        if (start_irqsoff_tracer(tr, (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
                                      is_graph(tr))))
                printk(KERN_ERR "failed to start irqsoff tracer\n");

        irqsoff_busy = true;
        return 0;
}

static void __irqsoff_tracer_reset(struct trace_array *tr)
{
        int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
        int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;
        int pause_flag = save_flags & TRACE_ITER_PAUSE_ON_TRACE;

        stop_irqsoff_tracer(tr, is_graph(tr));

        set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
        set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
        set_tracer_flag(tr, TRACE_ITER_PAUSE_ON_TRACE, pause_flag);
        ftrace_reset_array_ops(tr);

        irqsoff_busy = false;
}

static void irqsoff_tracer_start(struct trace_array *tr)
{
        tracer_enabled = 1;
}

static void irqsoff_tracer_stop(struct trace_array *tr)
{
        tracer_enabled = 0;
}

#ifdef CONFIG_IRQSOFF_TRACER
/*
 * We are only interested in hardirq on/off events:
 */
void tracer_hardirqs_on(unsigned long a0, unsigned long a1)
{
        if (!preempt_trace(preempt_count()) && irq_trace())
                stop_critical_timing(a0, a1);
}
NOKPROBE_SYMBOL(tracer_hardirqs_on);

void tracer_hardirqs_off(unsigned long a0, unsigned long a1)
{
        if (!preempt_trace(preempt_count()) && irq_trace())
                start_critical_timing(a0, a1);
}
NOKPROBE_SYMBOL(tracer_hardirqs_off);

static int irqsoff_tracer_init(struct trace_array *tr)
{
        trace_type = TRACER_IRQS_OFF;

        return __irqsoff_tracer_init(tr);
}

static void irqsoff_tracer_reset(struct trace_array *tr)
{
        __irqsoff_tracer_reset(tr);
}

static struct tracer irqsoff_tracer __read_mostly =
{
        .name           = "irqsoff",
        .init           = irqsoff_tracer_init,
        .reset          = irqsoff_tracer_reset,
        .start          = irqsoff_tracer_start,
        .stop           = irqsoff_tracer_stop,
        .print_max      = true,
        .print_header   = irqsoff_print_header,
        .print_line     = irqsoff_print_line,
        .flag_changed   = irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest       = trace_selftest_startup_irqsoff,
#endif
        .open           = irqsoff_trace_open,
        .close          = irqsoff_trace_close,
        .allow_instances = true,
        .use_max_tr     = true,
};
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
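/*
 * We are only interested in preempt on/off events, and only while the
 * irqsoff side is not already tracing an irqs-disabled window (that
 * window is accounted for by the hardirq callbacks).
 */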
void tracer_preempt_on(unsigned long a0, unsigned long a1)
{
        if (preempt_trace(preempt_count()) && !irq_trace())
                stop_critical_timing(a0, a1);
}

void tracer_preempt_off(unsigned long a0, unsigned long a1)
{
        if (preempt_trace(preempt_count()) && !irq_trace())
                start_critical_timing(a0, a1);
}

static int preemptoff_tracer_init(struct trace_array *tr)
{
        trace_type = TRACER_PREEMPT_OFF;

        return __irqsoff_tracer_init(tr);
}

static void preemptoff_tracer_reset(struct trace_array *tr)
{
        __irqsoff_tracer_reset(tr);
}

static struct tracer preemptoff_tracer __read_mostly =
{
        .name           = "preemptoff",
        .init           = preemptoff_tracer_init,
        .reset          = preemptoff_tracer_reset,
        .start          = irqsoff_tracer_start,
        .stop           = irqsoff_tracer_stop,
        .print_max      = true,
        .print_header   = irqsoff_print_header,
        .print_line     = irqsoff_print_line,
        .flag_changed   = irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest       = trace_selftest_startup_preemptoff,
#endif
        .open           = irqsoff_trace_open,
        .close          = irqsoff_trace_close,
        .allow_instances = true,
        .use_max_tr     = true,
};
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)

static int preemptirqsoff_tracer_init(struct trace_array *tr)
{
        trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;

        return __irqsoff_tracer_init(tr);
}

static void preemptirqsoff_tracer_reset(struct trace_array *tr)
{
        __irqsoff_tracer_reset(tr);
}

static struct tracer preemptirqsoff_tracer __read_mostly =
{
        .name           = "preemptirqsoff",
        .init           = preemptirqsoff_tracer_init,
        .reset          = preemptirqsoff_tracer_reset,
        .start          = irqsoff_tracer_start,
        .stop           = irqsoff_tracer_stop,
        .print_max      = true,
        .print_header   = irqsoff_print_header,
        .print_line     = irqsoff_print_line,
        .flag_changed   = irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest       = trace_selftest_startup_preemptirqsoff,
#endif
        .open           = irqsoff_trace_open,
        .close          = irqsoff_trace_close,
        .allow_instances = true,
        .use_max_tr     = true,
};
#endif

__init static int init_irqsoff_tracer(void)
{
#ifdef CONFIG_IRQSOFF_TRACER
        register_tracer(&irqsoff_tracer);
#endif
#ifdef CONFIG_PREEMPT_TRACER
        register_tracer(&preemptoff_tracer);
#endif
#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
        register_tracer(&preemptirqsoff_tracer);
#endif

        return 0;
}
core_initcall(init_irqsoff_tracer);
#endif /* CONFIG_IRQSOFF_TRACER || CONFIG_PREEMPT_TRACER */