// SPDX-License-Identifier: GPL-2.0
/*
 * kdb helper for dumping the ftrace buffer
 *
 * Copyright (C) 2010 Jason Wessel <jason.wessel@windriver.com>
 *
 * ftrace_dump_buf based on ftrace_dump:
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 */
#include <linux/init.h>
#include <linux/kgdb.h>
#include <linux/kdb.h>
#include <linux/ftrace.h>

#include "trace.h"
#include "trace_output.h"

static struct trace_iterator iter;
static struct ring_buffer_iter *buffer_iter[CONFIG_NR_CPUS];

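/*
 * Dump the ftrace ring buffer contents through kdb_printf().
 *
 * @skip_entries: number of leading entries to skip before printing
 * @cpu_file:     a single CPU number, or RING_BUFFER_ALL_CPUS to walk
 *                the buffers of every tracing CPU
 */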
static void ftrace_dump_buf(int skip_entries, long cpu_file)
{
	struct trace_array *tr;
	unsigned int old_userobj;
	int cnt = 0, cpu;

	tr = iter.tr;

	old_userobj = tr->trace_flags;

	/* don't look at user memory in panic mode */
	tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	kdb_printf("Dumping ftrace buffer:\n");
	if (skip_entries)
		kdb_printf("(skipping %d entries)\n", skip_entries);

	trace_iterator_reset(&iter);
	iter.iter_flags |= TRACE_FILE_LAT_FMT;

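	/*
	 * Prime a ring buffer iterator for either every tracing CPU or
	 * for the single CPU that the caller asked for.
	 */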
	if (cpu_file == RING_BUFFER_ALL_CPUS) {
		for_each_tracing_cpu(cpu) {
			iter.buffer_iter[cpu] =
			ring_buffer_read_prepare(iter.array_buffer->buffer,
						 cpu, GFP_ATOMIC);
			ring_buffer_read_start(iter.buffer_iter[cpu]);
			tracing_iter_reset(&iter, cpu);
		}
	} else {
		iter.cpu_file = cpu_file;
		iter.buffer_iter[cpu_file] =
			ring_buffer_read_prepare(iter.array_buffer->buffer,
						 cpu_file, GFP_ATOMIC);
		ring_buffer_read_start(iter.buffer_iter[cpu_file]);
		tracing_iter_reset(&iter, cpu_file);
	}

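	/*
	 * Walk the buffer entry by entry.  Entries are only printed once
	 * the requested number of leading entries has been skipped, and
	 * the dump can be interrupted from the kdb console at any point.
	 */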
	while (trace_find_next_entry_inc(&iter)) {
		if (!cnt)
			kdb_printf("---------------------------------\n");
		cnt++;

		if (!skip_entries) {
			print_trace_line(&iter);
			trace_printk_seq(&iter.seq);
		} else {
			skip_entries--;
		}

		if (KDB_FLAG(CMD_INTERRUPT))
			goto out;
	}

	if (!cnt)
		kdb_printf(" (ftrace buffer empty)\n");
	else
		kdb_printf("---------------------------------\n");

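	/* Restore the saved trace flags and release every per-cpu iterator. */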
out:
	tr->trace_flags = old_userobj;

	for_each_tracing_cpu(cpu) {
		if (iter.buffer_iter[cpu]) {
			ring_buffer_read_finish(iter.buffer_iter[cpu]);
			iter.buffer_iter[cpu] = NULL;
		}
	}
}

/*
 * kdb_ftdump - Dump the ftrace log buffer
 */
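/*
 * Example invocations from the kdb prompt (behaviour follows from the
 * argument handling below):
 *
 *   ftdump          dump every entry from all CPUs
 *   ftdump 100      skip the first 100 entries, then dump the rest
 *   ftdump -25      dump only the last 25 entries
 *   ftdump 0 2      dump all entries recorded on CPU 2
 */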
static int kdb_ftdump(int argc, const char **argv)
{
	int skip_entries = 0;
	long cpu_file;
	char *cp;
	int cnt;
	int cpu;

	if (argc > 2)
		return KDB_ARGCOUNT;

	if (argc) {
		skip_entries = simple_strtol(argv[1], &cp, 0);
		if (*cp)
			skip_entries = 0;
	}

	if (argc == 2) {
		cpu_file = simple_strtol(argv[2], &cp, 0);
		if (*cp || cpu_file >= NR_CPUS || cpu_file < 0 ||
		    !cpu_online(cpu_file))
			return KDB_BADINT;
	} else {
		cpu_file = RING_BUFFER_ALL_CPUS;
	}

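	/*
	 * While the dump runs, have printk() output trapped and routed to
	 * the kdb console instead of the normal log path.
	 */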
	kdb_trap_printk++;

	trace_init_global_iter(&iter);
	iter.buffer_iter = buffer_iter;

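	/*
	 * Bump each CPU's disabled count so no new entries are recorded
	 * into the ring buffer while it is being dumped.
	 */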
	for_each_tracing_cpu(cpu) {
		atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
	}

	/* A negative skip_entries means skip all but the last entries */
	if (skip_entries < 0) {
		if (cpu_file == RING_BUFFER_ALL_CPUS)
			cnt = trace_total_entries(NULL);
		else
			cnt = trace_total_entries_cpu(NULL, cpu_file);
		skip_entries = max(cnt + skip_entries, 0);
	}

	ftrace_dump_buf(skip_entries, cpu_file);

	for_each_tracing_cpu(cpu) {
		atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
	}

	kdb_trap_printk--;

	return 0;
}

static kdbtab_t ftdump_cmd = {
	.name = "ftdump",
	.func = kdb_ftdump,
	.usage = "[skip_#entries] [cpu]",
	.help = "Dump ftrace log; -skip dumps last #entries",
	.flags = KDB_ENABLE_ALWAYS_SAFE,
};

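/* Register the "ftdump" command with kdb; runs as a late_initcall during boot. */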
static __init int kdb_ftrace_register(void)
{
	kdb_register(&ftdump_cmd);
	return 0;
}

late_initcall(kdb_ftrace_register);


Source: linux/kernel/trace/trace_kdb.c