1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* |
3 | * trace_boot.c |
4 | * Tracing kernel boot-time |
5 | */ |
6 | |
7 | #define pr_fmt(fmt) "trace_boot: " fmt |
8 | |
9 | #include <linux/bootconfig.h> |
10 | #include <linux/cpumask.h> |
11 | #include <linux/ftrace.h> |
12 | #include <linux/init.h> |
13 | #include <linux/kernel.h> |
14 | #include <linux/mutex.h> |
15 | #include <linux/string.h> |
16 | #include <linux/slab.h> |
17 | #include <linux/trace.h> |
18 | #include <linux/trace_events.h> |
19 | |
20 | #include "trace.h" |
21 | |
22 | #define MAX_BUF_LEN 256 |
23 | |
24 | static void __init |
25 | trace_boot_set_instance_options(struct trace_array *tr, struct xbc_node *node) |
26 | { |
27 | struct xbc_node *anode; |
28 | const char *p; |
29 | char buf[MAX_BUF_LEN]; |
30 | unsigned long v = 0; |
31 | |
32 | /* Common ftrace options */ |
33 | xbc_node_for_each_array_value(node, "options" , anode, p) { |
34 | if (strscpy(buf, p, ARRAY_SIZE(buf)) < 0) { |
35 | pr_err("String is too long: %s\n" , p); |
36 | continue; |
37 | } |
38 | |
39 | if (trace_set_options(tr, option: buf) < 0) |
40 | pr_err("Failed to set option: %s\n" , buf); |
41 | } |
42 | |
43 | p = xbc_node_find_value(parent: node, key: "tracing_on" , NULL); |
44 | if (p && *p != '\0') { |
45 | if (kstrtoul(s: p, base: 10, res: &v)) |
46 | pr_err("Failed to set tracing on: %s\n" , p); |
47 | if (v) |
48 | tracer_tracing_on(tr); |
49 | else |
50 | tracer_tracing_off(tr); |
51 | } |
52 | |
53 | p = xbc_node_find_value(parent: node, key: "trace_clock" , NULL); |
54 | if (p && *p != '\0') { |
55 | if (tracing_set_clock(tr, clockstr: p) < 0) |
56 | pr_err("Failed to set trace clock: %s\n" , p); |
57 | } |
58 | |
59 | p = xbc_node_find_value(parent: node, key: "buffer_size" , NULL); |
60 | if (p && *p != '\0') { |
61 | v = memparse(ptr: p, NULL); |
62 | if (v < PAGE_SIZE) |
63 | pr_err("Buffer size is too small: %s\n" , p); |
64 | if (tracing_resize_ring_buffer(tr, size: v, RING_BUFFER_ALL_CPUS) < 0) |
65 | pr_err("Failed to resize trace buffer to %s\n" , p); |
66 | } |
67 | |
68 | p = xbc_node_find_value(parent: node, key: "cpumask" , NULL); |
69 | if (p && *p != '\0') { |
70 | cpumask_var_t new_mask; |
71 | |
72 | if (alloc_cpumask_var(mask: &new_mask, GFP_KERNEL)) { |
73 | if (cpumask_parse(buf: p, dstp: new_mask) < 0 || |
74 | tracing_set_cpumask(tr, tracing_cpumask_new: new_mask) < 0) |
75 | pr_err("Failed to set new CPU mask %s\n" , p); |
76 | free_cpumask_var(mask: new_mask); |
77 | } |
78 | } |
79 | } |
80 | |
81 | #ifdef CONFIG_EVENT_TRACING |
82 | static void __init |
83 | trace_boot_enable_events(struct trace_array *tr, struct xbc_node *node) |
84 | { |
85 | struct xbc_node *anode; |
86 | char buf[MAX_BUF_LEN]; |
87 | const char *p; |
88 | |
89 | xbc_node_for_each_array_value(node, "events" , anode, p) { |
90 | if (strscpy(buf, p, ARRAY_SIZE(buf)) < 0) { |
91 | pr_err("String is too long: %s\n" , p); |
92 | continue; |
93 | } |
94 | |
95 | if (ftrace_set_clr_event(tr, buf, set: 1) < 0) |
96 | pr_err("Failed to enable event: %s\n" , p); |
97 | } |
98 | } |
99 | |
100 | #ifdef CONFIG_KPROBE_EVENTS |
101 | static int __init |
102 | trace_boot_add_kprobe_event(struct xbc_node *node, const char *event) |
103 | { |
104 | struct dynevent_cmd cmd; |
105 | struct xbc_node *anode; |
106 | char buf[MAX_BUF_LEN]; |
107 | const char *val; |
108 | int ret = 0; |
109 | |
110 | xbc_node_for_each_array_value(node, "probes" , anode, val) { |
111 | kprobe_event_cmd_init(cmd: &cmd, buf, MAX_BUF_LEN); |
112 | |
113 | ret = kprobe_event_gen_cmd_start(&cmd, event, val); |
114 | if (ret) { |
115 | pr_err("Failed to generate probe: %s\n" , buf); |
116 | break; |
117 | } |
118 | |
119 | ret = kprobe_event_gen_cmd_end(&cmd); |
120 | if (ret) { |
121 | pr_err("Failed to add probe: %s\n" , buf); |
122 | break; |
123 | } |
124 | } |
125 | |
126 | return ret; |
127 | } |
128 | #else |
129 | static inline int __init |
130 | trace_boot_add_kprobe_event(struct xbc_node *node, const char *event) |
131 | { |
132 | pr_err("Kprobe event is not supported.\n" ); |
133 | return -ENOTSUPP; |
134 | } |
135 | #endif |
136 | |
137 | #ifdef CONFIG_SYNTH_EVENTS |
138 | static int __init |
139 | trace_boot_add_synth_event(struct xbc_node *node, const char *event) |
140 | { |
141 | struct dynevent_cmd cmd; |
142 | struct xbc_node *anode; |
143 | char buf[MAX_BUF_LEN]; |
144 | const char *p; |
145 | int ret; |
146 | |
147 | synth_event_cmd_init(cmd: &cmd, buf, MAX_BUF_LEN); |
148 | |
149 | ret = synth_event_gen_cmd_start(&cmd, event, NULL); |
150 | if (ret) |
151 | return ret; |
152 | |
153 | xbc_node_for_each_array_value(node, "fields" , anode, p) { |
154 | ret = synth_event_add_field_str(cmd: &cmd, type_name: p); |
155 | if (ret) |
156 | return ret; |
157 | } |
158 | |
159 | ret = synth_event_gen_cmd_end(&cmd); |
160 | if (ret < 0) |
161 | pr_err("Failed to add synthetic event: %s\n" , buf); |
162 | |
163 | return ret; |
164 | } |
165 | #else |
166 | static inline int __init |
167 | trace_boot_add_synth_event(struct xbc_node *node, const char *event) |
168 | { |
169 | pr_err("Synthetic event is not supported.\n" ); |
170 | return -ENOTSUPP; |
171 | } |
172 | #endif |
173 | |
174 | #ifdef CONFIG_HIST_TRIGGERS |
175 | static int __init __printf(3, 4) |
176 | append_printf(char **bufp, char *end, const char *fmt, ...) |
177 | { |
178 | va_list args; |
179 | int ret; |
180 | |
181 | if (*bufp == end) |
182 | return -ENOSPC; |
183 | |
184 | va_start(args, fmt); |
185 | ret = vsnprintf(buf: *bufp, size: end - *bufp, fmt, args); |
186 | if (ret < end - *bufp) { |
187 | *bufp += ret; |
188 | } else { |
189 | *bufp = end; |
190 | ret = -ERANGE; |
191 | } |
192 | va_end(args); |
193 | |
194 | return ret; |
195 | } |
196 | |
197 | static int __init |
198 | append_str_nospace(char **bufp, char *end, const char *str) |
199 | { |
200 | char *p = *bufp; |
201 | int len; |
202 | |
203 | while (p < end - 1 && *str != '\0') { |
204 | if (!isspace(*str)) |
205 | *(p++) = *str; |
206 | str++; |
207 | } |
208 | *p = '\0'; |
209 | if (p == end - 1) { |
210 | *bufp = end; |
211 | return -ENOSPC; |
212 | } |
213 | len = p - *bufp; |
214 | *bufp = p; |
215 | return (int)len; |
216 | } |
217 | |
218 | static int __init |
219 | trace_boot_hist_add_array(struct xbc_node *hnode, char **bufp, |
220 | char *end, const char *key) |
221 | { |
222 | struct xbc_node *anode; |
223 | const char *p; |
224 | char sep; |
225 | |
226 | p = xbc_node_find_value(parent: hnode, key, vnode: &anode); |
227 | if (p) { |
228 | if (!anode) { |
229 | pr_err("hist.%s requires value(s).\n" , key); |
230 | return -EINVAL; |
231 | } |
232 | |
233 | append_printf(bufp, end, fmt: ":%s" , key); |
234 | sep = '='; |
235 | xbc_array_for_each_value(anode, p) { |
236 | append_printf(bufp, end, fmt: "%c%s" , sep, p); |
237 | if (sep == '=') |
238 | sep = ','; |
239 | } |
240 | } else |
241 | return -ENOENT; |
242 | |
243 | return 0; |
244 | } |
245 | |
246 | static int __init |
247 | trace_boot_hist_add_one_handler(struct xbc_node *hnode, char **bufp, |
248 | char *end, const char *handler, |
249 | const char *param) |
250 | { |
251 | struct xbc_node *knode, *anode; |
252 | const char *p; |
253 | char sep; |
254 | |
255 | /* Compose 'handler' parameter */ |
256 | p = xbc_node_find_value(parent: hnode, key: param, NULL); |
257 | if (!p) { |
258 | pr_err("hist.%s requires '%s' option.\n" , |
259 | xbc_node_get_data(hnode), param); |
260 | return -EINVAL; |
261 | } |
262 | append_printf(bufp, end, fmt: ":%s(%s)" , handler, p); |
263 | |
264 | /* Compose 'action' parameter */ |
265 | knode = xbc_node_find_subkey(parent: hnode, key: "trace" ); |
266 | if (!knode) |
267 | knode = xbc_node_find_subkey(parent: hnode, key: "save" ); |
268 | |
269 | if (knode) { |
270 | anode = xbc_node_get_child(node: knode); |
271 | if (!anode || !xbc_node_is_value(node: anode)) { |
272 | pr_err("hist.%s.%s requires value(s).\n" , |
273 | xbc_node_get_data(hnode), |
274 | xbc_node_get_data(knode)); |
275 | return -EINVAL; |
276 | } |
277 | |
278 | append_printf(bufp, end, fmt: ".%s" , xbc_node_get_data(node: knode)); |
279 | sep = '('; |
280 | xbc_array_for_each_value(anode, p) { |
281 | append_printf(bufp, end, fmt: "%c%s" , sep, p); |
282 | if (sep == '(') |
283 | sep = ','; |
284 | } |
285 | append_printf(bufp, end, fmt: ")" ); |
286 | } else if (xbc_node_find_subkey(parent: hnode, key: "snapshot" )) { |
287 | append_printf(bufp, end, fmt: ".snapshot()" ); |
288 | } else { |
289 | pr_err("hist.%s requires an action.\n" , |
290 | xbc_node_get_data(hnode)); |
291 | return -EINVAL; |
292 | } |
293 | |
294 | return 0; |
295 | } |
296 | |
297 | static int __init |
298 | trace_boot_hist_add_handlers(struct xbc_node *hnode, char **bufp, |
299 | char *end, const char *param) |
300 | { |
301 | struct xbc_node *node; |
302 | const char *p, *handler; |
303 | int ret = 0; |
304 | |
305 | handler = xbc_node_get_data(node: hnode); |
306 | |
307 | xbc_node_for_each_subkey(hnode, node) { |
308 | p = xbc_node_get_data(node); |
309 | if (!isdigit(c: p[0])) |
310 | continue; |
311 | /* All digit started node should be instances. */ |
312 | ret = trace_boot_hist_add_one_handler(hnode: node, bufp, end, handler, param); |
313 | if (ret < 0) |
314 | break; |
315 | } |
316 | |
317 | if (xbc_node_find_subkey(parent: hnode, key: param)) |
318 | ret = trace_boot_hist_add_one_handler(hnode, bufp, end, handler, param); |
319 | |
320 | return ret; |
321 | } |
322 | |
323 | /* |
324 | * Histogram boottime tracing syntax. |
325 | * |
326 | * ftrace.[instance.INSTANCE.]event.GROUP.EVENT.hist[.N] { |
327 | * keys = <KEY>[,...] |
328 | * values = <VAL>[,...] |
329 | * sort = <SORT-KEY>[,...] |
330 | * size = <ENTRIES> |
331 | * name = <HISTNAME> |
332 | * var { <VAR> = <EXPR> ... } |
333 | * pause|continue|clear |
334 | * onmax|onchange[.N] { var = <VAR>; <ACTION> [= <PARAM>] } |
335 | * onmatch[.N] { event = <EVENT>; <ACTION> [= <PARAM>] } |
336 | * filter = <FILTER> |
337 | * } |
338 | * |
339 | * Where <ACTION> are; |
340 | * |
341 | * trace = <EVENT>, <ARG1>[, ...] |
342 | * save = <ARG1>[, ...] |
343 | * snapshot |
344 | */ |
345 | static int __init |
346 | trace_boot_compose_hist_cmd(struct xbc_node *hnode, char *buf, size_t size) |
347 | { |
348 | struct xbc_node *node, *knode; |
349 | char *end = buf + size; |
350 | const char *p; |
351 | int ret = 0; |
352 | |
353 | append_printf(bufp: &buf, end, fmt: "hist" ); |
354 | |
355 | ret = trace_boot_hist_add_array(hnode, bufp: &buf, end, key: "keys" ); |
356 | if (ret < 0) { |
357 | if (ret == -ENOENT) |
358 | pr_err("hist requires keys.\n" ); |
359 | return -EINVAL; |
360 | } |
361 | |
362 | ret = trace_boot_hist_add_array(hnode, bufp: &buf, end, key: "values" ); |
363 | if (ret == -EINVAL) |
364 | return ret; |
365 | ret = trace_boot_hist_add_array(hnode, bufp: &buf, end, key: "sort" ); |
366 | if (ret == -EINVAL) |
367 | return ret; |
368 | |
369 | p = xbc_node_find_value(parent: hnode, key: "size" , NULL); |
370 | if (p) |
371 | append_printf(bufp: &buf, end, fmt: ":size=%s" , p); |
372 | |
373 | p = xbc_node_find_value(parent: hnode, key: "name" , NULL); |
374 | if (p) |
375 | append_printf(bufp: &buf, end, fmt: ":name=%s" , p); |
376 | |
377 | node = xbc_node_find_subkey(parent: hnode, key: "var" ); |
378 | if (node) { |
379 | xbc_node_for_each_key_value(node, knode, p) { |
380 | /* Expression must not include spaces. */ |
381 | append_printf(bufp: &buf, end, fmt: ":%s=" , |
382 | xbc_node_get_data(node: knode)); |
383 | append_str_nospace(bufp: &buf, end, str: p); |
384 | } |
385 | } |
386 | |
387 | /* Histogram control attributes (mutual exclusive) */ |
388 | if (xbc_node_find_value(parent: hnode, key: "pause" , NULL)) |
389 | append_printf(bufp: &buf, end, fmt: ":pause" ); |
390 | else if (xbc_node_find_value(parent: hnode, key: "continue" , NULL)) |
391 | append_printf(bufp: &buf, end, fmt: ":continue" ); |
392 | else if (xbc_node_find_value(parent: hnode, key: "clear" , NULL)) |
393 | append_printf(bufp: &buf, end, fmt: ":clear" ); |
394 | |
395 | /* Histogram handler and actions */ |
396 | node = xbc_node_find_subkey(parent: hnode, key: "onmax" ); |
397 | if (node && trace_boot_hist_add_handlers(hnode: node, bufp: &buf, end, param: "var" ) < 0) |
398 | return -EINVAL; |
399 | node = xbc_node_find_subkey(parent: hnode, key: "onchange" ); |
400 | if (node && trace_boot_hist_add_handlers(hnode: node, bufp: &buf, end, param: "var" ) < 0) |
401 | return -EINVAL; |
402 | node = xbc_node_find_subkey(parent: hnode, key: "onmatch" ); |
403 | if (node && trace_boot_hist_add_handlers(hnode: node, bufp: &buf, end, param: "event" ) < 0) |
404 | return -EINVAL; |
405 | |
406 | p = xbc_node_find_value(parent: hnode, key: "filter" , NULL); |
407 | if (p) |
408 | append_printf(bufp: &buf, end, fmt: " if %s" , p); |
409 | |
410 | if (buf == end) { |
411 | pr_err("hist exceeds the max command length.\n" ); |
412 | return -E2BIG; |
413 | } |
414 | |
415 | return 0; |
416 | } |
417 | |
418 | static void __init |
419 | trace_boot_init_histograms(struct trace_event_file *file, |
420 | struct xbc_node *hnode, char *buf, size_t size) |
421 | { |
422 | struct xbc_node *node; |
423 | const char *p; |
424 | char *tmp; |
425 | |
426 | xbc_node_for_each_subkey(hnode, node) { |
427 | p = xbc_node_get_data(node); |
428 | if (!isdigit(c: p[0])) |
429 | continue; |
430 | /* All digit started node should be instances. */ |
431 | if (trace_boot_compose_hist_cmd(hnode: node, buf, size) == 0) { |
432 | tmp = kstrdup(s: buf, GFP_KERNEL); |
433 | if (!tmp) |
434 | return; |
435 | if (trigger_process_regex(file, buff: buf) < 0) |
436 | pr_err("Failed to apply hist trigger: %s\n" , tmp); |
437 | kfree(objp: tmp); |
438 | } |
439 | } |
440 | |
441 | if (xbc_node_find_subkey(parent: hnode, key: "keys" )) { |
442 | if (trace_boot_compose_hist_cmd(hnode, buf, size) == 0) { |
443 | tmp = kstrdup(s: buf, GFP_KERNEL); |
444 | if (!tmp) |
445 | return; |
446 | if (trigger_process_regex(file, buff: buf) < 0) |
447 | pr_err("Failed to apply hist trigger: %s\n" , tmp); |
448 | kfree(objp: tmp); |
449 | } |
450 | } |
451 | } |
452 | #else |
/* CONFIG_HIST_TRIGGERS=n stub: boot-time histograms are a no-op. */
static void __init
trace_boot_init_histograms(struct trace_event_file *file,
			   struct xbc_node *hnode, char *buf, size_t size)
{
	/* do nothing */
}
459 | #endif |
460 | |
461 | static void __init |
462 | trace_boot_init_one_event(struct trace_array *tr, struct xbc_node *gnode, |
463 | struct xbc_node *enode) |
464 | { |
465 | struct trace_event_file *file; |
466 | struct xbc_node *anode; |
467 | char buf[MAX_BUF_LEN]; |
468 | const char *p, *group, *event; |
469 | |
470 | group = xbc_node_get_data(node: gnode); |
471 | event = xbc_node_get_data(node: enode); |
472 | |
473 | if (!strcmp(group, "kprobes" )) |
474 | if (trace_boot_add_kprobe_event(node: enode, event) < 0) |
475 | return; |
476 | if (!strcmp(group, "synthetic" )) |
477 | if (trace_boot_add_synth_event(node: enode, event) < 0) |
478 | return; |
479 | |
480 | mutex_lock(&event_mutex); |
481 | file = find_event_file(tr, system: group, event); |
482 | if (!file) { |
483 | pr_err("Failed to find event: %s:%s\n" , group, event); |
484 | goto out; |
485 | } |
486 | |
487 | p = xbc_node_find_value(parent: enode, key: "filter" , NULL); |
488 | if (p && *p != '\0') { |
489 | if (strscpy(buf, p, ARRAY_SIZE(buf)) < 0) |
490 | pr_err("filter string is too long: %s\n" , p); |
491 | else if (apply_event_filter(file, filter_string: buf) < 0) |
492 | pr_err("Failed to apply filter: %s\n" , buf); |
493 | } |
494 | |
495 | if (IS_ENABLED(CONFIG_HIST_TRIGGERS)) { |
496 | xbc_node_for_each_array_value(enode, "actions" , anode, p) { |
497 | if (strscpy(buf, p, ARRAY_SIZE(buf)) < 0) |
498 | pr_err("action string is too long: %s\n" , p); |
499 | else if (trigger_process_regex(file, buff: buf) < 0) |
500 | pr_err("Failed to apply an action: %s\n" , p); |
501 | } |
502 | anode = xbc_node_find_subkey(parent: enode, key: "hist" ); |
503 | if (anode) |
504 | trace_boot_init_histograms(file, hnode: anode, buf, ARRAY_SIZE(buf)); |
505 | } else if (xbc_node_find_value(parent: enode, key: "actions" , NULL)) |
506 | pr_err("Failed to apply event actions because CONFIG_HIST_TRIGGERS is not set.\n" ); |
507 | |
508 | if (xbc_node_find_value(parent: enode, key: "enable" , NULL)) { |
509 | if (trace_event_enable_disable(file, enable: 1, soft_disable: 0) < 0) |
510 | pr_err("Failed to enable event node: %s:%s\n" , |
511 | group, event); |
512 | } |
513 | out: |
514 | mutex_unlock(lock: &event_mutex); |
515 | } |
516 | |
517 | static void __init |
518 | trace_boot_init_events(struct trace_array *tr, struct xbc_node *node) |
519 | { |
520 | struct xbc_node *gnode, *enode; |
521 | bool enable, enable_all = false; |
522 | const char *data; |
523 | |
524 | node = xbc_node_find_subkey(parent: node, key: "event" ); |
525 | if (!node) |
526 | return; |
527 | /* per-event key starts with "event.GROUP.EVENT" */ |
528 | xbc_node_for_each_subkey(node, gnode) { |
529 | data = xbc_node_get_data(node: gnode); |
530 | if (!strcmp(data, "enable" )) { |
531 | enable_all = true; |
532 | continue; |
533 | } |
534 | enable = false; |
535 | xbc_node_for_each_subkey(gnode, enode) { |
536 | data = xbc_node_get_data(node: enode); |
537 | if (!strcmp(data, "enable" )) { |
538 | enable = true; |
539 | continue; |
540 | } |
541 | trace_boot_init_one_event(tr, gnode, enode); |
542 | } |
543 | /* Event enablement must be done after event settings */ |
544 | if (enable) { |
545 | data = xbc_node_get_data(node: gnode); |
546 | trace_array_set_clr_event(tr, system: data, NULL, enable: true); |
547 | } |
548 | } |
549 | /* Ditto */ |
550 | if (enable_all) |
551 | trace_array_set_clr_event(tr, NULL, NULL, enable: true); |
552 | } |
#else
/* CONFIG_EVENT_TRACING=n: boot-time event setup becomes a no-op. */
#define trace_boot_enable_events(tr, node) do {} while (0)
#define trace_boot_init_events(tr, node) do {} while (0)
#endif
557 | |
558 | #ifdef CONFIG_DYNAMIC_FTRACE |
559 | static void __init |
560 | trace_boot_set_ftrace_filter(struct trace_array *tr, struct xbc_node *node) |
561 | { |
562 | struct xbc_node *anode; |
563 | const char *p; |
564 | char *q; |
565 | |
566 | xbc_node_for_each_array_value(node, "ftrace.filters" , anode, p) { |
567 | q = kstrdup(s: p, GFP_KERNEL); |
568 | if (!q) |
569 | return; |
570 | if (ftrace_set_filter(ops: tr->ops, buf: q, strlen(q), reset: 0) < 0) |
571 | pr_err("Failed to add %s to ftrace filter\n" , p); |
572 | else |
573 | ftrace_filter_param = true; |
574 | kfree(objp: q); |
575 | } |
576 | xbc_node_for_each_array_value(node, "ftrace.notraces" , anode, p) { |
577 | q = kstrdup(s: p, GFP_KERNEL); |
578 | if (!q) |
579 | return; |
580 | if (ftrace_set_notrace(ops: tr->ops, buf: q, strlen(q), reset: 0) < 0) |
581 | pr_err("Failed to add %s to ftrace filter\n" , p); |
582 | else |
583 | ftrace_filter_param = true; |
584 | kfree(objp: q); |
585 | } |
586 | } |
#else
/* CONFIG_DYNAMIC_FTRACE=n: function filters are unavailable. */
#define trace_boot_set_ftrace_filter(tr, node) do {} while (0)
#endif
590 | |
591 | static void __init |
592 | trace_boot_enable_tracer(struct trace_array *tr, struct xbc_node *node) |
593 | { |
594 | const char *p; |
595 | |
596 | trace_boot_set_ftrace_filter(tr, node); |
597 | |
598 | p = xbc_node_find_value(parent: node, key: "tracer" , NULL); |
599 | if (p && *p != '\0') { |
600 | if (tracing_set_tracer(tr, buf: p) < 0) |
601 | pr_err("Failed to set given tracer: %s\n" , p); |
602 | } |
603 | |
604 | /* Since tracer can free snapshot buffer, allocate snapshot here.*/ |
605 | if (xbc_node_find_value(parent: node, key: "alloc_snapshot" , NULL)) { |
606 | if (tracing_alloc_snapshot_instance(tr) < 0) |
607 | pr_err("Failed to allocate snapshot buffer\n" ); |
608 | } |
609 | } |
610 | |
/*
 * Configure one trace instance from its bootconfig node. The call order
 * matters: options first, event setup before event enablement, and the
 * tracer last (after filters and events are in place).
 */
static void __init
trace_boot_init_one_instance(struct trace_array *tr, struct xbc_node *node)
{
	trace_boot_set_instance_options(tr, node);
	trace_boot_init_events(tr, node);
	trace_boot_enable_events(tr, node);
	trace_boot_enable_tracer(tr, node);
}
619 | |
620 | static void __init |
621 | trace_boot_init_instances(struct xbc_node *node) |
622 | { |
623 | struct xbc_node *inode; |
624 | struct trace_array *tr; |
625 | const char *p; |
626 | |
627 | node = xbc_node_find_subkey(parent: node, key: "instance" ); |
628 | if (!node) |
629 | return; |
630 | |
631 | xbc_node_for_each_subkey(node, inode) { |
632 | p = xbc_node_get_data(node: inode); |
633 | if (!p || *p == '\0') |
634 | continue; |
635 | |
636 | tr = trace_array_get_by_name(name: p, NULL); |
637 | if (!tr) { |
638 | pr_err("Failed to get trace instance %s\n" , p); |
639 | continue; |
640 | } |
641 | trace_boot_init_one_instance(tr, node: inode); |
642 | trace_array_put(tr); |
643 | } |
644 | } |
645 | |
646 | static int __init trace_boot_init(void) |
647 | { |
648 | struct xbc_node *trace_node; |
649 | struct trace_array *tr; |
650 | |
651 | trace_node = xbc_find_node(key: "ftrace" ); |
652 | if (!trace_node) |
653 | return 0; |
654 | |
655 | tr = top_trace_array(); |
656 | if (!tr) |
657 | return 0; |
658 | |
659 | /* Global trace array is also one instance */ |
660 | trace_boot_init_one_instance(tr, node: trace_node); |
661 | trace_boot_init_instances(node: trace_node); |
662 | |
663 | disable_tracing_selftest(reason: "running boot-time tracing" ); |
664 | |
665 | return 0; |
666 | } |
667 | /* |
668 | * Start tracing at the end of core-initcall, so that it starts tracing |
669 | * from the beginning of postcore_initcall. |
670 | */ |
671 | core_initcall_sync(trace_boot_init); |
672 | |