// SPDX-License-Identifier: GPL-2.0
/*
 * event tracer
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 *  - Added format output of fields of the trace point.
 *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
 *
 */

#define pr_fmt(fmt) fmt

#include <linux/workqueue.h>
#include <linux/security.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/tracefs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/sort.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include <trace/events/sched.h>
#include <trace/syscall.h>

#include <asm/setup.h>

#include "trace_output.h"

#undef TRACE_SYSTEM
#define TRACE_SYSTEM "TRACE_SYSTEM"

DEFINE_MUTEX(event_mutex);

LIST_HEAD(ftrace_events);
static LIST_HEAD(ftrace_generic_fields);
static LIST_HEAD(ftrace_common_fields);
static bool eventdir_initialized;

static LIST_HEAD(module_strings);

struct module_string {
	struct list_head	next;
	struct module		*module;
	char			*str;
};

#define GFP_TRACE (GFP_KERNEL | __GFP_ZERO)

static struct kmem_cache *field_cachep;
static struct kmem_cache *file_cachep;

static inline int system_refcount(struct event_subsystem *system)
{
	return system->ref_count;
}

static int system_refcount_inc(struct event_subsystem *system)
{
	return system->ref_count++;
}

static int system_refcount_dec(struct event_subsystem *system)
{
	return --system->ref_count;
}

/* Double loops, do not use break, only goto's work */
#define do_for_each_event_file(tr, file)			\
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {	\
		list_for_each_entry(file, &tr->events, list)

#define do_for_each_event_file_safe(tr, file)			\
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {	\
		struct trace_event_file *___n;			\
		list_for_each_entry_safe(file, ___n, &tr->events, list)

#define while_for_each_event_file()		\
	}
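/*
 * Sketch of the intended usage; the caller and the label are hypothetical:
 *
 *	do_for_each_event_file(tr, file) {
 *		if (file->flags & EVENT_FILE_FL_ENABLED)
 *			goto found;	(a "break" would only leave the inner loop)
 *	} while_for_each_event_file();
 * found:
 *	...
 */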

static struct ftrace_event_field *
__find_event_field(struct list_head *head, char *name)
{
	struct ftrace_event_field *field;

	list_for_each_entry(field, head, link) {
		if (!strcmp(field->name, name))
			return field;
	}

	return NULL;
}

struct ftrace_event_field *
trace_find_event_field(struct trace_event_call *call, char *name)
{
	struct ftrace_event_field *field;
	struct list_head *head;

	head = trace_get_fields(call);
	field = __find_event_field(head, name);
	if (field)
		return field;

	field = __find_event_field(&ftrace_generic_fields, name);
	if (field)
		return field;

	return __find_event_field(&ftrace_common_fields, name);
}

static int __trace_define_field(struct list_head *head, const char *type,
				const char *name, int offset, int size,
				int is_signed, int filter_type, int len)
{
	struct ftrace_event_field *field;

	field = kmem_cache_alloc(field_cachep, GFP_TRACE);
	if (!field)
		return -ENOMEM;

	field->name = name;
	field->type = type;

	if (filter_type == FILTER_OTHER)
		field->filter_type = filter_assign_type(type);
	else
		field->filter_type = filter_type;

	field->offset = offset;
	field->size = size;
	field->is_signed = is_signed;
	field->len = len;

	list_add(&field->link, head);

	return 0;
}

int trace_define_field(struct trace_event_call *call, const char *type,
		       const char *name, int offset, int size, int is_signed,
		       int filter_type)
{
	struct list_head *head;

	if (WARN_ON(!call->class))
		return 0;

	head = trace_get_fields(call);
	return __trace_define_field(head, type, name, offset, size,
				    is_signed, filter_type, 0);
}
EXPORT_SYMBOL_GPL(trace_define_field);
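/*
 * Illustrative call, for a hypothetical entry structure that has an
 * "int foo" member:
 *
 *	ret = trace_define_field(call, "int", "foo",
 *				 offsetof(struct my_entry, foo),
 *				 sizeof(int), is_signed_type(int),
 *				 FILTER_OTHER);
 */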

static int trace_define_field_ext(struct trace_event_call *call, const char *type,
				  const char *name, int offset, int size, int is_signed,
				  int filter_type, int len)
{
	struct list_head *head;

	if (WARN_ON(!call->class))
		return 0;

	head = trace_get_fields(call);
	return __trace_define_field(head, type, name, offset, size,
				    is_signed, filter_type, len);
}

#define __generic_field(type, item, filter_type)			\
	ret = __trace_define_field(&ftrace_generic_fields, #type,	\
				   #item, 0, 0, is_signed_type(type),	\
				   filter_type, 0);			\
	if (ret)							\
		return ret;

#define __common_field(type, item)					\
	ret = __trace_define_field(&ftrace_common_fields, #type,	\
				   "common_" #item,			\
				   offsetof(typeof(ent), item),		\
				   sizeof(ent.item),			\
				   is_signed_type(type), FILTER_OTHER, 0);	\
	if (ret)							\
		return ret;

static int trace_define_generic_fields(void)
{
	int ret;

	__generic_field(int, CPU, FILTER_CPU);
	__generic_field(int, cpu, FILTER_CPU);
	__generic_field(int, common_cpu, FILTER_CPU);
	__generic_field(char *, COMM, FILTER_COMM);
	__generic_field(char *, comm, FILTER_COMM);
	__generic_field(char *, stacktrace, FILTER_STACKTRACE);
	__generic_field(char *, STACKTRACE, FILTER_STACKTRACE);

	return ret;
}

static int trace_define_common_fields(void)
{
	int ret;
	struct trace_entry ent;

	__common_field(unsigned short, type);
	__common_field(unsigned char, flags);
	/* Holds both preempt_count and migrate_disable */
	__common_field(unsigned char, preempt_count);
	__common_field(int, pid);

	return ret;
}

static void trace_destroy_fields(struct trace_event_call *call)
{
	struct ftrace_event_field *field, *next;
	struct list_head *head;

	head = trace_get_fields(call);
	list_for_each_entry_safe(field, next, head, link) {
		list_del(&field->link);
		kmem_cache_free(field_cachep, field);
	}
}

/*
 * run-time version of trace_event_get_offsets_<call>() that returns the last
 * accessible offset of trace fields excluding __dynamic_array bytes
 */
int trace_event_get_offsets(struct trace_event_call *call)
{
	struct ftrace_event_field *tail;
	struct list_head *head;

	head = trace_get_fields(call);
	/*
	 * head->next points to the last field with the largest offset,
	 * since it was added last by trace_define_field()
	 */
	tail = list_first_entry(head, struct ftrace_event_field, link);
	return tail->offset + tail->size;
}

/*
 * Check if the referenced field is an array and return true,
 * as arrays are OK to dereference.
 */
static bool test_field(const char *fmt, struct trace_event_call *call)
{
	struct trace_event_fields *field = call->class->fields_array;
	const char *array_descriptor;
	const char *p = fmt;
	int len;

	if (!(len = str_has_prefix(fmt, "REC->")))
		return false;
	fmt += len;
	for (p = fmt; *p; p++) {
		if (!isalnum(*p) && *p != '_')
			break;
	}
	len = p - fmt;

	for (; field->type; field++) {
		if (strncmp(field->name, fmt, len) ||
		    field->name[len])
			continue;
		array_descriptor = strchr(field->type, '[');
		/* This is an array and is OK to dereference. */
		return array_descriptor != NULL;
	}
	return false;
}
/*
 * Examine the print fmt of the event looking for unsafe dereference
 * pointers using %p* that could be recorded in the trace event and
 * much later referenced after the pointer was freed. Dereferencing
 * a pointer is OK, as long as the dereference is into the event itself.
 */
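/*
 * For example (the fields here are hypothetical):
 *
 *	print fmt: "ip=%pI4", REC->ip_array   - OK: "ip_array" is an array
 *						recorded inside the event
 *	print fmt: "ip=%pI4", REC->ip_ptr     - flagged: %pI4 would chase a
 *						saved pointer long after the
 *						tracepoint fired
 */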
static void test_event_printk(struct trace_event_call *call)
{
	u64 dereference_flags = 0;
	bool first = true;
	const char *fmt, *c, *r, *a;
	int parens = 0;
	char in_quote = 0;
	int start_arg = 0;
	int arg = 0;
	int i;

	fmt = call->print_fmt;

	if (!fmt)
		return;

	for (i = 0; fmt[i]; i++) {
		switch (fmt[i]) {
		case '\\':
			i++;
			if (!fmt[i])
				return;
			continue;
		case '"':
		case '\'':
			/*
			 * The print fmt starts with a string that
			 * is processed first to find %p* usage,
			 * then after the first string, the print fmt
			 * contains arguments that are used to check
			 * if the dereferenced %p* usage is safe.
			 */
			if (first) {
				if (fmt[i] == '\'')
					continue;
				if (in_quote) {
					arg = 0;
					first = false;
					/*
					 * If there were no %p* uses,
					 * the fmt is OK.
					 */
					if (!dereference_flags)
						return;
				}
			}
			if (in_quote) {
				if (in_quote == fmt[i])
					in_quote = 0;
			} else {
				in_quote = fmt[i];
			}
			continue;
		case '%':
			if (!first || !in_quote)
				continue;
			i++;
			if (!fmt[i])
				return;
			switch (fmt[i]) {
			case '%':
				continue;
			case 'p':
				/* Find dereferencing fields */
				switch (fmt[i + 1]) {
				case 'B': case 'R': case 'r':
				case 'b': case 'M': case 'm':
				case 'I': case 'i': case 'E':
				case 'U': case 'V': case 'N':
				case 'a': case 'd': case 'D':
				case 'g': case 't': case 'C':
				case 'O': case 'f':
					if (WARN_ONCE(arg == 63,
						      "Too many args for event: %s",
						      trace_event_name(call)))
						return;
					dereference_flags |= 1ULL << arg;
				}
				break;
			default:
			{
				bool star = false;
				int j;

				/* Increment arg if %*s exists. */
				for (j = 0; fmt[i + j]; j++) {
					if (isdigit(fmt[i + j]) ||
					    fmt[i + j] == '.')
						continue;
					if (fmt[i + j] == '*') {
						star = true;
						continue;
					}
					if ((fmt[i + j] == 's') && star)
						arg++;
					break;
				}
				break;
			} /* default */

			} /* switch */
			arg++;
			continue;
		case '(':
			if (in_quote)
				continue;
			parens++;
			continue;
		case ')':
			if (in_quote)
				continue;
			parens--;
			if (WARN_ONCE(parens < 0,
				      "Paren mismatch for event: %s\narg='%s'\n%*s",
				      trace_event_name(call),
				      fmt + start_arg,
				      (i - start_arg) + 5, "^"))
				return;
			continue;
		case ',':
			if (in_quote || parens)
				continue;
			i++;
			while (isspace(fmt[i]))
				i++;
			start_arg = i;
			if (!(dereference_flags & (1ULL << arg)))
				goto next_arg;

			/* Find the REC-> in the argument */
			c = strchr(fmt + i, ',');
			r = strstr(fmt + i, "REC->");
			if (r && (!c || r < c)) {
				/*
				 * Addresses of events on the buffer,
				 * or an array on the buffer is
				 * OK to dereference.
				 * There are ways to fool this, but
				 * this is to catch common mistakes,
				 * not malicious code.
				 */
				a = strchr(fmt + i, '&');
				if ((a && (a < r)) || test_field(r, call))
					dereference_flags &= ~(1ULL << arg);
			} else if ((r = strstr(fmt + i, "__get_dynamic_array(")) &&
				   (!c || r < c)) {
				dereference_flags &= ~(1ULL << arg);
			} else if ((r = strstr(fmt + i, "__get_sockaddr(")) &&
				   (!c || r < c)) {
				dereference_flags &= ~(1ULL << arg);
			}

next_arg:
			i--;
			arg++;
		}
	}

	/*
	 * If you triggered the below warning, the trace event reported
	 * uses an unsafe dereference pointer %p*. As the data stored
	 * at the trace event time may no longer exist when the trace
	 * event is printed, dereferencing to the original source is
	 * unsafe. The source of the dereference must be copied into the
	 * event itself, and the dereference must access the copy instead.
	 */
	if (WARN_ON_ONCE(dereference_flags)) {
		arg = 1;
		while (!(dereference_flags & 1)) {
			dereference_flags >>= 1;
			arg++;
		}
		pr_warn("event %s has unsafe dereference of argument %d\n",
			trace_event_name(call), arg);
		pr_warn("print_fmt: %s\n", fmt);
	}
}

int trace_event_raw_init(struct trace_event_call *call)
{
	int id;

	id = register_trace_event(&call->event);
	if (!id)
		return -ENODEV;

	test_event_printk(call);

	return 0;
}
EXPORT_SYMBOL_GPL(trace_event_raw_init);

bool trace_event_ignore_this_pid(struct trace_event_file *trace_file)
{
	struct trace_array *tr = trace_file->tr;
	struct trace_array_cpu *data;
	struct trace_pid_list *no_pid_list;
	struct trace_pid_list *pid_list;

	pid_list = rcu_dereference_raw(tr->filtered_pids);
	no_pid_list = rcu_dereference_raw(tr->filtered_no_pids);

	if (!pid_list && !no_pid_list)
		return false;

	data = this_cpu_ptr(tr->array_buffer.data);

	return data->ignore_pid;
}
EXPORT_SYMBOL_GPL(trace_event_ignore_this_pid);

void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer,
				 struct trace_event_file *trace_file,
				 unsigned long len)
{
	struct trace_event_call *event_call = trace_file->event_call;

	if ((trace_file->flags & EVENT_FILE_FL_PID_FILTER) &&
	    trace_event_ignore_this_pid(trace_file))
		return NULL;

	/*
	 * If CONFIG_PREEMPTION is enabled, then the tracepoint itself disables
	 * preemption (adding one to the preempt_count). Since we are
	 * interested in the preempt_count at the time the tracepoint was
	 * hit, we need to subtract one to offset the increment.
	 */
	fbuffer->trace_ctx = tracing_gen_ctx_dec();
	fbuffer->trace_file = trace_file;

	fbuffer->event =
		trace_event_buffer_lock_reserve(&fbuffer->buffer, trace_file,
						event_call->event.type, len,
						fbuffer->trace_ctx);
	if (!fbuffer->event)
		return NULL;

	fbuffer->regs = NULL;
	fbuffer->entry = ring_buffer_event_data(fbuffer->event);
	return fbuffer->entry;
}
EXPORT_SYMBOL_GPL(trace_event_buffer_reserve);
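/*
 * Rough sketch of how a TRACE_EVENT() generated probe pairs this with
 * trace_event_buffer_commit(); "struct my_entry" and its "foo" field are
 * hypothetical:
 *
 *	struct trace_event_buffer fbuffer;
 *	struct my_entry *entry;
 *
 *	entry = trace_event_buffer_reserve(&fbuffer, trace_file,
 *					   sizeof(*entry));
 *	if (!entry)
 *		return;
 *	entry->foo = foo;
 *	trace_event_buffer_commit(&fbuffer);
 */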

int trace_event_reg(struct trace_event_call *call,
		    enum trace_reg type, void *data)
{
	struct trace_event_file *file = data;

	WARN_ON(!(call->flags & TRACE_EVENT_FL_TRACEPOINT));
	switch (type) {
	case TRACE_REG_REGISTER:
		return tracepoint_probe_register(call->tp,
						 call->class->probe,
						 file);
	case TRACE_REG_UNREGISTER:
		tracepoint_probe_unregister(call->tp,
					    call->class->probe,
					    file);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return tracepoint_probe_register(call->tp,
						 call->class->perf_probe,
						 call);
	case TRACE_REG_PERF_UNREGISTER:
		tracepoint_probe_unregister(call->tp,
					    call->class->perf_probe,
					    call);
		return 0;
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}
EXPORT_SYMBOL_GPL(trace_event_reg);

void trace_event_enable_cmd_record(bool enable)
{
	struct trace_event_file *file;
	struct trace_array *tr;

	lockdep_assert_held(&event_mutex);

	do_for_each_event_file(tr, file) {

		if (!(file->flags & EVENT_FILE_FL_ENABLED))
			continue;

		if (enable) {
			tracing_start_cmdline_record();
			set_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
		} else {
			tracing_stop_cmdline_record();
			clear_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
		}
	} while_for_each_event_file();
}

void trace_event_enable_tgid_record(bool enable)
{
	struct trace_event_file *file;
	struct trace_array *tr;

	lockdep_assert_held(&event_mutex);

	do_for_each_event_file(tr, file) {
		if (!(file->flags & EVENT_FILE_FL_ENABLED))
			continue;

		if (enable) {
			tracing_start_tgid_record();
			set_bit(EVENT_FILE_FL_RECORDED_TGID_BIT, &file->flags);
		} else {
			tracing_stop_tgid_record();
			clear_bit(EVENT_FILE_FL_RECORDED_TGID_BIT,
				  &file->flags);
		}
	} while_for_each_event_file();
}

static int __ftrace_event_enable_disable(struct trace_event_file *file,
					 int enable, int soft_disable)
{
	struct trace_event_call *call = file->event_call;
	struct trace_array *tr = file->tr;
	int ret = 0;
	int disable;

	switch (enable) {
	case 0:
		/*
		 * When soft_disable is set and enable is cleared, the sm_ref
		 * reference counter is decremented. If it reaches 0, we want
		 * to clear the SOFT_DISABLED flag but leave the event in the
		 * state that it was. That is, if the event was enabled and
		 * SOFT_DISABLED isn't set, then do nothing. But if SOFT_DISABLED
		 * is set we do not want the event to be enabled before we
		 * clear the bit.
		 *
		 * When soft_disable is not set but the SOFT_MODE flag is,
		 * we do nothing. Do not disable the tracepoint, otherwise
		 * "soft enable"s (clearing the SOFT_DISABLED bit) won't work.
		 */
		if (soft_disable) {
			if (atomic_dec_return(&file->sm_ref) > 0)
				break;
			disable = file->flags & EVENT_FILE_FL_SOFT_DISABLED;
			clear_bit(EVENT_FILE_FL_SOFT_MODE_BIT, &file->flags);
			/* Disable use of trace_buffered_event */
			trace_buffered_event_disable();
		} else
			disable = !(file->flags & EVENT_FILE_FL_SOFT_MODE);

		if (disable && (file->flags & EVENT_FILE_FL_ENABLED)) {
			clear_bit(EVENT_FILE_FL_ENABLED_BIT, &file->flags);
			if (file->flags & EVENT_FILE_FL_RECORDED_CMD) {
				tracing_stop_cmdline_record();
				clear_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
			}

			if (file->flags & EVENT_FILE_FL_RECORDED_TGID) {
				tracing_stop_tgid_record();
				clear_bit(EVENT_FILE_FL_RECORDED_TGID_BIT, &file->flags);
			}

			call->class->reg(call, TRACE_REG_UNREGISTER, file);
		}
		/* If in SOFT_MODE, just set the SOFT_DISABLE_BIT, else clear it */
		if (file->flags & EVENT_FILE_FL_SOFT_MODE)
			set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
		else
			clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
		break;
	case 1:
		/*
		 * When soft_disable is set and enable is set, we want to
		 * register the tracepoint for the event, but leave the event
		 * as is. That means, if the event was already enabled, we do
		 * nothing (but set SOFT_MODE). If the event is disabled, we
		 * set SOFT_DISABLED before enabling the event tracepoint, so
		 * it still seems to be disabled.
		 */
		if (!soft_disable)
			clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
		else {
			if (atomic_inc_return(&file->sm_ref) > 1)
				break;
			set_bit(EVENT_FILE_FL_SOFT_MODE_BIT, &file->flags);
			/* Enable use of trace_buffered_event */
			trace_buffered_event_enable();
		}

		if (!(file->flags & EVENT_FILE_FL_ENABLED)) {
			bool cmd = false, tgid = false;

			/* Keep the event disabled, when going to SOFT_MODE. */
			if (soft_disable)
				set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);

			if (tr->trace_flags & TRACE_ITER_RECORD_CMD) {
				cmd = true;
				tracing_start_cmdline_record();
				set_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
			}

			if (tr->trace_flags & TRACE_ITER_RECORD_TGID) {
				tgid = true;
				tracing_start_tgid_record();
				set_bit(EVENT_FILE_FL_RECORDED_TGID_BIT, &file->flags);
			}

			ret = call->class->reg(call, TRACE_REG_REGISTER, file);
			if (ret) {
				if (cmd)
					tracing_stop_cmdline_record();
				if (tgid)
					tracing_stop_tgid_record();
				pr_info("event trace: Could not enable event %s\n",
					trace_event_name(call));
				break;
			}
			set_bit(EVENT_FILE_FL_ENABLED_BIT, &file->flags);

			/* WAS_ENABLED gets set but never cleared. */
			set_bit(EVENT_FILE_FL_WAS_ENABLED_BIT, &file->flags);
		}
		break;
	}

	return ret;
}

int trace_event_enable_disable(struct trace_event_file *file,
			       int enable, int soft_disable)
{
	return __ftrace_event_enable_disable(file, enable, soft_disable);
}

static int ftrace_event_enable_disable(struct trace_event_file *file,
				       int enable)
{
	return __ftrace_event_enable_disable(file, enable, 0);
}

static void ftrace_clear_events(struct trace_array *tr)
{
	struct trace_event_file *file;

	mutex_lock(&event_mutex);
	list_for_each_entry(file, &tr->events, list) {
		ftrace_event_enable_disable(file, 0);
	}
	mutex_unlock(&event_mutex);
}

static void
event_filter_pid_sched_process_exit(void *data, struct task_struct *task)
{
	struct trace_pid_list *pid_list;
	struct trace_array *tr = data;

	pid_list = rcu_dereference_raw(tr->filtered_pids);
	trace_filter_add_remove_task(pid_list, NULL, task);

	pid_list = rcu_dereference_raw(tr->filtered_no_pids);
	trace_filter_add_remove_task(pid_list, NULL, task);
}

static void
event_filter_pid_sched_process_fork(void *data,
				    struct task_struct *self,
				    struct task_struct *task)
{
	struct trace_pid_list *pid_list;
	struct trace_array *tr = data;

	pid_list = rcu_dereference_sched(tr->filtered_pids);
	trace_filter_add_remove_task(pid_list, self, task);

	pid_list = rcu_dereference_sched(tr->filtered_no_pids);
	trace_filter_add_remove_task(pid_list, self, task);
}

void trace_event_follow_fork(struct trace_array *tr, bool enable)
{
	if (enable) {
		register_trace_prio_sched_process_fork(event_filter_pid_sched_process_fork,
						       tr, INT_MIN);
		register_trace_prio_sched_process_free(event_filter_pid_sched_process_exit,
						       tr, INT_MAX);
	} else {
		unregister_trace_sched_process_fork(event_filter_pid_sched_process_fork,
						    tr);
		unregister_trace_sched_process_free(event_filter_pid_sched_process_exit,
						    tr);
	}
}

static void
event_filter_pid_sched_switch_probe_pre(void *data, bool preempt,
					struct task_struct *prev,
					struct task_struct *next,
					unsigned int prev_state)
{
	struct trace_array *tr = data;
	struct trace_pid_list *no_pid_list;
	struct trace_pid_list *pid_list;
	bool ret;

	pid_list = rcu_dereference_sched(tr->filtered_pids);
	no_pid_list = rcu_dereference_sched(tr->filtered_no_pids);

	/*
	 * Sched switch is funny, as we only want to ignore it
	 * in the notrace case if both prev and next should be ignored.
	 */
	ret = trace_ignore_this_task(NULL, no_pid_list, prev) &&
	      trace_ignore_this_task(NULL, no_pid_list, next);

	this_cpu_write(tr->array_buffer.data->ignore_pid, ret ||
		       (trace_ignore_this_task(pid_list, NULL, prev) &&
			trace_ignore_this_task(pid_list, NULL, next)));
}

static void
event_filter_pid_sched_switch_probe_post(void *data, bool preempt,
					 struct task_struct *prev,
					 struct task_struct *next,
					 unsigned int prev_state)
{
	struct trace_array *tr = data;
	struct trace_pid_list *no_pid_list;
	struct trace_pid_list *pid_list;

	pid_list = rcu_dereference_sched(tr->filtered_pids);
	no_pid_list = rcu_dereference_sched(tr->filtered_no_pids);

	this_cpu_write(tr->array_buffer.data->ignore_pid,
		       trace_ignore_this_task(pid_list, no_pid_list, next));
}

static void
event_filter_pid_sched_wakeup_probe_pre(void *data, struct task_struct *task)
{
	struct trace_array *tr = data;
	struct trace_pid_list *no_pid_list;
	struct trace_pid_list *pid_list;

	/* Nothing to do if we are already tracing */
	if (!this_cpu_read(tr->array_buffer.data->ignore_pid))
		return;

	pid_list = rcu_dereference_sched(tr->filtered_pids);
	no_pid_list = rcu_dereference_sched(tr->filtered_no_pids);

	this_cpu_write(tr->array_buffer.data->ignore_pid,
		       trace_ignore_this_task(pid_list, no_pid_list, task));
}

static void
event_filter_pid_sched_wakeup_probe_post(void *data, struct task_struct *task)
{
	struct trace_array *tr = data;
	struct trace_pid_list *no_pid_list;
	struct trace_pid_list *pid_list;

	/* Nothing to do if we are not tracing */
	if (this_cpu_read(tr->array_buffer.data->ignore_pid))
		return;

	pid_list = rcu_dereference_sched(tr->filtered_pids);
	no_pid_list = rcu_dereference_sched(tr->filtered_no_pids);

	/* Set tracing if current is enabled */
	this_cpu_write(tr->array_buffer.data->ignore_pid,
		       trace_ignore_this_task(pid_list, no_pid_list, current));
}

static void unregister_pid_events(struct trace_array *tr)
{
	unregister_trace_sched_switch(event_filter_pid_sched_switch_probe_pre, tr);
	unregister_trace_sched_switch(event_filter_pid_sched_switch_probe_post, tr);

	unregister_trace_sched_wakeup(event_filter_pid_sched_wakeup_probe_pre, tr);
	unregister_trace_sched_wakeup(event_filter_pid_sched_wakeup_probe_post, tr);

	unregister_trace_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_pre, tr);
	unregister_trace_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_post, tr);

	unregister_trace_sched_waking(event_filter_pid_sched_wakeup_probe_pre, tr);
	unregister_trace_sched_waking(event_filter_pid_sched_wakeup_probe_post, tr);
}

static void __ftrace_clear_event_pids(struct trace_array *tr, int type)
{
	struct trace_pid_list *pid_list;
	struct trace_pid_list *no_pid_list;
	struct trace_event_file *file;
	int cpu;

	pid_list = rcu_dereference_protected(tr->filtered_pids,
					     lockdep_is_held(&event_mutex));
	no_pid_list = rcu_dereference_protected(tr->filtered_no_pids,
						lockdep_is_held(&event_mutex));

	/* Make sure there's something to do */
	if (!pid_type_enabled(type, pid_list, no_pid_list))
		return;

	if (!still_need_pid_events(type, pid_list, no_pid_list)) {
		unregister_pid_events(tr);

		list_for_each_entry(file, &tr->events, list) {
			clear_bit(EVENT_FILE_FL_PID_FILTER_BIT, &file->flags);
		}

		for_each_possible_cpu(cpu)
			per_cpu_ptr(tr->array_buffer.data, cpu)->ignore_pid = false;
	}

	if (type & TRACE_PIDS)
		rcu_assign_pointer(tr->filtered_pids, NULL);

	if (type & TRACE_NO_PIDS)
		rcu_assign_pointer(tr->filtered_no_pids, NULL);

	/* Wait till all users are no longer using pid filtering */
	tracepoint_synchronize_unregister();

	if ((type & TRACE_PIDS) && pid_list)
		trace_pid_list_free(pid_list);

	if ((type & TRACE_NO_PIDS) && no_pid_list)
		trace_pid_list_free(no_pid_list);
}

static void ftrace_clear_event_pids(struct trace_array *tr, int type)
{
	mutex_lock(&event_mutex);
	__ftrace_clear_event_pids(tr, type);
	mutex_unlock(&event_mutex);
}

static void __put_system(struct event_subsystem *system)
{
	struct event_filter *filter = system->filter;

	WARN_ON_ONCE(system_refcount(system) == 0);
	if (system_refcount_dec(system))
		return;

	list_del(&system->list);

	if (filter) {
		kfree(filter->filter_string);
		kfree(filter);
	}
	kfree_const(system->name);
	kfree(system);
}

static void __get_system(struct event_subsystem *system)
{
	WARN_ON_ONCE(system_refcount(system) == 0);
	system_refcount_inc(system);
}

static void __get_system_dir(struct trace_subsystem_dir *dir)
{
	WARN_ON_ONCE(dir->ref_count == 0);
	dir->ref_count++;
	__get_system(dir->subsystem);
}

static void __put_system_dir(struct trace_subsystem_dir *dir)
{
	WARN_ON_ONCE(dir->ref_count == 0);
	/* If the subsystem is about to be freed, the dir must be too */
	WARN_ON_ONCE(system_refcount(dir->subsystem) == 1 && dir->ref_count != 1);

	__put_system(dir->subsystem);
	if (!--dir->ref_count)
		kfree(dir);
}

static void put_system(struct trace_subsystem_dir *dir)
{
	mutex_lock(&event_mutex);
	__put_system_dir(dir);
	mutex_unlock(&event_mutex);
}

static void remove_subsystem(struct trace_subsystem_dir *dir)
{
	if (!dir)
		return;

	if (!--dir->nr_events) {
		eventfs_remove_dir(dir->ei);
		list_del(&dir->list);
		__put_system_dir(dir);
	}
}

void event_file_get(struct trace_event_file *file)
{
	atomic_inc(&file->ref);
}

void event_file_put(struct trace_event_file *file)
{
	if (WARN_ON_ONCE(!atomic_read(&file->ref))) {
		if (file->flags & EVENT_FILE_FL_FREED)
			kmem_cache_free(file_cachep, file);
		return;
	}

	if (atomic_dec_and_test(&file->ref)) {
		/* Count should only go to zero when it is freed */
		if (WARN_ON_ONCE(!(file->flags & EVENT_FILE_FL_FREED)))
			return;
		kmem_cache_free(file_cachep, file);
	}
}

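/*
 * remove_event_file_dir() below marks the file EVENT_FILE_FL_FREED and
 * drops the reference taken at creation; event_file_put() then only
 * frees the file once the count hits zero with that flag set.
 */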
static void remove_event_file_dir(struct trace_event_file *file)
{
	eventfs_remove_dir(file->ei);
	list_del(&file->list);
	remove_subsystem(file->system);
	free_event_filter(file->filter);
	file->flags |= EVENT_FILE_FL_FREED;
	event_file_put(file);
}

/*
 * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events.
 */
static int
__ftrace_set_clr_event_nolock(struct trace_array *tr, const char *match,
			      const char *sub, const char *event, int set)
{
	struct trace_event_file *file;
	struct trace_event_call *call;
	const char *name;
	int ret = -EINVAL;
	int eret = 0;

	list_for_each_entry(file, &tr->events, list) {

		call = file->event_call;
		name = trace_event_name(call);

		if (!name || !call->class || !call->class->reg)
			continue;

		if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
			continue;

		if (match &&
		    strcmp(match, name) != 0 &&
		    strcmp(match, call->class->system) != 0)
			continue;

		if (sub && strcmp(sub, call->class->system) != 0)
			continue;

		if (event && strcmp(event, name) != 0)
			continue;

		ret = ftrace_event_enable_disable(file, set);

		/*
		 * Save the first error and return that. Some events
		 * may still have been enabled, but let the user
		 * know that something went wrong.
		 */
		if (ret && !eret)
			eret = ret;

		ret = eret;
	}

	return ret;
}

static int __ftrace_set_clr_event(struct trace_array *tr, const char *match,
				  const char *sub, const char *event, int set)
{
	int ret;

	mutex_lock(&event_mutex);
	ret = __ftrace_set_clr_event_nolock(tr, match, sub, event, set);
	mutex_unlock(&event_mutex);

	return ret;
}

int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
{
	char *event = NULL, *sub = NULL, *match;
	int ret;

	if (!tr)
		return -ENOENT;
	/*
	 * The buf format can be <subsystem>:<event-name>
	 *  *:<event-name> means any event by that name.
	 *  :<event-name> is the same.
	 *
	 *  <subsystem>:* means all events in that subsystem
	 *  <subsystem>: means the same.
	 *
	 *  <name> (no ':') means all events in a subsystem with
	 *  the name <name> or any event that matches <name>
	 */
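	/*
	 * For example, all of these are valid (the names are illustrative):
	 *
	 *	"sched:sched_switch"	- only the sched_switch event
	 *	"sched:"		- every event in the sched subsystem
	 *	":sched_switch"		- sched_switch in any subsystem
	 *	"sched"			- the sched subsystem, or any event
	 *				  named "sched"
	 */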

	match = strsep(&buf, ":");
	if (buf) {
		sub = match;
		event = buf;
		match = NULL;

		if (!strlen(sub) || strcmp(sub, "*") == 0)
			sub = NULL;
		if (!strlen(event) || strcmp(event, "*") == 0)
			event = NULL;
	}

	ret = __ftrace_set_clr_event(tr, match, sub, event, set);

	/* Put back the colon to allow this to be called again */
	if (buf)
		*(buf - 1) = ':';

	return ret;
}

/**
 * trace_set_clr_event - enable or disable an event
 * @system: system name to match (NULL for any system)
 * @event: event name to match (NULL for all events, within system)
 * @set: 1 to enable, 0 to disable
 *
 * This is a way for other parts of the kernel to enable or disable
 * event recording.
 *
 * Returns 0 on success, -EINVAL if the parameters do not match any
 * registered events.
 */
int trace_set_clr_event(const char *system, const char *event, int set)
{
	struct trace_array *tr = top_trace_array();

	if (!tr)
		return -ENODEV;

	return __ftrace_set_clr_event(tr, NULL, system, event, set);
}
EXPORT_SYMBOL_GPL(trace_set_clr_event);
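/*
 * Example (illustrative): enable every event in the "sched" subsystem
 * from kernel code:
 *
 *	ret = trace_set_clr_event("sched", NULL, 1);
 */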

/**
 * trace_array_set_clr_event - enable or disable an event for a trace array.
 * @tr: concerned trace array.
 * @system: system name to match (NULL for any system)
 * @event: event name to match (NULL for all events, within system)
 * @enable: true to enable, false to disable
 *
 * This is a way for other parts of the kernel to enable or disable
 * event recording.
 *
 * Returns 0 on success, -EINVAL if the parameters do not match any
 * registered events.
 */
int trace_array_set_clr_event(struct trace_array *tr, const char *system,
			      const char *event, bool enable)
{
	int set;

	if (!tr)
		return -ENOENT;

	set = (enable == true) ? 1 : 0;
	return __ftrace_set_clr_event(tr, NULL, system, event, set);
}
EXPORT_SYMBOL_GPL(trace_array_set_clr_event);
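/*
 * Sketch of a caller; the instance name "myinst" is hypothetical and the
 * lookup helper may differ by kernel version:
 *
 *	struct trace_array *tr = trace_array_get_by_name("myinst");
 *
 *	if (tr) {
 *		trace_array_set_clr_event(tr, "sched", "sched_switch", true);
 *		trace_array_put(tr);
 *	}
 */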

/* 128 should be much more than enough */
#define EVENT_BUF_SIZE		127

static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	struct seq_file *m = file->private_data;
	struct trace_array *tr = m->private;
	ssize_t read, ret;

	if (!cnt)
		return 0;

	ret = tracing_update_buffers(tr);
	if (ret < 0)
		return ret;

	if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
		return -ENOMEM;

	read = trace_get_user(&parser, ubuf, cnt, ppos);

	if (read >= 0 && trace_parser_loaded((&parser))) {
		int set = 1;

		if (*parser.buffer == '!')
			set = 0;

		ret = ftrace_set_clr_event(tr, parser.buffer + !set, set);
		if (ret)
			goto out_put;
	}

	ret = read;

 out_put:
	trace_parser_put(&parser);

	return ret;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_event_file *file = v;
	struct trace_event_call *call;
	struct trace_array *tr = m->private;

	(*pos)++;

	list_for_each_entry_continue(file, &tr->events, list) {
		call = file->event_call;
		/*
		 * The ftrace subsystem is for showing formats only.
		 * They can not be enabled or disabled via the event files.
		 */
		if (call->class && call->class->reg &&
		    !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
			return file;
	}

	return NULL;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct trace_event_file *file;
	struct trace_array *tr = m->private;
	loff_t l;

	mutex_lock(&event_mutex);

	file = list_entry(&tr->events, struct trace_event_file, list);
	for (l = 0; l <= *pos; ) {
		file = t_next(m, file, &l);
		if (!file)
			break;
	}
	return file;
}

static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_event_file *file = v;
	struct trace_array *tr = m->private;

	(*pos)++;

	list_for_each_entry_continue(file, &tr->events, list) {
		if (file->flags & EVENT_FILE_FL_ENABLED)
			return file;
	}

	return NULL;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct trace_event_file *file;
	struct trace_array *tr = m->private;
	loff_t l;

	mutex_lock(&event_mutex);

	file = list_entry(&tr->events, struct trace_event_file, list);
	for (l = 0; l <= *pos; ) {
		file = s_next(m, file, &l);
		if (!file)
			break;
	}
	return file;
}

static int t_show(struct seq_file *m, void *v)
{
	struct trace_event_file *file = v;
	struct trace_event_call *call = file->event_call;

	if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
		seq_printf(m, "%s:", call->class->system);
	seq_printf(m, "%s\n", trace_event_name(call));

	return 0;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&event_mutex);
}

static void *
__next(struct seq_file *m, void *v, loff_t *pos, int type)
{
	struct trace_array *tr = m->private;
	struct trace_pid_list *pid_list;

	if (type == TRACE_PIDS)
		pid_list = rcu_dereference_sched(tr->filtered_pids);
	else
		pid_list = rcu_dereference_sched(tr->filtered_no_pids);

	return trace_pid_next(pid_list, v, pos);
}

static void *
p_next(struct seq_file *m, void *v, loff_t *pos)
{
	return __next(m, v, pos, TRACE_PIDS);
}

static void *
np_next(struct seq_file *m, void *v, loff_t *pos)
{
	return __next(m, v, pos, TRACE_NO_PIDS);
}

static void *__start(struct seq_file *m, loff_t *pos, int type)
	__acquires(RCU)
{
	struct trace_pid_list *pid_list;
	struct trace_array *tr = m->private;

	/*
	 * Grab the mutex, to keep calls to p_next() having the same
	 * tr->filtered_pids as p_start() has.
	 * If we just passed the tr->filtered_pids around, then RCU would
	 * have been enough, but doing that makes things more complex.
	 */
	mutex_lock(&event_mutex);
	rcu_read_lock_sched();

	if (type == TRACE_PIDS)
		pid_list = rcu_dereference_sched(tr->filtered_pids);
	else
		pid_list = rcu_dereference_sched(tr->filtered_no_pids);

	if (!pid_list)
		return NULL;

	return trace_pid_start(pid_list, pos);
}

static void *p_start(struct seq_file *m, loff_t *pos)
	__acquires(RCU)
{
	return __start(m, pos, TRACE_PIDS);
}

static void *np_start(struct seq_file *m, loff_t *pos)
	__acquires(RCU)
{
	return __start(m, pos, TRACE_NO_PIDS);
}

static void p_stop(struct seq_file *m, void *p)
	__releases(RCU)
{
	rcu_read_unlock_sched();
	mutex_unlock(&event_mutex);
}

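/*
 * The "enable" file below reads back "0" or "1", with a '*' appended
 * (e.g. "0*") when the event is in soft mode or soft disabled, i.e.
 * a trigger or other user can flip it without (un)registering the
 * tracepoint.
 */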
static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct trace_event_file *file;
	unsigned long flags;
	char buf[4] = "0";

	mutex_lock(&event_mutex);
	file = event_file_data(filp);
	if (likely(file))
		flags = file->flags;
	mutex_unlock(&event_mutex);

	if (!file || flags & EVENT_FILE_FL_FREED)
		return -ENODEV;

	if (flags & EVENT_FILE_FL_ENABLED &&
	    !(flags & EVENT_FILE_FL_SOFT_DISABLED))
		strcpy(buf, "1");

	if (flags & EVENT_FILE_FL_SOFT_DISABLED ||
	    flags & EVENT_FILE_FL_SOFT_MODE)
		strcat(buf, "*");

	strcat(buf, "\n");

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, strlen(buf));
}

static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct trace_event_file *file;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	switch (val) {
	case 0:
	case 1:
		ret = -ENODEV;
		mutex_lock(&event_mutex);
		file = event_file_data(filp);
		if (likely(file && !(file->flags & EVENT_FILE_FL_FREED))) {
			ret = tracing_update_buffers(file->tr);
			if (ret < 0) {
				mutex_unlock(&event_mutex);
				return ret;
			}
			ret = ftrace_event_enable_disable(file, val);
		}
		mutex_unlock(&event_mutex);
		break;

	default:
		return -EINVAL;
	}

	*ppos += cnt;

	return ret ? ret : cnt;
}

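/*
 * For the subsystem "enable" file: bit 0 of "set" records that at least
 * one event is disabled, bit 1 that at least one is enabled. Indexing
 * set_to_char[] with that mask reads back as '?' (no events), '0' (all
 * disabled), '1' (all enabled) or 'X' (a mixture).
 */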
static ssize_t
system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	const char set_to_char[4] = { '?', '0', '1', 'X' };
	struct trace_subsystem_dir *dir = filp->private_data;
	struct event_subsystem *system = dir->subsystem;
	struct trace_event_call *call;
	struct trace_event_file *file;
	struct trace_array *tr = dir->tr;
	char buf[2];
	int set = 0;
	int ret;

	mutex_lock(&event_mutex);
	list_for_each_entry(file, &tr->events, list) {
		call = file->event_call;
		if ((call->flags & TRACE_EVENT_FL_IGNORE_ENABLE) ||
		    !trace_event_name(call) || !call->class || !call->class->reg)
			continue;

		if (system && strcmp(call->class->system, system->name) != 0)
			continue;

		/*
		 * We need to find out if all the events are set
		 * or if all events are cleared, or if we have
		 * a mixture.
		 */
		set |= (1 << !!(file->flags & EVENT_FILE_FL_ENABLED));

		/*
		 * If we have a mixture, no need to look further.
		 */
		if (set == 3)
			break;
	}
	mutex_unlock(&event_mutex);

	buf[0] = set_to_char[set];
	buf[1] = '\n';

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);

	return ret;
}

static ssize_t
system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	struct trace_subsystem_dir *dir = filp->private_data;
	struct event_subsystem *system = dir->subsystem;
	const char *name = NULL;
	unsigned long val;
	ssize_t ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	ret = tracing_update_buffers(dir->tr);
	if (ret < 0)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	/*
	 * Opening of "enable" adds a ref count to system,
	 * so the name is safe to use.
	 */
	if (system)
		name = system->name;

	ret = __ftrace_set_clr_event(dir->tr, NULL, name, NULL, val);
	if (ret)
		goto out;

	ret = cnt;

out:
	*ppos += cnt;

	return ret;
}

enum {
	FORMAT_HEADER		= 1,
	FORMAT_FIELD_SEPERATOR	= 2,
	FORMAT_PRINTFMT		= 3,
};
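/*
 * The "format" file iterator below walks FORMAT_HEADER, then the common
 * fields, a separator, the event's own fields, and finally the print
 * fmt. The field lists are walked via ->prev: since fields were added
 * with list_add() (prepending), that yields them in definition order.
 */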

static void *f_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_event_call *call = event_file_data(m->private);
	struct list_head *common_head = &ftrace_common_fields;
	struct list_head *head = trace_get_fields(call);
	struct list_head *node = v;

	(*pos)++;

	switch ((unsigned long)v) {
	case FORMAT_HEADER:
		node = common_head;
		break;

	case FORMAT_FIELD_SEPERATOR:
		node = head;
		break;

	case FORMAT_PRINTFMT:
		/* all done */
		return NULL;
	}

	node = node->prev;
	if (node == common_head)
		return (void *)FORMAT_FIELD_SEPERATOR;
	else if (node == head)
		return (void *)FORMAT_PRINTFMT;
	else
		return node;
}

static int f_show(struct seq_file *m, void *v)
{
	struct trace_event_call *call = event_file_data(m->private);
	struct ftrace_event_field *field;
	const char *array_descriptor;

	switch ((unsigned long)v) {
	case FORMAT_HEADER:
		seq_printf(m, "name: %s\n", trace_event_name(call));
		seq_printf(m, "ID: %d\n", call->event.type);
		seq_puts(m, "format:\n");
		return 0;

	case FORMAT_FIELD_SEPERATOR:
		seq_putc(m, '\n');
		return 0;

	case FORMAT_PRINTFMT:
		seq_printf(m, "\nprint fmt: %s\n",
			   call->print_fmt);
		return 0;
	}

	field = list_entry(v, struct ftrace_event_field, link);
	/*
	 * Smartly shows the array type (except dynamic array).
	 * Normal:
	 *	field:TYPE VAR
	 * If TYPE := TYPE[LEN], it is shown:
	 *	field:TYPE VAR[LEN]
	 */
	array_descriptor = strchr(field->type, '[');

	if (str_has_prefix(field->type, "__data_loc"))
		array_descriptor = NULL;

	if (!array_descriptor)
		seq_printf(m, "\tfield:%s %s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
			   field->type, field->name, field->offset,
			   field->size, !!field->is_signed);
	else if (field->len)
		seq_printf(m, "\tfield:%.*s %s[%d];\toffset:%u;\tsize:%u;\tsigned:%d;\n",
			   (int)(array_descriptor - field->type),
			   field->type, field->name,
			   field->len, field->offset,
			   field->size, !!field->is_signed);
	else
		seq_printf(m, "\tfield:%.*s %s[];\toffset:%u;\tsize:%u;\tsigned:%d;\n",
			   (int)(array_descriptor - field->type),
			   field->type, field->name,
			   field->offset, field->size, !!field->is_signed);

	return 0;
}
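/*
 * A "format" file rendered by the above looks roughly like this (the
 * event, ID, offsets and fields shown are illustrative):
 *
 *	name: sched_switch
 *	ID: 316
 *	format:
 *		field:unsigned short common_type;	offset:0;	size:2;	signed:0;
 *		...
 *
 *		field:char prev_comm[16];	offset:8;	size:16;	signed:0;
 *		...
 *
 *	print fmt: "prev_comm=%s prev_pid=%d ...", REC->prev_comm, REC->prev_pid, ...
 */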

static void *f_start(struct seq_file *m, loff_t *pos)
{
	void *p = (void *)FORMAT_HEADER;
	loff_t l = 0;

	/* ->stop() is called even if ->start() fails */
	mutex_lock(&event_mutex);
	if (!event_file_data(m->private))
		return ERR_PTR(-ENODEV);

	while (l < *pos && p)
		p = f_next(m, p, &l);

	return p;
}

static void f_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&event_mutex);
}

static const struct seq_operations trace_format_seq_ops = {
	.start		= f_start,
	.next		= f_next,
	.stop		= f_stop,
	.show		= f_show,
};

static int trace_format_open(struct inode *inode, struct file *file)
{
	struct seq_file *m;
	int ret;

	/* Do we want to hide event format files on tracefs lockdown? */

	ret = seq_open(file, &trace_format_seq_ops);
	if (ret < 0)
		return ret;

	m = file->private_data;
	m->private = file;

	return 0;
}

static ssize_t
event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	int id = (long)event_file_data(filp);
	char buf[32];
	int len;

	if (unlikely(!id))
		return -ENODEV;

	len = sprintf(buf, "%d\n", id);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
}

static ssize_t
event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct trace_event_file *file;
	struct trace_seq *s;
	int r = -ENODEV;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);

	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	mutex_lock(&event_mutex);
	file = event_file_data(filp);
	if (file && !(file->flags & EVENT_FILE_FL_FREED))
		print_event_filter(file, s);
	mutex_unlock(&event_mutex);

	if (file)
		r = simple_read_from_buffer(ubuf, cnt, ppos,
					    s->buffer, trace_seq_used(s));

	kfree(s);

	return r;
}

static ssize_t
event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct trace_event_file *file;
	char *buf;
	int err = -ENODEV;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = memdup_user_nul(ubuf, cnt);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	mutex_lock(&event_mutex);
	file = event_file_data(filp);
	if (file)
		err = apply_event_filter(file, buf);
	mutex_unlock(&event_mutex);

	kfree(buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}

static LIST_HEAD(event_subsystems);

static int subsystem_open(struct inode *inode, struct file *filp)
{
	struct trace_subsystem_dir *dir = NULL, *iter_dir;
	struct trace_array *tr = NULL, *iter_tr;
	struct event_subsystem *system = NULL;
	int ret;

	if (tracing_is_disabled())
		return -ENODEV;

	/* Make sure the system still exists */
	mutex_lock(&event_mutex);
	mutex_lock(&trace_types_lock);
	list_for_each_entry(iter_tr, &ftrace_trace_arrays, list) {
		list_for_each_entry(iter_dir, &iter_tr->systems, list) {
			if (iter_dir == inode->i_private) {
				/* Don't open systems with no events */
				tr = iter_tr;
				dir = iter_dir;
				if (dir->nr_events) {
					__get_system_dir(dir);
					system = dir->subsystem;
				}
				goto exit_loop;
			}
		}
	}
 exit_loop:
	mutex_unlock(&trace_types_lock);
	mutex_unlock(&event_mutex);

	if (!system)
		return -ENODEV;

	/* Still need to increment the ref count of the system */
	if (trace_array_get(tr) < 0) {
		put_system(dir);
		return -ENODEV;
	}

	ret = tracing_open_generic(inode, filp);
	if (ret < 0) {
		trace_array_put(tr);
		put_system(dir);
	}

	return ret;
}

1802 | static int system_tr_open(struct inode *inode, struct file *filp) |
1803 | { |
1804 | struct trace_subsystem_dir *dir; |
1805 | struct trace_array *tr = inode->i_private; |
1806 | int ret; |
1807 | |
1808 | /* Make a temporary dir that has no system but points to tr */ |
	dir = kzalloc(sizeof(*dir), GFP_KERNEL);
	if (!dir)
		return -ENOMEM;

	ret = tracing_open_generic_tr(inode, filp);
	if (ret < 0) {
		kfree(dir);
1816 | return ret; |
1817 | } |
1818 | dir->tr = tr; |
1819 | filp->private_data = dir; |
1820 | |
1821 | return 0; |
1822 | } |
1823 | |
1824 | static int subsystem_release(struct inode *inode, struct file *file) |
1825 | { |
1826 | struct trace_subsystem_dir *dir = file->private_data; |
1827 | |
	trace_array_put(dir->tr);
1829 | |
1830 | /* |
1831 | * If dir->subsystem is NULL, then this is a temporary |
1832 | * descriptor that was made for a trace_array to enable |
1833 | * all subsystems. |
1834 | */ |
1835 | if (dir->subsystem) |
1836 | put_system(dir); |
1837 | else |
		kfree(dir);
1839 | |
1840 | return 0; |
1841 | } |
1842 | |
1843 | static ssize_t |
1844 | subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt, |
1845 | loff_t *ppos) |
1846 | { |
1847 | struct trace_subsystem_dir *dir = filp->private_data; |
1848 | struct event_subsystem *system = dir->subsystem; |
1849 | struct trace_seq *s; |
1850 | int r; |
1851 | |
1852 | if (*ppos) |
1853 | return 0; |
1854 | |
	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_subsystem_event_filter(system, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, trace_seq_used(s));

	kfree(s);
1866 | |
1867 | return r; |
1868 | } |
1869 | |
1870 | static ssize_t |
1871 | subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt, |
1872 | loff_t *ppos) |
1873 | { |
1874 | struct trace_subsystem_dir *dir = filp->private_data; |
1875 | char *buf; |
1876 | int err; |
1877 | |
1878 | if (cnt >= PAGE_SIZE) |
1879 | return -EINVAL; |
1880 | |
1881 | buf = memdup_user_nul(ubuf, cnt); |
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	err = apply_subsystem_event_filter(dir, buf);
	kfree(buf);
1887 | if (err < 0) |
1888 | return err; |
1889 | |
1890 | *ppos += cnt; |
1891 | |
1892 | return cnt; |
1893 | } |
1894 | |
1895 | static ssize_t |
show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
1897 | { |
1898 | int (*func)(struct trace_seq *s) = filp->private_data; |
1899 | struct trace_seq *s; |
1900 | int r; |
1901 | |
1902 | if (*ppos) |
1903 | return 0; |
1904 | |
	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	func(s);
	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, trace_seq_used(s));

	kfree(s);
1916 | |
1917 | return r; |
1918 | } |
1919 | |
1920 | static void ignore_task_cpu(void *data) |
1921 | { |
1922 | struct trace_array *tr = data; |
1923 | struct trace_pid_list *pid_list; |
1924 | struct trace_pid_list *no_pid_list; |
1925 | |
1926 | /* |
1927 | * This function is called by on_each_cpu() while the |
1928 | * event_mutex is held. |
1929 | */ |
1930 | pid_list = rcu_dereference_protected(tr->filtered_pids, |
1931 | mutex_is_locked(&event_mutex)); |
1932 | no_pid_list = rcu_dereference_protected(tr->filtered_no_pids, |
1933 | mutex_is_locked(&event_mutex)); |
1934 | |
1935 | this_cpu_write(tr->array_buffer.data->ignore_pid, |
1936 | trace_ignore_this_task(pid_list, no_pid_list, current)); |
1937 | } |
1938 | |
1939 | static void register_pid_events(struct trace_array *tr) |
1940 | { |
1941 | /* |
1942 | * Register a probe that is called before all other probes |
1943 | * to set ignore_pid if next or prev do not match. |
	 * Register a probe that is called after all other probes
1945 | * to only keep ignore_pid set if next pid matches. |
1946 | */ |
	register_trace_prio_sched_switch(event_filter_pid_sched_switch_probe_pre,
					 tr, INT_MAX);
	register_trace_prio_sched_switch(event_filter_pid_sched_switch_probe_post,
					 tr, 0);

	register_trace_prio_sched_wakeup(event_filter_pid_sched_wakeup_probe_pre,
					 tr, INT_MAX);
	register_trace_prio_sched_wakeup(event_filter_pid_sched_wakeup_probe_post,
					 tr, 0);

	register_trace_prio_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_pre,
					     tr, INT_MAX);
	register_trace_prio_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_post,
					     tr, 0);

	register_trace_prio_sched_waking(event_filter_pid_sched_wakeup_probe_pre,
					 tr, INT_MAX);
	register_trace_prio_sched_waking(event_filter_pid_sched_wakeup_probe_post,
					 tr, 0);
1966 | } |
1967 | |
1968 | static ssize_t |
1969 | event_pid_write(struct file *filp, const char __user *ubuf, |
1970 | size_t cnt, loff_t *ppos, int type) |
1971 | { |
1972 | struct seq_file *m = filp->private_data; |
1973 | struct trace_array *tr = m->private; |
1974 | struct trace_pid_list *filtered_pids = NULL; |
1975 | struct trace_pid_list *other_pids = NULL; |
1976 | struct trace_pid_list *pid_list; |
1977 | struct trace_event_file *file; |
1978 | ssize_t ret; |
1979 | |
1980 | if (!cnt) |
1981 | return 0; |
1982 | |
1983 | ret = tracing_update_buffers(tr); |
1984 | if (ret < 0) |
1985 | return ret; |
1986 | |
1987 | mutex_lock(&event_mutex); |
1988 | |
1989 | if (type == TRACE_PIDS) { |
1990 | filtered_pids = rcu_dereference_protected(tr->filtered_pids, |
1991 | lockdep_is_held(&event_mutex)); |
1992 | other_pids = rcu_dereference_protected(tr->filtered_no_pids, |
1993 | lockdep_is_held(&event_mutex)); |
1994 | } else { |
1995 | filtered_pids = rcu_dereference_protected(tr->filtered_no_pids, |
1996 | lockdep_is_held(&event_mutex)); |
1997 | other_pids = rcu_dereference_protected(tr->filtered_pids, |
1998 | lockdep_is_held(&event_mutex)); |
1999 | } |
2000 | |
	ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt);
2002 | if (ret < 0) |
2003 | goto out; |
2004 | |
2005 | if (type == TRACE_PIDS) |
2006 | rcu_assign_pointer(tr->filtered_pids, pid_list); |
2007 | else |
2008 | rcu_assign_pointer(tr->filtered_no_pids, pid_list); |
2009 | |
2010 | list_for_each_entry(file, &tr->events, list) { |
		set_bit(EVENT_FILE_FL_PID_FILTER_BIT, &file->flags);
2012 | } |
2013 | |
2014 | if (filtered_pids) { |
2015 | tracepoint_synchronize_unregister(); |
		trace_pid_list_free(filtered_pids);
2017 | } else if (pid_list && !other_pids) { |
2018 | register_pid_events(tr); |
2019 | } |
2020 | |
2021 | /* |
2022 | * Ignoring of pids is done at task switch. But we have to |
2023 | * check for those tasks that are currently running. |
2024 | * Always do this in case a pid was appended or removed. |
2025 | */ |
	on_each_cpu(ignore_task_cpu, tr, 1);

 out:
	mutex_unlock(&event_mutex);
2030 | |
2031 | if (ret > 0) |
2032 | *ppos += ret; |
2033 | |
2034 | return ret; |
2035 | } |
2036 | |
2037 | static ssize_t |
2038 | ftrace_event_pid_write(struct file *filp, const char __user *ubuf, |
2039 | size_t cnt, loff_t *ppos) |
2040 | { |
	return event_pid_write(filp, ubuf, cnt, ppos, TRACE_PIDS);
2042 | } |
2043 | |
2044 | static ssize_t |
2045 | ftrace_event_npid_write(struct file *filp, const char __user *ubuf, |
2046 | size_t cnt, loff_t *ppos) |
2047 | { |
	return event_pid_write(filp, ubuf, cnt, ppos, TRACE_NO_PIDS);
2049 | } |
2050 | |
2051 | static int ftrace_event_avail_open(struct inode *inode, struct file *file); |
2052 | static int ftrace_event_set_open(struct inode *inode, struct file *file); |
2053 | static int ftrace_event_set_pid_open(struct inode *inode, struct file *file); |
2054 | static int ftrace_event_set_npid_open(struct inode *inode, struct file *file); |
2055 | static int ftrace_event_release(struct inode *inode, struct file *file); |
2056 | |
2057 | static const struct seq_operations show_event_seq_ops = { |
2058 | .start = t_start, |
2059 | .next = t_next, |
2060 | .show = t_show, |
2061 | .stop = t_stop, |
2062 | }; |
2063 | |
2064 | static const struct seq_operations show_set_event_seq_ops = { |
2065 | .start = s_start, |
2066 | .next = s_next, |
2067 | .show = t_show, |
2068 | .stop = t_stop, |
2069 | }; |
2070 | |
2071 | static const struct seq_operations show_set_pid_seq_ops = { |
2072 | .start = p_start, |
2073 | .next = p_next, |
2074 | .show = trace_pid_show, |
2075 | .stop = p_stop, |
2076 | }; |
2077 | |
2078 | static const struct seq_operations show_set_no_pid_seq_ops = { |
2079 | .start = np_start, |
2080 | .next = np_next, |
2081 | .show = trace_pid_show, |
2082 | .stop = p_stop, |
2083 | }; |
2084 | |
2085 | static const struct file_operations ftrace_avail_fops = { |
2086 | .open = ftrace_event_avail_open, |
2087 | .read = seq_read, |
2088 | .llseek = seq_lseek, |
2089 | .release = seq_release, |
2090 | }; |
2091 | |
2092 | static const struct file_operations ftrace_set_event_fops = { |
2093 | .open = ftrace_event_set_open, |
2094 | .read = seq_read, |
2095 | .write = ftrace_event_write, |
2096 | .llseek = seq_lseek, |
2097 | .release = ftrace_event_release, |
2098 | }; |
2099 | |
2100 | static const struct file_operations ftrace_set_event_pid_fops = { |
2101 | .open = ftrace_event_set_pid_open, |
2102 | .read = seq_read, |
2103 | .write = ftrace_event_pid_write, |
2104 | .llseek = seq_lseek, |
2105 | .release = ftrace_event_release, |
2106 | }; |
2107 | |
2108 | static const struct file_operations ftrace_set_event_notrace_pid_fops = { |
2109 | .open = ftrace_event_set_npid_open, |
2110 | .read = seq_read, |
2111 | .write = ftrace_event_npid_write, |
2112 | .llseek = seq_lseek, |
2113 | .release = ftrace_event_release, |
2114 | }; |
2115 | |
2116 | static const struct file_operations ftrace_enable_fops = { |
2117 | .open = tracing_open_file_tr, |
2118 | .read = event_enable_read, |
2119 | .write = event_enable_write, |
2120 | .release = tracing_release_file_tr, |
2121 | .llseek = default_llseek, |
2122 | }; |
2123 | |
2124 | static const struct file_operations ftrace_event_format_fops = { |
2125 | .open = trace_format_open, |
2126 | .read = seq_read, |
2127 | .llseek = seq_lseek, |
2128 | .release = seq_release, |
2129 | }; |
2130 | |
2131 | static const struct file_operations ftrace_event_id_fops = { |
2132 | .read = event_id_read, |
2133 | .llseek = default_llseek, |
2134 | }; |
2135 | |
2136 | static const struct file_operations ftrace_event_filter_fops = { |
2137 | .open = tracing_open_file_tr, |
2138 | .read = event_filter_read, |
2139 | .write = event_filter_write, |
2140 | .release = tracing_release_file_tr, |
2141 | .llseek = default_llseek, |
2142 | }; |
2143 | |
2144 | static const struct file_operations ftrace_subsystem_filter_fops = { |
2145 | .open = subsystem_open, |
2146 | .read = subsystem_filter_read, |
2147 | .write = subsystem_filter_write, |
2148 | .llseek = default_llseek, |
2149 | .release = subsystem_release, |
2150 | }; |
2151 | |
2152 | static const struct file_operations ftrace_system_enable_fops = { |
2153 | .open = subsystem_open, |
2154 | .read = system_enable_read, |
2155 | .write = system_enable_write, |
2156 | .llseek = default_llseek, |
2157 | .release = subsystem_release, |
2158 | }; |
2159 | |
2160 | static const struct file_operations ftrace_tr_enable_fops = { |
2161 | .open = system_tr_open, |
2162 | .read = system_enable_read, |
2163 | .write = system_enable_write, |
2164 | .llseek = default_llseek, |
2165 | .release = subsystem_release, |
2166 | }; |
2167 | |
static const struct file_operations ftrace_show_header_fops = {
2169 | .open = tracing_open_generic, |
2170 | .read = show_header, |
2171 | .llseek = default_llseek, |
2172 | }; |
2173 | |
2174 | static int |
2175 | ftrace_event_open(struct inode *inode, struct file *file, |
2176 | const struct seq_operations *seq_ops) |
2177 | { |
2178 | struct seq_file *m; |
2179 | int ret; |
2180 | |
	ret = security_locked_down(LOCKDOWN_TRACEFS);
2182 | if (ret) |
2183 | return ret; |
2184 | |
2185 | ret = seq_open(file, seq_ops); |
2186 | if (ret < 0) |
2187 | return ret; |
2188 | m = file->private_data; |
2189 | /* copy tr over to seq ops */ |
2190 | m->private = inode->i_private; |
2191 | |
2192 | return ret; |
2193 | } |
2194 | |
2195 | static int ftrace_event_release(struct inode *inode, struct file *file) |
2196 | { |
2197 | struct trace_array *tr = inode->i_private; |
2198 | |
2199 | trace_array_put(tr); |
2200 | |
2201 | return seq_release(inode, file); |
2202 | } |
2203 | |
2204 | static int |
2205 | ftrace_event_avail_open(struct inode *inode, struct file *file) |
2206 | { |
2207 | const struct seq_operations *seq_ops = &show_event_seq_ops; |
2208 | |
2209 | /* Checks for tracefs lockdown */ |
2210 | return ftrace_event_open(inode, file, seq_ops); |
2211 | } |
2212 | |
2213 | static int |
2214 | ftrace_event_set_open(struct inode *inode, struct file *file) |
2215 | { |
2216 | const struct seq_operations *seq_ops = &show_set_event_seq_ops; |
2217 | struct trace_array *tr = inode->i_private; |
2218 | int ret; |
2219 | |
2220 | ret = tracing_check_open_get_tr(tr); |
2221 | if (ret) |
2222 | return ret; |
2223 | |
2224 | if ((file->f_mode & FMODE_WRITE) && |
2225 | (file->f_flags & O_TRUNC)) |
2226 | ftrace_clear_events(tr); |
2227 | |
2228 | ret = ftrace_event_open(inode, file, seq_ops); |
2229 | if (ret < 0) |
2230 | trace_array_put(tr); |
2231 | return ret; |
2232 | } |
2233 | |
2234 | static int |
2235 | ftrace_event_set_pid_open(struct inode *inode, struct file *file) |
2236 | { |
2237 | const struct seq_operations *seq_ops = &show_set_pid_seq_ops; |
2238 | struct trace_array *tr = inode->i_private; |
2239 | int ret; |
2240 | |
2241 | ret = tracing_check_open_get_tr(tr); |
2242 | if (ret) |
2243 | return ret; |
2244 | |
2245 | if ((file->f_mode & FMODE_WRITE) && |
2246 | (file->f_flags & O_TRUNC)) |
		ftrace_clear_event_pids(tr, TRACE_PIDS);
2248 | |
2249 | ret = ftrace_event_open(inode, file, seq_ops); |
2250 | if (ret < 0) |
2251 | trace_array_put(tr); |
2252 | return ret; |
2253 | } |
2254 | |
2255 | static int |
2256 | ftrace_event_set_npid_open(struct inode *inode, struct file *file) |
2257 | { |
2258 | const struct seq_operations *seq_ops = &show_set_no_pid_seq_ops; |
2259 | struct trace_array *tr = inode->i_private; |
2260 | int ret; |
2261 | |
2262 | ret = tracing_check_open_get_tr(tr); |
2263 | if (ret) |
2264 | return ret; |
2265 | |
2266 | if ((file->f_mode & FMODE_WRITE) && |
2267 | (file->f_flags & O_TRUNC)) |
		ftrace_clear_event_pids(tr, TRACE_NO_PIDS);
2269 | |
2270 | ret = ftrace_event_open(inode, file, seq_ops); |
2271 | if (ret < 0) |
2272 | trace_array_put(tr); |
2273 | return ret; |
2274 | } |
2275 | |
2276 | static struct event_subsystem * |
2277 | create_new_subsystem(const char *name) |
2278 | { |
2279 | struct event_subsystem *system; |
2280 | |
2281 | /* need to create new entry */ |
	system = kmalloc(sizeof(*system), GFP_KERNEL);
2283 | if (!system) |
2284 | return NULL; |
2285 | |
2286 | system->ref_count = 1; |
2287 | |
2288 | /* Only allocate if dynamic (kprobes and modules) */ |
	system->name = kstrdup_const(name, GFP_KERNEL);
	if (!system->name)
		goto out_free;

	system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
	if (!system->filter)
		goto out_free;

	list_add(&system->list, &event_subsystems);
2298 | |
2299 | return system; |
2300 | |
2301 | out_free: |
	kfree_const(system->name);
	kfree(system);
2304 | return NULL; |
2305 | } |
2306 | |
2307 | static int system_callback(const char *name, umode_t *mode, void **data, |
2308 | const struct file_operations **fops) |
2309 | { |
2310 | if (strcmp(name, "filter" ) == 0) |
2311 | *fops = &ftrace_subsystem_filter_fops; |
2312 | |
2313 | else if (strcmp(name, "enable" ) == 0) |
2314 | *fops = &ftrace_system_enable_fops; |
2315 | |
2316 | else |
2317 | return 0; |
2318 | |
2319 | *mode = TRACE_MODE_WRITE; |
2320 | return 1; |
2321 | } |
2322 | |
2323 | static struct eventfs_inode * |
2324 | event_subsystem_dir(struct trace_array *tr, const char *name, |
2325 | struct trace_event_file *file, struct eventfs_inode *parent) |
2326 | { |
2327 | struct event_subsystem *system, *iter; |
2328 | struct trace_subsystem_dir *dir; |
2329 | struct eventfs_inode *ei; |
2330 | int nr_entries; |
2331 | static struct eventfs_entry system_entries[] = { |
2332 | { |
2333 | .name = "filter" , |
2334 | .callback = system_callback, |
2335 | }, |
2336 | { |
2337 | .name = "enable" , |
2338 | .callback = system_callback, |
2339 | } |
2340 | }; |
2341 | |
2342 | /* First see if we did not already create this dir */ |
2343 | list_for_each_entry(dir, &tr->systems, list) { |
2344 | system = dir->subsystem; |
2345 | if (strcmp(system->name, name) == 0) { |
2346 | dir->nr_events++; |
2347 | file->system = dir; |
2348 | return dir->ei; |
2349 | } |
2350 | } |
2351 | |
2352 | /* Now see if the system itself exists. */ |
2353 | system = NULL; |
2354 | list_for_each_entry(iter, &event_subsystems, list) { |
2355 | if (strcmp(iter->name, name) == 0) { |
2356 | system = iter; |
2357 | break; |
2358 | } |
2359 | } |
2360 | |
	dir = kmalloc(sizeof(*dir), GFP_KERNEL);
2362 | if (!dir) |
2363 | goto out_fail; |
2364 | |
2365 | if (!system) { |
2366 | system = create_new_subsystem(name); |
2367 | if (!system) |
2368 | goto out_free; |
2369 | } else |
2370 | __get_system(system); |
2371 | |
2372 | /* ftrace only has directories no files */ |
2373 | if (strcmp(name, "ftrace" ) == 0) |
2374 | nr_entries = 0; |
2375 | else |
2376 | nr_entries = ARRAY_SIZE(system_entries); |
2377 | |
	ei = eventfs_create_dir(name, parent, system_entries, nr_entries, dir);
	if (IS_ERR(ei)) {
		pr_warn("Failed to create system directory %s\n", name);
2381 | __put_system(system); |
2382 | goto out_free; |
2383 | } |
2384 | |
2385 | dir->ei = ei; |
2386 | dir->tr = tr; |
2387 | dir->ref_count = 1; |
2388 | dir->nr_events = 1; |
2389 | dir->subsystem = system; |
2390 | file->system = dir; |
2391 | |
	list_add(&dir->list, &tr->systems);
2393 | |
2394 | return dir->ei; |
2395 | |
2396 | out_free: |
	kfree(dir);
 out_fail:
	/* Only print this message if failed on memory allocation */
	if (!dir || !system)
		pr_warn("No memory to create event subsystem %s\n", name);
2402 | return NULL; |
2403 | } |
2404 | |
2405 | static int |
2406 | event_define_fields(struct trace_event_call *call) |
2407 | { |
2408 | struct list_head *head; |
2409 | int ret = 0; |
2410 | |
2411 | /* |
2412 | * Other events may have the same class. Only update |
2413 | * the fields if they are not already defined. |
2414 | */ |
	head = trace_get_fields(call);
2416 | if (list_empty(head)) { |
2417 | struct trace_event_fields *field = call->class->fields_array; |
2418 | unsigned int offset = sizeof(struct trace_entry); |
2419 | |
2420 | for (; field->type; field++) { |
2421 | if (field->type == TRACE_FUNCTION_TYPE) { |
2422 | field->define_fields(call); |
2423 | break; |
2424 | } |
2425 | |
2426 | offset = ALIGN(offset, field->align); |
			ret = trace_define_field_ext(call, field->type, field->name,
						     offset, field->size,
						     field->is_signed, field->filter_type,
						     field->len);
			if (WARN_ON_ONCE(ret)) {
				pr_err("error code is %d\n", ret);
2433 | break; |
2434 | } |
2435 | |
2436 | offset += field->size; |
2437 | } |
2438 | } |
2439 | |
2440 | return ret; |
2441 | } |
2442 | |
2443 | static int event_callback(const char *name, umode_t *mode, void **data, |
2444 | const struct file_operations **fops) |
2445 | { |
2446 | struct trace_event_file *file = *data; |
2447 | struct trace_event_call *call = file->event_call; |
2448 | |
2449 | if (strcmp(name, "format" ) == 0) { |
2450 | *mode = TRACE_MODE_READ; |
2451 | *fops = &ftrace_event_format_fops; |
2452 | *data = call; |
2453 | return 1; |
2454 | } |
2455 | |
2456 | /* |
2457 | * Only event directories that can be enabled should have |
2458 | * triggers or filters, with the exception of the "print" |
2459 | * event that can have a "trigger" file. |
2460 | */ |
2461 | if (!(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)) { |
2462 | if (call->class->reg && strcmp(name, "enable" ) == 0) { |
2463 | *mode = TRACE_MODE_WRITE; |
2464 | *fops = &ftrace_enable_fops; |
2465 | return 1; |
2466 | } |
2467 | |
2468 | if (strcmp(name, "filter" ) == 0) { |
2469 | *mode = TRACE_MODE_WRITE; |
2470 | *fops = &ftrace_event_filter_fops; |
2471 | return 1; |
2472 | } |
2473 | } |
2474 | |
2475 | if (!(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE) || |
2476 | strcmp(trace_event_name(call), "print" ) == 0) { |
2477 | if (strcmp(name, "trigger" ) == 0) { |
2478 | *mode = TRACE_MODE_WRITE; |
2479 | *fops = &event_trigger_fops; |
2480 | return 1; |
2481 | } |
2482 | } |
2483 | |
2484 | #ifdef CONFIG_PERF_EVENTS |
2485 | if (call->event.type && call->class->reg && |
2486 | strcmp(name, "id" ) == 0) { |
2487 | *mode = TRACE_MODE_READ; |
2488 | *data = (void *)(long)call->event.type; |
2489 | *fops = &ftrace_event_id_fops; |
2490 | return 1; |
2491 | } |
2492 | #endif |
2493 | |
2494 | #ifdef CONFIG_HIST_TRIGGERS |
2495 | if (strcmp(name, "hist" ) == 0) { |
2496 | *mode = TRACE_MODE_READ; |
2497 | *fops = &event_hist_fops; |
2498 | return 1; |
2499 | } |
2500 | #endif |
2501 | #ifdef CONFIG_HIST_TRIGGERS_DEBUG |
2502 | if (strcmp(name, "hist_debug" ) == 0) { |
2503 | *mode = TRACE_MODE_READ; |
2504 | *fops = &event_hist_debug_fops; |
2505 | return 1; |
2506 | } |
2507 | #endif |
2508 | #ifdef CONFIG_TRACE_EVENT_INJECT |
2509 | if (call->event.type && call->class->reg && |
2510 | strcmp(name, "inject" ) == 0) { |
2511 | *mode = 0200; |
2512 | *fops = &event_inject_fops; |
2513 | return 1; |
2514 | } |
2515 | #endif |
2516 | return 0; |
2517 | } |
2518 | |
2519 | static int |
2520 | event_create_dir(struct eventfs_inode *parent, struct trace_event_file *file) |
2521 | { |
2522 | struct trace_event_call *call = file->event_call; |
2523 | struct trace_array *tr = file->tr; |
2524 | struct eventfs_inode *e_events; |
2525 | struct eventfs_inode *ei; |
2526 | const char *name; |
2527 | int nr_entries; |
2528 | int ret; |
2529 | static struct eventfs_entry event_entries[] = { |
2530 | { |
2531 | .name = "enable" , |
2532 | .callback = event_callback, |
2533 | }, |
2534 | { |
2535 | .name = "filter" , |
2536 | .callback = event_callback, |
2537 | }, |
2538 | { |
2539 | .name = "trigger" , |
2540 | .callback = event_callback, |
2541 | }, |
2542 | { |
2543 | .name = "format" , |
2544 | .callback = event_callback, |
2545 | }, |
2546 | #ifdef CONFIG_PERF_EVENTS |
2547 | { |
2548 | .name = "id" , |
2549 | .callback = event_callback, |
2550 | }, |
2551 | #endif |
2552 | #ifdef CONFIG_HIST_TRIGGERS |
2553 | { |
2554 | .name = "hist" , |
2555 | .callback = event_callback, |
2556 | }, |
2557 | #endif |
2558 | #ifdef CONFIG_HIST_TRIGGERS_DEBUG |
2559 | { |
2560 | .name = "hist_debug" , |
2561 | .callback = event_callback, |
2562 | }, |
2563 | #endif |
2564 | #ifdef CONFIG_TRACE_EVENT_INJECT |
2565 | { |
2566 | .name = "inject" , |
2567 | .callback = event_callback, |
2568 | }, |
2569 | #endif |
2570 | }; |
2571 | |
2572 | /* |
2573 | * If the trace point header did not define TRACE_SYSTEM |
2574 | * then the system would be called "TRACE_SYSTEM". This should |
2575 | * never happen. |
2576 | */ |
2577 | if (WARN_ON_ONCE(strcmp(call->class->system, TRACE_SYSTEM) == 0)) |
2578 | return -ENODEV; |
2579 | |
	e_events = event_subsystem_dir(tr, call->class->system, file, parent);
2581 | if (!e_events) |
2582 | return -ENOMEM; |
2583 | |
2584 | nr_entries = ARRAY_SIZE(event_entries); |
2585 | |
2586 | name = trace_event_name(call); |
	ei = eventfs_create_dir(name, e_events, event_entries, nr_entries, file);
	if (IS_ERR(ei)) {
		pr_warn("Could not create tracefs '%s' directory\n", name);
2590 | return -1; |
2591 | } |
2592 | |
2593 | file->ei = ei; |
2594 | |
2595 | ret = event_define_fields(call); |
2596 | if (ret < 0) { |
2597 | pr_warn("Could not initialize trace point events/%s\n" , name); |
2598 | return ret; |
2599 | } |
2600 | |
2601 | return 0; |
2602 | } |
2603 | |
2604 | static void remove_event_from_tracers(struct trace_event_call *call) |
2605 | { |
2606 | struct trace_event_file *file; |
2607 | struct trace_array *tr; |
2608 | |
2609 | do_for_each_event_file_safe(tr, file) { |
2610 | if (file->event_call != call) |
2611 | continue; |
2612 | |
2613 | remove_event_file_dir(file); |
2614 | /* |
2615 | * The do_for_each_event_file_safe() is |
2616 | * a double loop. After finding the call for this |
2617 | * trace_array, we use break to jump to the next |
2618 | * trace_array. |
2619 | */ |
2620 | break; |
2621 | } while_for_each_event_file(); |
2622 | } |
2623 | |
2624 | static void event_remove(struct trace_event_call *call) |
2625 | { |
2626 | struct trace_array *tr; |
2627 | struct trace_event_file *file; |
2628 | |
2629 | do_for_each_event_file(tr, file) { |
2630 | if (file->event_call != call) |
2631 | continue; |
2632 | |
2633 | if (file->flags & EVENT_FILE_FL_WAS_ENABLED) |
2634 | tr->clear_trace = true; |
2635 | |
		ftrace_event_enable_disable(file, 0);
2637 | /* |
2638 | * The do_for_each_event_file() is |
2639 | * a double loop. After finding the call for this |
2640 | * trace_array, we use break to jump to the next |
2641 | * trace_array. |
2642 | */ |
2643 | break; |
2644 | } while_for_each_event_file(); |
2645 | |
2646 | if (call->event.funcs) |
		__unregister_trace_event(&call->event);
	remove_event_from_tracers(call);
	list_del(&call->list);
2650 | } |
2651 | |
2652 | static int event_init(struct trace_event_call *call) |
2653 | { |
2654 | int ret = 0; |
2655 | const char *name; |
2656 | |
2657 | name = trace_event_name(call); |
2658 | if (WARN_ON(!name)) |
2659 | return -EINVAL; |
2660 | |
2661 | if (call->class->raw_init) { |
2662 | ret = call->class->raw_init(call); |
2663 | if (ret < 0 && ret != -ENOSYS) |
2664 | pr_warn("Could not initialize trace events/%s\n" , name); |
2665 | } |
2666 | |
2667 | return ret; |
2668 | } |
2669 | |
2670 | static int |
2671 | __register_event(struct trace_event_call *call, struct module *mod) |
2672 | { |
2673 | int ret; |
2674 | |
2675 | ret = event_init(call); |
2676 | if (ret < 0) |
2677 | return ret; |
2678 | |
	list_add(&call->list, &ftrace_events);
	if (call->flags & TRACE_EVENT_FL_DYNAMIC)
		atomic_set(&call->refcnt, 0);
2682 | else |
2683 | call->module = mod; |
2684 | |
2685 | return 0; |
2686 | } |
2687 | |
2688 | static char *eval_replace(char *ptr, struct trace_eval_map *map, int len) |
2689 | { |
2690 | int rlen; |
2691 | int elen; |
2692 | |
2693 | /* Find the length of the eval value as a string */ |
	elen = snprintf(ptr, 0, "%ld", map->eval_value);
	/* Make sure there's enough room to replace the string with the value */
	if (len < elen)
		return NULL;

	snprintf(ptr, elen + 1, "%ld", map->eval_value);
2700 | |
2701 | /* Get the rest of the string of ptr */ |
2702 | rlen = strlen(ptr + len); |
2703 | memmove(ptr + elen, ptr + len, rlen); |
2704 | /* Make sure we end the new string */ |
2705 | ptr[elen + rlen] = 0; |
2706 | |
2707 | return ptr + elen; |
2708 | } |
2709 | |
2710 | static void update_event_printk(struct trace_event_call *call, |
2711 | struct trace_eval_map *map) |
2712 | { |
2713 | char *ptr; |
2714 | int quote = 0; |
2715 | int len = strlen(map->eval_string); |
2716 | |
2717 | for (ptr = call->print_fmt; *ptr; ptr++) { |
2718 | if (*ptr == '\\') { |
2719 | ptr++; |
2720 | /* paranoid */ |
2721 | if (!*ptr) |
2722 | break; |
2723 | continue; |
2724 | } |
2725 | if (*ptr == '"') { |
2726 | quote ^= 1; |
2727 | continue; |
2728 | } |
2729 | if (quote) |
2730 | continue; |
		if (isdigit(*ptr)) {
2732 | /* skip numbers */ |
2733 | do { |
2734 | ptr++; |
2735 | /* Check for alpha chars like ULL */ |
2736 | } while (isalnum(*ptr)); |
2737 | if (!*ptr) |
2738 | break; |
2739 | /* |
2740 | * A number must have some kind of delimiter after |
2741 | * it, and we can ignore that too. |
2742 | */ |
2743 | continue; |
2744 | } |
2745 | if (isalpha(*ptr) || *ptr == '_') { |
2746 | if (strncmp(map->eval_string, ptr, len) == 0 && |
2747 | !isalnum(ptr[len]) && ptr[len] != '_') { |
2748 | ptr = eval_replace(ptr, map, len); |
2749 | /* enum/sizeof string smaller than value */ |
2750 | if (WARN_ON_ONCE(!ptr)) |
2751 | return; |
2752 | /* |
2753 | * No need to decrement here, as eval_replace() |
				 * returns the pointer to the character past
				 * the eval, and two evals cannot be placed
2756 | * back to back without something in between. |
2757 | * We can skip that something in between. |
2758 | */ |
2759 | continue; |
2760 | } |
2761 | skip_more: |
2762 | do { |
2763 | ptr++; |
2764 | } while (isalnum(*ptr) || *ptr == '_'); |
2765 | if (!*ptr) |
2766 | break; |
2767 | /* |
2768 | * If what comes after this variable is a '.' or |
2769 | * '->' then we can continue to ignore that string. |
2770 | */ |
2771 | if (*ptr == '.' || (ptr[0] == '-' && ptr[1] == '>')) { |
2772 | ptr += *ptr == '.' ? 1 : 2; |
2773 | if (!*ptr) |
2774 | break; |
2775 | goto skip_more; |
2776 | } |
2777 | /* |
2778 | * Once again, we can skip the delimiter that came |
2779 | * after the string. |
2780 | */ |
2781 | continue; |
2782 | } |
2783 | } |
2784 | } |
2785 | |
2786 | static void add_str_to_module(struct module *module, char *str) |
2787 | { |
2788 | struct module_string *modstr; |
2789 | |
	modstr = kmalloc(sizeof(*modstr), GFP_KERNEL);
2791 | |
2792 | /* |
2793 | * If we failed to allocate memory here, then we'll just |
2794 | * let the str memory leak when the module is removed. |
2795 | * If this fails to allocate, there's worse problems than |
2796 | * a leaked string on module removal. |
2797 | */ |
2798 | if (WARN_ON_ONCE(!modstr)) |
2799 | return; |
2800 | |
2801 | modstr->module = module; |
2802 | modstr->str = str; |
2803 | |
	list_add(&modstr->next, &module_strings);
2805 | } |
2806 | |
2807 | static void update_event_fields(struct trace_event_call *call, |
2808 | struct trace_eval_map *map) |
2809 | { |
2810 | struct ftrace_event_field *field; |
2811 | struct list_head *head; |
2812 | char *ptr; |
2813 | char *str; |
2814 | int len = strlen(map->eval_string); |
2815 | |
2816 | /* Dynamic events should never have field maps */ |
2817 | if (WARN_ON_ONCE(call->flags & TRACE_EVENT_FL_DYNAMIC)) |
2818 | return; |
2819 | |
	head = trace_get_fields(call);
2821 | list_for_each_entry(field, head, link) { |
2822 | ptr = strchr(field->type, '['); |
2823 | if (!ptr) |
2824 | continue; |
2825 | ptr++; |
2826 | |
2827 | if (!isalpha(*ptr) && *ptr != '_') |
2828 | continue; |
2829 | |
2830 | if (strncmp(map->eval_string, ptr, len) != 0) |
2831 | continue; |
2832 | |
		str = kstrdup(field->type, GFP_KERNEL);
2834 | if (WARN_ON_ONCE(!str)) |
2835 | return; |
2836 | ptr = str + (ptr - field->type); |
2837 | ptr = eval_replace(ptr, map, len); |
2838 | /* enum/sizeof string smaller than value */ |
2839 | if (WARN_ON_ONCE(!ptr)) { |
			kfree(str);
2841 | continue; |
2842 | } |
2843 | |
2844 | /* |
2845 | * If the event is part of a module, then we need to free the string |
2846 | * when the module is removed. Otherwise, it will stay allocated |
2847 | * until a reboot. |
2848 | */ |
2849 | if (call->module) |
			add_str_to_module(call->module, str);
2851 | |
2852 | field->type = str; |
2853 | } |
2854 | } |
2855 | |
2856 | void trace_event_eval_update(struct trace_eval_map **map, int len) |
2857 | { |
2858 | struct trace_event_call *call, *p; |
2859 | const char *last_system = NULL; |
2860 | bool first = false; |
2861 | int last_i; |
2862 | int i; |
2863 | |
	down_write(&trace_event_sem);
2865 | list_for_each_entry_safe(call, p, &ftrace_events, list) { |
2866 | /* events are usually grouped together with systems */ |
2867 | if (!last_system || call->class->system != last_system) { |
2868 | first = true; |
2869 | last_i = 0; |
2870 | last_system = call->class->system; |
2871 | } |
2872 | |
2873 | /* |
2874 | * Since calls are grouped by systems, the likelihood that the |
2875 | * next call in the iteration belongs to the same system as the |
2876 | * previous call is high. As an optimization, we skip searching |
2877 | * for a map[] that matches the call's system if the last call |
2878 | * was from the same system. That's what last_i is for. If the |
2879 | * call has the same system as the previous call, then last_i |
2880 | * will be the index of the first map[] that has a matching |
2881 | * system. |
2882 | */ |
2883 | for (i = last_i; i < len; i++) { |
2884 | if (call->class->system == map[i]->system) { |
2885 | /* Save the first system if need be */ |
2886 | if (first) { |
2887 | last_i = i; |
2888 | first = false; |
2889 | } |
				update_event_printk(call, map[i]);
				update_event_fields(call, map[i]);
2892 | } |
2893 | } |
2894 | cond_resched(); |
2895 | } |
	up_write(&trace_event_sem);
2897 | } |
2898 | |
2899 | static struct trace_event_file * |
2900 | trace_create_new_event(struct trace_event_call *call, |
2901 | struct trace_array *tr) |
2902 | { |
2903 | struct trace_pid_list *no_pid_list; |
2904 | struct trace_pid_list *pid_list; |
2905 | struct trace_event_file *file; |
2906 | unsigned int first; |
2907 | |
	file = kmem_cache_alloc(file_cachep, GFP_TRACE);
2909 | if (!file) |
2910 | return NULL; |
2911 | |
2912 | pid_list = rcu_dereference_protected(tr->filtered_pids, |
2913 | lockdep_is_held(&event_mutex)); |
2914 | no_pid_list = rcu_dereference_protected(tr->filtered_no_pids, |
2915 | lockdep_is_held(&event_mutex)); |
2916 | |
	if (!trace_pid_list_first(pid_list, &first) ||
	    !trace_pid_list_first(no_pid_list, &first))
2919 | file->flags |= EVENT_FILE_FL_PID_FILTER; |
2920 | |
2921 | file->event_call = call; |
2922 | file->tr = tr; |
	atomic_set(&file->sm_ref, 0);
	atomic_set(&file->tm_ref, 0);
	INIT_LIST_HEAD(&file->triggers);
	list_add(&file->list, &tr->events);
2927 | event_file_get(file); |
2928 | |
2929 | return file; |
2930 | } |
2931 | |
2932 | #define MAX_BOOT_TRIGGERS 32 |
2933 | |
2934 | static struct boot_triggers { |
2935 | const char *event; |
2936 | char *trigger; |
2937 | } bootup_triggers[MAX_BOOT_TRIGGERS]; |
2938 | |
2939 | static char bootup_trigger_buf[COMMAND_LINE_SIZE]; |
2940 | static int nr_boot_triggers; |
2941 | |
2942 | static __init int setup_trace_triggers(char *str) |
2943 | { |
2944 | char *trigger; |
2945 | char *buf; |
2946 | int i; |
2947 | |
	strscpy(bootup_trigger_buf, str, COMMAND_LINE_SIZE);
	trace_set_ring_buffer_expanded(NULL);
	disable_tracing_selftest("running event triggers");
2951 | |
2952 | buf = bootup_trigger_buf; |
2953 | for (i = 0; i < MAX_BOOT_TRIGGERS; i++) { |
		trigger = strsep(&buf, ",");
		if (!trigger)
			break;
		bootup_triggers[i].event = strsep(&trigger, ".");
2958 | bootup_triggers[i].trigger = trigger; |
2959 | if (!bootup_triggers[i].trigger) |
2960 | break; |
2961 | } |
2962 | |
2963 | nr_boot_triggers = i; |
2964 | return 1; |
2965 | } |
2966 | __setup("trace_trigger=" , setup_trace_triggers); |
2967 | |
2968 | /* Add an event to a trace directory */ |
2969 | static int |
2970 | __trace_add_new_event(struct trace_event_call *call, struct trace_array *tr) |
2971 | { |
2972 | struct trace_event_file *file; |
2973 | |
2974 | file = trace_create_new_event(call, tr); |
2975 | if (!file) |
2976 | return -ENOMEM; |
2977 | |
2978 | if (eventdir_initialized) |
		return event_create_dir(tr->event_dir, file);
2980 | else |
2981 | return event_define_fields(call); |
2982 | } |
2983 | |
2984 | static void trace_early_triggers(struct trace_event_file *file, const char *name) |
2985 | { |
2986 | int ret; |
2987 | int i; |
2988 | |
2989 | for (i = 0; i < nr_boot_triggers; i++) { |
2990 | if (strcmp(name, bootup_triggers[i].event)) |
2991 | continue; |
2992 | mutex_lock(&event_mutex); |
		ret = trigger_process_regex(file, bootup_triggers[i].trigger);
		mutex_unlock(&event_mutex);
		if (ret)
			pr_err("Failed to register trigger '%s' on event %s\n",
2997 | bootup_triggers[i].trigger, |
2998 | bootup_triggers[i].event); |
2999 | } |
3000 | } |
3001 | |
3002 | /* |
3003 | * Just create a descriptor for early init. A descriptor is required |
3004 | * for enabling events at boot. We want to enable events before |
3005 | * the filesystem is initialized. |
3006 | */ |
3007 | static int |
3008 | __trace_early_add_new_event(struct trace_event_call *call, |
3009 | struct trace_array *tr) |
3010 | { |
3011 | struct trace_event_file *file; |
3012 | int ret; |
3013 | |
3014 | file = trace_create_new_event(call, tr); |
3015 | if (!file) |
3016 | return -ENOMEM; |
3017 | |
3018 | ret = event_define_fields(call); |
3019 | if (ret) |
3020 | return ret; |
3021 | |
	trace_early_triggers(file, trace_event_name(call));
3023 | |
3024 | return 0; |
3025 | } |
3026 | |
3027 | struct ftrace_module_file_ops; |
3028 | static void __add_event_to_tracers(struct trace_event_call *call); |
3029 | |
3030 | /* Add an additional event_call dynamically */ |
3031 | int trace_add_event_call(struct trace_event_call *call) |
3032 | { |
3033 | int ret; |
3034 | lockdep_assert_held(&event_mutex); |
3035 | |
3036 | mutex_lock(&trace_types_lock); |
3037 | |
3038 | ret = __register_event(call, NULL); |
3039 | if (ret >= 0) |
3040 | __add_event_to_tracers(call); |
3041 | |
	mutex_unlock(&trace_types_lock);
3043 | return ret; |
3044 | } |
3045 | EXPORT_SYMBOL_GPL(trace_add_event_call); |
3046 | |
3047 | /* |
3048 | * Must be called under locking of trace_types_lock, event_mutex and |
3049 | * trace_event_sem. |
3050 | */ |
3051 | static void __trace_remove_event_call(struct trace_event_call *call) |
3052 | { |
3053 | event_remove(call); |
3054 | trace_destroy_fields(call); |
	free_event_filter(call->filter);
3056 | call->filter = NULL; |
3057 | } |
3058 | |
3059 | static int probe_remove_event_call(struct trace_event_call *call) |
3060 | { |
3061 | struct trace_array *tr; |
3062 | struct trace_event_file *file; |
3063 | |
3064 | #ifdef CONFIG_PERF_EVENTS |
3065 | if (call->perf_refcount) |
3066 | return -EBUSY; |
3067 | #endif |
3068 | do_for_each_event_file(tr, file) { |
3069 | if (file->event_call != call) |
3070 | continue; |
3071 | /* |
		 * We can't rely on the ftrace_event_enable_disable(enable => 0)
		 * that we are going to do; EVENT_FILE_FL_SOFT_MODE can suppress
		 * TRACE_REG_UNREGISTER.
3075 | */ |
3076 | if (file->flags & EVENT_FILE_FL_ENABLED) |
3077 | goto busy; |
3078 | |
3079 | if (file->flags & EVENT_FILE_FL_WAS_ENABLED) |
3080 | tr->clear_trace = true; |
3081 | /* |
		 * The do_for_each_event_file() is
3083 | * a double loop. After finding the call for this |
3084 | * trace_array, we use break to jump to the next |
3085 | * trace_array. |
3086 | */ |
3087 | break; |
3088 | } while_for_each_event_file(); |
3089 | |
3090 | __trace_remove_event_call(call); |
3091 | |
3092 | return 0; |
3093 | busy: |
3094 | /* No need to clear the trace now */ |
3095 | list_for_each_entry(tr, &ftrace_trace_arrays, list) { |
3096 | tr->clear_trace = false; |
3097 | } |
3098 | return -EBUSY; |
3099 | } |
3100 | |
3101 | /* Remove an event_call */ |
3102 | int trace_remove_event_call(struct trace_event_call *call) |
3103 | { |
3104 | int ret; |
3105 | |
3106 | lockdep_assert_held(&event_mutex); |
3107 | |
3108 | mutex_lock(&trace_types_lock); |
	down_write(&trace_event_sem);
	ret = probe_remove_event_call(call);
	up_write(&trace_event_sem);
	mutex_unlock(&trace_types_lock);
3113 | |
3114 | return ret; |
3115 | } |
3116 | EXPORT_SYMBOL_GPL(trace_remove_event_call); |
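
/*
 * A usage sketch (hypothetical caller): dynamic event code brackets both
 * calls with the lock they assert:
 *
 *	mutex_lock(&event_mutex);
 *	ret = trace_add_event_call(call);
 *	...
 *	ret = trace_remove_event_call(call);
 *	mutex_unlock(&event_mutex);
 */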
3117 | |
3118 | #define for_each_event(event, start, end) \ |
3119 | for (event = start; \ |
3120 | (unsigned long)event < (unsigned long)end; \ |
3121 | event++) |
3122 | |
3123 | #ifdef CONFIG_MODULES |
3124 | |
3125 | static void trace_module_add_events(struct module *mod) |
3126 | { |
3127 | struct trace_event_call **call, **start, **end; |
3128 | |
3129 | if (!mod->num_trace_events) |
3130 | return; |
3131 | |
3132 | /* Don't add infrastructure for mods without tracepoints */ |
3133 | if (trace_module_has_bad_taint(mod)) { |
3134 | pr_err("%s: module has bad taint, not creating trace events\n" , |
3135 | mod->name); |
3136 | return; |
3137 | } |
3138 | |
3139 | start = mod->trace_events; |
3140 | end = mod->trace_events + mod->num_trace_events; |
3141 | |
3142 | for_each_event(call, start, end) { |
		__register_event(*call, mod);
		__add_event_to_tracers(*call);
3145 | } |
3146 | } |
3147 | |
3148 | static void trace_module_remove_events(struct module *mod) |
3149 | { |
3150 | struct trace_event_call *call, *p; |
3151 | struct module_string *modstr, *m; |
3152 | |
	down_write(&trace_event_sem);
3154 | list_for_each_entry_safe(call, p, &ftrace_events, list) { |
3155 | if ((call->flags & TRACE_EVENT_FL_DYNAMIC) || !call->module) |
3156 | continue; |
3157 | if (call->module == mod) |
3158 | __trace_remove_event_call(call); |
3159 | } |
	/* Check for any strings allocated for this module */
3161 | list_for_each_entry_safe(modstr, m, &module_strings, next) { |
3162 | if (modstr->module != mod) |
3163 | continue; |
		list_del(&modstr->next);
		kfree(modstr->str);
		kfree(modstr);
	}
	up_write(&trace_event_sem);
3169 | |
3170 | /* |
3171 | * It is safest to reset the ring buffer if the module being unloaded |
3172 | * registered any events that were used. The only worry is if |
3173 | * a new module gets loaded, and takes on the same id as the events |
3174 | * of this module. When printing out the buffer, traced events left |
3175 | * over from this module may be passed to the new module events and |
3176 | * unexpected results may occur. |
3177 | */ |
3178 | tracing_reset_all_online_cpus_unlocked(); |
3179 | } |
3180 | |
3181 | static int trace_module_notify(struct notifier_block *self, |
3182 | unsigned long val, void *data) |
3183 | { |
3184 | struct module *mod = data; |
3185 | |
3186 | mutex_lock(&event_mutex); |
3187 | mutex_lock(&trace_types_lock); |
3188 | switch (val) { |
3189 | case MODULE_STATE_COMING: |
3190 | trace_module_add_events(mod); |
3191 | break; |
3192 | case MODULE_STATE_GOING: |
3193 | trace_module_remove_events(mod); |
3194 | break; |
3195 | } |
	mutex_unlock(&trace_types_lock);
	mutex_unlock(&event_mutex);
3198 | |
3199 | return NOTIFY_OK; |
3200 | } |
3201 | |
3202 | static struct notifier_block trace_module_nb = { |
3203 | .notifier_call = trace_module_notify, |
3204 | .priority = 1, /* higher than trace.c module notify */ |
3205 | }; |
3206 | #endif /* CONFIG_MODULES */ |
3207 | |
3208 | /* Create a new event directory structure for a trace directory. */ |
3209 | static void |
3210 | __trace_add_event_dirs(struct trace_array *tr) |
3211 | { |
3212 | struct trace_event_call *call; |
3213 | int ret; |
3214 | |
3215 | list_for_each_entry(call, &ftrace_events, list) { |
3216 | ret = __trace_add_new_event(call, tr); |
3217 | if (ret < 0) |
3218 | pr_warn("Could not create directory for event %s\n" , |
3219 | trace_event_name(call)); |
3220 | } |
3221 | } |
3222 | |
3223 | /* Returns any file that matches the system and event */ |
3224 | struct trace_event_file * |
3225 | __find_event_file(struct trace_array *tr, const char *system, const char *event) |
3226 | { |
3227 | struct trace_event_file *file; |
3228 | struct trace_event_call *call; |
3229 | const char *name; |
3230 | |
3231 | list_for_each_entry(file, &tr->events, list) { |
3232 | |
3233 | call = file->event_call; |
3234 | name = trace_event_name(call); |
3235 | |
3236 | if (!name || !call->class) |
3237 | continue; |
3238 | |
3239 | if (strcmp(event, name) == 0 && |
3240 | strcmp(system, call->class->system) == 0) |
3241 | return file; |
3242 | } |
3243 | return NULL; |
3244 | } |
3245 | |
3246 | /* Returns valid trace event files that match system and event */ |
3247 | struct trace_event_file * |
3248 | find_event_file(struct trace_array *tr, const char *system, const char *event) |
3249 | { |
3250 | struct trace_event_file *file; |
3251 | |
3252 | file = __find_event_file(tr, system, event); |
3253 | if (!file || !file->event_call->class->reg || |
3254 | file->event_call->flags & TRACE_EVENT_FL_IGNORE_ENABLE) |
3255 | return NULL; |
3256 | |
3257 | return file; |
3258 | } |
3259 | |
3260 | /** |
3261 | * trace_get_event_file - Find and return a trace event file |
3262 | * @instance: The name of the trace instance containing the event |
3263 | * @system: The name of the system containing the event |
3264 | * @event: The name of the event |
3265 | * |
3266 | * Return a trace event file given the trace instance name, trace |
3267 | * system, and trace event name. If the instance name is NULL, it |
3268 | * refers to the top-level trace array. |
3269 | * |
3270 | * This function will look it up and return it if found, after calling |
3271 | * trace_array_get() to prevent the instance from going away, and |
 * incrementing the event's module refcount to prevent it from being
3273 | * removed. |
3274 | * |
3275 | * To release the file, call trace_put_event_file(), which will call |
3276 | * trace_array_put() and decrement the event's module refcount. |
3277 | * |
3278 | * Return: The trace event on success, ERR_PTR otherwise. |
3279 | */ |
3280 | struct trace_event_file *trace_get_event_file(const char *instance, |
3281 | const char *system, |
3282 | const char *event) |
3283 | { |
3284 | struct trace_array *tr = top_trace_array(); |
3285 | struct trace_event_file *file = NULL; |
3286 | int ret = -EINVAL; |
3287 | |
3288 | if (instance) { |
3289 | tr = trace_array_find_get(instance); |
3290 | if (!tr) |
			return ERR_PTR(-ENOENT);
	} else {
		ret = trace_array_get(tr);
		if (ret)
			return ERR_PTR(ret);
3296 | } |
3297 | |
3298 | mutex_lock(&event_mutex); |
3299 | |
3300 | file = find_event_file(tr, system, event); |
3301 | if (!file) { |
3302 | trace_array_put(tr); |
3303 | ret = -EINVAL; |
3304 | goto out; |
3305 | } |
3306 | |
3307 | /* Don't let event modules unload while in use */ |
	ret = trace_event_try_get_ref(file->event_call);
3309 | if (!ret) { |
3310 | trace_array_put(tr); |
3311 | ret = -EBUSY; |
3312 | goto out; |
3313 | } |
3314 | |
3315 | ret = 0; |
3316 | out: |
	mutex_unlock(&event_mutex);

	if (ret)
		file = ERR_PTR(ret);
3321 | |
3322 | return file; |
3323 | } |
3324 | EXPORT_SYMBOL_GPL(trace_get_event_file); |
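
/*
 * A usage sketch (hypothetical caller): pin the sched_switch event file
 * in the top-level instance, then drop both references when done:
 *
 *	struct trace_event_file *file;
 *
 *	file = trace_get_event_file(NULL, "sched", "sched_switch");
 *	if (IS_ERR(file))
 *		return PTR_ERR(file);
 *	...
 *	trace_put_event_file(file);
 */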
3325 | |
3326 | /** |
3327 | * trace_put_event_file - Release a file from trace_get_event_file() |
3328 | * @file: The trace event file |
3329 | * |
3330 | * If a file was retrieved using trace_get_event_file(), this should |
3331 | * be called when it's no longer needed. It will cancel the previous |
3332 | * trace_array_get() called by that function, and decrement the |
3333 | * event's module refcount. |
3334 | */ |
3335 | void trace_put_event_file(struct trace_event_file *file) |
3336 | { |
3337 | mutex_lock(&event_mutex); |
	trace_event_put_ref(file->event_call);
	mutex_unlock(&event_mutex);

	trace_array_put(file->tr);
3342 | } |
3343 | EXPORT_SYMBOL_GPL(trace_put_event_file); |
3344 | |
3345 | #ifdef CONFIG_DYNAMIC_FTRACE |
3346 | |
3347 | /* Avoid typos */ |
3348 | #define ENABLE_EVENT_STR "enable_event" |
3349 | #define DISABLE_EVENT_STR "disable_event" |
3350 | |
3351 | struct event_probe_data { |
3352 | struct trace_event_file *file; |
3353 | unsigned long count; |
3354 | int ref; |
3355 | bool enable; |
3356 | }; |
3357 | |
3358 | static void update_event_probe(struct event_probe_data *data) |
3359 | { |
3360 | if (data->enable) |
		clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &data->file->flags);
	else
		set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &data->file->flags);
3364 | } |
3365 | |
3366 | static void |
3367 | event_enable_probe(unsigned long ip, unsigned long parent_ip, |
3368 | struct trace_array *tr, struct ftrace_probe_ops *ops, |
3369 | void *data) |
3370 | { |
3371 | struct ftrace_func_mapper *mapper = data; |
3372 | struct event_probe_data *edata; |
3373 | void **pdata; |
3374 | |
3375 | pdata = ftrace_func_mapper_find_ip(mapper, ip); |
3376 | if (!pdata || !*pdata) |
3377 | return; |
3378 | |
3379 | edata = *pdata; |
	update_event_probe(edata);
3381 | } |
3382 | |
3383 | static void |
3384 | event_enable_count_probe(unsigned long ip, unsigned long parent_ip, |
3385 | struct trace_array *tr, struct ftrace_probe_ops *ops, |
3386 | void *data) |
3387 | { |
3388 | struct ftrace_func_mapper *mapper = data; |
3389 | struct event_probe_data *edata; |
3390 | void **pdata; |
3391 | |
3392 | pdata = ftrace_func_mapper_find_ip(mapper, ip); |
3393 | if (!pdata || !*pdata) |
3394 | return; |
3395 | |
3396 | edata = *pdata; |
3397 | |
3398 | if (!edata->count) |
3399 | return; |
3400 | |
3401 | /* Skip if the event is in a state we want to switch to */ |
3402 | if (edata->enable == !(edata->file->flags & EVENT_FILE_FL_SOFT_DISABLED)) |
3403 | return; |
3404 | |
3405 | if (edata->count != -1) |
3406 | (edata->count)--; |
3407 | |
	update_event_probe(edata);
3409 | } |
3410 | |
3411 | static int |
3412 | event_enable_print(struct seq_file *m, unsigned long ip, |
3413 | struct ftrace_probe_ops *ops, void *data) |
3414 | { |
3415 | struct ftrace_func_mapper *mapper = data; |
3416 | struct event_probe_data *edata; |
3417 | void **pdata; |
3418 | |
3419 | pdata = ftrace_func_mapper_find_ip(mapper, ip); |
3420 | |
3421 | if (WARN_ON_ONCE(!pdata || !*pdata)) |
3422 | return 0; |
3423 | |
3424 | edata = *pdata; |
3425 | |
3426 | seq_printf(m, fmt: "%ps:" , (void *)ip); |
3427 | |
3428 | seq_printf(m, fmt: "%s:%s:%s" , |
3429 | edata->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR, |
3430 | edata->file->event_call->class->system, |
3431 | trace_event_name(call: edata->file->event_call)); |
3432 | |
3433 | if (edata->count == -1) |
		seq_puts(m, ":unlimited\n");
	else
		seq_printf(m, ":count=%ld\n", edata->count);
3437 | |
3438 | return 0; |
3439 | } |
3440 | |
3441 | static int |
3442 | event_enable_init(struct ftrace_probe_ops *ops, struct trace_array *tr, |
3443 | unsigned long ip, void *init_data, void **data) |
3444 | { |
3445 | struct ftrace_func_mapper *mapper = *data; |
3446 | struct event_probe_data *edata = init_data; |
3447 | int ret; |
3448 | |
3449 | if (!mapper) { |
3450 | mapper = allocate_ftrace_func_mapper(); |
3451 | if (!mapper) |
3452 | return -ENODEV; |
3453 | *data = mapper; |
3454 | } |
3455 | |
	ret = ftrace_func_mapper_add_ip(mapper, ip, edata);
3457 | if (ret < 0) |
3458 | return ret; |
3459 | |
3460 | edata->ref++; |
3461 | |
3462 | return 0; |
3463 | } |
3464 | |
3465 | static int free_probe_data(void *data) |
3466 | { |
3467 | struct event_probe_data *edata = data; |
3468 | |
3469 | edata->ref--; |
3470 | if (!edata->ref) { |
3471 | /* Remove the SOFT_MODE flag */ |
		__ftrace_event_enable_disable(edata->file, 0, 1);
		trace_event_put_ref(edata->file->event_call);
		kfree(edata);
3475 | } |
3476 | return 0; |
3477 | } |
3478 | |
3479 | static void |
3480 | event_enable_free(struct ftrace_probe_ops *ops, struct trace_array *tr, |
3481 | unsigned long ip, void *data) |
3482 | { |
3483 | struct ftrace_func_mapper *mapper = data; |
3484 | struct event_probe_data *edata; |
3485 | |
3486 | if (!ip) { |
3487 | if (!mapper) |
3488 | return; |
		free_ftrace_func_mapper(mapper, free_probe_data);
3490 | return; |
3491 | } |
3492 | |
3493 | edata = ftrace_func_mapper_remove_ip(mapper, ip); |
3494 | |
3495 | if (WARN_ON_ONCE(!edata)) |
3496 | return; |
3497 | |
3498 | if (WARN_ON_ONCE(edata->ref <= 0)) |
3499 | return; |
3500 | |
	free_probe_data(edata);
3502 | } |
3503 | |
3504 | static struct ftrace_probe_ops event_enable_probe_ops = { |
3505 | .func = event_enable_probe, |
3506 | .print = event_enable_print, |
3507 | .init = event_enable_init, |
3508 | .free = event_enable_free, |
3509 | }; |
3510 | |
3511 | static struct ftrace_probe_ops event_enable_count_probe_ops = { |
3512 | .func = event_enable_count_probe, |
3513 | .print = event_enable_print, |
3514 | .init = event_enable_init, |
3515 | .free = event_enable_free, |
3516 | }; |
3517 | |
3518 | static struct ftrace_probe_ops event_disable_probe_ops = { |
3519 | .func = event_enable_probe, |
3520 | .print = event_enable_print, |
3521 | .init = event_enable_init, |
3522 | .free = event_enable_free, |
3523 | }; |
3524 | |
3525 | static struct ftrace_probe_ops event_disable_count_probe_ops = { |
3526 | .func = event_enable_count_probe, |
3527 | .print = event_enable_print, |
3528 | .init = event_enable_init, |
3529 | .free = event_enable_free, |
3530 | }; |
3531 | |
3532 | static int |
3533 | event_enable_func(struct trace_array *tr, struct ftrace_hash *hash, |
3534 | char *glob, char *cmd, char *param, int enabled) |
3535 | { |
3536 | struct trace_event_file *file; |
3537 | struct ftrace_probe_ops *ops; |
3538 | struct event_probe_data *data; |
3539 | const char *system; |
3540 | const char *event; |
3541 | char *number; |
3542 | bool enable; |
3543 | int ret; |
3544 | |
3545 | if (!tr) |
3546 | return -ENODEV; |
3547 | |
3548 | /* hash funcs only work with set_ftrace_filter */ |
3549 | if (!enabled || !param) |
3550 | return -EINVAL; |
3551 | |
3552 | system = strsep(¶m, ":" ); |
3553 | if (!param) |
3554 | return -EINVAL; |
3555 | |
3556 | event = strsep(¶m, ":" ); |
3557 | |
3558 | mutex_lock(&event_mutex); |
3559 | |
3560 | ret = -EINVAL; |
3561 | file = find_event_file(tr, system, event); |
3562 | if (!file) |
3563 | goto out; |
3564 | |
3565 | enable = strcmp(cmd, ENABLE_EVENT_STR) == 0; |
3566 | |
3567 | if (enable) |
3568 | ops = param ? &event_enable_count_probe_ops : &event_enable_probe_ops; |
3569 | else |
3570 | ops = param ? &event_disable_count_probe_ops : &event_disable_probe_ops; |
3571 | |
3572 | if (glob[0] == '!') { |
		ret = unregister_ftrace_function_probe_func(glob+1, tr, ops);
3574 | goto out; |
3575 | } |
3576 | |
3577 | ret = -ENOMEM; |
3578 | |
	data = kzalloc(sizeof(*data), GFP_KERNEL);
3580 | if (!data) |
3581 | goto out; |
3582 | |
3583 | data->enable = enable; |
3584 | data->count = -1; |
3585 | data->file = file; |
3586 | |
3587 | if (!param) |
3588 | goto out_reg; |
3589 | |
3590 | number = strsep(¶m, ":" ); |
3591 | |
3592 | ret = -EINVAL; |
3593 | if (!strlen(number)) |
3594 | goto out_free; |
3595 | |
3596 | /* |
3597 | * We use the callback data field (which is a pointer) |
3598 | * as our counter. |
3599 | */ |
	ret = kstrtoul(number, 0, &data->count);
3601 | if (ret) |
3602 | goto out_free; |
3603 | |
3604 | out_reg: |
3605 | /* Don't let event modules unload while probe registered */ |
	ret = trace_event_try_get_ref(file->event_call);
3607 | if (!ret) { |
3608 | ret = -EBUSY; |
3609 | goto out_free; |
3610 | } |
3611 | |
	ret = __ftrace_event_enable_disable(file, 1, 1);
3613 | if (ret < 0) |
3614 | goto out_put; |
3615 | |
3616 | ret = register_ftrace_function_probe(glob, tr, ops, data); |
3617 | /* |
3618 | * The above returns on success the # of functions enabled, |
3619 | * but if it didn't find any functions it returns zero. |
3620 | * Consider no functions a failure too. |
3621 | */ |
3622 | if (!ret) { |
3623 | ret = -ENOENT; |
3624 | goto out_disable; |
3625 | } else if (ret < 0) |
3626 | goto out_disable; |
3627 | /* Just return zero, not the number of enabled functions */ |
3628 | ret = 0; |
3629 | out: |
3630 | mutex_unlock(lock: &event_mutex); |
3631 | return ret; |
3632 | |
3633 | out_disable: |
3634 | __ftrace_event_enable_disable(file, enable: 0, soft_disable: 1); |
3635 | out_put: |
3636 | trace_event_put_ref(call: file->event_call); |
3637 | out_free: |
3638 | kfree(objp: data); |
3639 | goto out; |
3640 | } |
3641 | |
3642 | static struct ftrace_func_command event_enable_cmd = { |
3643 | .name = ENABLE_EVENT_STR, |
3644 | .func = event_enable_func, |
3645 | }; |
3646 | |
3647 | static struct ftrace_func_command event_disable_cmd = { |
3648 | .name = DISABLE_EVENT_STR, |
3649 | .func = event_enable_func, |
3650 | }; |
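
/*
 * A usage sketch for the two commands above (the function and event
 * names are only examples; paths assume the usual tracefs mount point):
 *
 *   echo 'schedule:enable_event:sched:sched_switch:2' > set_ftrace_filter
 *
 * arms a probe on schedule() that soft-enables the sched:sched_switch
 * event, here limited to the first two hits; omit the trailing count to
 * leave it unlimited. Prefixing the command with '!' removes the probe
 * again, as handled by the '!' branch in event_enable_func():
 *
 *   echo '!schedule:enable_event:sched:sched_switch' > set_ftrace_filter
 */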

static __init int register_event_cmds(void)
{
	int ret;

	ret = register_ftrace_command(&event_enable_cmd);
	if (WARN_ON(ret < 0))
		return ret;
	ret = register_ftrace_command(&event_disable_cmd);
	if (WARN_ON(ret < 0))
		unregister_ftrace_command(&event_enable_cmd);
	return ret;
}
#else
static inline int register_event_cmds(void) { return 0; }
#endif /* CONFIG_DYNAMIC_FTRACE */

/*
 * The top level array and the trace arrays created by boot-time tracing
 * have already had their trace_event_file descriptors created in order
 * to allow for early events to be recorded.
 * This function is called after tracefs has been initialized, and we now
 * have to create the files associated with the events.
 */
static void __trace_early_add_event_dirs(struct trace_array *tr)
{
	struct trace_event_file *file;
	int ret;

	list_for_each_entry(file, &tr->events, list) {
		ret = event_create_dir(tr->event_dir, file);
		if (ret < 0)
			pr_warn("Could not create directory for event %s\n",
				trace_event_name(file->event_call));
	}
}

/*
 * For early boot up, the top trace array and the trace arrays created
 * by boot-time tracing need a list of events that can be enabled. This
 * must be done before the filesystem is set up in order to allow events
 * to be traced early.
 */
void __trace_early_add_events(struct trace_array *tr)
{
	struct trace_event_call *call;
	int ret;

	list_for_each_entry(call, &ftrace_events, list) {
		/* Early boot up should not have any modules loaded */
		if (!(call->flags & TRACE_EVENT_FL_DYNAMIC) &&
		    WARN_ON_ONCE(call->module))
			continue;

		ret = __trace_early_add_new_event(call, tr);
		if (ret < 0)
			pr_warn("Could not create early event %s\n",
				trace_event_name(call));
	}
}

/* Remove the event directory structure for a trace directory. */
static void
__trace_remove_event_dirs(struct trace_array *tr)
{
	struct trace_event_file *file, *next;

	list_for_each_entry_safe(file, next, &tr->events, list)
		remove_event_file_dir(file);
}

static void __add_event_to_tracers(struct trace_event_call *call)
{
	struct trace_array *tr;

	list_for_each_entry(tr, &ftrace_trace_arrays, list)
		__trace_add_new_event(call, tr);
}

extern struct trace_event_call *__start_ftrace_events[];
extern struct trace_event_call *__stop_ftrace_events[];

static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;

static __init int setup_trace_event(char *str)
{
	strscpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
	trace_set_ring_buffer_expanded(NULL);
	disable_tracing_selftest("running event tracing");

	return 1;
}
__setup("trace_event=", setup_trace_event);
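
/*
 * For example, booting with
 *
 *   trace_event=sched:sched_switch,irq:irq_handler_entry
 *
 * (the event names are purely illustrative) starts those events from
 * early boot; the buffer saved here is parsed by early_enable_events()
 * below, using the same system:event format as the set_event file.
 */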

/*
 * Callback supplying the mode, fops and data for the top level "events"
 * directory entries ("enable", "header_page" and "header_event").
 * Returns 1 when @name was handled, 0 otherwise.
 */
static int events_callback(const char *name, umode_t *mode, void **data,
			   const struct file_operations **fops)
{
	if (strcmp(name, "enable") == 0) {
		*mode = TRACE_MODE_WRITE;
		*fops = &ftrace_tr_enable_fops;
		return 1;
	}

	if (strcmp(name, "header_page") == 0)
		*data = ring_buffer_print_page_header;

	else if (strcmp(name, "header_event") == 0)
		*data = ring_buffer_print_entry_header;

	else
		return 0;

	*mode = TRACE_MODE_READ;
	*fops = &ftrace_show_header_fops;
	return 1;
}
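
/*
 * A quick illustration of the entries handled above (the path is just
 * the usual tracefs mount point, shown as an example):
 *
 *   cat /sys/kernel/tracing/events/header_page
 *   echo 1 > /sys/kernel/tracing/events/enable    # enable every event
 */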

/* Expects to have event_mutex held when called */
static int
create_event_toplevel_files(struct dentry *parent, struct trace_array *tr)
{
	struct eventfs_inode *e_events;
	struct dentry *entry;
	int nr_entries;
	static struct eventfs_entry events_entries[] = {
		{
			.name		= "enable",
			.callback	= events_callback,
		},
		{
			.name		= "header_page",
			.callback	= events_callback,
		},
		{
			.name		= "header_event",
			.callback	= events_callback,
		},
	};

	entry = trace_create_file("set_event", TRACE_MODE_WRITE, parent,
				  tr, &ftrace_set_event_fops);
	if (!entry)
		return -ENOMEM;

	nr_entries = ARRAY_SIZE(events_entries);

	e_events = eventfs_create_events_dir("events", parent, events_entries,
					     nr_entries, tr);
	if (IS_ERR(e_events)) {
		pr_warn("Could not create tracefs 'events' directory\n");
		return -ENOMEM;
	}

	/* These are not as crucial, just warn if they are not created */

	trace_create_file("set_event_pid", TRACE_MODE_WRITE, parent,
			  tr, &ftrace_set_event_pid_fops);

	trace_create_file("set_event_notrace_pid",
			  TRACE_MODE_WRITE, parent, tr,
			  &ftrace_set_event_notrace_pid_fops);

	tr->event_dir = e_events;

	return 0;
}
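
/*
 * A usage sketch for the files created above (the event and PID values
 * are only examples):
 *
 *   echo 'sched:sched_wakeup' >> set_event    # enable one more event
 *   echo > set_event                          # clear all enabled events
 *   echo 1234 > set_event_pid                 # only trace this PID
 */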

/**
 * event_trace_add_tracer - add an instance of a trace_array to events
 * @parent: The parent dentry to place the files/directories for events in
 * @tr: The trace array associated with these events
 *
 * When a new instance is created, it needs to set up its events
 * directory, as well as other files associated with events. It also
 * creates the event hierarchy in the @parent/events directory.
 *
 * Returns 0 on success.
 *
 * Must be called with event_mutex held.
 */
int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr)
{
	int ret;

	lockdep_assert_held(&event_mutex);

	ret = create_event_toplevel_files(parent, tr);
	if (ret)
		goto out;

	down_write(&trace_event_sem);
	/* If tr already has an event list, it was initialized at early boot. */
	if (unlikely(!list_empty(&tr->events)))
		__trace_early_add_event_dirs(tr);
	else
		__trace_add_event_dirs(tr);
	up_write(&trace_event_sem);

 out:
	return ret;
}

/*
 * The top trace array already had its trace_event_file descriptors
 * created. Now the files themselves need to be created.
 */
static __init int
early_event_add_tracer(struct dentry *parent, struct trace_array *tr)
{
	int ret;

	mutex_lock(&event_mutex);

	ret = create_event_toplevel_files(parent, tr);
	if (ret)
		goto out_unlock;

	down_write(&trace_event_sem);
	__trace_early_add_event_dirs(tr);
	up_write(&trace_event_sem);

 out_unlock:
	mutex_unlock(&event_mutex);

	return ret;
}

/* Must be called with event_mutex held */
int event_trace_del_tracer(struct trace_array *tr)
{
	lockdep_assert_held(&event_mutex);

	/* Disable any event triggers and associated soft-disabled events */
	clear_event_triggers(tr);

	/* Clear the pid list */
	__ftrace_clear_event_pids(tr, TRACE_PIDS | TRACE_NO_PIDS);

	/* Disable any running events */
	__ftrace_set_clr_event_nolock(tr, NULL, NULL, NULL, 0);

	/* Make sure no more events are being executed */
	tracepoint_synchronize_unregister();

	down_write(&trace_event_sem);
	__trace_remove_event_dirs(tr);
	eventfs_remove_events_dir(tr->event_dir);
	up_write(&trace_event_sem);

	tr->event_dir = NULL;

	return 0;
}

static __init int event_trace_memsetup(void)
{
	field_cachep = KMEM_CACHE(ftrace_event_field, SLAB_PANIC);
	file_cachep = KMEM_CACHE(trace_event_file, SLAB_PANIC);
	return 0;
}

/*
 * Parse @buf as a comma-separated list of system:event strings and
 * enable each one in @tr. When @disable_first is set, each event is
 * disabled before it is re-enabled; syscall events that were "enabled"
 * before pid 1 existed need this to actually start (see
 * event_trace_enable_again() below).
 */
__init void
early_enable_events(struct trace_array *tr, char *buf, bool disable_first)
{
	char *token;
	int ret;

	while (true) {
		token = strsep(&buf, ",");

		if (!token)
			break;

		if (*token) {
			/* Restarting syscalls requires that we stop them first */
			if (disable_first)
				ftrace_set_clr_event(tr, token, 0);

			ret = ftrace_set_clr_event(tr, token, 1);
			if (ret)
				pr_warn("Failed to enable trace event: %s\n", token);
		}

		/* Put the comma back so the buffer can be parsed again later */
		if (buf)
			*(buf - 1) = ',';
	}
}

static __init int event_trace_enable(void)
{
	struct trace_array *tr = top_trace_array();
	struct trace_event_call **iter, *call;
	int ret;

	if (!tr)
		return -ENODEV;

	for_each_event(iter, __start_ftrace_events, __stop_ftrace_events) {
		call = *iter;
		ret = event_init(call);
		if (!ret)
			list_add(&call->list, &ftrace_events);
	}

	register_trigger_cmds();

	/*
	 * We need the top trace array to have a working set of trace
	 * points at early init, before the debug files and directories
	 * are created. Create the file entries now, and attach them
	 * to the actual file dentries later.
	 */
	__trace_early_add_events(tr);

	early_enable_events(tr, bootup_event_buf, false);

	trace_printk_start_comm();

	register_event_cmds();

	return 0;
}

/*
 * event_trace_enable() is called from trace_event_init() first to
 * initialize events and perhaps start any that are on the command
 * line. Unfortunately, some events will not start this early, like the
 * system call tracepoints, which need the
 * %SYSCALL_WORK_SYSCALL_TRACEPOINT flag set on pid 1. But
 * event_trace_enable() runs before pid 1 starts, so that flag is never
 * set and the syscall tracepoints are never reached, even though the
 * events are enabled (and do nothing). Enable the command-line events a
 * second time, disabling them first, once initcalls can run.
 */
static __init int event_trace_enable_again(void)
{
	struct trace_array *tr;

	tr = top_trace_array();
	if (!tr)
		return -ENODEV;

	early_enable_events(tr, bootup_event_buf, true);

	return 0;
}

early_initcall(event_trace_enable_again);

/* Initialize fields that are not related to tracefs */
static __init int event_trace_init_fields(void)
{
	if (trace_define_generic_fields())
		pr_warn("tracing: Failed to allocate generic fields\n");

	if (trace_define_common_fields())
		pr_warn("tracing: Failed to allocate common fields\n");

	return 0;
}

__init int event_trace_init(void)
{
	struct trace_array *tr;
	int ret;

	tr = top_trace_array();
	if (!tr)
		return -ENODEV;

	trace_create_file("available_events", TRACE_MODE_READ,
			  NULL, tr, &ftrace_avail_fops);

	ret = early_event_add_tracer(NULL, tr);
	if (ret)
		return ret;

#ifdef CONFIG_MODULES
	ret = register_module_notifier(&trace_module_nb);
	if (ret)
		pr_warn("Failed to register trace events module notifier\n");
#endif

	eventdir_initialized = true;

	return 0;
}

void __init trace_event_init(void)
{
	event_trace_memsetup();
	init_ftrace_syscalls();
	event_trace_enable();
	event_trace_init_fields();
}

#ifdef CONFIG_EVENT_TRACE_STARTUP_TEST

static DEFINE_SPINLOCK(test_spinlock);
static DEFINE_SPINLOCK(test_spinlock_irq);
static DEFINE_MUTEX(test_mutex);

static __init void test_work(struct work_struct *dummy)
{
	spin_lock(&test_spinlock);
	spin_lock_irq(&test_spinlock_irq);
	udelay(1);
	spin_unlock_irq(&test_spinlock_irq);
	spin_unlock(&test_spinlock);

	mutex_lock(&test_mutex);
	msleep(1);
	mutex_unlock(&test_mutex);
}

static __init int event_test_thread(void *unused)
{
	void *test_malloc;

	test_malloc = kmalloc(1234, GFP_KERNEL);
	if (!test_malloc)
		pr_info("failed to kmalloc\n");

	schedule_on_each_cpu(test_work);

	kfree(test_malloc);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);

	return 0;
}

/*
 * Do various things that may trigger events.
 */
static __init void event_test_stuff(void)
{
	struct task_struct *test_thread;

	test_thread = kthread_run(event_test_thread, NULL, "test-events");
	msleep(1);
	kthread_stop(test_thread);
}

/*
 * For every trace event defined, we will test each trace point
 * separately, then by groups, and finally all trace points together.
 */
static __init void event_trace_self_tests(void)
{
	struct trace_subsystem_dir *dir;
	struct trace_event_file *file;
	struct trace_event_call *call;
	struct event_subsystem *system;
	struct trace_array *tr;
	int ret;

	tr = top_trace_array();
	if (!tr)
		return;

	pr_info("Running tests on trace events:\n");

	list_for_each_entry(file, &tr->events, list) {

		call = file->event_call;

		/* Only test those that have a probe */
		if (!call->class || !call->class->probe)
			continue;

		/*
		 * Testing syscall events here is pretty useless, but
		 * we still do it if configured. It is time consuming,
		 * though; what we really need is a user thread to
		 * perform the syscalls as we test.
		 */
#ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
		if (call->class->system &&
		    strcmp(call->class->system, "syscalls") == 0)
			continue;
#endif

		pr_info("Testing event %s: ", trace_event_name(call));

		/*
		 * If an event is already enabled, someone is using
		 * it and the self test should not be on.
		 */
		if (file->flags & EVENT_FILE_FL_ENABLED) {
			pr_warn("Enabled event during self test!\n");
			WARN_ON_ONCE(1);
			continue;
		}

		ftrace_event_enable_disable(file, 1);
		event_test_stuff();
		ftrace_event_enable_disable(file, 0);

		pr_cont("OK\n");
	}

	/* Now test at the sub system level */

	pr_info("Running tests on trace event systems:\n");

	list_for_each_entry(dir, &tr->systems, list) {

		system = dir->subsystem;

		/* the ftrace system is special, skip it */
		if (strcmp(system->name, "ftrace") == 0)
			continue;

		pr_info("Testing event system %s: ", system->name);

		ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 1);
		if (WARN_ON_ONCE(ret)) {
			pr_warn("error enabling system %s\n",
				system->name);
			continue;
		}

		event_test_stuff();

		ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 0);
		if (WARN_ON_ONCE(ret)) {
			pr_warn("error disabling system %s\n",
				system->name);
			continue;
		}

		pr_cont("OK\n");
	}

	/* Test with all events enabled */

	pr_info("Running tests on all trace events:\n");
	pr_info("Testing all events: ");

	ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 1);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error enabling all events\n");
		return;
	}

	event_test_stuff();

	/* Disable everything again */
	ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 0);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error disabling all events\n");
		return;
	}

	pr_cont("OK\n");
}

#ifdef CONFIG_FUNCTION_TRACER

static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);

static struct trace_event_file event_trace_file __initdata;

static void __init
function_test_events_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct ftrace_regs *regs)
{
	struct trace_buffer *buffer;
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;
	unsigned int trace_ctx;
	long disabled;
	int cpu;

	trace_ctx = tracing_gen_ctx();
	preempt_disable_notrace();
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));

	if (disabled != 1)
		goto out;

	event = trace_event_buffer_lock_reserve(&buffer, &event_trace_file,
						TRACE_FN, sizeof(*entry),
						trace_ctx);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;

	event_trigger_unlock_commit(&event_trace_file, buffer, event,
				    entry, trace_ctx);
 out:
	atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
	preempt_enable_notrace();
}

static struct ftrace_ops trace_ops __initdata = {
	.func = function_test_events_call,
};

static __init void event_trace_self_test_with_function(void)
{
	int ret;

	event_trace_file.tr = top_trace_array();
	if (WARN_ON(!event_trace_file.tr))
		return;

	ret = register_ftrace_function(&trace_ops);
	if (WARN_ON(ret < 0)) {
		pr_info("Failed to enable function tracer for event tests\n");
		return;
	}
	pr_info("Running tests again, along with the function tracer\n");
	event_trace_self_tests();
	unregister_ftrace_function(&trace_ops);
}
#else
static __init void event_trace_self_test_with_function(void)
{
}
#endif /* CONFIG_FUNCTION_TRACER */

static __init int event_trace_self_tests_init(void)
{
	if (!tracing_selftest_disabled) {
		event_trace_self_tests();
		event_trace_self_test_with_function();
	}

	return 0;
}

late_initcall(event_trace_self_tests_init);

#endif /* CONFIG_EVENT_TRACE_STARTUP_TEST */