// SPDX-License-Identifier: GPL-2.0
/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/sched/task.h>
#include <linux/kallsyms.h>
#include <linux/security.h>
#include <linux/seq_file.h>
#include <linux/tracefs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/bsearch.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/sort.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/rcupdate.h>
#include <linux/kprobes.h>

#include <trace/events/sched.h>

#include <asm/sections.h>
#include <asm/setup.h>

#include "ftrace_internal.h"
#include "trace_output.h"
#include "trace_stat.h"

/* Flags that do not get reset */
#define FTRACE_NOCLEAR_FLAGS	(FTRACE_FL_DISABLED | FTRACE_FL_TOUCHED | \
				 FTRACE_FL_MODIFIED)

#define FTRACE_INVALID_FUNCTION		"__ftrace_invalid_address__"

#define FTRACE_WARN_ON(cond)			\
	({					\
		int ___r = cond;		\
		if (WARN_ON(___r))		\
			ftrace_kill();		\
		___r;				\
	})

#define FTRACE_WARN_ON_ONCE(cond)		\
	({					\
		int ___r = cond;		\
		if (WARN_ON_ONCE(___r))		\
			ftrace_kill();		\
		___r;				\
	})
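
/*
 * Illustrative note (not in the original source): both macros evaluate to
 * the tested condition itself, so a caller can shut ftrace down on a failed
 * check and gate an early bail-out in one expression, e.g.:
 *
 *	if (FTRACE_WARN_ON_ONCE(!rec))
 *		return;
 */
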
69 | |
70 | /* hash bits for specific function selection */ |
71 | #define FTRACE_HASH_DEFAULT_BITS 10 |
72 | #define FTRACE_HASH_MAX_BITS 12 |
73 | |
74 | #ifdef CONFIG_DYNAMIC_FTRACE |
75 | #define INIT_OPS_HASH(opsname) \ |
76 | .func_hash = &opsname.local_hash, \ |
77 | .local_hash.regex_lock = __MUTEX_INITIALIZER(opsname.local_hash.regex_lock), |
78 | #else |
79 | #define INIT_OPS_HASH(opsname) |
80 | #endif |
81 | |
82 | enum { |
83 | FTRACE_MODIFY_ENABLE_FL = (1 << 0), |
84 | FTRACE_MODIFY_MAY_SLEEP_FL = (1 << 1), |
85 | }; |
86 | |
87 | struct ftrace_ops ftrace_list_end __read_mostly = { |
88 | .func = ftrace_stub, |
89 | .flags = FTRACE_OPS_FL_STUB, |
90 | INIT_OPS_HASH(ftrace_list_end) |
91 | }; |
92 | |
93 | /* ftrace_enabled is a method to turn ftrace on or off */ |
94 | int ftrace_enabled __read_mostly; |
95 | static int __maybe_unused last_ftrace_enabled; |
96 | |
97 | /* Current function tracing op */ |
98 | struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end; |
99 | /* What to set function_trace_op to */ |
100 | static struct ftrace_ops *set_function_trace_op; |
101 | |
102 | static bool ftrace_pids_enabled(struct ftrace_ops *ops) |
103 | { |
104 | struct trace_array *tr; |
105 | |
106 | if (!(ops->flags & FTRACE_OPS_FL_PID) || !ops->private) |
107 | return false; |
108 | |
109 | tr = ops->private; |
110 | |
111 | return tr->function_pids != NULL || tr->function_no_pids != NULL; |
112 | } |
113 | |
114 | static void ftrace_update_trampoline(struct ftrace_ops *ops); |
115 | |
116 | /* |
117 | * ftrace_disabled is set when an anomaly is discovered. |
118 | * ftrace_disabled is much stronger than ftrace_enabled. |
119 | */ |
120 | static int ftrace_disabled __read_mostly; |
121 | |
122 | DEFINE_MUTEX(ftrace_lock); |
123 | |
124 | struct ftrace_ops __rcu *ftrace_ops_list __read_mostly = &ftrace_list_end; |
125 | ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub; |
126 | struct ftrace_ops global_ops; |
127 | |
128 | /* Defined by vmlinux.lds.h see the comment above arch_ftrace_ops_list_func for details */ |
129 | void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip, |
130 | struct ftrace_ops *op, struct ftrace_regs *fregs); |
131 | |
132 | #ifdef CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS |
133 | /* |
134 | * Stub used to invoke the list ops without requiring a separate trampoline. |
135 | */ |
136 | const struct ftrace_ops ftrace_list_ops = { |
137 | .func = ftrace_ops_list_func, |
138 | .flags = FTRACE_OPS_FL_STUB, |
139 | }; |
140 | |
141 | static void ftrace_ops_nop_func(unsigned long ip, unsigned long parent_ip, |
142 | struct ftrace_ops *op, |
143 | struct ftrace_regs *fregs) |
144 | { |
145 | /* do nothing */ |
146 | } |
147 | |
148 | /* |
149 | * Stub used when a call site is disabled. May be called transiently by threads |
150 | * which have made it into ftrace_caller but haven't yet recovered the ops at |
151 | * the point the call site is disabled. |
152 | */ |
153 | const struct ftrace_ops ftrace_nop_ops = { |
154 | .func = ftrace_ops_nop_func, |
155 | .flags = FTRACE_OPS_FL_STUB, |
156 | }; |
157 | #endif |
158 | |
159 | static inline void ftrace_ops_init(struct ftrace_ops *ops) |
160 | { |
161 | #ifdef CONFIG_DYNAMIC_FTRACE |
162 | if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) { |
163 | mutex_init(&ops->local_hash.regex_lock); |
164 | ops->func_hash = &ops->local_hash; |
165 | ops->flags |= FTRACE_OPS_FL_INITIALIZED; |
166 | } |
167 | #endif |
168 | } |
169 | |
170 | static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip, |
171 | struct ftrace_ops *op, struct ftrace_regs *fregs) |
172 | { |
173 | struct trace_array *tr = op->private; |
174 | int pid; |
175 | |
176 | if (tr) { |
177 | pid = this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid); |
178 | if (pid == FTRACE_PID_IGNORE) |
179 | return; |
180 | if (pid != FTRACE_PID_TRACE && |
181 | pid != current->pid) |
182 | return; |
183 | } |
184 | |
185 | op->saved_func(ip, parent_ip, op, fregs); |
186 | } |
187 | |
188 | static void ftrace_sync_ipi(void *data) |
189 | { |
190 | /* Probably not needed, but do it anyway */ |
191 | smp_rmb(); |
192 | } |
193 | |
194 | static ftrace_func_t ftrace_ops_get_list_func(struct ftrace_ops *ops) |
195 | { |
196 | /* |
197 | * If this is a dynamic or RCU ops, or we force list func, |
198 | * then it needs to call the list anyway. |
199 | */ |
200 | if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_RCU) || |
201 | FTRACE_FORCE_LIST_FUNC) |
202 | return ftrace_ops_list_func; |
203 | |
204 | return ftrace_ops_get_func(ops); |
205 | } |
206 | |
207 | static void update_ftrace_function(void) |
208 | { |
209 | ftrace_func_t func; |
210 | |
211 | /* |
212 | * Prepare the ftrace_ops that the arch callback will use. |
213 | * If there's only one ftrace_ops registered, the ftrace_ops_list |
214 | * will point to the ops we want. |
215 | */ |
216 | set_function_trace_op = rcu_dereference_protected(ftrace_ops_list, |
217 | lockdep_is_held(&ftrace_lock)); |
218 | |
219 | /* If there's no ftrace_ops registered, just call the stub function */ |
220 | if (set_function_trace_op == &ftrace_list_end) { |
221 | func = ftrace_stub; |
222 | |
223 | /* |
224 | * If we are at the end of the list and this ops is |
225 | * recursion safe and not dynamic and the arch supports passing ops, |
226 | * then have the mcount trampoline call the function directly. |
227 | */ |
228 | } else if (rcu_dereference_protected(ftrace_ops_list->next, |
229 | lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) { |
230 | func = ftrace_ops_get_list_func(ops: ftrace_ops_list); |
231 | |
232 | } else { |
233 | /* Just use the default ftrace_ops */ |
234 | set_function_trace_op = &ftrace_list_end; |
235 | func = ftrace_ops_list_func; |
236 | } |
237 | |
238 | update_function_graph_func(); |
239 | |
240 | /* If there's no change, then do nothing more here */ |
241 | if (ftrace_trace_function == func) |
242 | return; |
243 | |
244 | /* |
245 | * If we are using the list function, it doesn't care |
246 | * about the function_trace_ops. |
247 | */ |
248 | if (func == ftrace_ops_list_func) { |
249 | ftrace_trace_function = func; |
250 | /* |
251 | * Don't even bother setting function_trace_ops, |
252 | * it would be racy to do so anyway. |
253 | */ |
254 | return; |
255 | } |
256 | |
257 | #ifndef CONFIG_DYNAMIC_FTRACE |
258 | /* |
259 | * For static tracing, we need to be a bit more careful. |
	 * The function change takes effect immediately. Thus,
	 * we need to coordinate the setting of the function_trace_ops
	 * with the setting of the ftrace_trace_function.
	 *
	 * Set the function to the list ops, which will call the
	 * function we want, albeit indirectly, but it handles the
	 * ftrace_ops and doesn't depend on function_trace_op.
	 */
	ftrace_trace_function = ftrace_ops_list_func;
	/*
	 * Make sure all CPUs see this. Yes this is slow, but static
	 * tracing is slow and nasty to have enabled.
	 */
	synchronize_rcu_tasks_rude();
	/* Now all cpus are using the list ops. */
	function_trace_op = set_function_trace_op;
	/* Make sure the function_trace_op is visible on all CPUs */
	smp_wmb();
	/* Nasty way to force a rmb on all cpus */
	smp_call_function(ftrace_sync_ipi, NULL, 1);
	/* OK, we are all set to update the ftrace_trace_function now! */
#endif /* !CONFIG_DYNAMIC_FTRACE */

	ftrace_trace_function = func;
}

static void add_ftrace_ops(struct ftrace_ops __rcu **list,
			   struct ftrace_ops *ops)
{
	rcu_assign_pointer(ops->next, *list);

	/*
	 * We are entering ops into the list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the list.
	 */
	rcu_assign_pointer(*list, ops);
}

static int remove_ftrace_ops(struct ftrace_ops __rcu **list,
			     struct ftrace_ops *ops)
{
	struct ftrace_ops **p;

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (rcu_dereference_protected(*list,
			lockdep_is_held(&ftrace_lock)) == ops &&
	    rcu_dereference_protected(ops->next,
			lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
		*list = &ftrace_list_end;
		return 0;
	}

	for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops)
		return -1;

	*p = (*p)->next;
	return 0;
}

static void ftrace_update_trampoline(struct ftrace_ops *ops);

int __register_ftrace_function(struct ftrace_ops *ops)
{
	if (ops->flags & FTRACE_OPS_FL_DELETED)
		return -EINVAL;

	if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
		return -EBUSY;

#ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	/*
	 * If the ftrace_ops specifies SAVE_REGS, then it only can be used
	 * if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set.
	 * Setting SAVE_REGS_IF_SUPPORTED makes SAVE_REGS irrelevant.
	 */
	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS &&
	    !(ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED))
		return -EINVAL;

	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED)
		ops->flags |= FTRACE_OPS_FL_SAVE_REGS;
#endif
	if (!ftrace_enabled && (ops->flags & FTRACE_OPS_FL_PERMANENT))
		return -EBUSY;

	if (!is_kernel_core_data((unsigned long)ops))
		ops->flags |= FTRACE_OPS_FL_DYNAMIC;

	add_ftrace_ops(&ftrace_ops_list, ops);

	/* Always save the function, and reset at unregistering */
	ops->saved_func = ops->func;

	if (ftrace_pids_enabled(ops))
		ops->func = ftrace_pid_func;

	ftrace_update_trampoline(ops);

	if (ftrace_enabled)
		update_ftrace_function();

	return 0;
}

int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
		return -EBUSY;

	ret = remove_ftrace_ops(&ftrace_ops_list, ops);

	if (ret < 0)
		return ret;

	if (ftrace_enabled)
		update_ftrace_function();

	ops->func = ops->saved_func;

	return 0;
}

static void ftrace_update_pid_func(void)
{
	struct ftrace_ops *op;

	/* Only do something if we are tracing something */
	if (ftrace_trace_function == ftrace_stub)
		return;

	do_for_each_ftrace_op(op, ftrace_ops_list) {
		if (op->flags & FTRACE_OPS_FL_PID) {
			op->func = ftrace_pids_enabled(op) ?
				ftrace_pid_func : op->saved_func;
			ftrace_update_trampoline(op);
		}
	} while_for_each_ftrace_op(op);

	update_ftrace_function();
}

#ifdef CONFIG_FUNCTION_PROFILER
struct ftrace_profile {
	struct hlist_node		node;
	unsigned long			ip;
	unsigned long			counter;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	unsigned long long		time;
	unsigned long long		time_squared;
#endif
};

struct ftrace_profile_page {
	struct ftrace_profile_page	*next;
	unsigned long			index;
	struct ftrace_profile		records[];
};

struct ftrace_profile_stat {
	atomic_t			disabled;
	struct hlist_head		*hash;
	struct ftrace_profile_page	*pages;
	struct ftrace_profile_page	*start;
	struct tracer_stat		stat;
};

#define PROFILE_RECORDS_SIZE						\
	(PAGE_SIZE - offsetof(struct ftrace_profile_page, records))

#define PROFILES_PER_PAGE					\
	(PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
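
/*
 * Illustrative sizing note (not in the original source): on a 64-bit arch
 * with 4K pages and CONFIG_FUNCTION_GRAPH_TRACER=y, struct ftrace_profile
 * is 48 bytes (16 for the hlist_node plus four 8-byte fields) and the
 * page header is 16 bytes, so PROFILES_PER_PAGE works out to
 * (4096 - 16) / 48 = 85 records per page.
 */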

static int ftrace_profile_enabled __read_mostly;

/* ftrace_profile_lock - synchronize the enable and disable of the profiler */
static DEFINE_MUTEX(ftrace_profile_lock);

static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);

#define FTRACE_PROFILE_HASH_BITS 10
#define FTRACE_PROFILE_HASH_SIZE (1 << FTRACE_PROFILE_HASH_BITS)

static void *
function_stat_next(void *v, int idx)
{
	struct ftrace_profile *rec = v;
	struct ftrace_profile_page *pg;

	pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);

 again:
	if (idx != 0)
		rec++;

	if ((void *)rec >= (void *)&pg->records[pg->index]) {
		pg = pg->next;
		if (!pg)
			return NULL;
		rec = &pg->records[0];
		if (!rec->counter)
			goto again;
	}

	return rec;
}

static void *function_stat_start(struct tracer_stat *trace)
{
	struct ftrace_profile_stat *stat =
		container_of(trace, struct ftrace_profile_stat, stat);

	if (!stat || !stat->start)
		return NULL;

	return function_stat_next(&stat->start->records[0], 0);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* function graph compares on total time */
static int function_stat_cmp(const void *p1, const void *p2)
{
	const struct ftrace_profile *a = p1;
	const struct ftrace_profile *b = p2;

	if (a->time < b->time)
		return -1;
	if (a->time > b->time)
		return 1;
	else
		return 0;
}
#else
/* not function graph compares against hits */
static int function_stat_cmp(const void *p1, const void *p2)
{
	const struct ftrace_profile *a = p1;
	const struct ftrace_profile *b = p2;

	if (a->counter < b->counter)
		return -1;
	if (a->counter > b->counter)
		return 1;
	else
		return 0;
}
#endif
static int function_stat_headers(struct seq_file *m)
{
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	seq_puts(m, "  Function                               "
		    "Hit    Time            Avg             s^2\n"
		    "  --------                               "
		    "---    ----            ---             ---\n");
#else
	seq_puts(m, "  Function                               Hit\n"
		    "  --------                               ---\n");
#endif
	return 0;
}

static int function_stat_show(struct seq_file *m, void *v)
{
	struct ftrace_profile *rec = v;
	char str[KSYM_SYMBOL_LEN];
	int ret = 0;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	static struct trace_seq s;
	unsigned long long avg;
	unsigned long long stddev;
#endif
	mutex_lock(&ftrace_profile_lock);

	/* we raced with function_profile_reset() */
	if (unlikely(rec->counter == 0)) {
		ret = -EBUSY;
		goto out;
	}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	avg = div64_ul(rec->time, rec->counter);
	if (tracing_thresh && (avg < tracing_thresh))
		goto out;
#endif

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
	seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	seq_puts(m, "    ");

	/* Sample standard deviation (s^2) */
	if (rec->counter <= 1)
		stddev = 0;
	else {
		/*
		 * Apply Welford's method:
		 * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2)
		 */
		stddev = rec->counter * rec->time_squared -
			 rec->time * rec->time;

		/*
		 * Divide only 1000 for ns^2 -> us^2 conversion.
		 * trace_print_graph_duration will divide 1000 again.
		 */
		stddev = div64_ul(stddev,
				  rec->counter * (rec->counter - 1) * 1000);
	}

	trace_seq_init(&s);
	trace_print_graph_duration(rec->time, &s);
	trace_seq_puts(&s, "    ");
	trace_print_graph_duration(avg, &s);
	trace_seq_puts(&s, "    ");
	trace_print_graph_duration(stddev, &s);
	trace_print_seq(m, &s);
#endif
	seq_putc(m, '\n');
out:
	mutex_unlock(&ftrace_profile_lock);

	return ret;
}
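
/*
 * Worked example for the s^2 computation above (illustrative, not from the
 * original source): for n = 2 samples of 100ns and 200ns, time = 300 and
 * time_squared = 50000, so n * time_squared - time^2 = 100000 - 90000 =
 * 10000; dividing by n * (n - 1) = 2 yields 5000 ns^2, which matches the
 * textbook sample variance of {100, 200}. The extra factor of 1000 in the
 * code only rescales toward the us^2 display.
 */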

static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
{
	struct ftrace_profile_page *pg;

	pg = stat->pages = stat->start;

	while (pg) {
		memset(pg->records, 0, PROFILE_RECORDS_SIZE);
		pg->index = 0;
		pg = pg->next;
	}

	memset(stat->hash, 0,
	       FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
}

static int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
{
	struct ftrace_profile_page *pg;
	int functions;
	int pages;
	int i;

	/* If we already allocated, do nothing */
	if (stat->pages)
		return 0;

	stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
	if (!stat->pages)
		return -ENOMEM;

#ifdef CONFIG_DYNAMIC_FTRACE
	functions = ftrace_update_tot_cnt;
#else
	/*
	 * We do not know the number of functions that exist because
	 * dynamic tracing is what counts them. With past experience
	 * we have around 20K functions. That should be more than enough.
	 * It is highly unlikely we will execute every function in
	 * the kernel.
	 */
	functions = 20000;
#endif

	pg = stat->start = stat->pages;

	pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);

	for (i = 1; i < pages; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);
		if (!pg->next)
			goto out_free;
		pg = pg->next;
	}

	return 0;

 out_free:
	pg = stat->start;
	while (pg) {
		unsigned long tmp = (unsigned long)pg;

		pg = pg->next;
		free_page(tmp);
	}

	stat->pages = NULL;
	stat->start = NULL;

	return -ENOMEM;
}
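
/*
 * Illustrative sizing (not in the original source): with the 20000
 * function estimate above and roughly 85 profiles per 4K page, this
 * preallocates about 236 pages (a bit under 1MB) for each possible CPU.
 */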

static int ftrace_profile_init_cpu(int cpu)
{
	struct ftrace_profile_stat *stat;
	int size;

	stat = &per_cpu(ftrace_profile_stats, cpu);

	if (stat->hash) {
		/* If the profile is already created, simply reset it */
		ftrace_profile_reset(stat);
		return 0;
	}

	/*
	 * We are profiling all functions, but usually only a few thousand
	 * functions are hit. We'll make a hash of 1024 items.
	 */
	size = FTRACE_PROFILE_HASH_SIZE;

	stat->hash = kcalloc(size, sizeof(struct hlist_head), GFP_KERNEL);

	if (!stat->hash)
		return -ENOMEM;

	/* Preallocate the function profiling pages */
	if (ftrace_profile_pages_init(stat) < 0) {
		kfree(stat->hash);
		stat->hash = NULL;
		return -ENOMEM;
	}

	return 0;
}

static int ftrace_profile_init(void)
{
	int cpu;
	int ret = 0;

	for_each_possible_cpu(cpu) {
		ret = ftrace_profile_init_cpu(cpu);
		if (ret)
			break;
	}

	return ret;
}

/* interrupts must be disabled */
static struct ftrace_profile *
ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
{
	struct ftrace_profile *rec;
	struct hlist_head *hhd;
	unsigned long key;

	key = hash_long(ip, FTRACE_PROFILE_HASH_BITS);
	hhd = &stat->hash[key];

	if (hlist_empty(hhd))
		return NULL;

	hlist_for_each_entry_rcu_notrace(rec, hhd, node) {
		if (rec->ip == ip)
			return rec;
	}

	return NULL;
}

static void ftrace_add_profile(struct ftrace_profile_stat *stat,
			       struct ftrace_profile *rec)
{
	unsigned long key;

	key = hash_long(rec->ip, FTRACE_PROFILE_HASH_BITS);
	hlist_add_head_rcu(&rec->node, &stat->hash[key]);
}

/*
 * The memory is already allocated, this simply finds a new record to use.
 */
static struct ftrace_profile *
ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
{
	struct ftrace_profile *rec = NULL;

	/* prevent recursion (from NMIs) */
	if (atomic_inc_return(&stat->disabled) != 1)
		goto out;

	/*
	 * Try to find the function again since an NMI
	 * could have added it
	 */
	rec = ftrace_find_profiled_func(stat, ip);
	if (rec)
		goto out;

	if (stat->pages->index == PROFILES_PER_PAGE) {
		if (!stat->pages->next)
			goto out;
		stat->pages = stat->pages->next;
	}

	rec = &stat->pages->records[stat->pages->index++];
	rec->ip = ip;
	ftrace_add_profile(stat, rec);

 out:
	atomic_dec(&stat->disabled);

	return rec;
}

static void
function_profile_call(unsigned long ip, unsigned long parent_ip,
		      struct ftrace_ops *ops, struct ftrace_regs *fregs)
{
	struct ftrace_profile_stat *stat;
	struct ftrace_profile *rec;
	unsigned long flags;

	if (!ftrace_profile_enabled)
		return;

	local_irq_save(flags);

	stat = this_cpu_ptr(&ftrace_profile_stats);
	if (!stat->hash || !ftrace_profile_enabled)
		goto out;

	rec = ftrace_find_profiled_func(stat, ip);
	if (!rec) {
		rec = ftrace_profile_alloc(stat, ip);
		if (!rec)
			goto out;
	}

	rec->counter++;
 out:
	local_irq_restore(flags);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static bool fgraph_graph_time = true;

void ftrace_graph_graph_time_control(bool enable)
{
	fgraph_graph_time = enable;
}

static int profile_graph_entry(struct ftrace_graph_ent *trace)
{
	struct ftrace_ret_stack *ret_stack;

	function_profile_call(trace->func, 0, NULL, NULL);

	/* If function graph is shutting down, ret_stack can be NULL */
	if (!current->ret_stack)
		return 0;

	ret_stack = ftrace_graph_get_ret_stack(current, 0);
	if (ret_stack)
		ret_stack->subtime = 0;

	return 1;
}

static void profile_graph_return(struct ftrace_graph_ret *trace)
{
	struct ftrace_ret_stack *ret_stack;
	struct ftrace_profile_stat *stat;
	unsigned long long calltime;
	struct ftrace_profile *rec;
	unsigned long flags;

	local_irq_save(flags);
	stat = this_cpu_ptr(&ftrace_profile_stats);
	if (!stat->hash || !ftrace_profile_enabled)
		goto out;

	/* If the calltime was zero'd ignore it */
	if (!trace->calltime)
		goto out;

	calltime = trace->rettime - trace->calltime;

	if (!fgraph_graph_time) {

		/* Append this call time to the parent time to subtract */
		ret_stack = ftrace_graph_get_ret_stack(current, 1);
		if (ret_stack)
			ret_stack->subtime += calltime;

		ret_stack = ftrace_graph_get_ret_stack(current, 0);
		if (ret_stack && ret_stack->subtime < calltime)
			calltime -= ret_stack->subtime;
		else
			calltime = 0;
	}

	rec = ftrace_find_profiled_func(stat, trace->func);
	if (rec) {
		rec->time += calltime;
		rec->time_squared += calltime * calltime;
	}

 out:
	local_irq_restore(flags);
}
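
/*
 * Worked example for the subtime handling above (illustrative, not in the
 * original source): with fgraph_graph_time disabled, if parent() runs for
 * 10us total and a child() call inside it accounts for 7us, then child's
 * return adds 7us to parent's subtime (depth index 1 at that moment), so
 * parent's own return records only 10us - 7us = 3us of self time.
 */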

static struct fgraph_ops fprofiler_ops = {
	.entryfunc = &profile_graph_entry,
	.retfunc = &profile_graph_return,
};

static int register_ftrace_profiler(void)
{
	return register_ftrace_graph(&fprofiler_ops);
}

static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_graph(&fprofiler_ops);
}
#else
static struct ftrace_ops ftrace_profile_ops __read_mostly = {
	.func		= function_profile_call,
	.flags		= FTRACE_OPS_FL_INITIALIZED,
	INIT_OPS_HASH(ftrace_profile_ops)
};

static int register_ftrace_profiler(void)
{
	return register_ftrace_function(&ftrace_profile_ops);
}

static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_function(&ftrace_profile_ops);
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

static ssize_t
ftrace_profile_write(struct file *filp, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	val = !!val;

	mutex_lock(&ftrace_profile_lock);
	if (ftrace_profile_enabled ^ val) {
		if (val) {
			ret = ftrace_profile_init();
			if (ret < 0) {
				cnt = ret;
				goto out;
			}

			ret = register_ftrace_profiler();
			if (ret < 0) {
				cnt = ret;
				goto out;
			}
			ftrace_profile_enabled = 1;
		} else {
			ftrace_profile_enabled = 0;
			/*
			 * unregister_ftrace_profiler calls stop_machine
			 * so this acts like a synchronize_rcu.
			 */
			unregister_ftrace_profiler();
		}
	}
 out:
	mutex_unlock(&ftrace_profile_lock);

	*ppos += cnt;

	return cnt;
}

static ssize_t
ftrace_profile_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	char buf[64];		/* big enough to hold a number */
	int r;

	r = sprintf(buf, "%u\n", ftrace_profile_enabled);
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static const struct file_operations ftrace_profile_fops = {
	.open		= tracing_open_generic,
	.read		= ftrace_profile_read,
	.write		= ftrace_profile_write,
	.llseek		= default_llseek,
};

/* used to initialize the real stat files */
static struct tracer_stat function_stats __initdata = {
	.name		= "functions",
	.stat_start	= function_stat_start,
	.stat_next	= function_stat_next,
	.stat_cmp	= function_stat_cmp,
	.stat_headers	= function_stat_headers,
	.stat_show	= function_stat_show
};

static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
{
	struct ftrace_profile_stat *stat;
	char *name;
	int ret;
	int cpu;

	for_each_possible_cpu(cpu) {
		stat = &per_cpu(ftrace_profile_stats, cpu);

		name = kasprintf(GFP_KERNEL, "function%d", cpu);
		if (!name) {
			/*
			 * The files created are permanent, if something happens
			 * we still do not free memory.
			 */
			WARN(1,
			     "Could not allocate stat file for cpu %d\n",
			     cpu);
			return;
		}
		stat->stat = function_stats;
		stat->stat.name = name;
		ret = register_stat_tracer(&stat->stat);
		if (ret) {
			WARN(1,
			     "Could not register function stat for cpu %d\n",
			     cpu);
			kfree(name);
			return;
		}
	}

	trace_create_file("function_profile_enabled",
			  TRACE_MODE_WRITE, d_tracer, NULL,
			  &ftrace_profile_fops);
}

#else /* CONFIG_FUNCTION_PROFILER */
static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
{
}
#endif /* CONFIG_FUNCTION_PROFILER */

#ifdef CONFIG_DYNAMIC_FTRACE

static struct ftrace_ops *removed_ops;

/*
 * Set when doing a global update, like enabling all recs or disabling them.
 * It is not set when just updating a single ftrace_ops.
 */
static bool update_all_ops;

#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

struct ftrace_func_probe {
	struct ftrace_probe_ops	*probe_ops;
	struct ftrace_ops	ops;
	struct trace_array	*tr;
	struct list_head	list;
	void			*data;
	int			ref;
};

/*
 * We make these constant because no one should touch them,
 * but they are used as the default "empty hash", to avoid allocating
 * it all the time. These are in a read only section such that if
 * anyone does try to modify it, it will cause an exception.
 */
static const struct hlist_head empty_buckets[1];
static const struct ftrace_hash empty_hash = {
	.buckets = (struct hlist_head *)empty_buckets,
};
#define EMPTY_HASH	((struct ftrace_hash *)&empty_hash)

struct ftrace_ops global_ops = {
	.func				= ftrace_stub,
	.local_hash.notrace_hash	= EMPTY_HASH,
	.local_hash.filter_hash		= EMPTY_HASH,
	INIT_OPS_HASH(global_ops)
	.flags				= FTRACE_OPS_FL_INITIALIZED |
					  FTRACE_OPS_FL_PID,
};

/*
 * Used by the stack unwinder to know about dynamic ftrace trampolines.
 */
struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr)
{
	struct ftrace_ops *op = NULL;

	/*
	 * Some of the ops may be dynamically allocated,
	 * they are freed after a synchronize_rcu().
	 */
	preempt_disable_notrace();

	do_for_each_ftrace_op(op, ftrace_ops_list) {
		/*
		 * This is to check for dynamically allocated trampolines.
		 * Trampolines that are in kernel text will have
		 * core_kernel_text() return true.
		 */
		if (op->trampoline && op->trampoline_size)
			if (addr >= op->trampoline &&
			    addr < op->trampoline + op->trampoline_size) {
				preempt_enable_notrace();
				return op;
			}
	} while_for_each_ftrace_op(op);
	preempt_enable_notrace();

	return NULL;
}

/*
 * This is used by __kernel_text_address() to return true if the
 * address is on a dynamically allocated trampoline that would
 * not return true for either core_kernel_text() or
 * is_module_text_address().
 */
bool is_ftrace_trampoline(unsigned long addr)
{
	return ftrace_ops_trampoline(addr) != NULL;
}

struct ftrace_page {
	struct ftrace_page	*next;
	struct dyn_ftrace	*records;
	int			index;
	int			order;
};

#define ENTRY_SIZE sizeof(struct dyn_ftrace)
#define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)
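
/*
 * Illustrative arithmetic (not in the original source): assuming struct
 * dyn_ftrace is just ip + flags (16 bytes on a 64-bit arch) and 4K pages,
 * ENTRIES_PER_PAGE is 256; the 'order' field above exists because the
 * records may actually live in a higher-order allocation.
 */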

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static __always_inline unsigned long
ftrace_hash_key(struct ftrace_hash *hash, unsigned long ip)
{
	if (hash->size_bits > 0)
		return hash_long(ip, hash->size_bits);

	return 0;
}
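
/*
 * Illustrative example (not in the original source): with
 * FTRACE_HASH_DEFAULT_BITS (10), hash_long() folds an ip down to a bucket
 * index in [0, 1023]. A size_bits of 0 corresponds to the single-bucket
 * EMPTY_HASH case, hence the unconditional 0.
 */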

/* Only use this function if ftrace_hash_empty() has already been tested */
static __always_inline struct ftrace_func_entry *
__ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
{
	unsigned long key;
	struct ftrace_func_entry *entry;
	struct hlist_head *hhd;

	key = ftrace_hash_key(hash, ip);
	hhd = &hash->buckets[key];

	hlist_for_each_entry_rcu_notrace(entry, hhd, hlist) {
		if (entry->ip == ip)
			return entry;
	}
	return NULL;
}

/**
 * ftrace_lookup_ip - Test to see if an ip exists in an ftrace_hash
 * @hash: The hash to look at
 * @ip: The instruction pointer to test
 *
 * Search a given @hash to see if a given instruction pointer (@ip)
 * exists in it.
 *
 * Returns the entry that holds the @ip if found. NULL otherwise.
 */
struct ftrace_func_entry *
ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
{
	if (ftrace_hash_empty(hash))
		return NULL;

	return __ftrace_lookup_ip(hash, ip);
}

static void __add_hash_entry(struct ftrace_hash *hash,
			     struct ftrace_func_entry *entry)
{
	struct hlist_head *hhd;
	unsigned long key;

	key = ftrace_hash_key(hash, entry->ip);
	hhd = &hash->buckets[key];
	hlist_add_head(&entry->hlist, hhd);
	hash->count++;
}

static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
{
	struct ftrace_func_entry *entry;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->ip = ip;
	__add_hash_entry(hash, entry);

	return 0;
}

static void
free_hash_entry(struct ftrace_hash *hash,
		struct ftrace_func_entry *entry)
{
	hlist_del(&entry->hlist);
	kfree(entry);
	hash->count--;
}

static void
remove_hash_entry(struct ftrace_hash *hash,
		  struct ftrace_func_entry *entry)
{
	hlist_del_rcu(&entry->hlist);
	hash->count--;
}

static void ftrace_hash_clear(struct ftrace_hash *hash)
{
	struct hlist_head *hhd;
	struct hlist_node *tn;
	struct ftrace_func_entry *entry;
	int size = 1 << hash->size_bits;
	int i;

	if (!hash->count)
		return;

	for (i = 0; i < size; i++) {
		hhd = &hash->buckets[i];
		hlist_for_each_entry_safe(entry, tn, hhd, hlist)
			free_hash_entry(hash, entry);
	}
	FTRACE_WARN_ON(hash->count);
}

static void free_ftrace_mod(struct ftrace_mod_load *ftrace_mod)
{
	list_del(&ftrace_mod->list);
	kfree(ftrace_mod->module);
	kfree(ftrace_mod->func);
	kfree(ftrace_mod);
}

static void clear_ftrace_mod_list(struct list_head *head)
{
	struct ftrace_mod_load *p, *n;

	/* stack tracer isn't supported yet */
	if (!head)
		return;

	mutex_lock(&ftrace_lock);
	list_for_each_entry_safe(p, n, head, list)
		free_ftrace_mod(p);
	mutex_unlock(&ftrace_lock);
}

static void free_ftrace_hash(struct ftrace_hash *hash)
{
	if (!hash || hash == EMPTY_HASH)
		return;
	ftrace_hash_clear(hash);
	kfree(hash->buckets);
	kfree(hash);
}

static void __free_ftrace_hash_rcu(struct rcu_head *rcu)
{
	struct ftrace_hash *hash;

	hash = container_of(rcu, struct ftrace_hash, rcu);
	free_ftrace_hash(hash);
}

static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
{
	if (!hash || hash == EMPTY_HASH)
		return;
	call_rcu(&hash->rcu, __free_ftrace_hash_rcu);
}

/**
 * ftrace_free_filter - remove all filters for an ftrace_ops
 * @ops - the ops to remove the filters from
 */
void ftrace_free_filter(struct ftrace_ops *ops)
{
	ftrace_ops_init(ops);
	free_ftrace_hash(ops->func_hash->filter_hash);
	free_ftrace_hash(ops->func_hash->notrace_hash);
}
EXPORT_SYMBOL_GPL(ftrace_free_filter);

static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
{
	struct ftrace_hash *hash;
	int size;

	hash = kzalloc(sizeof(*hash), GFP_KERNEL);
	if (!hash)
		return NULL;

	size = 1 << size_bits;
	hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL);

	if (!hash->buckets) {
		kfree(hash);
		return NULL;
	}

	hash->size_bits = size_bits;

	return hash;
}


static int ftrace_add_mod(struct trace_array *tr,
			  const char *func, const char *module,
			  int enable)
{
	struct ftrace_mod_load *ftrace_mod;
	struct list_head *mod_head = enable ? &tr->mod_trace : &tr->mod_notrace;

	ftrace_mod = kzalloc(sizeof(*ftrace_mod), GFP_KERNEL);
	if (!ftrace_mod)
		return -ENOMEM;

	INIT_LIST_HEAD(&ftrace_mod->list);
	ftrace_mod->func = kstrdup(func, GFP_KERNEL);
	ftrace_mod->module = kstrdup(module, GFP_KERNEL);
	ftrace_mod->enable = enable;

	if (!ftrace_mod->func || !ftrace_mod->module)
		goto out_free;

	list_add(&ftrace_mod->list, mod_head);

	return 0;

 out_free:
	free_ftrace_mod(ftrace_mod);

	return -ENOMEM;
}

static struct ftrace_hash *
alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
{
	struct ftrace_func_entry *entry;
	struct ftrace_hash *new_hash;
	int size;
	int ret;
	int i;

	new_hash = alloc_ftrace_hash(size_bits);
	if (!new_hash)
		return NULL;

	if (hash)
		new_hash->flags = hash->flags;

	/* Empty hash? */
	if (ftrace_hash_empty(hash))
		return new_hash;

	size = 1 << hash->size_bits;
	for (i = 0; i < size; i++) {
		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
			ret = add_hash_entry(new_hash, entry->ip);
			if (ret < 0)
				goto free_hash;
		}
	}

	FTRACE_WARN_ON(new_hash->count != hash->count);

	return new_hash;

 free_hash:
	free_ftrace_hash(new_hash);
	return NULL;
}

static void
ftrace_hash_rec_disable_modify(struct ftrace_ops *ops, int filter_hash);
static void
ftrace_hash_rec_enable_modify(struct ftrace_ops *ops, int filter_hash);

static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
				       struct ftrace_hash *new_hash);

static struct ftrace_hash *dup_hash(struct ftrace_hash *src, int size)
{
	struct ftrace_func_entry *entry;
	struct ftrace_hash *new_hash;
	struct hlist_head *hhd;
	struct hlist_node *tn;
	int bits = 0;
	int i;

	/*
	 * Use around half the size (max bit of it), but
	 * a minimum of 2 is fine (as size of 0 or 1 both give 1 for bits).
	 */
	bits = fls(size / 2);

	/* Don't allocate too much */
	if (bits > FTRACE_HASH_MAX_BITS)
		bits = FTRACE_HASH_MAX_BITS;

	new_hash = alloc_ftrace_hash(bits);
	if (!new_hash)
		return NULL;

	new_hash->flags = src->flags;

	size = 1 << src->size_bits;
	for (i = 0; i < size; i++) {
		hhd = &src->buckets[i];
		hlist_for_each_entry_safe(entry, tn, hhd, hlist) {
			remove_hash_entry(src, entry);
			__add_hash_entry(new_hash, entry);
		}
	}
	return new_hash;
}
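
/*
 * Illustrative example of the sizing logic above (not in the original
 * source): for a src hash with count == 100, fls(100 / 2) == fls(50) == 6,
 * so the new hash gets 2^6 == 64 buckets (about two entries per bucket),
 * and the FTRACE_HASH_MAX_BITS clamp caps any hash at 2^12 buckets.
 */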

static struct ftrace_hash *
__ftrace_hash_move(struct ftrace_hash *src)
{
	int size = src->count;

	/*
	 * If the new source is empty, just return the empty_hash.
	 */
	if (ftrace_hash_empty(src))
		return EMPTY_HASH;

	return dup_hash(src, size);
}

static int
ftrace_hash_move(struct ftrace_ops *ops, int enable,
		 struct ftrace_hash **dst, struct ftrace_hash *src)
{
	struct ftrace_hash *new_hash;
	int ret;

	/* Reject setting notrace hash on IPMODIFY ftrace_ops */
	if (ops->flags & FTRACE_OPS_FL_IPMODIFY && !enable)
		return -EINVAL;

	new_hash = __ftrace_hash_move(src);
	if (!new_hash)
		return -ENOMEM;

	/* Make sure this can be applied if it is IPMODIFY ftrace_ops */
	if (enable) {
		/* IPMODIFY should be updated only when filter_hash updating */
		ret = ftrace_hash_ipmodify_update(ops, new_hash);
		if (ret < 0) {
			free_ftrace_hash(new_hash);
			return ret;
		}
	}

	/*
	 * Remove the current set, update the hash and add
	 * them back.
	 */
	ftrace_hash_rec_disable_modify(ops, enable);

	rcu_assign_pointer(*dst, new_hash);

	ftrace_hash_rec_enable_modify(ops, enable);

	return 0;
}

static bool hash_contains_ip(unsigned long ip,
			     struct ftrace_ops_hash *hash)
{
	/*
	 * The function record is a match if it exists in the filter
	 * hash and not in the notrace hash. Note, an empty hash is
	 * considered a match for the filter hash, but an empty
	 * notrace hash is considered not in the notrace hash.
	 */
	return (ftrace_hash_empty(hash->filter_hash) ||
		__ftrace_lookup_ip(hash->filter_hash, ip)) &&
		(ftrace_hash_empty(hash->notrace_hash) ||
		 !__ftrace_lookup_ip(hash->notrace_hash, ip));
}
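
/*
 * Truth table for the test above (illustrative, not in the original
 * source), with F = "ip in filter_hash" and N = "ip in notrace_hash":
 *
 *	filter_hash empty,  !N	-> match
 *	F,                  !N	-> match
 *	!F (filter non-empty)	-> no match
 *	N			-> no match
 */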

/*
 * Test the hashes for this ops to see if we want to call
 * the ops->func or not.
 *
 * It's a match if the ip is in the ops->filter_hash or
 * the filter_hash does not exist or is empty,
 *  AND
 * the ip is not in the ops->notrace_hash.
 *
 * This needs to be called with preemption disabled as
 * the hashes are freed with call_rcu().
 */
int
ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
{
	struct ftrace_ops_hash hash;
	int ret;

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	/*
	 * There's a small race when adding ops that the ftrace handler
	 * that wants regs, may be called without them. We can not
	 * allow that handler to be called if regs is NULL.
	 */
	if (regs == NULL && (ops->flags & FTRACE_OPS_FL_SAVE_REGS))
		return 0;
#endif

	rcu_assign_pointer(hash.filter_hash, ops->func_hash->filter_hash);
	rcu_assign_pointer(hash.notrace_hash, ops->func_hash->notrace_hash);

	if (hash_contains_ip(ip, &hash))
		ret = 1;
	else
		ret = 0;

	return ret;
}

/*
 * This is a double for. Do not use 'break' to break out of the loop,
 * you must use a goto.
 */
#define do_for_each_ftrace_rec(pg, rec)					\
	for (pg = ftrace_pages_start; pg; pg = pg->next) {		\
		int _____i;						\
		for (_____i = 0; _____i < pg->index; _____i++) {	\
			rec = &pg->records[_____i];

#define while_for_each_ftrace_rec()		\
		}				\
	}
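
/*
 * Illustrative usage (mirroring __ftrace_hash_rec_update() further down;
 * not in the original source):
 *
 *	do_for_each_ftrace_rec(pg, rec) {
 *		if (skip_record(rec))
 *			continue;	// fine: continues the inner loop
 *		...			// but never 'break' -- use a goto
 *	} while_for_each_ftrace_rec();
 */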


static int ftrace_cmp_recs(const void *a, const void *b)
{
	const struct dyn_ftrace *key = a;
	const struct dyn_ftrace *rec = b;

	if (key->flags < rec->ip)
		return -1;
	if (key->ip >= rec->ip + MCOUNT_INSN_SIZE)
		return 1;
	return 0;
}

static struct dyn_ftrace *lookup_rec(unsigned long start, unsigned long end)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec = NULL;
	struct dyn_ftrace key;

	key.ip = start;
	key.flags = end;	/* overload flags, as it is unsigned long */

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		if (pg->index == 0 ||
		    end < pg->records[0].ip ||
		    start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
			continue;
		rec = bsearch(&key, pg->records, pg->index,
			      sizeof(struct dyn_ftrace),
			      ftrace_cmp_recs);
		if (rec)
			break;
	}
	return rec;
}

/**
 * ftrace_location_range - return the first address of a traced location
 *	if it touches the given ip range
 * @start: start of range to search.
 * @end: end of range to search (inclusive). @end points to the last byte
 *	to check.
 *
 * Returns rec->ip if the related ftrace location is at least partly within
 * the given address range. That is, the first address of the instruction
 * that is either a NOP or call to the function tracer. It checks the ftrace
 * internal tables to determine if the address belongs or not.
 */
unsigned long ftrace_location_range(unsigned long start, unsigned long end)
{
	struct dyn_ftrace *rec;

	rec = lookup_rec(start, end);
	if (rec)
		return rec->ip;

	return 0;
}

/**
 * ftrace_location - return the ftrace location
 * @ip: the instruction pointer to check
 *
 * If @ip matches the ftrace location, return @ip.
 * If @ip matches sym+0, return sym's ftrace location.
 * Otherwise, return 0.
 */
unsigned long ftrace_location(unsigned long ip)
{
	struct dyn_ftrace *rec;
	unsigned long offset;
	unsigned long size;

	rec = lookup_rec(ip, ip);
	if (!rec) {
		if (!kallsyms_lookup_size_offset(ip, &size, &offset))
			goto out;

		/* map sym+0 to __fentry__ */
		if (!offset)
			rec = lookup_rec(ip, ip + size - 1);
	}

	if (rec)
		return rec->ip;

out:
	return 0;
}
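
/*
 * Illustrative example (not in the original source): on an arch where the
 * fentry call is the first instruction of a function, ftrace_location(sym+0)
 * succeeds via the direct lookup_rec(ip, ip). Where the mcount call sits a
 * few bytes into the function, the kallsyms fallback above finds it by
 * searching [sym, sym + size - 1].
 */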

/**
 * ftrace_text_reserved - return true if range contains an ftrace location
 * @start: start of range to search
 * @end: end of range to search (inclusive). @end points to the last byte to check.
 *
 * Returns 1 if the range from @start to @end contains an ftrace location.
 * That is, the instruction that is either a NOP or call to
 * the function tracer. It checks the ftrace internal tables to
 * determine if the address belongs or not.
 */
int ftrace_text_reserved(const void *start, const void *end)
{
	unsigned long ret;

	ret = ftrace_location_range((unsigned long)start,
				    (unsigned long)end);

	return (int)!!ret;
}

/* Test if ops registered to this rec needs regs */
static bool test_rec_ops_needs_regs(struct dyn_ftrace *rec)
{
	struct ftrace_ops *ops;
	bool keep_regs = false;

	for (ops = ftrace_ops_list;
	     ops != &ftrace_list_end; ops = ops->next) {
		/* pass rec in as regs to have non-NULL val */
		if (ftrace_ops_test(ops, rec->ip, rec)) {
			if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
				keep_regs = true;
				break;
			}
		}
	}

	return keep_regs;
}

static struct ftrace_ops *
ftrace_find_tramp_ops_any(struct dyn_ftrace *rec);
static struct ftrace_ops *
ftrace_find_tramp_ops_any_other(struct dyn_ftrace *rec, struct ftrace_ops *op_exclude);
static struct ftrace_ops *
ftrace_find_tramp_ops_next(struct dyn_ftrace *rec, struct ftrace_ops *ops);

static bool skip_record(struct dyn_ftrace *rec)
{
	/*
	 * At boot up, weak functions are set to disable. Function tracing
	 * can be enabled before they are, and they still need to be disabled now.
	 * If the record is disabled, still continue if it is marked as already
	 * enabled (this is needed to keep the accounting working).
	 */
	return rec->flags & FTRACE_FL_DISABLED &&
		!(rec->flags & FTRACE_FL_ENABLED);
}

static bool __ftrace_hash_rec_update(struct ftrace_ops *ops,
				     int filter_hash,
				     bool inc)
{
	struct ftrace_hash *hash;
	struct ftrace_hash *other_hash;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	bool update = false;
	int count = 0;
	int all = false;

	/* Only update if the ops has been registered */
	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
		return false;

	/*
	 * In the filter_hash case:
	 *   If the count is zero, we update all records.
	 *   Otherwise we just update the items in the hash.
	 *
	 * In the notrace_hash case:
	 *   We enable the update in the hash.
	 *   As disabling notrace means enabling the tracing,
	 *   and enabling notrace means disabling, the inc variable
	 *   gets inverted.
	 */
1724 | if (filter_hash) { |
1725 | hash = ops->func_hash->filter_hash; |
1726 | other_hash = ops->func_hash->notrace_hash; |
1727 | if (ftrace_hash_empty(hash)) |
1728 | all = true; |
1729 | } else { |
1730 | inc = !inc; |
1731 | hash = ops->func_hash->notrace_hash; |
1732 | other_hash = ops->func_hash->filter_hash; |
1733 | /* |
1734 | * If the notrace hash has no items, |
1735 | * then there's nothing to do. |
1736 | */ |
1737 | if (ftrace_hash_empty(hash)) |
1738 | return false; |
1739 | } |
1740 | |
1741 | do_for_each_ftrace_rec(pg, rec) { |
1742 | int in_other_hash = 0; |
1743 | int in_hash = 0; |
1744 | int match = 0; |
1745 | |
1746 | if (skip_record(rec)) |
1747 | continue; |
1748 | |
1749 | if (all) { |
1750 | /* |
1751 | * Only the filter_hash affects all records. |
1752 | * Update if the record is not in the notrace hash. |
1753 | */ |
1754 | if (!other_hash || !ftrace_lookup_ip(hash: other_hash, ip: rec->ip)) |
1755 | match = 1; |
1756 | } else { |
1757 | in_hash = !!ftrace_lookup_ip(hash, ip: rec->ip); |
1758 | in_other_hash = !!ftrace_lookup_ip(hash: other_hash, ip: rec->ip); |
1759 | |
1760 | /* |
1761 | * If filter_hash is set, we want to match all functions |
1762 | * that are in the hash but not in the other hash. |
1763 | * |
1764 | * If filter_hash is not set, then we are decrementing. |
1765 | * That means we match anything that is in the hash |
1766 | * and also in the other_hash. That is, we need to turn |
1767 | * off functions in the other hash because they are disabled |
1768 | * by this hash. |
1769 | */ |
1770 | if (filter_hash && in_hash && !in_other_hash) |
1771 | match = 1; |
1772 | else if (!filter_hash && in_hash && |
1773 | (in_other_hash || ftrace_hash_empty(hash: other_hash))) |
1774 | match = 1; |
1775 | } |
1776 | if (!match) |
1777 | continue; |
1778 | |
1779 | if (inc) { |
1780 | rec->flags++; |
1781 | if (FTRACE_WARN_ON(ftrace_rec_count(rec) == FTRACE_REF_MAX)) |
1782 | return false; |
1783 | |
1784 | if (ops->flags & FTRACE_OPS_FL_DIRECT) |
1785 | rec->flags |= FTRACE_FL_DIRECT; |
1786 | |
1787 | /* |
1788 | * If there's only a single callback registered to a |
1789 | * function, and the ops has a trampoline registered |
1790 | * for it, then we can call it directly. |
1791 | */ |
1792 | if (ftrace_rec_count(rec) == 1 && ops->trampoline) |
1793 | rec->flags |= FTRACE_FL_TRAMP; |
1794 | else |
1795 | /* |
1796 | * If we are adding another function callback |
1797 | * to this function, and the previous had a |
1798 | * custom trampoline in use, then we need to go |
1799 | * back to the default trampoline. |
1800 | */ |
1801 | rec->flags &= ~FTRACE_FL_TRAMP; |
1802 | |
1803 | /* |
1804 | * If any ops wants regs saved for this function |
1805 | * then all ops will get saved regs. |
1806 | */ |
1807 | if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) |
1808 | rec->flags |= FTRACE_FL_REGS; |
1809 | } else { |
1810 | if (FTRACE_WARN_ON(ftrace_rec_count(rec) == 0)) |
1811 | return false; |
1812 | rec->flags--; |
1813 | |
1814 | /* |
1815 | * Only the internal direct_ops should have the |
1816 | * DIRECT flag set. Thus, if it is removing a |
1817 | * function, then that function should no longer |
1818 | * be direct. |
1819 | */ |
1820 | if (ops->flags & FTRACE_OPS_FL_DIRECT) |
1821 | rec->flags &= ~FTRACE_FL_DIRECT; |
1822 | |
1823 | /* |
1824 | * If the rec had REGS enabled and the ops that is |
1825 | * being removed had REGS set, then see if there is |
1826 | * still any ops for this record that wants regs. |
1827 | * If not, we can stop recording them. |
1828 | */ |
1829 | if (ftrace_rec_count(rec) > 0 && |
1830 | rec->flags & FTRACE_FL_REGS && |
1831 | ops->flags & FTRACE_OPS_FL_SAVE_REGS) { |
1832 | if (!test_rec_ops_needs_regs(rec)) |
1833 | rec->flags &= ~FTRACE_FL_REGS; |
1834 | } |
1835 | |
1836 | /* |
1837 | * The TRAMP needs to be set only if rec count |
1838 | * is decremented to one, and the ops that is |
			 * left has a trampoline, as TRAMP can only be
1840 | * enabled if there is only a single ops attached |
1841 | * to it. |
1842 | */ |
1843 | if (ftrace_rec_count(rec) == 1 && |
			    ftrace_find_tramp_ops_any_other(rec, ops))
1845 | rec->flags |= FTRACE_FL_TRAMP; |
1846 | else |
1847 | rec->flags &= ~FTRACE_FL_TRAMP; |
1848 | |
1849 | /* |
1850 | * flags will be cleared in ftrace_check_record() |
1851 | * if rec count is zero. |
1852 | */ |
1853 | } |
1854 | |
1855 | /* |
1856 | * If the rec has a single associated ops, and ops->func can be |
1857 | * called directly, allow the call site to call via the ops. |
1858 | */ |
1859 | if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS) && |
1860 | ftrace_rec_count(rec) == 1 && |
1861 | ftrace_ops_get_func(ops) == ops->func) |
1862 | rec->flags |= FTRACE_FL_CALL_OPS; |
1863 | else |
1864 | rec->flags &= ~FTRACE_FL_CALL_OPS; |
1865 | |
1866 | count++; |
1867 | |
1868 | /* Must match FTRACE_UPDATE_CALLS in ftrace_modify_all_code() */ |
		update |= ftrace_test_record(rec, true) != FTRACE_UPDATE_IGNORE;
1870 | |
1871 | /* Shortcut, if we handled all records, we are done. */ |
1872 | if (!all && count == hash->count) |
1873 | return update; |
1874 | } while_for_each_ftrace_rec(); |
1875 | |
1876 | return update; |
1877 | } |
1878 | |
1879 | static bool ftrace_hash_rec_disable(struct ftrace_ops *ops, |
1880 | int filter_hash) |
1881 | { |
	return __ftrace_hash_rec_update(ops, filter_hash, 0);
1883 | } |
1884 | |
1885 | static bool ftrace_hash_rec_enable(struct ftrace_ops *ops, |
1886 | int filter_hash) |
1887 | { |
	return __ftrace_hash_rec_update(ops, filter_hash, 1);
1889 | } |
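
/*
 * A worked example of the ref counting done by __ftrace_hash_rec_update()
 * (illustrative only; ops_a and ops_b are hypothetical): if two ops both
 * filter on the same function, that function's rec->flags count is
 * incremented once per ops, and only when it drops back to zero will
 * ftrace_check_record() turn the call site back into a nop:
 *
 *	ftrace_hash_rec_enable(ops_a, 1);	// rec count: 0 -> 1
 *	ftrace_hash_rec_enable(ops_b, 1);	// rec count: 1 -> 2
 *	ftrace_hash_rec_disable(ops_a, 1);	// rec count: 2 -> 1
 *	ftrace_hash_rec_disable(ops_b, 1);	// rec count: 1 -> 0
 */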
1890 | |
1891 | static void ftrace_hash_rec_update_modify(struct ftrace_ops *ops, |
1892 | int filter_hash, int inc) |
1893 | { |
1894 | struct ftrace_ops *op; |
1895 | |
1896 | __ftrace_hash_rec_update(ops, filter_hash, inc); |
1897 | |
1898 | if (ops->func_hash != &global_ops.local_hash) |
1899 | return; |
1900 | |
1901 | /* |
1902 | * If the ops shares the global_ops hash, then we need to update |
1903 | * all ops that are enabled and use this hash. |
1904 | */ |
1905 | do_for_each_ftrace_op(op, ftrace_ops_list) { |
1906 | /* Already done */ |
1907 | if (op == ops) |
1908 | continue; |
1909 | if (op->func_hash == &global_ops.local_hash) |
			__ftrace_hash_rec_update(op, filter_hash, inc);
1911 | } while_for_each_ftrace_op(op); |
1912 | } |
1913 | |
1914 | static void ftrace_hash_rec_disable_modify(struct ftrace_ops *ops, |
1915 | int filter_hash) |
1916 | { |
	ftrace_hash_rec_update_modify(ops, filter_hash, 0);
1918 | } |
1919 | |
1920 | static void ftrace_hash_rec_enable_modify(struct ftrace_ops *ops, |
1921 | int filter_hash) |
1922 | { |
	ftrace_hash_rec_update_modify(ops, filter_hash, 1);
1924 | } |
1925 | |
1926 | /* |
 * Try to update IPMODIFY flag on each ftrace_rec. Return 0 if it is OK
 * or no update is needed, -EBUSY if it detects a conflict of the flag
 * on a ftrace_rec, and -EINVAL if the new_hash tries to trace all recs.
 * Note that old_hash and new_hash have the following meanings:
1931 | * - If the hash is NULL, it hits all recs (if IPMODIFY is set, this is rejected) |
1932 | * - If the hash is EMPTY_HASH, it hits nothing |
1933 | * - Anything else hits the recs which match the hash entries. |
1934 | * |
 * DIRECT ops do not have the IPMODIFY flag, but we still need to check them
 * against functions with FTRACE_FL_IPMODIFY. If there is any overlap, call
1937 | * ops_func(SHARE_IPMODIFY_SELF) to make sure current ops can share with |
1938 | * IPMODIFY. If ops_func(SHARE_IPMODIFY_SELF) returns non-zero, propagate |
1939 | * the return value to the caller and eventually to the owner of the DIRECT |
1940 | * ops. |
1941 | */ |
1942 | static int __ftrace_hash_update_ipmodify(struct ftrace_ops *ops, |
1943 | struct ftrace_hash *old_hash, |
1944 | struct ftrace_hash *new_hash) |
1945 | { |
1946 | struct ftrace_page *pg; |
1947 | struct dyn_ftrace *rec, *end = NULL; |
1948 | int in_old, in_new; |
1949 | bool is_ipmodify, is_direct; |
1950 | |
1951 | /* Only update if the ops has been registered */ |
1952 | if (!(ops->flags & FTRACE_OPS_FL_ENABLED)) |
1953 | return 0; |
1954 | |
1955 | is_ipmodify = ops->flags & FTRACE_OPS_FL_IPMODIFY; |
1956 | is_direct = ops->flags & FTRACE_OPS_FL_DIRECT; |
1957 | |
1958 | /* neither IPMODIFY nor DIRECT, skip */ |
1959 | if (!is_ipmodify && !is_direct) |
1960 | return 0; |
1961 | |
1962 | if (WARN_ON_ONCE(is_ipmodify && is_direct)) |
1963 | return 0; |
1964 | |
1965 | /* |
	 * Since IPMODIFY and DIRECT are very address-sensitive
	 * actions, we do not allow ftrace_ops to set all functions to the
	 * new hash.
1969 | */ |
1970 | if (!new_hash || !old_hash) |
1971 | return -EINVAL; |
1972 | |
1973 | /* Update rec->flags */ |
1974 | do_for_each_ftrace_rec(pg, rec) { |
1975 | |
1976 | if (rec->flags & FTRACE_FL_DISABLED) |
1977 | continue; |
1978 | |
1979 | /* We need to update only differences of filter_hash */ |
		in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
		in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
1982 | if (in_old == in_new) |
1983 | continue; |
1984 | |
1985 | if (in_new) { |
1986 | if (rec->flags & FTRACE_FL_IPMODIFY) { |
1987 | int ret; |
1988 | |
1989 | /* Cannot have two ipmodify on same rec */ |
1990 | if (is_ipmodify) |
1991 | goto rollback; |
1992 | |
1993 | FTRACE_WARN_ON(rec->flags & FTRACE_FL_DIRECT); |
1994 | |
1995 | /* |
1996 | * Another ops with IPMODIFY is already |
1997 | * attached. We are now attaching a direct |
1998 | * ops. Run SHARE_IPMODIFY_SELF, to check |
1999 | * whether sharing is supported. |
2000 | */ |
2001 | if (!ops->ops_func) |
2002 | return -EBUSY; |
2003 | ret = ops->ops_func(ops, FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_SELF); |
2004 | if (ret) |
2005 | return ret; |
2006 | } else if (is_ipmodify) { |
2007 | rec->flags |= FTRACE_FL_IPMODIFY; |
2008 | } |
2009 | } else if (is_ipmodify) { |
2010 | rec->flags &= ~FTRACE_FL_IPMODIFY; |
2011 | } |
2012 | } while_for_each_ftrace_rec(); |
2013 | |
2014 | return 0; |
2015 | |
2016 | rollback: |
2017 | end = rec; |
2018 | |
2019 | /* Roll back what we did above */ |
2020 | do_for_each_ftrace_rec(pg, rec) { |
2021 | |
2022 | if (rec->flags & FTRACE_FL_DISABLED) |
2023 | continue; |
2024 | |
2025 | if (rec == end) |
2026 | goto err_out; |
2027 | |
		in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
		in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
2030 | if (in_old == in_new) |
2031 | continue; |
2032 | |
2033 | if (in_new) |
2034 | rec->flags &= ~FTRACE_FL_IPMODIFY; |
2035 | else |
2036 | rec->flags |= FTRACE_FL_IPMODIFY; |
2037 | } while_for_each_ftrace_rec(); |
2038 | |
2039 | err_out: |
2040 | return -EBUSY; |
2041 | } |
2042 | |
2043 | static int ftrace_hash_ipmodify_enable(struct ftrace_ops *ops) |
2044 | { |
2045 | struct ftrace_hash *hash = ops->func_hash->filter_hash; |
2046 | |
2047 | if (ftrace_hash_empty(hash)) |
2048 | hash = NULL; |
2049 | |
	return __ftrace_hash_update_ipmodify(ops, EMPTY_HASH, hash);
2051 | } |
2052 | |
2053 | /* Disabling always succeeds */ |
2054 | static void ftrace_hash_ipmodify_disable(struct ftrace_ops *ops) |
2055 | { |
2056 | struct ftrace_hash *hash = ops->func_hash->filter_hash; |
2057 | |
2058 | if (ftrace_hash_empty(hash)) |
2059 | hash = NULL; |
2060 | |
	__ftrace_hash_update_ipmodify(ops, hash, EMPTY_HASH);
2062 | } |
2063 | |
2064 | static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops, |
2065 | struct ftrace_hash *new_hash) |
2066 | { |
2067 | struct ftrace_hash *old_hash = ops->func_hash->filter_hash; |
2068 | |
	if (ftrace_hash_empty(old_hash))
2070 | old_hash = NULL; |
2071 | |
	if (ftrace_hash_empty(new_hash))
2073 | new_hash = NULL; |
2074 | |
2075 | return __ftrace_hash_update_ipmodify(ops, old_hash, new_hash); |
2076 | } |
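
/*
 * The three helpers above map __ftrace_hash_update_ipmodify() onto the
 * ops life cycle: enable transitions EMPTY_HASH -> filter_hash at
 * registration, disable transitions filter_hash -> EMPTY_HASH at
 * unregistration, and update handles a live filter change. A sketch of
 * the error handling a caller needs (new_hash is hypothetical here):
 *
 *	ret = ftrace_hash_ipmodify_update(ops, new_hash);
 *	if (ret < 0)
 *		return ret;	// e.g. -EBUSY on an IPMODIFY conflict
 */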
2077 | |
2078 | static void print_ip_ins(const char *fmt, const unsigned char *p) |
2079 | { |
2080 | char ins[MCOUNT_INSN_SIZE]; |
2081 | |
	if (copy_from_kernel_nofault(ins, p, MCOUNT_INSN_SIZE)) {
		printk(KERN_CONT "%s[FAULT] %px\n", fmt, p);
		return;
	}

	printk(KERN_CONT "%s", fmt);
	pr_cont("%*phC", MCOUNT_INSN_SIZE, ins);
2089 | } |
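
/*
 * The "%*phC" specifier above dumps MCOUNT_INSN_SIZE raw bytes separated
 * by colons; for example, a 5-byte x86 nop would print as
 * "0f:1f:44:00:00" (the byte values are illustrative).
 */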
2090 | |
2091 | enum ftrace_bug_type ftrace_bug_type; |
2092 | const void *ftrace_expected; |
2093 | |
2094 | static void print_bug_type(void) |
2095 | { |
2096 | switch (ftrace_bug_type) { |
2097 | case FTRACE_BUG_UNKNOWN: |
2098 | break; |
	case FTRACE_BUG_INIT:
		pr_info("Initializing ftrace call sites\n");
		break;
	case FTRACE_BUG_NOP:
		pr_info("Setting ftrace call site to NOP\n");
		break;
	case FTRACE_BUG_CALL:
		pr_info("Setting ftrace call site to call ftrace function\n");
		break;
	case FTRACE_BUG_UPDATE:
		pr_info("Updating ftrace call site to call a different ftrace function\n");
		break;
2111 | } |
2112 | } |
2113 | |
2114 | /** |
2115 | * ftrace_bug - report and shutdown function tracer |
2116 | * @failed: The failed type (EFAULT, EINVAL, EPERM) |
2117 | * @rec: The record that failed |
2118 | * |
2119 | * The arch code that enables or disables the function tracing |
2120 | * can call ftrace_bug() when it has detected a problem in |
2121 | * modifying the code. @failed should be one of either: |
2122 | * EFAULT - if the problem happens on reading the @ip address |
2123 | * EINVAL - if what is read at @ip is not what was expected |
2124 | * EPERM - if the problem happens on writing to the @ip address |
2125 | */ |
2126 | void ftrace_bug(int failed, struct dyn_ftrace *rec) |
2127 | { |
2128 | unsigned long ip = rec ? rec->ip : 0; |
2129 | |
	pr_info("------------[ ftrace bug ]------------\n");

	switch (failed) {
	case -EFAULT:
		pr_info("ftrace faulted on modifying ");
		print_ip_sym(KERN_INFO, ip);
		break;
	case -EINVAL:
		pr_info("ftrace failed to modify ");
		print_ip_sym(KERN_INFO, ip);
		print_ip_ins(" actual:   ", (unsigned char *)ip);
		pr_cont("\n");
		if (ftrace_expected) {
			print_ip_ins(" expected: ", ftrace_expected);
			pr_cont("\n");
		}
		break;
	case -EPERM:
		pr_info("ftrace faulted on writing ");
		print_ip_sym(KERN_INFO, ip);
		break;
	default:
		pr_info("ftrace faulted on unknown error ");
		print_ip_sym(KERN_INFO, ip);
2154 | } |
2155 | print_bug_type(); |
2156 | if (rec) { |
2157 | struct ftrace_ops *ops = NULL; |
2158 | |
		pr_info("ftrace record flags: %lx\n", rec->flags);
		pr_cont(" (%ld)%s%s", ftrace_rec_count(rec),
			rec->flags & FTRACE_FL_REGS ? " R" : "  ",
			rec->flags & FTRACE_FL_CALL_OPS ? " O" : "  ");
		if (rec->flags & FTRACE_FL_TRAMP_EN) {
			ops = ftrace_find_tramp_ops_any(rec);
			if (ops) {
				do {
					pr_cont("\ttramp: %pS (%pS)",
						(void *)ops->trampoline,
						(void *)ops->func);
					ops = ftrace_find_tramp_ops_next(rec, ops);
				} while (ops);
			} else
				pr_cont("\ttramp: ERROR!");
		}
		ip = ftrace_get_addr_curr(rec);
		pr_cont("\n expected tramp: %lx\n", ip);
2178 | } |
2179 | |
2180 | FTRACE_WARN_ON_ONCE(1); |
2181 | } |
2182 | |
2183 | static int ftrace_check_record(struct dyn_ftrace *rec, bool enable, bool update) |
2184 | { |
2185 | unsigned long flag = 0UL; |
2186 | |
2187 | ftrace_bug_type = FTRACE_BUG_UNKNOWN; |
2188 | |
2189 | if (skip_record(rec)) |
2190 | return FTRACE_UPDATE_IGNORE; |
2191 | |
2192 | /* |
2193 | * If we are updating calls: |
2194 | * |
2195 | * If the record has a ref count, then we need to enable it |
2196 | * because someone is using it. |
2197 | * |
	 * Otherwise we make sure it's disabled.
2199 | * |
2200 | * If we are disabling calls, then disable all records that |
2201 | * are enabled. |
2202 | */ |
2203 | if (enable && ftrace_rec_count(rec)) |
2204 | flag = FTRACE_FL_ENABLED; |
2205 | |
2206 | /* |
2207 | * If enabling and the REGS flag does not match the REGS_EN, or |
2208 | * the TRAMP flag doesn't match the TRAMP_EN, then do not ignore |
2209 | * this record. Set flags to fail the compare against ENABLED. |
2210 | * Same for direct calls. |
2211 | */ |
2212 | if (flag) { |
2213 | if (!(rec->flags & FTRACE_FL_REGS) != |
2214 | !(rec->flags & FTRACE_FL_REGS_EN)) |
2215 | flag |= FTRACE_FL_REGS; |
2216 | |
2217 | if (!(rec->flags & FTRACE_FL_TRAMP) != |
2218 | !(rec->flags & FTRACE_FL_TRAMP_EN)) |
2219 | flag |= FTRACE_FL_TRAMP; |
2220 | |
2221 | /* |
2222 | * Direct calls are special, as count matters. |
2223 | * We must test the record for direct, if the |
2224 | * DIRECT and DIRECT_EN do not match, but only |
2225 | * if the count is 1. That's because, if the |
2226 | * count is something other than one, we do not |
2227 | * want the direct enabled (it will be done via the |
2228 | * direct helper). But if DIRECT_EN is set, and |
2229 | * the count is not one, we need to clear it. |
2230 | * |
2231 | */ |
2232 | if (ftrace_rec_count(rec) == 1) { |
2233 | if (!(rec->flags & FTRACE_FL_DIRECT) != |
2234 | !(rec->flags & FTRACE_FL_DIRECT_EN)) |
2235 | flag |= FTRACE_FL_DIRECT; |
2236 | } else if (rec->flags & FTRACE_FL_DIRECT_EN) { |
2237 | flag |= FTRACE_FL_DIRECT; |
2238 | } |
2239 | |
2240 | /* |
2241 | * Ops calls are special, as count matters. |
2242 | * As with direct calls, they must only be enabled when count |
2243 | * is one, otherwise they'll be handled via the list ops. |
2244 | */ |
2245 | if (ftrace_rec_count(rec) == 1) { |
2246 | if (!(rec->flags & FTRACE_FL_CALL_OPS) != |
2247 | !(rec->flags & FTRACE_FL_CALL_OPS_EN)) |
2248 | flag |= FTRACE_FL_CALL_OPS; |
2249 | } else if (rec->flags & FTRACE_FL_CALL_OPS_EN) { |
2250 | flag |= FTRACE_FL_CALL_OPS; |
2251 | } |
2252 | } |
2253 | |
2254 | /* If the state of this record hasn't changed, then do nothing */ |
2255 | if ((rec->flags & FTRACE_FL_ENABLED) == flag) |
2256 | return FTRACE_UPDATE_IGNORE; |
2257 | |
2258 | if (flag) { |
2259 | /* Save off if rec is being enabled (for return value) */ |
2260 | flag ^= rec->flags & FTRACE_FL_ENABLED; |
2261 | |
2262 | if (update) { |
2263 | rec->flags |= FTRACE_FL_ENABLED | FTRACE_FL_TOUCHED; |
2264 | if (flag & FTRACE_FL_REGS) { |
2265 | if (rec->flags & FTRACE_FL_REGS) |
2266 | rec->flags |= FTRACE_FL_REGS_EN; |
2267 | else |
2268 | rec->flags &= ~FTRACE_FL_REGS_EN; |
2269 | } |
2270 | if (flag & FTRACE_FL_TRAMP) { |
2271 | if (rec->flags & FTRACE_FL_TRAMP) |
2272 | rec->flags |= FTRACE_FL_TRAMP_EN; |
2273 | else |
2274 | rec->flags &= ~FTRACE_FL_TRAMP_EN; |
2275 | } |
2276 | |
2277 | /* Keep track of anything that modifies the function */ |
2278 | if (rec->flags & (FTRACE_FL_DIRECT | FTRACE_FL_IPMODIFY)) |
2279 | rec->flags |= FTRACE_FL_MODIFIED; |
2280 | |
2281 | if (flag & FTRACE_FL_DIRECT) { |
2282 | /* |
2283 | * If there's only one user (direct_ops helper) |
2284 | * then we can call the direct function |
2285 | * directly (no ftrace trampoline). |
2286 | */ |
2287 | if (ftrace_rec_count(rec) == 1) { |
2288 | if (rec->flags & FTRACE_FL_DIRECT) |
2289 | rec->flags |= FTRACE_FL_DIRECT_EN; |
2290 | else |
2291 | rec->flags &= ~FTRACE_FL_DIRECT_EN; |
2292 | } else { |
2293 | /* |
2294 | * Can only call directly if there's |
2295 | * only one callback to the function. |
2296 | */ |
2297 | rec->flags &= ~FTRACE_FL_DIRECT_EN; |
2298 | } |
2299 | } |
2300 | |
2301 | if (flag & FTRACE_FL_CALL_OPS) { |
2302 | if (ftrace_rec_count(rec) == 1) { |
2303 | if (rec->flags & FTRACE_FL_CALL_OPS) |
2304 | rec->flags |= FTRACE_FL_CALL_OPS_EN; |
2305 | else |
2306 | rec->flags &= ~FTRACE_FL_CALL_OPS_EN; |
2307 | } else { |
2308 | /* |
2309 | * Can only call directly if there's |
2310 | * only one set of associated ops. |
2311 | */ |
2312 | rec->flags &= ~FTRACE_FL_CALL_OPS_EN; |
2313 | } |
2314 | } |
2315 | } |
2316 | |
2317 | /* |
2318 | * If this record is being updated from a nop, then |
2319 | * return UPDATE_MAKE_CALL. |
2320 | * Otherwise, |
2321 | * return UPDATE_MODIFY_CALL to tell the caller to convert |
2322 | * from the save regs, to a non-save regs function or |
2323 | * vice versa, or from a trampoline call. |
2324 | */ |
2325 | if (flag & FTRACE_FL_ENABLED) { |
2326 | ftrace_bug_type = FTRACE_BUG_CALL; |
2327 | return FTRACE_UPDATE_MAKE_CALL; |
2328 | } |
2329 | |
2330 | ftrace_bug_type = FTRACE_BUG_UPDATE; |
2331 | return FTRACE_UPDATE_MODIFY_CALL; |
2332 | } |
2333 | |
2334 | if (update) { |
2335 | /* If there's no more users, clear all flags */ |
2336 | if (!ftrace_rec_count(rec)) |
2337 | rec->flags &= FTRACE_NOCLEAR_FLAGS; |
2338 | else |
2339 | /* |
2340 | * Just disable the record, but keep the ops TRAMP |
2341 | * and REGS states. The _EN flags must be disabled though. |
2342 | */ |
2343 | rec->flags &= ~(FTRACE_FL_ENABLED | FTRACE_FL_TRAMP_EN | |
2344 | FTRACE_FL_REGS_EN | FTRACE_FL_DIRECT_EN | |
2345 | FTRACE_FL_CALL_OPS_EN); |
2346 | } |
2347 | |
2348 | ftrace_bug_type = FTRACE_BUG_NOP; |
2349 | return FTRACE_UPDATE_MAKE_NOP; |
2350 | } |
2351 | |
2352 | /** |
2353 | * ftrace_update_record - set a record that now is tracing or not |
2354 | * @rec: the record to update |
2355 | * @enable: set to true if the record is tracing, false to force disable |
2356 | * |
2357 | * The records that represent all functions that can be traced need |
2358 | * to be updated when tracing has been enabled. |
2359 | */ |
2360 | int ftrace_update_record(struct dyn_ftrace *rec, bool enable) |
2361 | { |
	return ftrace_check_record(rec, enable, true);
2363 | } |
2364 | |
2365 | /** |
2366 | * ftrace_test_record - check if the record has been enabled or not |
2367 | * @rec: the record to test |
2368 | * @enable: set to true to check if enabled, false if it is disabled |
2369 | * |
2370 | * The arch code may need to test if a record is already set to |
2371 | * tracing to determine how to modify the function code that it |
2372 | * represents. |
2373 | */ |
2374 | int ftrace_test_record(struct dyn_ftrace *rec, bool enable) |
2375 | { |
	return ftrace_check_record(rec, enable, false);
2377 | } |
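
/*
 * Sketch (not taken from any particular architecture): arch code that
 * batches updates can query each record first and then do its own
 * patching based on the verdict:
 *
 *	switch (ftrace_test_record(rec, enable)) {
 *	case FTRACE_UPDATE_IGNORE:
 *		return 0;
 *	case FTRACE_UPDATE_MAKE_CALL:
 *		// patch nop -> call
 *		break;
 *	case FTRACE_UPDATE_MAKE_NOP:
 *		// patch call -> nop
 *		break;
 *	case FTRACE_UPDATE_MODIFY_CALL:
 *		// patch one call destination to another
 *		break;
 *	}
 */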
2378 | |
2379 | static struct ftrace_ops * |
2380 | ftrace_find_tramp_ops_any(struct dyn_ftrace *rec) |
2381 | { |
2382 | struct ftrace_ops *op; |
2383 | unsigned long ip = rec->ip; |
2384 | |
2385 | do_for_each_ftrace_op(op, ftrace_ops_list) { |
2386 | |
2387 | if (!op->trampoline) |
2388 | continue; |
2389 | |
		if (hash_contains_ip(ip, op->func_hash))
2391 | return op; |
2392 | } while_for_each_ftrace_op(op); |
2393 | |
2394 | return NULL; |
2395 | } |
2396 | |
2397 | static struct ftrace_ops * |
2398 | ftrace_find_tramp_ops_any_other(struct dyn_ftrace *rec, struct ftrace_ops *op_exclude) |
2399 | { |
2400 | struct ftrace_ops *op; |
2401 | unsigned long ip = rec->ip; |
2402 | |
2403 | do_for_each_ftrace_op(op, ftrace_ops_list) { |
2404 | |
2405 | if (op == op_exclude || !op->trampoline) |
2406 | continue; |
2407 | |
		if (hash_contains_ip(ip, op->func_hash))
2409 | return op; |
2410 | } while_for_each_ftrace_op(op); |
2411 | |
2412 | return NULL; |
2413 | } |
2414 | |
2415 | static struct ftrace_ops * |
2416 | ftrace_find_tramp_ops_next(struct dyn_ftrace *rec, |
2417 | struct ftrace_ops *op) |
2418 | { |
2419 | unsigned long ip = rec->ip; |
2420 | |
2421 | while_for_each_ftrace_op(op) { |
2422 | |
2423 | if (!op->trampoline) |
2424 | continue; |
2425 | |
		if (hash_contains_ip(ip, op->func_hash))
2427 | return op; |
2428 | } |
2429 | |
2430 | return NULL; |
2431 | } |
2432 | |
2433 | static struct ftrace_ops * |
2434 | ftrace_find_tramp_ops_curr(struct dyn_ftrace *rec) |
2435 | { |
2436 | struct ftrace_ops *op; |
2437 | unsigned long ip = rec->ip; |
2438 | |
2439 | /* |
2440 | * Need to check removed ops first. |
2441 | * If they are being removed, and this rec has a tramp, |
2442 | * and this rec is in the ops list, then it would be the |
2443 | * one with the tramp. |
2444 | */ |
2445 | if (removed_ops) { |
		if (hash_contains_ip(ip, &removed_ops->old_hash))
2447 | return removed_ops; |
2448 | } |
2449 | |
2450 | /* |
2451 | * Need to find the current trampoline for a rec. |
2452 | * Now, a trampoline is only attached to a rec if there |
2453 | * was a single 'ops' attached to it. But this can be called |
2454 | * when we are adding another op to the rec or removing the |
2455 | * current one. Thus, if the op is being added, we can |
2456 | * ignore it because it hasn't attached itself to the rec |
2457 | * yet. |
2458 | * |
2459 | * If an ops is being modified (hooking to different functions) |
2460 | * then we don't care about the new functions that are being |
2461 | * added, just the old ones (that are probably being removed). |
2462 | * |
2463 | * If we are adding an ops to a function that already is using |
2464 | * a trampoline, it needs to be removed (trampolines are only |
2465 | * for single ops connected), then an ops that is not being |
2466 | * modified also needs to be checked. |
2467 | */ |
2468 | do_for_each_ftrace_op(op, ftrace_ops_list) { |
2469 | |
2470 | if (!op->trampoline) |
2471 | continue; |
2472 | |
2473 | /* |
2474 | * If the ops is being added, it hasn't gotten to |
2475 | * the point to be removed from this tree yet. |
2476 | */ |
2477 | if (op->flags & FTRACE_OPS_FL_ADDING) |
2478 | continue; |
2479 | |
2480 | |
2481 | /* |
2482 | * If the ops is being modified and is in the old |
2483 | * hash, then it is probably being removed from this |
2484 | * function. |
2485 | */ |
2486 | if ((op->flags & FTRACE_OPS_FL_MODIFYING) && |
		    hash_contains_ip(ip, &op->old_hash))
2488 | return op; |
2489 | /* |
2490 | * If the ops is not being added or modified, and it's |
2491 | * in its normal filter hash, then this must be the one |
2492 | * we want! |
2493 | */ |
2494 | if (!(op->flags & FTRACE_OPS_FL_MODIFYING) && |
		    hash_contains_ip(ip, op->func_hash))
2496 | return op; |
2497 | |
2498 | } while_for_each_ftrace_op(op); |
2499 | |
2500 | return NULL; |
2501 | } |
2502 | |
2503 | static struct ftrace_ops * |
2504 | ftrace_find_tramp_ops_new(struct dyn_ftrace *rec) |
2505 | { |
2506 | struct ftrace_ops *op; |
2507 | unsigned long ip = rec->ip; |
2508 | |
2509 | do_for_each_ftrace_op(op, ftrace_ops_list) { |
2510 | /* pass rec in as regs to have non-NULL val */ |
		if (hash_contains_ip(ip, op->func_hash))
2512 | return op; |
2513 | } while_for_each_ftrace_op(op); |
2514 | |
2515 | return NULL; |
2516 | } |
2517 | |
2518 | struct ftrace_ops * |
2519 | ftrace_find_unique_ops(struct dyn_ftrace *rec) |
2520 | { |
2521 | struct ftrace_ops *op, *found = NULL; |
2522 | unsigned long ip = rec->ip; |
2523 | |
2524 | do_for_each_ftrace_op(op, ftrace_ops_list) { |
2525 | |
		if (hash_contains_ip(ip, op->func_hash)) {
2527 | if (found) |
2528 | return NULL; |
2529 | found = op; |
2530 | } |
2531 | |
2532 | } while_for_each_ftrace_op(op); |
2533 | |
2534 | return found; |
2535 | } |
2536 | |
2537 | #ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS |
2538 | /* Protected by rcu_tasks for reading, and direct_mutex for writing */ |
2539 | static struct ftrace_hash *direct_functions = EMPTY_HASH; |
2540 | static DEFINE_MUTEX(direct_mutex); |
2541 | int ftrace_direct_func_count; |
2542 | |
2543 | /* |
2544 | * Search the direct_functions hash to see if the given instruction pointer |
2545 | * has a direct caller attached to it. |
2546 | */ |
2547 | unsigned long ftrace_find_rec_direct(unsigned long ip) |
2548 | { |
2549 | struct ftrace_func_entry *entry; |
2550 | |
	entry = __ftrace_lookup_ip(direct_functions, ip);
2552 | if (!entry) |
2553 | return 0; |
2554 | |
2555 | return entry->direct; |
2556 | } |
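
/*
 * Example (hypothetical trampoline and functions): if my_tramp was
 * attached to wake_up_process() through the direct-call API
 * (register_ftrace_direct()), then:
 *
 *	ftrace_find_rec_direct((unsigned long)wake_up_process);
 *		// returns (unsigned long)my_tramp
 *	ftrace_find_rec_direct((unsigned long)schedule);
 *		// returns 0: no direct caller attached
 */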
2557 | |
2558 | static struct ftrace_func_entry* |
2559 | ftrace_add_rec_direct(unsigned long ip, unsigned long addr, |
2560 | struct ftrace_hash **free_hash) |
2561 | { |
2562 | struct ftrace_func_entry *entry; |
2563 | |
	if (ftrace_hash_empty(direct_functions) ||
	    direct_functions->count > 2 * (1 << direct_functions->size_bits)) {
		struct ftrace_hash *new_hash;
		int size = ftrace_hash_empty(direct_functions) ? 0 :
			direct_functions->count + 1;

		if (size < 32)
			size = 32;

		new_hash = dup_hash(direct_functions, size);
2574 | if (!new_hash) |
2575 | return NULL; |
2576 | |
2577 | *free_hash = direct_functions; |
2578 | direct_functions = new_hash; |
2579 | } |
2580 | |
	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
2582 | if (!entry) |
2583 | return NULL; |
2584 | |
2585 | entry->ip = ip; |
2586 | entry->direct = addr; |
	__add_hash_entry(direct_functions, entry);
2588 | return entry; |
2589 | } |
2590 | |
2591 | static void call_direct_funcs(unsigned long ip, unsigned long pip, |
2592 | struct ftrace_ops *ops, struct ftrace_regs *fregs) |
2593 | { |
2594 | unsigned long addr = READ_ONCE(ops->direct_call); |
2595 | |
2596 | if (!addr) |
2597 | return; |
2598 | |
2599 | arch_ftrace_set_direct_caller(fregs, addr); |
2600 | } |
2601 | #endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */ |
2602 | |
2603 | /** |
2604 | * ftrace_get_addr_new - Get the call address to set to |
2605 | * @rec: The ftrace record descriptor |
2606 | * |
2607 | * If the record has the FTRACE_FL_REGS set, that means that it |
2608 | * wants to convert to a callback that saves all regs. If FTRACE_FL_REGS |
2609 | * is not set, then it wants to convert to the normal callback. |
2610 | * |
2611 | * Returns the address of the trampoline to set to |
2612 | */ |
2613 | unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec) |
2614 | { |
2615 | struct ftrace_ops *ops; |
2616 | unsigned long addr; |
2617 | |
2618 | if ((rec->flags & FTRACE_FL_DIRECT) && |
2619 | (ftrace_rec_count(rec) == 1)) { |
		addr = ftrace_find_rec_direct(rec->ip);
2621 | if (addr) |
2622 | return addr; |
2623 | WARN_ON_ONCE(1); |
2624 | } |
2625 | |
2626 | /* Trampolines take precedence over regs */ |
2627 | if (rec->flags & FTRACE_FL_TRAMP) { |
2628 | ops = ftrace_find_tramp_ops_new(rec); |
2629 | if (FTRACE_WARN_ON(!ops || !ops->trampoline)) { |
			pr_warn("Bad trampoline accounting at: %p (%pS) (%lx)\n",
2631 | (void *)rec->ip, (void *)rec->ip, rec->flags); |
2632 | /* Ftrace is shutting down, return anything */ |
2633 | return (unsigned long)FTRACE_ADDR; |
2634 | } |
2635 | return ops->trampoline; |
2636 | } |
2637 | |
2638 | if (rec->flags & FTRACE_FL_REGS) |
2639 | return (unsigned long)FTRACE_REGS_ADDR; |
2640 | else |
2641 | return (unsigned long)FTRACE_ADDR; |
2642 | } |
2643 | |
2644 | /** |
2645 | * ftrace_get_addr_curr - Get the call address that is already there |
2646 | * @rec: The ftrace record descriptor |
2647 | * |
2648 | * The FTRACE_FL_REGS_EN is set when the record already points to |
2649 | * a function that saves all the regs. Basically the '_EN' version |
2650 | * represents the current state of the function. |
2651 | * |
2652 | * Returns the address of the trampoline that is currently being called |
2653 | */ |
2654 | unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec) |
2655 | { |
2656 | struct ftrace_ops *ops; |
2657 | unsigned long addr; |
2658 | |
2659 | /* Direct calls take precedence over trampolines */ |
2660 | if (rec->flags & FTRACE_FL_DIRECT_EN) { |
		addr = ftrace_find_rec_direct(rec->ip);
2662 | if (addr) |
2663 | return addr; |
2664 | WARN_ON_ONCE(1); |
2665 | } |
2666 | |
2667 | /* Trampolines take precedence over regs */ |
2668 | if (rec->flags & FTRACE_FL_TRAMP_EN) { |
2669 | ops = ftrace_find_tramp_ops_curr(rec); |
2670 | if (FTRACE_WARN_ON(!ops)) { |
			pr_warn("Bad trampoline accounting at: %p (%pS)\n",
2672 | (void *)rec->ip, (void *)rec->ip); |
2673 | /* Ftrace is shutting down, return anything */ |
2674 | return (unsigned long)FTRACE_ADDR; |
2675 | } |
2676 | return ops->trampoline; |
2677 | } |
2678 | |
2679 | if (rec->flags & FTRACE_FL_REGS_EN) |
2680 | return (unsigned long)FTRACE_REGS_ADDR; |
2681 | else |
2682 | return (unsigned long)FTRACE_ADDR; |
2683 | } |
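
/*
 * Together the two lookups above describe a transition. Illustrative
 * sketch: if a record currently calls the regs-saving trampoline and is
 * being switched to a single ops with a custom trampoline, then:
 *
 *	ftrace_get_addr_curr(rec);	// FTRACE_REGS_ADDR (the old call)
 *	ftrace_get_addr_new(rec);	// ops->trampoline (the new call)
 *
 * which is exactly the (old, new) pair that __ftrace_replace_code()
 * below hands to ftrace_modify_call().
 */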
2684 | |
2685 | static int |
2686 | __ftrace_replace_code(struct dyn_ftrace *rec, bool enable) |
2687 | { |
2688 | unsigned long ftrace_old_addr; |
2689 | unsigned long ftrace_addr; |
2690 | int ret; |
2691 | |
2692 | ftrace_addr = ftrace_get_addr_new(rec); |
2693 | |
2694 | /* This needs to be done before we call ftrace_update_record */ |
2695 | ftrace_old_addr = ftrace_get_addr_curr(rec); |
2696 | |
2697 | ret = ftrace_update_record(rec, enable); |
2698 | |
2699 | ftrace_bug_type = FTRACE_BUG_UNKNOWN; |
2700 | |
2701 | switch (ret) { |
2702 | case FTRACE_UPDATE_IGNORE: |
2703 | return 0; |
2704 | |
2705 | case FTRACE_UPDATE_MAKE_CALL: |
2706 | ftrace_bug_type = FTRACE_BUG_CALL; |
		return ftrace_make_call(rec, ftrace_addr);
2708 | |
2709 | case FTRACE_UPDATE_MAKE_NOP: |
2710 | ftrace_bug_type = FTRACE_BUG_NOP; |
		return ftrace_make_nop(NULL, rec, ftrace_old_addr);
2712 | |
2713 | case FTRACE_UPDATE_MODIFY_CALL: |
2714 | ftrace_bug_type = FTRACE_BUG_UPDATE; |
		return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr);
2716 | } |
2717 | |
2718 | return -1; /* unknown ftrace bug */ |
2719 | } |
2720 | |
2721 | void __weak ftrace_replace_code(int mod_flags) |
2722 | { |
2723 | struct dyn_ftrace *rec; |
2724 | struct ftrace_page *pg; |
2725 | bool enable = mod_flags & FTRACE_MODIFY_ENABLE_FL; |
2726 | int schedulable = mod_flags & FTRACE_MODIFY_MAY_SLEEP_FL; |
2727 | int failed; |
2728 | |
2729 | if (unlikely(ftrace_disabled)) |
2730 | return; |
2731 | |
2732 | do_for_each_ftrace_rec(pg, rec) { |
2733 | |
2734 | if (skip_record(rec)) |
2735 | continue; |
2736 | |
2737 | failed = __ftrace_replace_code(rec, enable); |
2738 | if (failed) { |
2739 | ftrace_bug(failed, rec); |
2740 | /* Stop processing */ |
2741 | return; |
2742 | } |
2743 | if (schedulable) |
2744 | cond_resched(); |
2745 | } while_for_each_ftrace_rec(); |
2746 | } |
2747 | |
2748 | struct ftrace_rec_iter { |
2749 | struct ftrace_page *pg; |
2750 | int index; |
2751 | }; |
2752 | |
2753 | /** |
2754 | * ftrace_rec_iter_start - start up iterating over traced functions |
2755 | * |
2756 | * Returns an iterator handle that is used to iterate over all |
2757 | * the records that represent address locations where functions |
2758 | * are traced. |
2759 | * |
2760 | * May return NULL if no records are available. |
2761 | */ |
2762 | struct ftrace_rec_iter *ftrace_rec_iter_start(void) |
2763 | { |
2764 | /* |
2765 | * We only use a single iterator. |
2766 | * Protected by the ftrace_lock mutex. |
2767 | */ |
2768 | static struct ftrace_rec_iter ftrace_rec_iter; |
2769 | struct ftrace_rec_iter *iter = &ftrace_rec_iter; |
2770 | |
2771 | iter->pg = ftrace_pages_start; |
2772 | iter->index = 0; |
2773 | |
2774 | /* Could have empty pages */ |
2775 | while (iter->pg && !iter->pg->index) |
2776 | iter->pg = iter->pg->next; |
2777 | |
2778 | if (!iter->pg) |
2779 | return NULL; |
2780 | |
2781 | return iter; |
2782 | } |
2783 | |
2784 | /** |
2785 | * ftrace_rec_iter_next - get the next record to process. |
2786 | * @iter: The handle to the iterator. |
2787 | * |
2788 | * Returns the next iterator after the given iterator @iter. |
2789 | */ |
2790 | struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter) |
2791 | { |
2792 | iter->index++; |
2793 | |
2794 | if (iter->index >= iter->pg->index) { |
2795 | iter->pg = iter->pg->next; |
2796 | iter->index = 0; |
2797 | |
2798 | /* Could have empty pages */ |
2799 | while (iter->pg && !iter->pg->index) |
2800 | iter->pg = iter->pg->next; |
2801 | } |
2802 | |
2803 | if (!iter->pg) |
2804 | return NULL; |
2805 | |
2806 | return iter; |
2807 | } |
2808 | |
2809 | /** |
2810 | * ftrace_rec_iter_record - get the record at the iterator location |
2811 | * @iter: The current iterator location |
2812 | * |
2813 | * Returns the record that the current @iter is at. |
2814 | */ |
2815 | struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter) |
2816 | { |
2817 | return &iter->pg->records[iter->index]; |
2818 | } |
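
/*
 * The three iterator functions above are meant to be used together. A
 * minimal sketch of how arch code may walk every record (the caller
 * must hold ftrace_lock):
 *
 *	struct ftrace_rec_iter *iter;
 *	struct dyn_ftrace *rec;
 *
 *	for (iter = ftrace_rec_iter_start(); iter;
 *	     iter = ftrace_rec_iter_next(iter)) {
 *		rec = ftrace_rec_iter_record(iter);
 *		// patch or inspect rec->ip here
 *	}
 */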
2819 | |
2820 | static int |
2821 | ftrace_nop_initialize(struct module *mod, struct dyn_ftrace *rec) |
2822 | { |
2823 | int ret; |
2824 | |
2825 | if (unlikely(ftrace_disabled)) |
2826 | return 0; |
2827 | |
2828 | ret = ftrace_init_nop(mod, rec); |
2829 | if (ret) { |
2830 | ftrace_bug_type = FTRACE_BUG_INIT; |
		ftrace_bug(ret, rec);
2832 | return 0; |
2833 | } |
2834 | return 1; |
2835 | } |
2836 | |
2837 | /* |
2838 | * archs can override this function if they must do something |
2839 | * before the modifying code is performed. |
2840 | */ |
2841 | void __weak ftrace_arch_code_modify_prepare(void) |
2842 | { |
2843 | } |
2844 | |
2845 | /* |
2846 | * archs can override this function if they must do something |
2847 | * after the modifying code is performed. |
2848 | */ |
2849 | void __weak ftrace_arch_code_modify_post_process(void) |
2850 | { |
2851 | } |
2852 | |
2853 | static int update_ftrace_func(ftrace_func_t func) |
2854 | { |
2855 | static ftrace_func_t save_func; |
2856 | |
2857 | /* Avoid updating if it hasn't changed */ |
2858 | if (func == save_func) |
2859 | return 0; |
2860 | |
2861 | save_func = func; |
2862 | |
2863 | return ftrace_update_ftrace_func(func); |
2864 | } |
2865 | |
2866 | void ftrace_modify_all_code(int command) |
2867 | { |
2868 | int update = command & FTRACE_UPDATE_TRACE_FUNC; |
2869 | int mod_flags = 0; |
2870 | int err = 0; |
2871 | |
2872 | if (command & FTRACE_MAY_SLEEP) |
2873 | mod_flags = FTRACE_MODIFY_MAY_SLEEP_FL; |
2874 | |
2875 | /* |
2876 | * If the ftrace_caller calls a ftrace_ops func directly, |
2877 | * we need to make sure that it only traces functions it |
2878 | * expects to trace. When doing the switch of functions, |
2879 | * we need to update to the ftrace_ops_list_func first |
	 * before the transition between old and new calls is set,
	 * as the ftrace_ops_list_func will check the ops hashes
	 * to make sure the ops have the right functions
	 * traced.
2884 | */ |
2885 | if (update) { |
		err = update_ftrace_func(ftrace_ops_list_func);
2887 | if (FTRACE_WARN_ON(err)) |
2888 | return; |
2889 | } |
2890 | |
2891 | if (command & FTRACE_UPDATE_CALLS) |
		ftrace_replace_code(mod_flags | FTRACE_MODIFY_ENABLE_FL);
2893 | else if (command & FTRACE_DISABLE_CALLS) |
2894 | ftrace_replace_code(mod_flags); |
2895 | |
2896 | if (update && ftrace_trace_function != ftrace_ops_list_func) { |
2897 | function_trace_op = set_function_trace_op; |
2898 | smp_wmb(); |
2899 | /* If irqs are disabled, we are in stop machine */ |
2900 | if (!irqs_disabled()) |
			smp_call_function(ftrace_sync_ipi, NULL, 1);
		err = update_ftrace_func(ftrace_trace_function);
2903 | if (FTRACE_WARN_ON(err)) |
2904 | return; |
2905 | } |
2906 | |
2907 | if (command & FTRACE_START_FUNC_RET) |
2908 | err = ftrace_enable_ftrace_graph_caller(); |
2909 | else if (command & FTRACE_STOP_FUNC_RET) |
2910 | err = ftrace_disable_ftrace_graph_caller(); |
2911 | FTRACE_WARN_ON(err); |
2912 | } |
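
/*
 * Example command word (illustrative): enabling the function graph
 * return hook while call sites also need updating ends up here as
 *
 *	ftrace_modify_all_code(FTRACE_UPDATE_CALLS | FTRACE_START_FUNC_RET);
 *
 * which patches the call sites first and only then enables the graph
 * caller.
 */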
2913 | |
2914 | static int __ftrace_modify_code(void *data) |
2915 | { |
2916 | int *command = data; |
2917 | |
	ftrace_modify_all_code(*command);
2919 | |
2920 | return 0; |
2921 | } |
2922 | |
2923 | /** |
2924 | * ftrace_run_stop_machine - go back to the stop machine method |
2925 | * @command: The command to tell ftrace what to do |
2926 | * |
 * If an arch needs to fall back to the stop machine method, it
 * can call this function.
2929 | */ |
2930 | void ftrace_run_stop_machine(int command) |
2931 | { |
	stop_machine(__ftrace_modify_code, &command, NULL);
2933 | } |
2934 | |
2935 | /** |
2936 | * arch_ftrace_update_code - modify the code to trace or not trace |
2937 | * @command: The command that needs to be done |
2938 | * |
 * Archs can override this function if they do not need to
 * run stop_machine() to modify code.
2941 | */ |
2942 | void __weak arch_ftrace_update_code(int command) |
2943 | { |
2944 | ftrace_run_stop_machine(command); |
2945 | } |
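
/*
 * Sketch of such an override (assumed, not taken from any one arch): an
 * architecture whose text-patching primitive is safe against concurrent
 * execution can simply do the modification inline:
 *
 *	void arch_ftrace_update_code(int command)
 *	{
 *		// patching here is safe without stop_machine()
 *		ftrace_modify_all_code(command);
 *	}
 */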
2946 | |
2947 | static void ftrace_run_update_code(int command) |
2948 | { |
2949 | ftrace_arch_code_modify_prepare(); |
2950 | |
2951 | /* |
2952 | * By default we use stop_machine() to modify the code. |
	 * But archs can do whatever they want as long as it
2954 | * is safe. The stop_machine() is the safest, but also |
2955 | * produces the most overhead. |
2956 | */ |
2957 | arch_ftrace_update_code(command); |
2958 | |
2959 | ftrace_arch_code_modify_post_process(); |
2960 | } |
2961 | |
2962 | static void ftrace_run_modify_code(struct ftrace_ops *ops, int command, |
2963 | struct ftrace_ops_hash *old_hash) |
2964 | { |
2965 | ops->flags |= FTRACE_OPS_FL_MODIFYING; |
2966 | ops->old_hash.filter_hash = old_hash->filter_hash; |
2967 | ops->old_hash.notrace_hash = old_hash->notrace_hash; |
2968 | ftrace_run_update_code(command); |
2969 | ops->old_hash.filter_hash = NULL; |
2970 | ops->old_hash.notrace_hash = NULL; |
2971 | ops->flags &= ~FTRACE_OPS_FL_MODIFYING; |
2972 | } |
2973 | |
2974 | static ftrace_func_t saved_ftrace_func; |
2975 | static int ftrace_start_up; |
2976 | |
2977 | void __weak arch_ftrace_trampoline_free(struct ftrace_ops *ops) |
2978 | { |
2979 | } |
2980 | |
2981 | /* List of trace_ops that have allocated trampolines */ |
2982 | static LIST_HEAD(ftrace_ops_trampoline_list); |
2983 | |
2984 | static void ftrace_add_trampoline_to_kallsyms(struct ftrace_ops *ops) |
2985 | { |
2986 | lockdep_assert_held(&ftrace_lock); |
	list_add_rcu(&ops->list, &ftrace_ops_trampoline_list);
2988 | } |
2989 | |
2990 | static void ftrace_remove_trampoline_from_kallsyms(struct ftrace_ops *ops) |
2991 | { |
2992 | lockdep_assert_held(&ftrace_lock); |
	list_del_rcu(&ops->list);
2994 | synchronize_rcu(); |
2995 | } |
2996 | |
2997 | /* |
2998 | * "__builtin__ftrace" is used as a module name in /proc/kallsyms for symbols |
2999 | * for pages allocated for ftrace purposes, even though "__builtin__ftrace" is |
3000 | * not a module. |
3001 | */ |
3002 | #define FTRACE_TRAMPOLINE_MOD "__builtin__ftrace" |
3003 | #define FTRACE_TRAMPOLINE_SYM "ftrace_trampoline" |
3004 | |
3005 | static void ftrace_trampoline_free(struct ftrace_ops *ops) |
3006 | { |
3007 | if (ops && (ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP) && |
3008 | ops->trampoline) { |
3009 | /* |
3010 | * Record the text poke event before the ksymbol unregister |
3011 | * event. |
3012 | */ |
		perf_event_text_poke((void *)ops->trampoline,
				     (void *)ops->trampoline,
				     ops->trampoline_size, NULL, 0);
		perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL,
				   ops->trampoline, ops->trampoline_size,
				   true, FTRACE_TRAMPOLINE_SYM);
3019 | /* Remove from kallsyms after the perf events */ |
3020 | ftrace_remove_trampoline_from_kallsyms(ops); |
3021 | } |
3022 | |
3023 | arch_ftrace_trampoline_free(ops); |
3024 | } |
3025 | |
3026 | static void ftrace_startup_enable(int command) |
3027 | { |
3028 | if (saved_ftrace_func != ftrace_trace_function) { |
3029 | saved_ftrace_func = ftrace_trace_function; |
3030 | command |= FTRACE_UPDATE_TRACE_FUNC; |
3031 | } |
3032 | |
3033 | if (!command || !ftrace_enabled) |
3034 | return; |
3035 | |
3036 | ftrace_run_update_code(command); |
3037 | } |
3038 | |
3039 | static void ftrace_startup_all(int command) |
3040 | { |
3041 | update_all_ops = true; |
3042 | ftrace_startup_enable(command); |
3043 | update_all_ops = false; |
3044 | } |
3045 | |
3046 | int ftrace_startup(struct ftrace_ops *ops, int command) |
3047 | { |
3048 | int ret; |
3049 | |
3050 | if (unlikely(ftrace_disabled)) |
3051 | return -ENODEV; |
3052 | |
3053 | ret = __register_ftrace_function(ops); |
3054 | if (ret) |
3055 | return ret; |
3056 | |
3057 | ftrace_start_up++; |
3058 | |
3059 | /* |
	 * Note that ftrace probes use this to start up
3061 | * and modify functions it will probe. But we still |
3062 | * set the ADDING flag for modification, as probes |
3063 | * do not have trampolines. If they add them in the |
3064 | * future, then the probes will need to distinguish |
3065 | * between adding and updating probes. |
3066 | */ |
3067 | ops->flags |= FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_ADDING; |
3068 | |
3069 | ret = ftrace_hash_ipmodify_enable(ops); |
3070 | if (ret < 0) { |
3071 | /* Rollback registration process */ |
3072 | __unregister_ftrace_function(ops); |
3073 | ftrace_start_up--; |
3074 | ops->flags &= ~FTRACE_OPS_FL_ENABLED; |
3075 | if (ops->flags & FTRACE_OPS_FL_DYNAMIC) |
3076 | ftrace_trampoline_free(ops); |
3077 | return ret; |
3078 | } |
3079 | |
	if (ftrace_hash_rec_enable(ops, 1))
3081 | command |= FTRACE_UPDATE_CALLS; |
3082 | |
3083 | ftrace_startup_enable(command); |
3084 | |
3085 | /* |
	 * If ftrace is in an undefined state, we just remove ops from list
	 * to prevent the NULL pointer, instead of totally rolling it back and
	 * freeing the trampoline, because those actions could cause further damage.
3089 | */ |
3090 | if (unlikely(ftrace_disabled)) { |
3091 | __unregister_ftrace_function(ops); |
3092 | return -ENODEV; |
3093 | } |
3094 | |
3095 | ops->flags &= ~FTRACE_OPS_FL_ADDING; |
3096 | |
3097 | return 0; |
3098 | } |
3099 | |
3100 | int ftrace_shutdown(struct ftrace_ops *ops, int command) |
3101 | { |
3102 | int ret; |
3103 | |
3104 | if (unlikely(ftrace_disabled)) |
3105 | return -ENODEV; |
3106 | |
3107 | ret = __unregister_ftrace_function(ops); |
3108 | if (ret) |
3109 | return ret; |
3110 | |
3111 | ftrace_start_up--; |
3112 | /* |
	 * Just warn in case of unbalance; no need to kill ftrace, it's not
	 * critical, but the ftrace_call callers may never be nopped again after
	 * further ftrace uses.
3116 | */ |
3117 | WARN_ON_ONCE(ftrace_start_up < 0); |
3118 | |
3119 | /* Disabling ipmodify never fails */ |
3120 | ftrace_hash_ipmodify_disable(ops); |
3121 | |
	if (ftrace_hash_rec_disable(ops, 1))
3123 | command |= FTRACE_UPDATE_CALLS; |
3124 | |
3125 | ops->flags &= ~FTRACE_OPS_FL_ENABLED; |
3126 | |
3127 | if (saved_ftrace_func != ftrace_trace_function) { |
3128 | saved_ftrace_func = ftrace_trace_function; |
3129 | command |= FTRACE_UPDATE_TRACE_FUNC; |
3130 | } |
3131 | |
3132 | if (!command || !ftrace_enabled) |
3133 | goto out; |
3134 | |
3135 | /* |
3136 | * If the ops uses a trampoline, then it needs to be |
3137 | * tested first on update. |
3138 | */ |
3139 | ops->flags |= FTRACE_OPS_FL_REMOVING; |
3140 | removed_ops = ops; |
3141 | |
3142 | /* The trampoline logic checks the old hashes */ |
3143 | ops->old_hash.filter_hash = ops->func_hash->filter_hash; |
3144 | ops->old_hash.notrace_hash = ops->func_hash->notrace_hash; |
3145 | |
3146 | ftrace_run_update_code(command); |
3147 | |
3148 | /* |
	 * If there are no more ops registered with ftrace, run a
3150 | * sanity check to make sure all rec flags are cleared. |
3151 | */ |
3152 | if (rcu_dereference_protected(ftrace_ops_list, |
3153 | lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) { |
3154 | struct ftrace_page *pg; |
3155 | struct dyn_ftrace *rec; |
3156 | |
3157 | do_for_each_ftrace_rec(pg, rec) { |
3158 | if (FTRACE_WARN_ON_ONCE(rec->flags & ~FTRACE_NOCLEAR_FLAGS)) |
				pr_warn(" %pS flags:%lx\n",
3160 | (void *)rec->ip, rec->flags); |
3161 | } while_for_each_ftrace_rec(); |
3162 | } |
3163 | |
3164 | ops->old_hash.filter_hash = NULL; |
3165 | ops->old_hash.notrace_hash = NULL; |
3166 | |
3167 | removed_ops = NULL; |
3168 | ops->flags &= ~FTRACE_OPS_FL_REMOVING; |
3169 | |
3170 | out: |
3171 | /* |
	 * Dynamic ops may be freed; we must make sure that all
3173 | * callers are done before leaving this function. |
3174 | */ |
3175 | if (ops->flags & FTRACE_OPS_FL_DYNAMIC) { |
3176 | /* |
3177 | * We need to do a hard force of sched synchronization. |
3178 | * This is because we use preempt_disable() to do RCU, but |
3179 | * the function tracers can be called where RCU is not watching |
3180 | * (like before user_exit()). We can not rely on the RCU |
3181 | * infrastructure to do the synchronization, thus we must do it |
3182 | * ourselves. |
3183 | */ |
3184 | synchronize_rcu_tasks_rude(); |
3185 | |
3186 | /* |
3187 | * When the kernel is preemptive, tasks can be preempted |
3188 | * while on a ftrace trampoline. Just scheduling a task on |
3189 | * a CPU is not good enough to flush them. Calling |
3190 | * synchronize_rcu_tasks() will wait for those tasks to |
3191 | * execute and either schedule voluntarily or enter user space. |
3192 | */ |
3193 | if (IS_ENABLED(CONFIG_PREEMPTION)) |
3194 | synchronize_rcu_tasks(); |
3195 | |
3196 | ftrace_trampoline_free(ops); |
3197 | } |
3198 | |
3199 | return 0; |
3200 | } |
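
/*
 * Illustrative pairing, simplified from register_ftrace_function() and
 * unregister_ftrace_function() elsewhere in this file: startup and
 * shutdown are always called under ftrace_lock:
 *
 *	mutex_lock(&ftrace_lock);
 *	ret = ftrace_startup(ops, 0);
 *	mutex_unlock(&ftrace_lock);
 *	...
 *	mutex_lock(&ftrace_lock);
 *	ret = ftrace_shutdown(ops, 0);
 *	mutex_unlock(&ftrace_lock);
 */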
3201 | |
3202 | static u64 ftrace_update_time; |
3203 | unsigned long ftrace_update_tot_cnt; |
3204 | unsigned long ftrace_number_of_pages; |
3205 | unsigned long ftrace_number_of_groups; |
3206 | |
3207 | static inline int ops_traces_mod(struct ftrace_ops *ops) |
3208 | { |
3209 | /* |
	 * An empty filter_hash defaults to tracing the module.
	 * But the notrace hash requires a test of individual module functions.
3212 | */ |
	return ftrace_hash_empty(ops->func_hash->filter_hash) &&
		ftrace_hash_empty(ops->func_hash->notrace_hash);
3215 | } |
3216 | |
3217 | static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs) |
3218 | { |
3219 | bool init_nop = ftrace_need_init_nop(); |
3220 | struct ftrace_page *pg; |
3221 | struct dyn_ftrace *p; |
3222 | u64 start, stop; |
3223 | unsigned long update_cnt = 0; |
3224 | unsigned long rec_flags = 0; |
3225 | int i; |
3226 | |
3227 | start = ftrace_now(raw_smp_processor_id()); |
3228 | |
3229 | /* |
3230 | * When a module is loaded, this function is called to convert |
3231 | * the calls to mcount in its text to nops, and also to create |
3232 | * an entry in the ftrace data. Now, if ftrace is activated |
3233 | * after this call, but before the module sets its text to |
3234 | * read-only, the modification of enabling ftrace can fail if |
3235 | * the read-only is done while ftrace is converting the calls. |
3236 | * To prevent this, the module's records are set as disabled |
3237 | * and will be enabled after the call to set the module's text |
3238 | * to read-only. |
3239 | */ |
3240 | if (mod) |
3241 | rec_flags |= FTRACE_FL_DISABLED; |
3242 | |
3243 | for (pg = new_pgs; pg; pg = pg->next) { |
3244 | |
3245 | for (i = 0; i < pg->index; i++) { |
3246 | |
3247 | /* If something went wrong, bail without enabling anything */ |
3248 | if (unlikely(ftrace_disabled)) |
3249 | return -1; |
3250 | |
3251 | p = &pg->records[i]; |
3252 | p->flags = rec_flags; |
3253 | |
3254 | /* |
3255 | * Do the initial record conversion from mcount jump |
3256 | * to the NOP instructions. |
3257 | */ |
			if (init_nop && !ftrace_nop_initialize(mod, p))
3259 | break; |
3260 | |
3261 | update_cnt++; |
3262 | } |
3263 | } |
3264 | |
3265 | stop = ftrace_now(raw_smp_processor_id()); |
3266 | ftrace_update_time = stop - start; |
3267 | ftrace_update_tot_cnt += update_cnt; |
3268 | |
3269 | return 0; |
3270 | } |
3271 | |
3272 | static int ftrace_allocate_records(struct ftrace_page *pg, int count) |
3273 | { |
3274 | int order; |
3275 | int pages; |
3276 | int cnt; |
3277 | |
3278 | if (WARN_ON(!count)) |
3279 | return -EINVAL; |
3280 | |
3281 | /* We want to fill as much as possible, with no empty pages */ |
3282 | pages = DIV_ROUND_UP(count, ENTRIES_PER_PAGE); |
	order = fls(pages) - 1;
3284 | |
3285 | again: |
3286 | pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order); |
3287 | |
3288 | if (!pg->records) { |
3289 | /* if we can't allocate this size, try something smaller */ |
3290 | if (!order) |
3291 | return -ENOMEM; |
3292 | order--; |
3293 | goto again; |
3294 | } |
3295 | |
3296 | ftrace_number_of_pages += 1 << order; |
3297 | ftrace_number_of_groups++; |
3298 | |
3299 | cnt = (PAGE_SIZE << order) / ENTRY_SIZE; |
3300 | pg->order = order; |
3301 | |
3302 | if (cnt > count) |
3303 | cnt = count; |
3304 | |
3305 | return cnt; |
3306 | } |
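
/*
 * Worked example of the sizing above (assuming 4K pages and a 32-byte
 * ENTRY_SIZE, so ENTRIES_PER_PAGE == 128; both values are illustrative):
 * for count == 1000, pages == DIV_ROUND_UP(1000, 128) == 8, so
 * order == fls(8) - 1 == 3, and the order-3 allocation holds
 * (4096 << 3) / 32 == 1024 records, clamped to the requested 1000.
 */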
3307 | |
3308 | static void ftrace_free_pages(struct ftrace_page *pages) |
3309 | { |
3310 | struct ftrace_page *pg = pages; |
3311 | |
3312 | while (pg) { |
3313 | if (pg->records) { |
			free_pages((unsigned long)pg->records, pg->order);
			ftrace_number_of_pages -= 1 << pg->order;
		}
		pages = pg->next;
		kfree(pg);
3319 | pg = pages; |
3320 | ftrace_number_of_groups--; |
3321 | } |
3322 | } |
3323 | |
3324 | static struct ftrace_page * |
3325 | ftrace_allocate_pages(unsigned long num_to_init) |
3326 | { |
3327 | struct ftrace_page *start_pg; |
3328 | struct ftrace_page *pg; |
3329 | int cnt; |
3330 | |
3331 | if (!num_to_init) |
3332 | return NULL; |
3333 | |
	start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL);
3335 | if (!pg) |
3336 | return NULL; |
3337 | |
3338 | /* |
	 * Try to allocate as much as possible in one contiguous
3340 | * location that fills in all of the space. We want to |
3341 | * waste as little space as possible. |
3342 | */ |
3343 | for (;;) { |
		cnt = ftrace_allocate_records(pg, num_to_init);
3345 | if (cnt < 0) |
3346 | goto free_pages; |
3347 | |
3348 | num_to_init -= cnt; |
3349 | if (!num_to_init) |
3350 | break; |
3351 | |
		pg->next = kzalloc(sizeof(*pg), GFP_KERNEL);
3353 | if (!pg->next) |
3354 | goto free_pages; |
3355 | |
3356 | pg = pg->next; |
3357 | } |
3358 | |
3359 | return start_pg; |
3360 | |
3361 | free_pages: |
	ftrace_free_pages(start_pg);
	pr_info("ftrace: FAILED to allocate memory for functions\n");
3364 | return NULL; |
3365 | } |
3366 | |
3367 | #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */ |
3368 | |
3369 | struct ftrace_iterator { |
3370 | loff_t pos; |
3371 | loff_t func_pos; |
3372 | loff_t mod_pos; |
3373 | struct ftrace_page *pg; |
3374 | struct dyn_ftrace *func; |
3375 | struct ftrace_func_probe *probe; |
3376 | struct ftrace_func_entry *probe_entry; |
3377 | struct trace_parser parser; |
3378 | struct ftrace_hash *hash; |
3379 | struct ftrace_ops *ops; |
3380 | struct trace_array *tr; |
3381 | struct list_head *mod_list; |
3382 | int pidx; |
3383 | int idx; |
3384 | unsigned flags; |
3385 | }; |
3386 | |
3387 | static void * |
3388 | t_probe_next(struct seq_file *m, loff_t *pos) |
3389 | { |
3390 | struct ftrace_iterator *iter = m->private; |
3391 | struct trace_array *tr = iter->ops->private; |
3392 | struct list_head *func_probes; |
3393 | struct ftrace_hash *hash; |
3394 | struct list_head *next; |
3395 | struct hlist_node *hnd = NULL; |
3396 | struct hlist_head *hhd; |
3397 | int size; |
3398 | |
3399 | (*pos)++; |
3400 | iter->pos = *pos; |
3401 | |
3402 | if (!tr) |
3403 | return NULL; |
3404 | |
3405 | func_probes = &tr->func_probes; |
	if (list_empty(func_probes))
3407 | return NULL; |
3408 | |
3409 | if (!iter->probe) { |
3410 | next = func_probes->next; |
3411 | iter->probe = list_entry(next, struct ftrace_func_probe, list); |
3412 | } |
3413 | |
3414 | if (iter->probe_entry) |
3415 | hnd = &iter->probe_entry->hlist; |
3416 | |
3417 | hash = iter->probe->ops.func_hash->filter_hash; |
3418 | |
3419 | /* |
3420 | * A probe being registered may temporarily have an empty hash |
3421 | * and it's at the end of the func_probes list. |
3422 | */ |
3423 | if (!hash || hash == EMPTY_HASH) |
3424 | return NULL; |
3425 | |
3426 | size = 1 << hash->size_bits; |
3427 | |
3428 | retry: |
3429 | if (iter->pidx >= size) { |
3430 | if (iter->probe->list.next == func_probes) |
3431 | return NULL; |
3432 | next = iter->probe->list.next; |
3433 | iter->probe = list_entry(next, struct ftrace_func_probe, list); |
3434 | hash = iter->probe->ops.func_hash->filter_hash; |
3435 | size = 1 << hash->size_bits; |
3436 | iter->pidx = 0; |
3437 | } |
3438 | |
3439 | hhd = &hash->buckets[iter->pidx]; |
3440 | |
	if (hlist_empty(hhd)) {
3442 | iter->pidx++; |
3443 | hnd = NULL; |
3444 | goto retry; |
3445 | } |
3446 | |
3447 | if (!hnd) |
3448 | hnd = hhd->first; |
3449 | else { |
3450 | hnd = hnd->next; |
3451 | if (!hnd) { |
3452 | iter->pidx++; |
3453 | goto retry; |
3454 | } |
3455 | } |
3456 | |
3457 | if (WARN_ON_ONCE(!hnd)) |
3458 | return NULL; |
3459 | |
3460 | iter->probe_entry = hlist_entry(hnd, struct ftrace_func_entry, hlist); |
3461 | |
3462 | return iter; |
3463 | } |
3464 | |
3465 | static void *t_probe_start(struct seq_file *m, loff_t *pos) |
3466 | { |
3467 | struct ftrace_iterator *iter = m->private; |
3468 | void *p = NULL; |
3469 | loff_t l; |
3470 | |
3471 | if (!(iter->flags & FTRACE_ITER_DO_PROBES)) |
3472 | return NULL; |
3473 | |
3474 | if (iter->mod_pos > *pos) |
3475 | return NULL; |
3476 | |
3477 | iter->probe = NULL; |
3478 | iter->probe_entry = NULL; |
3479 | iter->pidx = 0; |
3480 | for (l = 0; l <= (*pos - iter->mod_pos); ) { |
		p = t_probe_next(m, &l);
3482 | if (!p) |
3483 | break; |
3484 | } |
3485 | if (!p) |
3486 | return NULL; |
3487 | |
3488 | /* Only set this if we have an item */ |
3489 | iter->flags |= FTRACE_ITER_PROBE; |
3490 | |
3491 | return iter; |
3492 | } |
3493 | |
3494 | static int |
3495 | t_probe_show(struct seq_file *m, struct ftrace_iterator *iter) |
3496 | { |
3497 | struct ftrace_func_entry *probe_entry; |
3498 | struct ftrace_probe_ops *probe_ops; |
3499 | struct ftrace_func_probe *probe; |
3500 | |
3501 | probe = iter->probe; |
3502 | probe_entry = iter->probe_entry; |
3503 | |
3504 | if (WARN_ON_ONCE(!probe || !probe_entry)) |
3505 | return -EIO; |
3506 | |
3507 | probe_ops = probe->probe_ops; |
3508 | |
3509 | if (probe_ops->print) |
3510 | return probe_ops->print(m, probe_entry->ip, probe_ops, probe->data); |
3511 | |
	seq_printf(m, "%ps:%ps\n", (void *)probe_entry->ip,
		   (void *)probe_ops->func);
3514 | |
3515 | return 0; |
3516 | } |
3517 | |
3518 | static void * |
3519 | t_mod_next(struct seq_file *m, loff_t *pos) |
3520 | { |
3521 | struct ftrace_iterator *iter = m->private; |
3522 | struct trace_array *tr = iter->tr; |
3523 | |
3524 | (*pos)++; |
3525 | iter->pos = *pos; |
3526 | |
3527 | iter->mod_list = iter->mod_list->next; |
3528 | |
3529 | if (iter->mod_list == &tr->mod_trace || |
3530 | iter->mod_list == &tr->mod_notrace) { |
3531 | iter->flags &= ~FTRACE_ITER_MOD; |
3532 | return NULL; |
3533 | } |
3534 | |
3535 | iter->mod_pos = *pos; |
3536 | |
3537 | return iter; |
3538 | } |
3539 | |
3540 | static void *t_mod_start(struct seq_file *m, loff_t *pos) |
3541 | { |
3542 | struct ftrace_iterator *iter = m->private; |
3543 | void *p = NULL; |
3544 | loff_t l; |
3545 | |
3546 | if (iter->func_pos > *pos) |
3547 | return NULL; |
3548 | |
3549 | iter->mod_pos = iter->func_pos; |
3550 | |
3551 | /* probes are only available if tr is set */ |
3552 | if (!iter->tr) |
3553 | return NULL; |
3554 | |
3555 | for (l = 0; l <= (*pos - iter->func_pos); ) { |
		p = t_mod_next(m, &l);
3557 | if (!p) |
3558 | break; |
3559 | } |
3560 | if (!p) { |
3561 | iter->flags &= ~FTRACE_ITER_MOD; |
3562 | return t_probe_start(m, pos); |
3563 | } |
3564 | |
3565 | /* Only set this if we have an item */ |
3566 | iter->flags |= FTRACE_ITER_MOD; |
3567 | |
3568 | return iter; |
3569 | } |
3570 | |
3571 | static int |
3572 | t_mod_show(struct seq_file *m, struct ftrace_iterator *iter) |
3573 | { |
3574 | struct ftrace_mod_load *ftrace_mod; |
3575 | struct trace_array *tr = iter->tr; |
3576 | |
3577 | if (WARN_ON_ONCE(!iter->mod_list) || |
3578 | iter->mod_list == &tr->mod_trace || |
3579 | iter->mod_list == &tr->mod_notrace) |
3580 | return -EIO; |
3581 | |
3582 | ftrace_mod = list_entry(iter->mod_list, struct ftrace_mod_load, list); |
3583 | |
3584 | if (ftrace_mod->func) |
		seq_printf(m, "%s", ftrace_mod->func);
	else
		seq_putc(m, '*');

	seq_printf(m, ":mod:%s\n", ftrace_mod->module);
3590 | |
3591 | return 0; |
3592 | } |
3593 | |
3594 | static void * |
3595 | t_func_next(struct seq_file *m, loff_t *pos) |
3596 | { |
3597 | struct ftrace_iterator *iter = m->private; |
3598 | struct dyn_ftrace *rec = NULL; |
3599 | |
3600 | (*pos)++; |
3601 | |
3602 | retry: |
3603 | if (iter->idx >= iter->pg->index) { |
3604 | if (iter->pg->next) { |
3605 | iter->pg = iter->pg->next; |
3606 | iter->idx = 0; |
3607 | goto retry; |
3608 | } |
3609 | } else { |
3610 | rec = &iter->pg->records[iter->idx++]; |
3611 | if (((iter->flags & (FTRACE_ITER_FILTER | FTRACE_ITER_NOTRACE)) && |
		     !ftrace_lookup_ip(iter->hash, rec->ip)) ||
3613 | |
3614 | ((iter->flags & FTRACE_ITER_ENABLED) && |
3615 | !(rec->flags & FTRACE_FL_ENABLED)) || |
3616 | |
3617 | ((iter->flags & FTRACE_ITER_TOUCHED) && |
3618 | !(rec->flags & FTRACE_FL_TOUCHED))) { |
3619 | |
3620 | rec = NULL; |
3621 | goto retry; |
3622 | } |
3623 | } |
3624 | |
3625 | if (!rec) |
3626 | return NULL; |
3627 | |
3628 | iter->pos = iter->func_pos = *pos; |
3629 | iter->func = rec; |
3630 | |
3631 | return iter; |
3632 | } |
3633 | |
3634 | static void * |
3635 | t_next(struct seq_file *m, void *v, loff_t *pos) |
3636 | { |
3637 | struct ftrace_iterator *iter = m->private; |
3638 | loff_t l = *pos; /* t_probe_start() must use original pos */ |
3639 | void *ret; |
3640 | |
3641 | if (unlikely(ftrace_disabled)) |
3642 | return NULL; |
3643 | |
3644 | if (iter->flags & FTRACE_ITER_PROBE) |
3645 | return t_probe_next(m, pos); |
3646 | |
3647 | if (iter->flags & FTRACE_ITER_MOD) |
3648 | return t_mod_next(m, pos); |
3649 | |
3650 | if (iter->flags & FTRACE_ITER_PRINTALL) { |
3651 | /* next must increment pos, and t_probe_start does not */ |
3652 | (*pos)++; |
		return t_mod_start(m, &l);
3654 | } |
3655 | |
3656 | ret = t_func_next(m, pos); |
3657 | |
3658 | if (!ret) |
		return t_mod_start(m, &l);
3660 | |
3661 | return ret; |
3662 | } |
3663 | |
3664 | static void reset_iter_read(struct ftrace_iterator *iter) |
3665 | { |
3666 | iter->pos = 0; |
3667 | iter->func_pos = 0; |
3668 | iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_PROBE | FTRACE_ITER_MOD); |
3669 | } |
3670 | |
3671 | static void *t_start(struct seq_file *m, loff_t *pos) |
3672 | { |
3673 | struct ftrace_iterator *iter = m->private; |
3674 | void *p = NULL; |
3675 | loff_t l; |
3676 | |
3677 | mutex_lock(&ftrace_lock); |
3678 | |
3679 | if (unlikely(ftrace_disabled)) |
3680 | return NULL; |
3681 | |
3682 | /* |
3683 | * If an lseek was done, then reset and start from beginning. |
3684 | */ |
3685 | if (*pos < iter->pos) |
3686 | reset_iter_read(iter); |
3687 | |
3688 | /* |
3689 | * For set_ftrace_filter reading, if we have the filter |
3690 | * off, we can short cut and just print out that all |
3691 | * functions are enabled. |
3692 | */ |
3693 | if ((iter->flags & (FTRACE_ITER_FILTER | FTRACE_ITER_NOTRACE)) && |
	    ftrace_hash_empty(iter->hash)) {
3695 | iter->func_pos = 1; /* Account for the message */ |
3696 | if (*pos > 0) |
3697 | return t_mod_start(m, pos); |
3698 | iter->flags |= FTRACE_ITER_PRINTALL; |
3699 | /* reset in case of seek/pread */ |
3700 | iter->flags &= ~FTRACE_ITER_PROBE; |
3701 | return iter; |
3702 | } |
3703 | |
3704 | if (iter->flags & FTRACE_ITER_MOD) |
3705 | return t_mod_start(m, pos); |
3706 | |
3707 | /* |
3708 | * Unfortunately, we need to restart at ftrace_pages_start |
	 * every time we let go of the ftrace_lock. This is because
3710 | * those pointers can change without the lock. |
3711 | */ |
3712 | iter->pg = ftrace_pages_start; |
3713 | iter->idx = 0; |
3714 | for (l = 0; l <= *pos; ) { |
		p = t_func_next(m, &l);
3716 | if (!p) |
3717 | break; |
3718 | } |
3719 | |
3720 | if (!p) |
3721 | return t_mod_start(m, pos); |
3722 | |
3723 | return iter; |
3724 | } |
3725 | |
3726 | static void t_stop(struct seq_file *m, void *p) |
3727 | { |
	mutex_unlock(&ftrace_lock);
3729 | } |
3730 | |
3731 | void * __weak |
3732 | arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec) |
3733 | { |
3734 | return NULL; |
3735 | } |
3736 | |
3737 | static void add_trampoline_func(struct seq_file *m, struct ftrace_ops *ops, |
3738 | struct dyn_ftrace *rec) |
3739 | { |
3740 | void *ptr; |
3741 | |
3742 | ptr = arch_ftrace_trampoline_func(ops, rec); |
3743 | if (ptr) |
		seq_printf(m, " ->%pS", ptr);
3745 | } |
3746 | |
3747 | #ifdef FTRACE_MCOUNT_MAX_OFFSET |
3748 | /* |
3749 | * Weak functions can still have an mcount/fentry that is saved in |
3750 | * the __mcount_loc section. These can be detected by having a |
 * symbol offset greater than FTRACE_MCOUNT_MAX_OFFSET, as the
3752 | * symbol found by kallsyms is not the function that the mcount/fentry |
3753 | * is part of. The offset is much greater in these cases. |
3754 | * |
3755 | * Test the record to make sure that the ip points to a valid kallsyms |
3756 | * and if not, mark it disabled. |
3757 | */ |
3758 | static int test_for_valid_rec(struct dyn_ftrace *rec) |
3759 | { |
3760 | char str[KSYM_SYMBOL_LEN]; |
3761 | unsigned long offset; |
3762 | const char *ret; |
3763 | |
	ret = kallsyms_lookup(rec->ip, NULL, &offset, NULL, str);
3765 | |
3766 | /* Weak functions can cause invalid addresses */ |
3767 | if (!ret || offset > FTRACE_MCOUNT_MAX_OFFSET) { |
3768 | rec->flags |= FTRACE_FL_DISABLED; |
3769 | return 0; |
3770 | } |
3771 | return 1; |
3772 | } |
3773 | |
3774 | static struct workqueue_struct *ftrace_check_wq __initdata; |
3775 | static struct work_struct ftrace_check_work __initdata; |
3776 | |
3777 | /* |
3778 | * Scan all the mcount/fentry entries to make sure they are valid. |
3779 | */ |
3780 | static __init void ftrace_check_work_func(struct work_struct *work) |
3781 | { |
3782 | struct ftrace_page *pg; |
3783 | struct dyn_ftrace *rec; |
3784 | |
3785 | mutex_lock(&ftrace_lock); |
3786 | do_for_each_ftrace_rec(pg, rec) { |
3787 | test_for_valid_rec(rec); |
3788 | } while_for_each_ftrace_rec(); |
	mutex_unlock(&ftrace_lock);
3790 | } |
3791 | |
3792 | static int __init ftrace_check_for_weak_functions(void) |
3793 | { |
3794 | INIT_WORK(&ftrace_check_work, ftrace_check_work_func); |
3795 | |
	ftrace_check_wq = alloc_workqueue("ftrace_check_wq", WQ_UNBOUND, 0);

	queue_work(ftrace_check_wq, &ftrace_check_work);
3799 | return 0; |
3800 | } |
3801 | |
3802 | static int __init ftrace_check_sync(void) |
3803 | { |
3804 | /* Make sure the ftrace_check updates are finished */ |
3805 | if (ftrace_check_wq) |
		destroy_workqueue(ftrace_check_wq);
3807 | return 0; |
3808 | } |
3809 | |
3810 | late_initcall_sync(ftrace_check_sync); |
3811 | subsys_initcall(ftrace_check_for_weak_functions); |
3812 | |
3813 | static int print_rec(struct seq_file *m, unsigned long ip) |
3814 | { |
3815 | unsigned long offset; |
3816 | char str[KSYM_SYMBOL_LEN]; |
3817 | char *modname; |
3818 | const char *ret; |
3819 | |
	ret = kallsyms_lookup(ip, NULL, &offset, &modname, str);
	/* Weak functions can cause invalid addresses */
	if (!ret || offset > FTRACE_MCOUNT_MAX_OFFSET) {
		snprintf(str, KSYM_SYMBOL_LEN, "%s_%ld",
			 FTRACE_INVALID_FUNCTION, offset);
3825 | ret = NULL; |
3826 | } |
3827 | |
	seq_puts(m, str);
	if (modname)
		seq_printf(m, " [%s]", modname);
3831 | return ret == NULL ? -1 : 0; |
3832 | } |
3833 | #else |
3834 | static inline int test_for_valid_rec(struct dyn_ftrace *rec) |
3835 | { |
3836 | return 1; |
3837 | } |
3838 | |
3839 | static inline int print_rec(struct seq_file *m, unsigned long ip) |
3840 | { |
	seq_printf(m, "%ps", (void *)ip);
3842 | return 0; |
3843 | } |
3844 | #endif |
3845 | |
3846 | static int t_show(struct seq_file *m, void *v) |
3847 | { |
3848 | struct ftrace_iterator *iter = m->private; |
3849 | struct dyn_ftrace *rec; |
3850 | |
3851 | if (iter->flags & FTRACE_ITER_PROBE) |
3852 | return t_probe_show(m, iter); |
3853 | |
3854 | if (iter->flags & FTRACE_ITER_MOD) |
3855 | return t_mod_show(m, iter); |
3856 | |
3857 | if (iter->flags & FTRACE_ITER_PRINTALL) { |
3858 | if (iter->flags & FTRACE_ITER_NOTRACE) |
			seq_puts(m, "#### no functions disabled ####\n");
		else
			seq_puts(m, "#### all functions enabled ####\n");
3862 | return 0; |
3863 | } |
3864 | |
3865 | rec = iter->func; |
3866 | |
3867 | if (!rec) |
3868 | return 0; |
3869 | |
3870 | if (iter->flags & FTRACE_ITER_ADDRS) |
		seq_printf(m, "%lx ", rec->ip);

	if (print_rec(m, rec->ip)) {
		/* This should only happen when a rec is disabled */
		WARN_ON_ONCE(!(rec->flags & FTRACE_FL_DISABLED));
		seq_putc(m, '\n');
3877 | return 0; |
3878 | } |
3879 | |
3880 | if (iter->flags & (FTRACE_ITER_ENABLED | FTRACE_ITER_TOUCHED)) { |
3881 | struct ftrace_ops *ops; |
3882 | |
		seq_printf(m, " (%ld)%s%s%s%s%s",
			   ftrace_rec_count(rec),
			   rec->flags & FTRACE_FL_REGS ? " R" : "  ",
			   rec->flags & FTRACE_FL_IPMODIFY ? " I" : "  ",
			   rec->flags & FTRACE_FL_DIRECT ? " D" : "  ",
			   rec->flags & FTRACE_FL_CALL_OPS ? " O" : "  ",
			   rec->flags & FTRACE_FL_MODIFIED ? " M " : "   ");
3890 | if (rec->flags & FTRACE_FL_TRAMP_EN) { |
3891 | ops = ftrace_find_tramp_ops_any(rec); |
3892 | if (ops) { |
3893 | do { |
					seq_printf(m, "\ttramp: %pS (%pS)",
						   (void *)ops->trampoline,
						   (void *)ops->func);
					add_trampoline_func(m, ops, rec);
					ops = ftrace_find_tramp_ops_next(rec, ops);
				} while (ops);
			} else
				seq_puts(m, "\ttramp: ERROR!");
3902 | } else { |
3903 | add_trampoline_func(m, NULL, rec); |
3904 | } |
3905 | if (rec->flags & FTRACE_FL_CALL_OPS_EN) { |
3906 | ops = ftrace_find_unique_ops(rec); |
3907 | if (ops) { |
				seq_printf(m, "\tops: %pS (%pS)",
					   ops, ops->func);
			} else {
				seq_puts(m, "\tops: ERROR!");
3912 | } |
3913 | } |
3914 | if (rec->flags & FTRACE_FL_DIRECT) { |
3915 | unsigned long direct; |
3916 | |
			direct = ftrace_find_rec_direct(rec->ip);
			if (direct)
				seq_printf(m, "\n\tdirect-->%pS", (void *)direct);
3920 | } |
3921 | } |
3922 | |
	seq_putc(m, '\n');
3924 | |
3925 | return 0; |
3926 | } |
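/*
 * An illustrative (not verbatim) sample of what t_show() produces for
 * an entry in the enabled_functions file:
 *
 *	schedule (1) R			tramp: 0xffffffffa0201000 (my_tracer_func+0x0/0x40)
 *
 * "(1)" is the number of ops attached to the record, and the one-letter
 * flags correspond to REGS, IPMODIFY, DIRECT, CALL_OPS and MODIFIED as
 * printed above. The address and "my_tracer_func" are made up.
 */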
3927 | |
3928 | static const struct seq_operations show_ftrace_seq_ops = { |
3929 | .start = t_start, |
3930 | .next = t_next, |
3931 | .stop = t_stop, |
3932 | .show = t_show, |
3933 | }; |
3934 | |
3935 | static int |
3936 | ftrace_avail_open(struct inode *inode, struct file *file) |
3937 | { |
3938 | struct ftrace_iterator *iter; |
3939 | int ret; |
3940 | |
	ret = security_locked_down(LOCKDOWN_TRACEFS);
3942 | if (ret) |
3943 | return ret; |
3944 | |
3945 | if (unlikely(ftrace_disabled)) |
3946 | return -ENODEV; |
3947 | |
3948 | iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter)); |
3949 | if (!iter) |
3950 | return -ENOMEM; |
3951 | |
3952 | iter->pg = ftrace_pages_start; |
3953 | iter->ops = &global_ops; |
3954 | |
3955 | return 0; |
3956 | } |
3957 | |
3958 | static int |
3959 | ftrace_enabled_open(struct inode *inode, struct file *file) |
3960 | { |
3961 | struct ftrace_iterator *iter; |
3962 | |
3963 | /* |
3964 | * This shows us what functions are currently being |
3965 | * traced and by what. Not sure if we want lockdown |
	 * traced and by what. Not sure if we want lockdown
3967 | * Although, perhaps it can show information we don't |
3968 | * want people to see, but if something is tracing |
3969 | * something, we probably want to know about it. |
3970 | */ |
3971 | |
3972 | iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter)); |
3973 | if (!iter) |
3974 | return -ENOMEM; |
3975 | |
3976 | iter->pg = ftrace_pages_start; |
3977 | iter->flags = FTRACE_ITER_ENABLED; |
3978 | iter->ops = &global_ops; |
3979 | |
3980 | return 0; |
3981 | } |
3982 | |
3983 | static int |
3984 | ftrace_touched_open(struct inode *inode, struct file *file) |
3985 | { |
3986 | struct ftrace_iterator *iter; |
3987 | |
3988 | /* |
3989 | * This shows us what functions have ever been enabled |
3990 | * (traced, direct, patched, etc). Not sure if we want lockdown |
	 * (traced, direct, patched, etc). Not sure if we want lockdown
3992 | * Although, perhaps it can show information we don't |
3993 | * want people to see, but if something had traced |
3994 | * something, we probably want to know about it. |
3995 | */ |
3996 | |
3997 | iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter)); |
3998 | if (!iter) |
3999 | return -ENOMEM; |
4000 | |
4001 | iter->pg = ftrace_pages_start; |
4002 | iter->flags = FTRACE_ITER_TOUCHED; |
4003 | iter->ops = &global_ops; |
4004 | |
4005 | return 0; |
4006 | } |
4007 | |
4008 | static int |
4009 | ftrace_avail_addrs_open(struct inode *inode, struct file *file) |
4010 | { |
4011 | struct ftrace_iterator *iter; |
4012 | int ret; |
4013 | |
	ret = security_locked_down(LOCKDOWN_TRACEFS);
4015 | if (ret) |
4016 | return ret; |
4017 | |
4018 | if (unlikely(ftrace_disabled)) |
4019 | return -ENODEV; |
4020 | |
4021 | iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter)); |
4022 | if (!iter) |
4023 | return -ENOMEM; |
4024 | |
4025 | iter->pg = ftrace_pages_start; |
4026 | iter->flags = FTRACE_ITER_ADDRS; |
4027 | iter->ops = &global_ops; |
4028 | |
4029 | return 0; |
4030 | } |
4031 | |
4032 | /** |
4033 | * ftrace_regex_open - initialize function tracer filter files |
4034 | * @ops: The ftrace_ops that hold the hash filters |
4035 | * @flag: The type of filter to process |
4036 | * @inode: The inode, usually passed in to your open routine |
4037 | * @file: The file, usually passed in to your open routine |
4038 | * |
4039 | * ftrace_regex_open() initializes the filter files for the |
4040 | * @ops. Depending on @flag it may process the filter hash or |
4041 | * the notrace hash of @ops. With this called from the open |
4042 | * routine, you can use ftrace_filter_write() for the write |
4043 | * routine if @flag has FTRACE_ITER_FILTER set, or |
4044 | * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set. |
4045 | * tracing_lseek() should be used as the lseek routine, and |
4046 | * release must call ftrace_regex_release(). |
4047 | */ |
4048 | int |
4049 | ftrace_regex_open(struct ftrace_ops *ops, int flag, |
4050 | struct inode *inode, struct file *file) |
4051 | { |
4052 | struct ftrace_iterator *iter; |
4053 | struct ftrace_hash *hash; |
4054 | struct list_head *mod_head; |
4055 | struct trace_array *tr = ops->private; |
4056 | int ret = -ENOMEM; |
4057 | |
4058 | ftrace_ops_init(ops); |
4059 | |
4060 | if (unlikely(ftrace_disabled)) |
4061 | return -ENODEV; |
4062 | |
4063 | if (tracing_check_open_get_tr(tr)) |
4064 | return -ENODEV; |
4065 | |
	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
4067 | if (!iter) |
4068 | goto out; |
4069 | |
	if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX))
4071 | goto out; |
4072 | |
4073 | iter->ops = ops; |
4074 | iter->flags = flag; |
4075 | iter->tr = tr; |
4076 | |
4077 | mutex_lock(&ops->func_hash->regex_lock); |
4078 | |
4079 | if (flag & FTRACE_ITER_NOTRACE) { |
4080 | hash = ops->func_hash->notrace_hash; |
4081 | mod_head = tr ? &tr->mod_notrace : NULL; |
4082 | } else { |
4083 | hash = ops->func_hash->filter_hash; |
4084 | mod_head = tr ? &tr->mod_trace : NULL; |
4085 | } |
4086 | |
4087 | iter->mod_list = mod_head; |
4088 | |
4089 | if (file->f_mode & FMODE_WRITE) { |
4090 | const int size_bits = FTRACE_HASH_DEFAULT_BITS; |
4091 | |
4092 | if (file->f_flags & O_TRUNC) { |
4093 | iter->hash = alloc_ftrace_hash(size_bits); |
			clear_ftrace_mod_list(mod_head);
4095 | } else { |
4096 | iter->hash = alloc_and_copy_ftrace_hash(size_bits, hash); |
4097 | } |
4098 | |
4099 | if (!iter->hash) { |
			trace_parser_put(&iter->parser);
4101 | goto out_unlock; |
4102 | } |
4103 | } else |
4104 | iter->hash = hash; |
4105 | |
4106 | ret = 0; |
4107 | |
4108 | if (file->f_mode & FMODE_READ) { |
4109 | iter->pg = ftrace_pages_start; |
4110 | |
4111 | ret = seq_open(file, &show_ftrace_seq_ops); |
4112 | if (!ret) { |
4113 | struct seq_file *m = file->private_data; |
4114 | m->private = iter; |
4115 | } else { |
4116 | /* Failed */ |
			free_ftrace_hash(iter->hash);
			trace_parser_put(&iter->parser);
4119 | } |
4120 | } else |
4121 | file->private_data = iter; |
4122 | |
4123 | out_unlock: |
	mutex_unlock(&ops->func_hash->regex_lock);
4125 | |
4126 | out: |
4127 | if (ret) { |
		kfree(iter);
4129 | if (tr) |
4130 | trace_array_put(tr); |
4131 | } |
4132 | |
4133 | return ret; |
4134 | } |
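/*
 * A minimal sketch (not wired into the build) of how a caller would use
 * ftrace_regex_open() from its own open routine, following the
 * kernel-doc above. "my_ops" is a hypothetical ftrace_ops owned by the
 * caller:
 *
 *	static int my_filter_open(struct inode *inode, struct file *file)
 *	{
 *		return ftrace_regex_open(&my_ops, FTRACE_ITER_FILTER,
 *					 inode, file);
 *	}
 *
 *	static const struct file_operations my_filter_fops = {
 *		.open		= my_filter_open,
 *		.read		= seq_read,
 *		.write		= ftrace_filter_write,
 *		.llseek		= tracing_lseek,
 *		.release	= ftrace_regex_release,
 *	};
 */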
4135 | |
4136 | static int |
4137 | ftrace_filter_open(struct inode *inode, struct file *file) |
4138 | { |
4139 | struct ftrace_ops *ops = inode->i_private; |
4140 | |
4141 | /* Checks for tracefs lockdown */ |
4142 | return ftrace_regex_open(ops, |
			FTRACE_ITER_FILTER | FTRACE_ITER_DO_PROBES,
4144 | inode, file); |
4145 | } |
4146 | |
4147 | static int |
4148 | ftrace_notrace_open(struct inode *inode, struct file *file) |
4149 | { |
4150 | struct ftrace_ops *ops = inode->i_private; |
4151 | |
4152 | /* Checks for tracefs lockdown */ |
	return ftrace_regex_open(ops, FTRACE_ITER_NOTRACE,
4154 | inode, file); |
4155 | } |
4156 | |
4157 | /* Type for quick search ftrace basic regexes (globs) from filter_parse_regex */ |
4158 | struct ftrace_glob { |
4159 | char *search; |
4160 | unsigned len; |
4161 | int type; |
4162 | }; |
4163 | |
4164 | /* |
4165 | * If symbols in an architecture don't correspond exactly to the user-visible |
4166 | * name of what they represent, it is possible to define this function to |
4167 | * perform the necessary adjustments. |
4168 | */ |
4169 | char * __weak arch_ftrace_match_adjust(char *str, const char *search) |
4170 | { |
4171 | return str; |
4172 | } |
4173 | |
4174 | static int ftrace_match(char *str, struct ftrace_glob *g) |
4175 | { |
4176 | int matched = 0; |
4177 | int slen; |
4178 | |
	str = arch_ftrace_match_adjust(str, g->search);
4180 | |
4181 | switch (g->type) { |
4182 | case MATCH_FULL: |
4183 | if (strcmp(str, g->search) == 0) |
4184 | matched = 1; |
4185 | break; |
4186 | case MATCH_FRONT_ONLY: |
4187 | if (strncmp(str, g->search, g->len) == 0) |
4188 | matched = 1; |
4189 | break; |
4190 | case MATCH_MIDDLE_ONLY: |
4191 | if (strstr(str, g->search)) |
4192 | matched = 1; |
4193 | break; |
4194 | case MATCH_END_ONLY: |
4195 | slen = strlen(str); |
4196 | if (slen >= g->len && |
		    memcmp(str + slen - g->len, g->search, g->len) == 0)
4198 | matched = 1; |
4199 | break; |
4200 | case MATCH_GLOB: |
		if (glob_match(g->search, str))
4202 | matched = 1; |
4203 | break; |
4204 | } |
4205 | |
4206 | return matched; |
4207 | } |
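/*
 * Example of how this pairs with filter_parse_regex() (a sketch of the
 * expected behavior): for the user glob "sched_*", filter_parse_regex()
 * returns MATCH_FRONT_ONLY with the search string set to "sched_", so:
 *
 *	struct ftrace_glob g;
 *	char buf[] = "sched_*";
 *	int not;
 *
 *	g.type = filter_parse_regex(buf, strlen(buf), &g.search, &not);
 *	g.len = strlen(g.search);
 *	ftrace_match("sched_switch", &g);	returns 1
 *	ftrace_match("mutex_lock", &g);		returns 0
 */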
4208 | |
4209 | static int |
4210 | enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int clear_filter) |
4211 | { |
4212 | struct ftrace_func_entry *entry; |
4213 | int ret = 0; |
4214 | |
	entry = ftrace_lookup_ip(hash, rec->ip);
4216 | if (clear_filter) { |
4217 | /* Do nothing if it doesn't exist */ |
4218 | if (!entry) |
4219 | return 0; |
4220 | |
4221 | free_hash_entry(hash, entry); |
4222 | } else { |
4223 | /* Do nothing if it exists */ |
4224 | if (entry) |
4225 | return 0; |
4226 | |
		ret = add_hash_entry(hash, rec->ip);
4228 | } |
4229 | return ret; |
4230 | } |
4231 | |
4232 | static int |
4233 | add_rec_by_index(struct ftrace_hash *hash, struct ftrace_glob *func_g, |
4234 | int clear_filter) |
4235 | { |
4236 | long index = simple_strtoul(func_g->search, NULL, 0); |
4237 | struct ftrace_page *pg; |
4238 | struct dyn_ftrace *rec; |
4239 | |
4240 | /* The index starts at 1 */ |
4241 | if (--index < 0) |
4242 | return 0; |
4243 | |
4244 | do_for_each_ftrace_rec(pg, rec) { |
4245 | if (pg->index <= index) { |
4246 | index -= pg->index; |
4247 | /* this is a double loop, break goes to the next page */ |
4248 | break; |
4249 | } |
4250 | rec = &pg->records[index]; |
4251 | enter_record(hash, rec, clear_filter); |
4252 | return 1; |
4253 | } while_for_each_ftrace_rec(); |
4254 | return 0; |
4255 | } |
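/*
 * For example, writing "42" to set_ftrace_filter selects the 42nd entry
 * listed in available_filter_functions, which is why the index above is
 * treated as one-based.
 */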
4256 | |
4257 | #ifdef FTRACE_MCOUNT_MAX_OFFSET |
4258 | static int lookup_ip(unsigned long ip, char **modname, char *str) |
4259 | { |
4260 | unsigned long offset; |
4261 | |
	kallsyms_lookup(ip, NULL, &offset, modname, str);
4263 | if (offset > FTRACE_MCOUNT_MAX_OFFSET) |
4264 | return -1; |
4265 | return 0; |
4266 | } |
4267 | #else |
4268 | static int lookup_ip(unsigned long ip, char **modname, char *str) |
4269 | { |
4270 | kallsyms_lookup(ip, NULL, NULL, modname, str); |
4271 | return 0; |
4272 | } |
4273 | #endif |
4274 | |
4275 | static int |
4276 | ftrace_match_record(struct dyn_ftrace *rec, struct ftrace_glob *func_g, |
4277 | struct ftrace_glob *mod_g, int exclude_mod) |
4278 | { |
4279 | char str[KSYM_SYMBOL_LEN]; |
4280 | char *modname; |
4281 | |
	if (lookup_ip(rec->ip, &modname, str)) {
4283 | /* This should only happen when a rec is disabled */ |
4284 | WARN_ON_ONCE(system_state == SYSTEM_RUNNING && |
4285 | !(rec->flags & FTRACE_FL_DISABLED)); |
4286 | return 0; |
4287 | } |
4288 | |
4289 | if (mod_g) { |
		int mod_matches = (modname) ? ftrace_match(modname, mod_g) : 0;
4291 | |
4292 | /* blank module name to match all modules */ |
4293 | if (!mod_g->len) { |
4294 | /* blank module globbing: modname xor exclude_mod */ |
4295 | if (!exclude_mod != !modname) |
4296 | goto func_match; |
4297 | return 0; |
4298 | } |
4299 | |
4300 | /* |
4301 | * exclude_mod is set to trace everything but the given |
4302 | * module. If it is set and the module matches, then |
4303 | * return 0. If it is not set, and the module doesn't match |
4304 | * also return 0. Otherwise, check the function to see if |
4305 | * that matches. |
4306 | */ |
4307 | if (!mod_matches == !exclude_mod) |
4308 | return 0; |
4309 | func_match: |
4310 | /* blank search means to match all funcs in the mod */ |
4311 | if (!func_g->len) |
4312 | return 1; |
4313 | } |
4314 | |
	return ftrace_match(str, func_g);
4316 | } |
4317 | |
4318 | static int |
4319 | match_records(struct ftrace_hash *hash, char *func, int len, char *mod) |
4320 | { |
4321 | struct ftrace_page *pg; |
4322 | struct dyn_ftrace *rec; |
4323 | struct ftrace_glob func_g = { .type = MATCH_FULL }; |
4324 | struct ftrace_glob mod_g = { .type = MATCH_FULL }; |
4325 | struct ftrace_glob *mod_match = (mod) ? &mod_g : NULL; |
4326 | int exclude_mod = 0; |
4327 | int found = 0; |
4328 | int ret; |
4329 | int clear_filter = 0; |
4330 | |
4331 | if (func) { |
		func_g.type = filter_parse_regex(func, len, &func_g.search,
						 &clear_filter);
4334 | func_g.len = strlen(func_g.search); |
4335 | } |
4336 | |
4337 | if (mod) { |
		mod_g.type = filter_parse_regex(mod, strlen(mod),
						&mod_g.search, &exclude_mod);
4340 | mod_g.len = strlen(mod_g.search); |
4341 | } |
4342 | |
4343 | mutex_lock(&ftrace_lock); |
4344 | |
4345 | if (unlikely(ftrace_disabled)) |
4346 | goto out_unlock; |
4347 | |
4348 | if (func_g.type == MATCH_INDEX) { |
		found = add_rec_by_index(hash, &func_g, clear_filter);
4350 | goto out_unlock; |
4351 | } |
4352 | |
4353 | do_for_each_ftrace_rec(pg, rec) { |
4354 | |
4355 | if (rec->flags & FTRACE_FL_DISABLED) |
4356 | continue; |
4357 | |
		if (ftrace_match_record(rec, &func_g, mod_match, exclude_mod)) {
4359 | ret = enter_record(hash, rec, clear_filter); |
4360 | if (ret < 0) { |
4361 | found = ret; |
4362 | goto out_unlock; |
4363 | } |
4364 | found = 1; |
4365 | } |
4366 | cond_resched(); |
4367 | } while_for_each_ftrace_rec(); |
4368 | out_unlock: |
	mutex_unlock(&ftrace_lock);
4370 | |
4371 | return found; |
4372 | } |
4373 | |
4374 | static int |
4375 | ftrace_match_records(struct ftrace_hash *hash, char *buff, int len) |
4376 | { |
	return match_records(hash, buff, len, NULL);
4378 | } |
4379 | |
4380 | static void ftrace_ops_update_code(struct ftrace_ops *ops, |
4381 | struct ftrace_ops_hash *old_hash) |
4382 | { |
4383 | struct ftrace_ops *op; |
4384 | |
4385 | if (!ftrace_enabled) |
4386 | return; |
4387 | |
4388 | if (ops->flags & FTRACE_OPS_FL_ENABLED) { |
		ftrace_run_modify_code(ops, FTRACE_UPDATE_CALLS, old_hash);
4390 | return; |
4391 | } |
4392 | |
4393 | /* |
4394 | * If this is the shared global_ops filter, then we need to |
	 * check if there is another ops that shares it and is enabled.
4396 | * If so, we still need to run the modify code. |
4397 | */ |
4398 | if (ops->func_hash != &global_ops.local_hash) |
4399 | return; |
4400 | |
4401 | do_for_each_ftrace_op(op, ftrace_ops_list) { |
4402 | if (op->func_hash == &global_ops.local_hash && |
4403 | op->flags & FTRACE_OPS_FL_ENABLED) { |
			ftrace_run_modify_code(op, FTRACE_UPDATE_CALLS, old_hash);
4405 | /* Only need to do this once */ |
4406 | return; |
4407 | } |
4408 | } while_for_each_ftrace_op(op); |
4409 | } |
4410 | |
4411 | static int ftrace_hash_move_and_update_ops(struct ftrace_ops *ops, |
4412 | struct ftrace_hash **orig_hash, |
4413 | struct ftrace_hash *hash, |
4414 | int enable) |
4415 | { |
4416 | struct ftrace_ops_hash old_hash_ops; |
4417 | struct ftrace_hash *old_hash; |
4418 | int ret; |
4419 | |
4420 | old_hash = *orig_hash; |
4421 | old_hash_ops.filter_hash = ops->func_hash->filter_hash; |
4422 | old_hash_ops.notrace_hash = ops->func_hash->notrace_hash; |
	ret = ftrace_hash_move(ops, enable, orig_hash, hash);
	if (!ret) {
		ftrace_ops_update_code(ops, &old_hash_ops);
		free_ftrace_hash_rcu(old_hash);
4427 | } |
4428 | return ret; |
4429 | } |
4430 | |
4431 | static bool module_exists(const char *module) |
4432 | { |
4433 | /* All modules have the symbol __this_module */ |
	static const char this_mod[] = "__this_module";
4435 | char modname[MAX_PARAM_PREFIX_LEN + sizeof(this_mod) + 2]; |
4436 | unsigned long val; |
4437 | int n; |
4438 | |
	n = snprintf(modname, sizeof(modname), "%s:%s", module, this_mod);
4440 | |
4441 | if (n > sizeof(modname) - 1) |
4442 | return false; |
4443 | |
	val = module_kallsyms_lookup_name(modname);
4445 | return val != 0; |
4446 | } |
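/*
 * For example, for the module "ext4" this looks up "ext4:__this_module",
 * which module_kallsyms_lookup_name() can only resolve while the module
 * is actually loaded.
 */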
4447 | |
4448 | static int cache_mod(struct trace_array *tr, |
4449 | const char *func, char *module, int enable) |
4450 | { |
4451 | struct ftrace_mod_load *ftrace_mod, *n; |
4452 | struct list_head *head = enable ? &tr->mod_trace : &tr->mod_notrace; |
4453 | int ret; |
4454 | |
4455 | mutex_lock(&ftrace_lock); |
4456 | |
4457 | /* We do not cache inverse filters */ |
4458 | if (func[0] == '!') { |
4459 | func++; |
4460 | ret = -EINVAL; |
4461 | |
4462 | /* Look to remove this hash */ |
4463 | list_for_each_entry_safe(ftrace_mod, n, head, list) { |
4464 | if (strcmp(ftrace_mod->module, module) != 0) |
4465 | continue; |
4466 | |
4467 | /* no func matches all */ |
			if (strcmp(func, "*") == 0 ||
4469 | (ftrace_mod->func && |
4470 | strcmp(ftrace_mod->func, func) == 0)) { |
4471 | ret = 0; |
4472 | free_ftrace_mod(ftrace_mod); |
4473 | continue; |
4474 | } |
4475 | } |
4476 | goto out; |
4477 | } |
4478 | |
4479 | ret = -EINVAL; |
4480 | /* We only care about modules that have not been loaded yet */ |
4481 | if (module_exists(module)) |
4482 | goto out; |
4483 | |
4484 | /* Save this string off, and execute it when the module is loaded */ |
4485 | ret = ftrace_add_mod(tr, func, module, enable); |
4486 | out: |
	mutex_unlock(&ftrace_lock);
4488 | |
4489 | return ret; |
4490 | } |
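/*
 * A sketch of the usage that ends up here ("mymod" is a placeholder
 * module name):
 *
 *	# echo '*:mod:mymod' > set_ftrace_filter
 *
 * If "mymod" is not loaded yet, the filter is saved by ftrace_add_mod()
 * and applied when the module is loaded. Prefixing the function part
 * with '!' instead removes a previously cached entry.
 */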
4491 | |
4492 | static int |
4493 | ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len, |
4494 | int reset, int enable); |
4495 | |
4496 | #ifdef CONFIG_MODULES |
4497 | static void process_mod_list(struct list_head *head, struct ftrace_ops *ops, |
4498 | char *mod, bool enable) |
4499 | { |
4500 | struct ftrace_mod_load *ftrace_mod, *n; |
4501 | struct ftrace_hash **orig_hash, *new_hash; |
4502 | LIST_HEAD(process_mods); |
4503 | char *func; |
4504 | |
4505 | mutex_lock(&ops->func_hash->regex_lock); |
4506 | |
4507 | if (enable) |
4508 | orig_hash = &ops->func_hash->filter_hash; |
4509 | else |
4510 | orig_hash = &ops->func_hash->notrace_hash; |
4511 | |
	new_hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS,
					      *orig_hash);
4514 | if (!new_hash) |
4515 | goto out; /* warn? */ |
4516 | |
4517 | mutex_lock(&ftrace_lock); |
4518 | |
4519 | list_for_each_entry_safe(ftrace_mod, n, head, list) { |
4520 | |
4521 | if (strcmp(ftrace_mod->module, mod) != 0) |
4522 | continue; |
4523 | |
4524 | if (ftrace_mod->func) |
			func = kstrdup(ftrace_mod->func, GFP_KERNEL);
		else
			func = kstrdup("*", GFP_KERNEL);
4528 | |
4529 | if (!func) /* warn? */ |
4530 | continue; |
4531 | |
		list_move(&ftrace_mod->list, &process_mods);
4533 | |
4534 | /* Use the newly allocated func, as it may be "*" */ |
		kfree(ftrace_mod->func);
4536 | ftrace_mod->func = func; |
4537 | } |
4538 | |
	mutex_unlock(&ftrace_lock);
4540 | |
4541 | list_for_each_entry_safe(ftrace_mod, n, &process_mods, list) { |
4542 | |
4543 | func = ftrace_mod->func; |
4544 | |
4545 | /* Grabs ftrace_lock, which is why we have this extra step */ |
		match_records(new_hash, func, strlen(func), mod);
4547 | free_ftrace_mod(ftrace_mod); |
4548 | } |
4549 | |
4550 | if (enable && list_empty(head)) |
4551 | new_hash->flags &= ~FTRACE_HASH_FL_MOD; |
4552 | |
4553 | mutex_lock(&ftrace_lock); |
4554 | |
	ftrace_hash_move_and_update_ops(ops, orig_hash,
					new_hash, enable);
	mutex_unlock(&ftrace_lock);
4558 | |
4559 | out: |
	mutex_unlock(&ops->func_hash->regex_lock);

	free_ftrace_hash(new_hash);
4563 | } |
4564 | |
4565 | static void process_cached_mods(const char *mod_name) |
4566 | { |
4567 | struct trace_array *tr; |
4568 | char *mod; |
4569 | |
	mod = kstrdup(mod_name, GFP_KERNEL);
4571 | if (!mod) |
4572 | return; |
4573 | |
4574 | mutex_lock(&trace_types_lock); |
4575 | list_for_each_entry(tr, &ftrace_trace_arrays, list) { |
		if (!list_empty(&tr->mod_trace))
			process_mod_list(&tr->mod_trace, tr->ops, mod, true);
		if (!list_empty(&tr->mod_notrace))
			process_mod_list(&tr->mod_notrace, tr->ops, mod, false);
4580 | } |
	mutex_unlock(&trace_types_lock);
4582 | |
	kfree(mod);
4584 | } |
4585 | #endif |
4586 | |
4587 | /* |
4588 | * We register the module command as a template to show others how |
 * to register a command as well.
4590 | */ |
4591 | |
4592 | static int |
4593 | ftrace_mod_callback(struct trace_array *tr, struct ftrace_hash *hash, |
4594 | char *func_orig, char *cmd, char *module, int enable) |
4595 | { |
4596 | char *func; |
4597 | int ret; |
4598 | |
4599 | /* match_records() modifies func, and we need the original */ |
	func = kstrdup(func_orig, GFP_KERNEL);
4601 | if (!func) |
4602 | return -ENOMEM; |
4603 | |
4604 | /* |
4605 | * cmd == 'mod' because we only registered this func |
4606 | * for the 'mod' ftrace_func_command. |
4607 | * But if you register one func with multiple commands, |
4608 | * you can tell which command was used by the cmd |
4609 | * parameter. |
4610 | */ |
	ret = match_records(hash, func, strlen(func), module);
	kfree(func);

	if (!ret)
		return cache_mod(tr, func_orig, module, enable);
4616 | if (ret < 0) |
4617 | return ret; |
4618 | return 0; |
4619 | } |
4620 | |
4621 | static struct ftrace_func_command ftrace_mod_cmd = { |
	.name = "mod",
4623 | .func = ftrace_mod_callback, |
4624 | }; |
4625 | |
4626 | static int __init ftrace_mod_cmd_init(void) |
4627 | { |
	return register_ftrace_command(&ftrace_mod_cmd);
4629 | } |
4630 | core_initcall(ftrace_mod_cmd_init); |
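/*
 * Following the template above, a new command would be registered the
 * same way. This is only a sketch; "my_cmd_func" is a hypothetical
 * callback with the ftrace_func_command signature:
 *
 *	static struct ftrace_func_command my_cmd = {
 *		.name	= "mycmd",
 *		.func	= my_cmd_func,
 *	};
 *
 *	static int __init my_cmd_init(void)
 *	{
 *		return register_ftrace_command(&my_cmd);
 *	}
 *	core_initcall(my_cmd_init);
 */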
4631 | |
4632 | static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip, |
4633 | struct ftrace_ops *op, struct ftrace_regs *fregs) |
4634 | { |
4635 | struct ftrace_probe_ops *probe_ops; |
4636 | struct ftrace_func_probe *probe; |
4637 | |
4638 | probe = container_of(op, struct ftrace_func_probe, ops); |
4639 | probe_ops = probe->probe_ops; |
4640 | |
4641 | /* |
4642 | * Disable preemption for these calls to prevent a RCU grace |
4643 | * period. This syncs the hash iteration and freeing of items |
4644 | * on the hash. rcu_read_lock is too dangerous here. |
4645 | */ |
4646 | preempt_disable_notrace(); |
4647 | probe_ops->func(ip, parent_ip, probe->tr, probe_ops, probe->data); |
4648 | preempt_enable_notrace(); |
4649 | } |
4650 | |
4651 | struct ftrace_func_map { |
4652 | struct ftrace_func_entry entry; |
4653 | void *data; |
4654 | }; |
4655 | |
4656 | struct ftrace_func_mapper { |
4657 | struct ftrace_hash hash; |
4658 | }; |
4659 | |
4660 | /** |
4661 | * allocate_ftrace_func_mapper - allocate a new ftrace_func_mapper |
4662 | * |
 * Returns an ftrace_func_mapper descriptor that can be used to map ips to data.
4664 | */ |
4665 | struct ftrace_func_mapper *allocate_ftrace_func_mapper(void) |
4666 | { |
4667 | struct ftrace_hash *hash; |
4668 | |
4669 | /* |
4670 | * The mapper is simply a ftrace_hash, but since the entries |
4671 | * in the hash are not ftrace_func_entry type, we define it |
4672 | * as a separate structure. |
4673 | */ |
4674 | hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS); |
4675 | return (struct ftrace_func_mapper *)hash; |
4676 | } |
4677 | |
4678 | /** |
4679 | * ftrace_func_mapper_find_ip - Find some data mapped to an ip |
4680 | * @mapper: The mapper that has the ip maps |
4681 | * @ip: the instruction pointer to find the data for |
4682 | * |
4683 | * Returns the data mapped to @ip if found otherwise NULL. The return |
4684 | * is actually the address of the mapper data pointer. The address is |
4685 | * returned for use cases where the data is no bigger than a long, and |
4686 | * the user can use the data pointer as its data instead of having to |
4687 | * allocate more memory for the reference. |
4688 | */ |
4689 | void **ftrace_func_mapper_find_ip(struct ftrace_func_mapper *mapper, |
4690 | unsigned long ip) |
4691 | { |
4692 | struct ftrace_func_entry *entry; |
4693 | struct ftrace_func_map *map; |
4694 | |
	entry = ftrace_lookup_ip(&mapper->hash, ip);
4696 | if (!entry) |
4697 | return NULL; |
4698 | |
4699 | map = (struct ftrace_func_map *)entry; |
4700 | return &map->data; |
4701 | } |
4702 | |
4703 | /** |
4704 | * ftrace_func_mapper_add_ip - Map some data to an ip |
4705 | * @mapper: The mapper that has the ip maps |
4706 | * @ip: The instruction pointer address to map @data to |
4707 | * @data: The data to map to @ip |
4708 | * |
4709 | * Returns 0 on success otherwise an error. |
4710 | */ |
4711 | int ftrace_func_mapper_add_ip(struct ftrace_func_mapper *mapper, |
4712 | unsigned long ip, void *data) |
4713 | { |
4714 | struct ftrace_func_entry *entry; |
4715 | struct ftrace_func_map *map; |
4716 | |
	entry = ftrace_lookup_ip(&mapper->hash, ip);
4718 | if (entry) |
4719 | return -EBUSY; |
4720 | |
	map = kmalloc(sizeof(*map), GFP_KERNEL);
4722 | if (!map) |
4723 | return -ENOMEM; |
4724 | |
4725 | map->entry.ip = ip; |
4726 | map->data = data; |
4727 | |
	__add_hash_entry(&mapper->hash, &map->entry);
4729 | |
4730 | return 0; |
4731 | } |
4732 | |
4733 | /** |
4734 | * ftrace_func_mapper_remove_ip - Remove an ip from the mapping |
4735 | * @mapper: The mapper that has the ip maps |
4736 | * @ip: The instruction pointer address to remove the data from |
4737 | * |
4738 | * Returns the data if it is found, otherwise NULL. |
 * Note, if the data pointer is used as the data itself (see
 * ftrace_func_mapper_find_ip()), then the return value may be meaningless
 * if the data pointer was set to zero.
4742 | */ |
4743 | void *ftrace_func_mapper_remove_ip(struct ftrace_func_mapper *mapper, |
4744 | unsigned long ip) |
4745 | { |
4746 | struct ftrace_func_entry *entry; |
4747 | struct ftrace_func_map *map; |
4748 | void *data; |
4749 | |
	entry = ftrace_lookup_ip(&mapper->hash, ip);
4751 | if (!entry) |
4752 | return NULL; |
4753 | |
4754 | map = (struct ftrace_func_map *)entry; |
4755 | data = map->data; |
4756 | |
	remove_hash_entry(&mapper->hash, entry);
	kfree(entry);
4759 | |
4760 | return data; |
4761 | } |
4762 | |
4763 | /** |
4764 | * free_ftrace_func_mapper - free a mapping of ips and data |
4765 | * @mapper: The mapper that has the ip maps |
4766 | * @free_func: A function to be called on each data item. |
4767 | * |
4768 | * This is used to free the function mapper. The @free_func is optional |
4769 | * and can be used if the data needs to be freed as well. |
4770 | */ |
4771 | void free_ftrace_func_mapper(struct ftrace_func_mapper *mapper, |
4772 | ftrace_mapper_func free_func) |
4773 | { |
4774 | struct ftrace_func_entry *entry; |
4775 | struct ftrace_func_map *map; |
4776 | struct hlist_head *hhd; |
4777 | int size, i; |
4778 | |
4779 | if (!mapper) |
4780 | return; |
4781 | |
4782 | if (free_func && mapper->hash.count) { |
4783 | size = 1 << mapper->hash.size_bits; |
4784 | for (i = 0; i < size; i++) { |
4785 | hhd = &mapper->hash.buckets[i]; |
4786 | hlist_for_each_entry(entry, hhd, hlist) { |
4787 | map = (struct ftrace_func_map *)entry; |
4788 | free_func(map); |
4789 | } |
4790 | } |
4791 | } |
	free_ftrace_hash(&mapper->hash);
4793 | } |
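/*
 * Sketch of the mapper life cycle; "ip", "my_data" and "my_free_func"
 * are hypothetical caller-side pieces:
 *
 *	struct ftrace_func_mapper *mapper;
 *	void **data;
 *
 *	mapper = allocate_ftrace_func_mapper();
 *	ftrace_func_mapper_add_ip(mapper, ip, my_data);
 *	data = ftrace_func_mapper_find_ip(mapper, ip);
 *	...
 *	my_data = ftrace_func_mapper_remove_ip(mapper, ip);
 *	free_ftrace_func_mapper(mapper, my_free_func);
 */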
4794 | |
4795 | static void release_probe(struct ftrace_func_probe *probe) |
4796 | { |
4797 | struct ftrace_probe_ops *probe_ops; |
4798 | |
4799 | mutex_lock(&ftrace_lock); |
4800 | |
4801 | WARN_ON(probe->ref <= 0); |
4802 | |
4803 | /* Subtract the ref that was used to protect this instance */ |
4804 | probe->ref--; |
4805 | |
4806 | if (!probe->ref) { |
4807 | probe_ops = probe->probe_ops; |
4808 | /* |
4809 | * Sending zero as ip tells probe_ops to free |
4810 | * the probe->data itself |
4811 | */ |
4812 | if (probe_ops->free) |
4813 | probe_ops->free(probe_ops, probe->tr, 0, probe->data); |
		list_del(&probe->list);
		kfree(probe);
4816 | } |
	mutex_unlock(&ftrace_lock);
4818 | } |
4819 | |
4820 | static void acquire_probe_locked(struct ftrace_func_probe *probe) |
4821 | { |
4822 | /* |
4823 | * Add one ref to keep it from being freed when releasing the |
4824 | * ftrace_lock mutex. |
4825 | */ |
4826 | probe->ref++; |
4827 | } |
4828 | |
4829 | int |
4830 | register_ftrace_function_probe(char *glob, struct trace_array *tr, |
4831 | struct ftrace_probe_ops *probe_ops, |
4832 | void *data) |
4833 | { |
4834 | struct ftrace_func_probe *probe = NULL, *iter; |
4835 | struct ftrace_func_entry *entry; |
4836 | struct ftrace_hash **orig_hash; |
4837 | struct ftrace_hash *old_hash; |
4838 | struct ftrace_hash *hash; |
4839 | int count = 0; |
4840 | int size; |
4841 | int ret; |
4842 | int i; |
4843 | |
4844 | if (WARN_ON(!tr)) |
4845 | return -EINVAL; |
4846 | |
4847 | /* We do not support '!' for function probes */ |
4848 | if (WARN_ON(glob[0] == '!')) |
4849 | return -EINVAL; |
4850 | |
4851 | |
4852 | mutex_lock(&ftrace_lock); |
4853 | /* Check if the probe_ops is already registered */ |
4854 | list_for_each_entry(iter, &tr->func_probes, list) { |
4855 | if (iter->probe_ops == probe_ops) { |
4856 | probe = iter; |
4857 | break; |
4858 | } |
4859 | } |
4860 | if (!probe) { |
		probe = kzalloc(sizeof(*probe), GFP_KERNEL);
		if (!probe) {
			mutex_unlock(&ftrace_lock);
4864 | return -ENOMEM; |
4865 | } |
4866 | probe->probe_ops = probe_ops; |
4867 | probe->ops.func = function_trace_probe_call; |
4868 | probe->tr = tr; |
		ftrace_ops_init(&probe->ops);
		list_add(&probe->list, &tr->func_probes);
4871 | } |
4872 | |
4873 | acquire_probe_locked(probe); |
4874 | |
	mutex_unlock(&ftrace_lock);
4876 | |
4877 | /* |
4878 | * Note, there's a small window here that the func_hash->filter_hash |
4879 | * may be NULL or empty. Need to be careful when reading the loop. |
4880 | */ |
4881 | mutex_lock(&probe->ops.func_hash->regex_lock); |
4882 | |
4883 | orig_hash = &probe->ops.func_hash->filter_hash; |
4884 | old_hash = *orig_hash; |
	hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
4886 | |
4887 | if (!hash) { |
4888 | ret = -ENOMEM; |
4889 | goto out; |
4890 | } |
4891 | |
	ret = ftrace_match_records(hash, glob, strlen(glob));
4893 | |
4894 | /* Nothing found? */ |
4895 | if (!ret) |
4896 | ret = -EINVAL; |
4897 | |
4898 | if (ret < 0) |
4899 | goto out; |
4900 | |
4901 | size = 1 << hash->size_bits; |
4902 | for (i = 0; i < size; i++) { |
4903 | hlist_for_each_entry(entry, &hash->buckets[i], hlist) { |
			if (ftrace_lookup_ip(old_hash, entry->ip))
4905 | continue; |
4906 | /* |
4907 | * The caller might want to do something special |
4908 | * for each function we find. We call the callback |
4909 | * to give the caller an opportunity to do so. |
4910 | */ |
4911 | if (probe_ops->init) { |
4912 | ret = probe_ops->init(probe_ops, tr, |
4913 | entry->ip, data, |
4914 | &probe->data); |
4915 | if (ret < 0) { |
4916 | if (probe_ops->free && count) |
4917 | probe_ops->free(probe_ops, tr, |
4918 | 0, probe->data); |
4919 | probe->data = NULL; |
4920 | goto out; |
4921 | } |
4922 | } |
4923 | count++; |
4924 | } |
4925 | } |
4926 | |
4927 | mutex_lock(&ftrace_lock); |
4928 | |
4929 | if (!count) { |
4930 | /* Nothing was added? */ |
4931 | ret = -EINVAL; |
4932 | goto out_unlock; |
4933 | } |
4934 | |
	ret = ftrace_hash_move_and_update_ops(&probe->ops, orig_hash,
					      hash, 1);
4937 | if (ret < 0) |
4938 | goto err_unlock; |
4939 | |
4940 | /* One ref for each new function traced */ |
4941 | probe->ref += count; |
4942 | |
4943 | if (!(probe->ops.flags & FTRACE_OPS_FL_ENABLED)) |
		ret = ftrace_startup(&probe->ops, 0);
4945 | |
4946 | out_unlock: |
	mutex_unlock(&ftrace_lock);
4948 | |
4949 | if (!ret) |
4950 | ret = count; |
4951 | out: |
	mutex_unlock(&probe->ops.func_hash->regex_lock);
4953 | free_ftrace_hash(hash); |
4954 | |
4955 | release_probe(probe); |
4956 | |
4957 | return ret; |
4958 | |
4959 | err_unlock: |
4960 | if (!probe_ops->free || !count) |
4961 | goto out_unlock; |
4962 | |
4963 | /* Failed to do the move, need to call the free functions */ |
4964 | for (i = 0; i < size; i++) { |
4965 | hlist_for_each_entry(entry, &hash->buckets[i], hlist) { |
			if (ftrace_lookup_ip(old_hash, entry->ip))
4967 | continue; |
4968 | probe_ops->free(probe_ops, tr, entry->ip, probe->data); |
4969 | } |
4970 | } |
4971 | goto out_unlock; |
4972 | } |
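/*
 * Sketch of a caller, such as a trigger command implementing
 * "<glob>:mycmd". "my_probe_ops" is a hypothetical ftrace_probe_ops
 * with at least .func set:
 *
 *	ret = register_ftrace_function_probe(glob, tr, &my_probe_ops, data);
 *	if (ret < 0)
 *		return ret;
 *	(on success, ret is the number of functions the probe attached to)
 */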
4973 | |
4974 | int |
4975 | unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr, |
4976 | struct ftrace_probe_ops *probe_ops) |
4977 | { |
4978 | struct ftrace_func_probe *probe = NULL, *iter; |
4979 | struct ftrace_ops_hash old_hash_ops; |
4980 | struct ftrace_func_entry *entry; |
4981 | struct ftrace_glob func_g; |
4982 | struct ftrace_hash **orig_hash; |
4983 | struct ftrace_hash *old_hash; |
4984 | struct ftrace_hash *hash = NULL; |
4985 | struct hlist_node *tmp; |
4986 | struct hlist_head hhd; |
4987 | char str[KSYM_SYMBOL_LEN]; |
4988 | int count = 0; |
4989 | int i, ret = -ENODEV; |
4990 | int size; |
4991 | |
	if (!glob || !strlen(glob) || !strcmp(glob, "*"))
4993 | func_g.search = NULL; |
4994 | else { |
4995 | int not; |
4996 | |
		func_g.type = filter_parse_regex(glob, strlen(glob),
						 &func_g.search, &not);
4999 | func_g.len = strlen(func_g.search); |
5000 | |
5001 | /* we do not support '!' for function probes */ |
5002 | if (WARN_ON(not)) |
5003 | return -EINVAL; |
5004 | } |
5005 | |
5006 | mutex_lock(&ftrace_lock); |
5007 | /* Check if the probe_ops is already registered */ |
5008 | list_for_each_entry(iter, &tr->func_probes, list) { |
5009 | if (iter->probe_ops == probe_ops) { |
5010 | probe = iter; |
5011 | break; |
5012 | } |
5013 | } |
5014 | if (!probe) |
5015 | goto err_unlock_ftrace; |
5016 | |
5017 | ret = -EINVAL; |
5018 | if (!(probe->ops.flags & FTRACE_OPS_FL_INITIALIZED)) |
5019 | goto err_unlock_ftrace; |
5020 | |
5021 | acquire_probe_locked(probe); |
5022 | |
	mutex_unlock(&ftrace_lock);
5024 | |
5025 | mutex_lock(&probe->ops.func_hash->regex_lock); |
5026 | |
5027 | orig_hash = &probe->ops.func_hash->filter_hash; |
5028 | old_hash = *orig_hash; |
5029 | |
	if (ftrace_hash_empty(old_hash))
5031 | goto out_unlock; |
5032 | |
5033 | old_hash_ops.filter_hash = old_hash; |
5034 | /* Probes only have filters */ |
5035 | old_hash_ops.notrace_hash = NULL; |
5036 | |
5037 | ret = -ENOMEM; |
	hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
5039 | if (!hash) |
5040 | goto out_unlock; |
5041 | |
5042 | INIT_HLIST_HEAD(&hhd); |
5043 | |
5044 | size = 1 << hash->size_bits; |
5045 | for (i = 0; i < size; i++) { |
5046 | hlist_for_each_entry_safe(entry, tmp, &hash->buckets[i], hlist) { |
5047 | |
5048 | if (func_g.search) { |
				kallsyms_lookup(entry->ip, NULL, NULL,
						NULL, str);
				if (!ftrace_match(str, &func_g))
5052 | continue; |
5053 | } |
5054 | count++; |
5055 | remove_hash_entry(hash, entry); |
			hlist_add_head(&entry->hlist, &hhd);
5057 | } |
5058 | } |
5059 | |
5060 | /* Nothing found? */ |
5061 | if (!count) { |
5062 | ret = -EINVAL; |
5063 | goto out_unlock; |
5064 | } |
5065 | |
5066 | mutex_lock(&ftrace_lock); |
5067 | |
5068 | WARN_ON(probe->ref < count); |
5069 | |
5070 | probe->ref -= count; |
5071 | |
5072 | if (ftrace_hash_empty(hash)) |
		ftrace_shutdown(&probe->ops, 0);
5074 | |
	ret = ftrace_hash_move_and_update_ops(&probe->ops, orig_hash,
					      hash, 1);
5077 | |
5078 | /* still need to update the function call sites */ |
5079 | if (ftrace_enabled && !ftrace_hash_empty(hash)) |
		ftrace_run_modify_code(&probe->ops, FTRACE_UPDATE_CALLS,
				       &old_hash_ops);
5082 | synchronize_rcu(); |
5083 | |
5084 | hlist_for_each_entry_safe(entry, tmp, &hhd, hlist) { |
		hlist_del(&entry->hlist);
5086 | if (probe_ops->free) |
5087 | probe_ops->free(probe_ops, tr, entry->ip, probe->data); |
		kfree(entry);
5089 | } |
	mutex_unlock(&ftrace_lock);
5091 | |
5092 | out_unlock: |
	mutex_unlock(&probe->ops.func_hash->regex_lock);
5094 | free_ftrace_hash(hash); |
5095 | |
5096 | release_probe(probe); |
5097 | |
5098 | return ret; |
5099 | |
5100 | err_unlock_ftrace: |
	mutex_unlock(&ftrace_lock);
5102 | return ret; |
5103 | } |
5104 | |
5105 | void clear_ftrace_function_probes(struct trace_array *tr) |
5106 | { |
5107 | struct ftrace_func_probe *probe, *n; |
5108 | |
5109 | list_for_each_entry_safe(probe, n, &tr->func_probes, list) |
		unregister_ftrace_function_probe_func(NULL, tr, probe->probe_ops);
5111 | } |
5112 | |
5113 | static LIST_HEAD(ftrace_commands); |
5114 | static DEFINE_MUTEX(ftrace_cmd_mutex); |
5115 | |
5116 | /* |
5117 | * Currently we only register ftrace commands from __init, so mark this |
5118 | * __init too. |
5119 | */ |
5120 | __init int register_ftrace_command(struct ftrace_func_command *cmd) |
5121 | { |
5122 | struct ftrace_func_command *p; |
5123 | int ret = 0; |
5124 | |
5125 | mutex_lock(&ftrace_cmd_mutex); |
5126 | list_for_each_entry(p, &ftrace_commands, list) { |
5127 | if (strcmp(cmd->name, p->name) == 0) { |
5128 | ret = -EBUSY; |
5129 | goto out_unlock; |
5130 | } |
5131 | } |
	list_add(&cmd->list, &ftrace_commands);
5133 | out_unlock: |
	mutex_unlock(&ftrace_cmd_mutex);
5135 | |
5136 | return ret; |
5137 | } |
5138 | |
5139 | /* |
5140 | * Currently we only unregister ftrace commands from __init, so mark |
5141 | * this __init too. |
5142 | */ |
5143 | __init int unregister_ftrace_command(struct ftrace_func_command *cmd) |
5144 | { |
5145 | struct ftrace_func_command *p, *n; |
5146 | int ret = -ENODEV; |
5147 | |
5148 | mutex_lock(&ftrace_cmd_mutex); |
5149 | list_for_each_entry_safe(p, n, &ftrace_commands, list) { |
5150 | if (strcmp(cmd->name, p->name) == 0) { |
5151 | ret = 0; |
			list_del_init(&p->list);
5153 | goto out_unlock; |
5154 | } |
5155 | } |
5156 | out_unlock: |
	mutex_unlock(&ftrace_cmd_mutex);
5158 | |
5159 | return ret; |
5160 | } |
5161 | |
5162 | static int ftrace_process_regex(struct ftrace_iterator *iter, |
5163 | char *buff, int len, int enable) |
5164 | { |
5165 | struct ftrace_hash *hash = iter->hash; |
5166 | struct trace_array *tr = iter->ops->private; |
5167 | char *func, *command, *next = buff; |
5168 | struct ftrace_func_command *p; |
5169 | int ret = -EINVAL; |
5170 | |
	func = strsep(&next, ":");
5172 | |
5173 | if (!next) { |
		ret = ftrace_match_records(hash, func, len);
5175 | if (!ret) |
5176 | ret = -EINVAL; |
5177 | if (ret < 0) |
5178 | return ret; |
5179 | return 0; |
5180 | } |
5181 | |
5182 | /* command found */ |
5183 | |
	command = strsep(&next, ":");
5185 | |
5186 | mutex_lock(&ftrace_cmd_mutex); |
5187 | list_for_each_entry(p, &ftrace_commands, list) { |
5188 | if (strcmp(p->name, command) == 0) { |
5189 | ret = p->func(tr, hash, func, command, next, enable); |
5190 | goto out_unlock; |
5191 | } |
5192 | } |
5193 | out_unlock: |
	mutex_unlock(&ftrace_cmd_mutex);
5195 | |
5196 | return ret; |
5197 | } |
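/*
 * For example, the input "schedule:stacktrace:5" is split here into
 * func = "schedule", command = "stacktrace" and next = "5"; the
 * callback registered for the "stacktrace" command then receives "5"
 * as its parameter string.
 */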
5198 | |
5199 | static ssize_t |
5200 | ftrace_regex_write(struct file *file, const char __user *ubuf, |
5201 | size_t cnt, loff_t *ppos, int enable) |
5202 | { |
5203 | struct ftrace_iterator *iter; |
5204 | struct trace_parser *parser; |
5205 | ssize_t ret, read; |
5206 | |
5207 | if (!cnt) |
5208 | return 0; |
5209 | |
5210 | if (file->f_mode & FMODE_READ) { |
5211 | struct seq_file *m = file->private_data; |
5212 | iter = m->private; |
5213 | } else |
5214 | iter = file->private_data; |
5215 | |
5216 | if (unlikely(ftrace_disabled)) |
5217 | return -ENODEV; |
5218 | |
5219 | /* iter->hash is a local copy, so we don't need regex_lock */ |
5220 | |
5221 | parser = &iter->parser; |
5222 | read = trace_get_user(parser, ubuf, cnt, ppos); |
5223 | |
5224 | if (read >= 0 && trace_parser_loaded(parser) && |
5225 | !trace_parser_cont(parser)) { |
		ret = ftrace_process_regex(iter, parser->buffer,
					   parser->idx, enable);
5228 | trace_parser_clear(parser); |
5229 | if (ret < 0) |
5230 | goto out; |
5231 | } |
5232 | |
5233 | ret = read; |
5234 | out: |
5235 | return ret; |
5236 | } |
5237 | |
5238 | ssize_t |
5239 | ftrace_filter_write(struct file *file, const char __user *ubuf, |
5240 | size_t cnt, loff_t *ppos) |
5241 | { |
	return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
5243 | } |
5244 | |
5245 | ssize_t |
5246 | ftrace_notrace_write(struct file *file, const char __user *ubuf, |
5247 | size_t cnt, loff_t *ppos) |
5248 | { |
	return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
5250 | } |
5251 | |
5252 | static int |
5253 | __ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove) |
5254 | { |
5255 | struct ftrace_func_entry *entry; |
5256 | |
5257 | ip = ftrace_location(ip); |
5258 | if (!ip) |
5259 | return -EINVAL; |
5260 | |
5261 | if (remove) { |
5262 | entry = ftrace_lookup_ip(hash, ip); |
5263 | if (!entry) |
5264 | return -ENOENT; |
5265 | free_hash_entry(hash, entry); |
5266 | return 0; |
5267 | } |
5268 | |
5269 | return add_hash_entry(hash, ip); |
5270 | } |
5271 | |
5272 | static int |
5273 | ftrace_match_addr(struct ftrace_hash *hash, unsigned long *ips, |
5274 | unsigned int cnt, int remove) |
5275 | { |
5276 | unsigned int i; |
5277 | int err; |
5278 | |
5279 | for (i = 0; i < cnt; i++) { |
		err = __ftrace_match_addr(hash, ips[i], remove);
5281 | if (err) { |
5282 | /* |
5283 | * This expects the @hash is a temporary hash and if this |
5284 | * fails the caller must free the @hash. |
5285 | */ |
5286 | return err; |
5287 | } |
5288 | } |
5289 | return 0; |
5290 | } |
5291 | |
5292 | static int |
5293 | ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len, |
5294 | unsigned long *ips, unsigned int cnt, |
5295 | int remove, int reset, int enable) |
5296 | { |
5297 | struct ftrace_hash **orig_hash; |
5298 | struct ftrace_hash *hash; |
5299 | int ret; |
5300 | |
5301 | if (unlikely(ftrace_disabled)) |
5302 | return -ENODEV; |
5303 | |
5304 | mutex_lock(&ops->func_hash->regex_lock); |
5305 | |
5306 | if (enable) |
5307 | orig_hash = &ops->func_hash->filter_hash; |
5308 | else |
5309 | orig_hash = &ops->func_hash->notrace_hash; |
5310 | |
5311 | if (reset) |
5312 | hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS); |
5313 | else |
		hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
5315 | |
5316 | if (!hash) { |
5317 | ret = -ENOMEM; |
5318 | goto out_regex_unlock; |
5319 | } |
5320 | |
	if (buf && !ftrace_match_records(hash, buf, len)) {
5322 | ret = -EINVAL; |
5323 | goto out_regex_unlock; |
5324 | } |
5325 | if (ips) { |
5326 | ret = ftrace_match_addr(hash, ips, cnt, remove); |
5327 | if (ret < 0) |
5328 | goto out_regex_unlock; |
5329 | } |
5330 | |
5331 | mutex_lock(&ftrace_lock); |
5332 | ret = ftrace_hash_move_and_update_ops(ops, orig_hash, hash, enable); |
	mutex_unlock(&ftrace_lock);
5334 | |
5335 | out_regex_unlock: |
	mutex_unlock(&ops->func_hash->regex_lock);
5337 | |
5338 | free_ftrace_hash(hash); |
5339 | return ret; |
5340 | } |
5341 | |
5342 | static int |
5343 | ftrace_set_addr(struct ftrace_ops *ops, unsigned long *ips, unsigned int cnt, |
5344 | int remove, int reset, int enable) |
5345 | { |
	return ftrace_set_hash(ops, NULL, 0, ips, cnt, remove, reset, enable);
5347 | } |
5348 | |
5349 | #ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS |
5350 | |
5351 | struct ftrace_direct_func { |
5352 | struct list_head next; |
5353 | unsigned long addr; |
5354 | int count; |
5355 | }; |
5356 | |
5357 | static LIST_HEAD(ftrace_direct_funcs); |
5358 | |
5359 | static int register_ftrace_function_nolock(struct ftrace_ops *ops); |
5360 | |
5361 | #define MULTI_FLAGS (FTRACE_OPS_FL_DIRECT | FTRACE_OPS_FL_SAVE_ARGS) |
5362 | |
5363 | static int check_direct_multi(struct ftrace_ops *ops) |
5364 | { |
5365 | if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) |
5366 | return -EINVAL; |
5367 | if ((ops->flags & MULTI_FLAGS) != MULTI_FLAGS) |
5368 | return -EINVAL; |
5369 | return 0; |
5370 | } |
5371 | |
5372 | static void remove_direct_functions_hash(struct ftrace_hash *hash, unsigned long addr) |
5373 | { |
5374 | struct ftrace_func_entry *entry, *del; |
5375 | int size, i; |
5376 | |
5377 | size = 1 << hash->size_bits; |
5378 | for (i = 0; i < size; i++) { |
5379 | hlist_for_each_entry(entry, &hash->buckets[i], hlist) { |
			del = __ftrace_lookup_ip(direct_functions, entry->ip);
			if (del && del->direct == addr) {
				remove_hash_entry(direct_functions, del);
				kfree(del);
5384 | } |
5385 | } |
5386 | } |
5387 | } |
5388 | |
5389 | /** |
5390 | * register_ftrace_direct - Call a custom trampoline directly |
5391 | * for multiple functions registered in @ops |
5392 | * @ops: The address of the struct ftrace_ops object |
5393 | * @addr: The address of the trampoline to call at @ops functions |
5394 | * |
 * This is used to connect a direct call to @addr from the nop locations
 * of the functions registered in @ops (set by the ftrace_set_filter_ip()
 * function).
5398 | * |
5399 | * The location that it calls (@addr) must be able to handle a direct call, |
5400 | * and save the parameters of the function being traced, and restore them |
5401 | * (or inject new ones if needed), before returning. |
5402 | * |
5403 | * Returns: |
5404 | * 0 on success |
5405 | * -EINVAL - The @ops object was already registered with this call or |
5406 | * when there are no functions in @ops object. |
5407 | * -EBUSY - Another direct function is already attached (there can be only one) |
5408 | * -ENODEV - @ip does not point to a ftrace nop location (or not supported) |
5409 | * -ENOMEM - There was an allocation failure. |
5410 | */ |
5411 | int register_ftrace_direct(struct ftrace_ops *ops, unsigned long addr) |
5412 | { |
5413 | struct ftrace_hash *hash, *free_hash = NULL; |
5414 | struct ftrace_func_entry *entry, *new; |
5415 | int err = -EBUSY, size, i; |
5416 | |
5417 | if (ops->func || ops->trampoline) |
5418 | return -EINVAL; |
5419 | if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) |
5420 | return -EINVAL; |
5421 | if (ops->flags & FTRACE_OPS_FL_ENABLED) |
5422 | return -EINVAL; |
5423 | |
5424 | hash = ops->func_hash->filter_hash; |
5425 | if (ftrace_hash_empty(hash)) |
5426 | return -EINVAL; |
5427 | |
5428 | mutex_lock(&direct_mutex); |
5429 | |
	/* Make sure requested entries are not already registered... */
5431 | size = 1 << hash->size_bits; |
5432 | for (i = 0; i < size; i++) { |
5433 | hlist_for_each_entry(entry, &hash->buckets[i], hlist) { |
			if (ftrace_find_rec_direct(entry->ip))
5435 | goto out_unlock; |
5436 | } |
5437 | } |
5438 | |
	/* ... and insert them into the direct_functions hash. */
5440 | err = -ENOMEM; |
5441 | for (i = 0; i < size; i++) { |
5442 | hlist_for_each_entry(entry, &hash->buckets[i], hlist) { |
			new = ftrace_add_rec_direct(entry->ip, addr, &free_hash);
5444 | if (!new) |
5445 | goto out_remove; |
5446 | entry->direct = addr; |
5447 | } |
5448 | } |
5449 | |
5450 | ops->func = call_direct_funcs; |
5451 | ops->flags = MULTI_FLAGS; |
5452 | ops->trampoline = FTRACE_REGS_ADDR; |
5453 | ops->direct_call = addr; |
5454 | |
5455 | err = register_ftrace_function_nolock(ops); |
5456 | |
5457 | out_remove: |
5458 | if (err) |
5459 | remove_direct_functions_hash(hash, addr); |
5460 | |
5461 | out_unlock: |
	mutex_unlock(&direct_mutex);
5463 | |
5464 | if (free_hash) { |
5465 | synchronize_rcu_tasks(); |
		free_ftrace_hash(free_hash);
5467 | } |
5468 | return err; |
5469 | } |
5470 | EXPORT_SYMBOL_GPL(register_ftrace_direct); |
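
/*
 * Editorial example (a minimal sketch, not from the original source): wiring
 * one function to a direct trampoline. The names my_direct_ops and my_tramp
 * are hypothetical; my_tramp is assumed to be an architecture-specific
 * assembly trampoline that saves and restores the traced function's
 * arguments (see samples/ftrace/ for real trampolines).
 *
 *	static struct ftrace_ops my_direct_ops;
 *
 *	static int __init attach_my_tramp(void)
 *	{
 *		int ret;
 *
 *		ret = ftrace_set_filter_ip(&my_direct_ops,
 *					   (unsigned long)wake_up_process, 0, 0);
 *		if (ret)
 *			return ret;
 *
 *		return register_ftrace_direct(&my_direct_ops,
 *					      (unsigned long)my_tramp);
 *	}
 */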
5471 | |
5472 | /** |
5473 | * unregister_ftrace_direct - Remove calls to custom trampoline |
5474 | * previously registered by register_ftrace_direct for @ops object. |
 * @ops: The address of the struct ftrace_ops object
 * @addr: The address of the direct trampoline that was registered
 * @free_filters: Set to true to also free the filter hashes of @ops
 *
 * This is used to remove direct calls to @addr from the nop locations
 * of the functions registered in @ops (set via the ftrace_set_filter_ip()
 * function).
5480 | * |
5481 | * Returns: |
5482 | * 0 on success |
5483 | * -EINVAL - The @ops object was not properly registered. |
5484 | */ |
5485 | int unregister_ftrace_direct(struct ftrace_ops *ops, unsigned long addr, |
5486 | bool free_filters) |
5487 | { |
5488 | struct ftrace_hash *hash = ops->func_hash->filter_hash; |
5489 | int err; |
5490 | |
5491 | if (check_direct_multi(ops)) |
5492 | return -EINVAL; |
5493 | if (!(ops->flags & FTRACE_OPS_FL_ENABLED)) |
5494 | return -EINVAL; |
5495 | |
5496 | mutex_lock(&direct_mutex); |
5497 | err = unregister_ftrace_function(ops); |
5498 | remove_direct_functions_hash(hash, addr); |
	mutex_unlock(&direct_mutex);
5500 | |
	/* Clean up for a possible subsequent register call */
5502 | ops->func = NULL; |
5503 | ops->trampoline = 0; |
5504 | |
5505 | if (free_filters) |
5506 | ftrace_free_filter(ops); |
5507 | return err; |
5508 | } |
5509 | EXPORT_SYMBOL_GPL(unregister_ftrace_direct); |
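
/*
 * Editorial example (sketch, hypothetical names continued from above):
 * tearing the direct attachment back down. Passing free_filters == true
 * also releases the filter hashes, after which my_direct_ops may be freed
 * or reused.
 *
 *	unregister_ftrace_direct(&my_direct_ops, (unsigned long)my_tramp, true);
 */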
5510 | |
5511 | static int |
5512 | __modify_ftrace_direct(struct ftrace_ops *ops, unsigned long addr) |
5513 | { |
5514 | struct ftrace_hash *hash; |
5515 | struct ftrace_func_entry *entry, *iter; |
5516 | static struct ftrace_ops tmp_ops = { |
5517 | .func = ftrace_stub, |
5518 | .flags = FTRACE_OPS_FL_STUB, |
5519 | }; |
5520 | int i, size; |
5521 | int err; |
5522 | |
5523 | lockdep_assert_held_once(&direct_mutex); |
5524 | |
5525 | /* Enable the tmp_ops to have the same functions as the direct ops */ |
	ftrace_ops_init(&tmp_ops);
5527 | tmp_ops.func_hash = ops->func_hash; |
5528 | tmp_ops.direct_call = addr; |
5529 | |
	err = register_ftrace_function_nolock(&tmp_ops);
5531 | if (err) |
5532 | return err; |
5533 | |
5534 | /* |
5535 | * Now the ftrace_ops_list_func() is called to do the direct callers. |
5536 | * We can safely change the direct functions attached to each entry. |
5537 | */ |
5538 | mutex_lock(&ftrace_lock); |
5539 | |
5540 | hash = ops->func_hash->filter_hash; |
5541 | size = 1 << hash->size_bits; |
5542 | for (i = 0; i < size; i++) { |
5543 | hlist_for_each_entry(iter, &hash->buckets[i], hlist) { |
			entry = __ftrace_lookup_ip(direct_functions, iter->ip);
5545 | if (!entry) |
5546 | continue; |
5547 | entry->direct = addr; |
5548 | } |
5549 | } |
5550 | /* Prevent store tearing if a trampoline concurrently accesses the value */ |
5551 | WRITE_ONCE(ops->direct_call, addr); |
5552 | |
	mutex_unlock(&ftrace_lock);

	/* Removing the tmp_ops will add the updated direct callers to the functions */
	unregister_ftrace_function(&tmp_ops);
5557 | |
5558 | return err; |
5559 | } |
5560 | |
5561 | /** |
5562 | * modify_ftrace_direct_nolock - Modify an existing direct 'multi' call |
5563 | * to call something else |
5564 | * @ops: The address of the struct ftrace_ops object |
5565 | * @addr: The address of the new trampoline to call at @ops functions |
5566 | * |
 * This is used to unregister the currently registered direct caller and
 * register a new one (@addr) for the functions registered in the @ops object.
 *
 * Note there is a window between the ftrace_shutdown and ftrace_startup calls
 * where no callbacks will be invoked.
 *
 * The caller should already hold direct_mutex, so it is not taken
 * here.
5575 | * |
5576 | * Returns: zero on success. Non zero on error, which includes: |
5577 | * -EINVAL - The @ops object was not properly registered. |
5578 | */ |
5579 | int modify_ftrace_direct_nolock(struct ftrace_ops *ops, unsigned long addr) |
5580 | { |
5581 | if (check_direct_multi(ops)) |
5582 | return -EINVAL; |
5583 | if (!(ops->flags & FTRACE_OPS_FL_ENABLED)) |
5584 | return -EINVAL; |
5585 | |
5586 | return __modify_ftrace_direct(ops, addr); |
5587 | } |
5588 | EXPORT_SYMBOL_GPL(modify_ftrace_direct_nolock); |
5589 | |
5590 | /** |
5591 | * modify_ftrace_direct - Modify an existing direct 'multi' call |
5592 | * to call something else |
5593 | * @ops: The address of the struct ftrace_ops object |
5594 | * @addr: The address of the new trampoline to call at @ops functions |
5595 | * |
 * This is used to unregister the currently registered direct caller and
 * register a new one (@addr) for the functions registered in the @ops object.
 *
 * Note there is a window between the ftrace_shutdown and ftrace_startup calls
 * where no callbacks will be invoked.
5601 | * |
5602 | * Returns: zero on success. Non zero on error, which includes: |
5603 | * -EINVAL - The @ops object was not properly registered. |
5604 | */ |
5605 | int modify_ftrace_direct(struct ftrace_ops *ops, unsigned long addr) |
5606 | { |
5607 | int err; |
5608 | |
5609 | if (check_direct_multi(ops)) |
5610 | return -EINVAL; |
5611 | if (!(ops->flags & FTRACE_OPS_FL_ENABLED)) |
5612 | return -EINVAL; |
5613 | |
5614 | mutex_lock(&direct_mutex); |
5615 | err = __modify_ftrace_direct(ops, addr); |
	mutex_unlock(&direct_mutex);
5617 | return err; |
5618 | } |
5619 | EXPORT_SYMBOL_GPL(modify_ftrace_direct); |
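
/*
 * Editorial example (sketch, hypothetical names): retargeting a live direct
 * attachment without a full unregister/register cycle. my_tramp2 is assumed
 * to be another suitable trampoline.
 *
 *	err = modify_ftrace_direct(&my_direct_ops, (unsigned long)my_tramp2);
 */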
5620 | #endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */ |
5621 | |
5622 | /** |
5623 | * ftrace_set_filter_ip - set a function to filter on in ftrace by address |
 * @ops: the ops to set the filter with
 * @ip: the address to add to or remove from the filter.
 * @remove: non zero to remove the ip from the filter
 * @reset: non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @ip is NULL, the filter is not updated.
5631 | * |
5632 | * This can allocate memory which must be freed before @ops can be freed, |
5633 | * either by removing each filtered addr or by using |
5634 | * ftrace_free_filter(@ops). |
5635 | */ |
5636 | int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip, |
5637 | int remove, int reset) |
5638 | { |
5639 | ftrace_ops_init(ops); |
	return ftrace_set_addr(ops, &ip, 1, remove, reset, 1);
5641 | } |
5642 | EXPORT_SYMBOL_GPL(ftrace_set_filter_ip); |
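
/*
 * Editorial example (sketch): ftrace_set_filter_ip() with an ordinary
 * callback ops. The names my_callback and my_ops are illustrative only;
 * FTRACE_OPS_FL_RECURSION asks ftrace to provide recursion protection for
 * the callback.
 *
 *	static void my_callback(unsigned long ip, unsigned long parent_ip,
 *				struct ftrace_ops *op, struct ftrace_regs *fregs)
 *	{
 *		// called on entry to schedule()
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func	= my_callback,
 *		.flags	= FTRACE_OPS_FL_RECURSION,
 *	};
 *
 *	static int __init my_init(void)
 *	{
 *		int ret;
 *
 *		ret = ftrace_set_filter_ip(&my_ops, (unsigned long)schedule, 0, 0);
 *		if (ret)
 *			return ret;
 *		return register_ftrace_function(&my_ops);
 *	}
 */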
5643 | |
5644 | /** |
5645 | * ftrace_set_filter_ips - set functions to filter on in ftrace by addresses |
 * @ops: the ops to set the filter with
 * @ips: the array of addresses to add to or remove from the filter.
 * @cnt: the number of addresses in @ips
 * @remove: non zero to remove ips from the filter
 * @reset: non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If the @ips array or any ip within it is NULL, the filter is not updated.
5654 | * |
5655 | * This can allocate memory which must be freed before @ops can be freed, |
5656 | * either by removing each filtered addr or by using |
5657 | * ftrace_free_filter(@ops). |
5658 | */ |
5659 | int ftrace_set_filter_ips(struct ftrace_ops *ops, unsigned long *ips, |
5660 | unsigned int cnt, int remove, int reset) |
5661 | { |
5662 | ftrace_ops_init(ops); |
	return ftrace_set_addr(ops, ips, cnt, remove, reset, 1);
5664 | } |
5665 | EXPORT_SYMBOL_GPL(ftrace_set_filter_ips); |
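
/*
 * Editorial example (sketch): filtering on several addresses in one call.
 * The array contents are illustrative; reset == 1 discards any previously
 * installed filter before the new addresses are added.
 *
 *	unsigned long ips[] = {
 *		(unsigned long)schedule,
 *		(unsigned long)wake_up_process,
 *	};
 *
 *	ftrace_set_filter_ips(&my_ops, ips, ARRAY_SIZE(ips), 0, 1);
 */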
5666 | |
5667 | /** |
5668 | * ftrace_ops_set_global_filter - setup ops to use global filters |
 * @ops: the ops which will use the global filters
 *
 * ftrace users who need global function trace filtering should call this.
 * It can set the global filter only if @ops has not been initialized before.
5673 | */ |
5674 | void ftrace_ops_set_global_filter(struct ftrace_ops *ops) |
5675 | { |
5676 | if (ops->flags & FTRACE_OPS_FL_INITIALIZED) |
5677 | return; |
5678 | |
5679 | ftrace_ops_init(ops); |
5680 | ops->func_hash = &global_ops.local_hash; |
5681 | } |
5682 | EXPORT_SYMBOL_GPL(ftrace_ops_set_global_filter); |
5683 | |
5684 | static int |
5685 | ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len, |
5686 | int reset, int enable) |
5687 | { |
	return ftrace_set_hash(ops, buf, len, NULL, 0, 0, reset, enable);
5689 | } |
5690 | |
5691 | /** |
5692 | * ftrace_set_filter - set a function to filter on in ftrace |
 * @ops: the ops to set the filter with
 * @buf: the string that holds the function filter text.
 * @len: the length of the string.
 * @reset: non zero to reset all filters before applying this filter.
5697 | * |
5698 | * Filters denote which functions should be enabled when tracing is enabled. |
5699 | * If @buf is NULL and reset is set, all functions will be enabled for tracing. |
5700 | * |
5701 | * This can allocate memory which must be freed before @ops can be freed, |
5702 | * either by removing each filtered addr or by using |
5703 | * ftrace_free_filter(@ops). |
5704 | */ |
5705 | int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf, |
5706 | int len, int reset) |
5707 | { |
5708 | ftrace_ops_init(ops); |
	return ftrace_set_regex(ops, buf, len, reset, 1);
5710 | } |
5711 | EXPORT_SYMBOL_GPL(ftrace_set_filter); |
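
/*
 * Editorial example (sketch): glob-based filtering. This would limit the
 * hypothetical my_ops to every function whose name starts with "sched_";
 * the glob syntax is the same one accepted by set_ftrace_filter in tracefs.
 *
 *	ftrace_set_filter(&my_ops, "sched_*", strlen("sched_*"), 1);
 */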
5712 | |
5713 | /** |
5714 | * ftrace_set_notrace - set a function to not trace in ftrace |
 * @ops: the ops to set the notrace filter with
 * @buf: the string that holds the function notrace text.
 * @len: the length of the string.
 * @reset: non zero to reset all filters before applying this filter.
5719 | * |
5720 | * Notrace Filters denote which functions should not be enabled when tracing |
5721 | * is enabled. If @buf is NULL and reset is set, all functions will be enabled |
5722 | * for tracing. |
5723 | * |
5724 | * This can allocate memory which must be freed before @ops can be freed, |
5725 | * either by removing each filtered addr or by using |
5726 | * ftrace_free_filter(@ops). |
5727 | */ |
5728 | int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf, |
5729 | int len, int reset) |
5730 | { |
5731 | ftrace_ops_init(ops); |
	return ftrace_set_regex(ops, buf, len, reset, 0);
5733 | } |
5734 | EXPORT_SYMBOL_GPL(ftrace_set_notrace); |
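
/*
 * Editorial example (sketch): the notrace counterpart. Starting from a reset
 * notrace hash, this would exclude the preemption helpers from the
 * hypothetical my_ops while leaving its filter hash untouched.
 *
 *	ftrace_set_notrace(&my_ops, "preempt_*", strlen("preempt_*"), 1);
 */
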
5735 | /** |
5736 | * ftrace_set_global_filter - set a function to filter on with global tracers |
 * @buf: the string that holds the function filter text.
 * @len: the length of the string.
 * @reset: non zero to reset all filters before applying this filter.
5740 | * |
5741 | * Filters denote which functions should be enabled when tracing is enabled. |
5742 | * If @buf is NULL and reset is set, all functions will be enabled for tracing. |
5743 | */ |
5744 | void ftrace_set_global_filter(unsigned char *buf, int len, int reset) |
5745 | { |
	ftrace_set_regex(&global_ops, buf, len, reset, 1);
5747 | } |
5748 | EXPORT_SYMBOL_GPL(ftrace_set_global_filter); |
5749 | |
5750 | /** |
5751 | * ftrace_set_global_notrace - set a function to not trace with global tracers |
 * @buf: the string that holds the function notrace text.
 * @len: the length of the string.
 * @reset: non zero to reset all filters before applying this filter.
5755 | * |
5756 | * Notrace Filters denote which functions should not be enabled when tracing |
5757 | * is enabled. If @buf is NULL and reset is set, all functions will be enabled |
5758 | * for tracing. |
5759 | */ |
5760 | void ftrace_set_global_notrace(unsigned char *buf, int len, int reset) |
5761 | { |
	ftrace_set_regex(&global_ops, buf, len, reset, 0);
5763 | } |
5764 | EXPORT_SYMBOL_GPL(ftrace_set_global_notrace); |
5765 | |
5766 | /* |
5767 | * command line interface to allow users to set filters on boot up. |
5768 | */ |
5769 | #define FTRACE_FILTER_SIZE COMMAND_LINE_SIZE |
5770 | static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata; |
5771 | static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata; |
5772 | |
5773 | /* Used by function selftest to not test if filter is set */ |
5774 | bool ftrace_filter_param __initdata; |
5775 | |
5776 | static int __init set_ftrace_notrace(char *str) |
5777 | { |
5778 | ftrace_filter_param = true; |
	strscpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
	return 1;
}
__setup("ftrace_notrace=", set_ftrace_notrace);

static int __init set_ftrace_filter(char *str)
{
	ftrace_filter_param = true;
	strscpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
	return 1;
}
__setup("ftrace_filter=", set_ftrace_filter);
5791 | |
5792 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
5793 | static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata; |
5794 | static char ftrace_graph_notrace_buf[FTRACE_FILTER_SIZE] __initdata; |
5795 | static int ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer); |
5796 | |
5797 | static int __init set_graph_function(char *str) |
5798 | { |
	strscpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
	return 1;
}
__setup("ftrace_graph_filter=", set_graph_function);

static int __init set_graph_notrace_function(char *str)
{
	strscpy(ftrace_graph_notrace_buf, str, FTRACE_FILTER_SIZE);
	return 1;
}
__setup("ftrace_graph_notrace=", set_graph_notrace_function);

static int __init set_graph_max_depth_function(char *str)
{
	if (!str)
		return 0;
	fgraph_max_depth = simple_strtoul(str, NULL, 0);
	return 1;
}
__setup("ftrace_graph_max_depth=", set_graph_max_depth_function);
5819 | |
5820 | static void __init set_ftrace_early_graph(char *buf, int enable) |
5821 | { |
5822 | int ret; |
5823 | char *func; |
5824 | struct ftrace_hash *hash; |
5825 | |
5826 | hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS); |
	if (MEM_FAIL(!hash, "Failed to allocate hash\n"))
5828 | return; |
5829 | |
5830 | while (buf) { |
5831 | func = strsep(&buf, "," ); |
5832 | /* we allow only one expression at a time */ |
		ret = ftrace_graph_set_hash(hash, func);
		if (ret)
			printk(KERN_DEBUG "ftrace: function %s not traceable\n",
			       func);
5837 | } |
5838 | |
5839 | if (enable) |
5840 | ftrace_graph_hash = hash; |
5841 | else |
5842 | ftrace_graph_notrace_hash = hash; |
5843 | } |
5844 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ |
5845 | |
5846 | void __init |
5847 | ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable) |
5848 | { |
5849 | char *func; |
5850 | |
5851 | ftrace_ops_init(ops); |
5852 | |
5853 | while (buf) { |
5854 | func = strsep(&buf, "," ); |
		ftrace_set_regex(ops, func, strlen(func), 0, enable);
5856 | } |
5857 | } |
5858 | |
5859 | static void __init set_ftrace_early_filters(void) |
5860 | { |
5861 | if (ftrace_filter_buf[0]) |
		ftrace_set_early_filter(&global_ops, ftrace_filter_buf, 1);
	if (ftrace_notrace_buf[0])
		ftrace_set_early_filter(&global_ops, ftrace_notrace_buf, 0);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (ftrace_graph_buf[0])
		set_ftrace_early_graph(ftrace_graph_buf, 1);
	if (ftrace_graph_notrace_buf[0])
		set_ftrace_early_graph(ftrace_graph_notrace_buf, 0);
5870 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ |
5871 | } |
5872 | |
5873 | int ftrace_regex_release(struct inode *inode, struct file *file) |
5874 | { |
5875 | struct seq_file *m = (struct seq_file *)file->private_data; |
5876 | struct ftrace_iterator *iter; |
5877 | struct ftrace_hash **orig_hash; |
5878 | struct trace_parser *parser; |
5879 | int filter_hash; |
5880 | |
5881 | if (file->f_mode & FMODE_READ) { |
5882 | iter = m->private; |
5883 | seq_release(inode, file); |
5884 | } else |
5885 | iter = file->private_data; |
5886 | |
5887 | parser = &iter->parser; |
5888 | if (trace_parser_loaded(parser)) { |
5889 | int enable = !(iter->flags & FTRACE_ITER_NOTRACE); |
5890 | |
		ftrace_process_regex(iter, parser->buffer,
				     parser->idx, enable);
5893 | } |
5894 | |
5895 | trace_parser_put(parser); |
5896 | |
5897 | mutex_lock(&iter->ops->func_hash->regex_lock); |
5898 | |
5899 | if (file->f_mode & FMODE_WRITE) { |
5900 | filter_hash = !!(iter->flags & FTRACE_ITER_FILTER); |
5901 | |
5902 | if (filter_hash) { |
5903 | orig_hash = &iter->ops->func_hash->filter_hash; |
5904 | if (iter->tr) { |
				if (list_empty(&iter->tr->mod_trace))
5906 | iter->hash->flags &= ~FTRACE_HASH_FL_MOD; |
5907 | else |
5908 | iter->hash->flags |= FTRACE_HASH_FL_MOD; |
5909 | } |
5910 | } else |
5911 | orig_hash = &iter->ops->func_hash->notrace_hash; |
5912 | |
5913 | mutex_lock(&ftrace_lock); |
		ftrace_hash_move_and_update_ops(iter->ops, orig_hash,
						iter->hash, filter_hash);
		mutex_unlock(&ftrace_lock);
5917 | } else { |
5918 | /* For read only, the hash is the ops hash */ |
5919 | iter->hash = NULL; |
5920 | } |
5921 | |
	mutex_unlock(&iter->ops->func_hash->regex_lock);
	free_ftrace_hash(iter->hash);
	if (iter->tr)
		trace_array_put(iter->tr);
	kfree(iter);
5927 | |
5928 | return 0; |
5929 | } |
5930 | |
5931 | static const struct file_operations ftrace_avail_fops = { |
5932 | .open = ftrace_avail_open, |
5933 | .read = seq_read, |
5934 | .llseek = seq_lseek, |
5935 | .release = seq_release_private, |
5936 | }; |
5937 | |
5938 | static const struct file_operations ftrace_enabled_fops = { |
5939 | .open = ftrace_enabled_open, |
5940 | .read = seq_read, |
5941 | .llseek = seq_lseek, |
5942 | .release = seq_release_private, |
5943 | }; |
5944 | |
5945 | static const struct file_operations ftrace_touched_fops = { |
5946 | .open = ftrace_touched_open, |
5947 | .read = seq_read, |
5948 | .llseek = seq_lseek, |
5949 | .release = seq_release_private, |
5950 | }; |
5951 | |
5952 | static const struct file_operations ftrace_avail_addrs_fops = { |
5953 | .open = ftrace_avail_addrs_open, |
5954 | .read = seq_read, |
5955 | .llseek = seq_lseek, |
5956 | .release = seq_release_private, |
5957 | }; |
5958 | |
5959 | static const struct file_operations ftrace_filter_fops = { |
5960 | .open = ftrace_filter_open, |
5961 | .read = seq_read, |
5962 | .write = ftrace_filter_write, |
5963 | .llseek = tracing_lseek, |
5964 | .release = ftrace_regex_release, |
5965 | }; |
5966 | |
5967 | static const struct file_operations ftrace_notrace_fops = { |
5968 | .open = ftrace_notrace_open, |
5969 | .read = seq_read, |
5970 | .write = ftrace_notrace_write, |
5971 | .llseek = tracing_lseek, |
5972 | .release = ftrace_regex_release, |
5973 | }; |
5974 | |
5975 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
5976 | |
5977 | static DEFINE_MUTEX(graph_lock); |
5978 | |
5979 | struct ftrace_hash __rcu *ftrace_graph_hash = EMPTY_HASH; |
5980 | struct ftrace_hash __rcu *ftrace_graph_notrace_hash = EMPTY_HASH; |
5981 | |
5982 | enum graph_filter_type { |
5983 | GRAPH_FILTER_NOTRACE = 0, |
5984 | GRAPH_FILTER_FUNCTION, |
5985 | }; |
5986 | |
5987 | #define FTRACE_GRAPH_EMPTY ((void *)1) |
5988 | |
5989 | struct ftrace_graph_data { |
5990 | struct ftrace_hash *hash; |
5991 | struct ftrace_func_entry *entry; |
5992 | int idx; /* for hash table iteration */ |
5993 | enum graph_filter_type type; |
5994 | struct ftrace_hash *new_hash; |
5995 | const struct seq_operations *seq_ops; |
5996 | struct trace_parser parser; |
5997 | }; |
5998 | |
5999 | static void * |
6000 | __g_next(struct seq_file *m, loff_t *pos) |
6001 | { |
6002 | struct ftrace_graph_data *fgd = m->private; |
6003 | struct ftrace_func_entry *entry = fgd->entry; |
6004 | struct hlist_head *head; |
6005 | int i, idx = fgd->idx; |
6006 | |
6007 | if (*pos >= fgd->hash->count) |
6008 | return NULL; |
6009 | |
6010 | if (entry) { |
6011 | hlist_for_each_entry_continue(entry, hlist) { |
6012 | fgd->entry = entry; |
6013 | return entry; |
6014 | } |
6015 | |
6016 | idx++; |
6017 | } |
6018 | |
6019 | for (i = idx; i < 1 << fgd->hash->size_bits; i++) { |
6020 | head = &fgd->hash->buckets[i]; |
6021 | hlist_for_each_entry(entry, head, hlist) { |
6022 | fgd->entry = entry; |
6023 | fgd->idx = i; |
6024 | return entry; |
6025 | } |
6026 | } |
6027 | return NULL; |
6028 | } |
6029 | |
6030 | static void * |
6031 | g_next(struct seq_file *m, void *v, loff_t *pos) |
6032 | { |
6033 | (*pos)++; |
6034 | return __g_next(m, pos); |
6035 | } |
6036 | |
6037 | static void *g_start(struct seq_file *m, loff_t *pos) |
6038 | { |
6039 | struct ftrace_graph_data *fgd = m->private; |
6040 | |
6041 | mutex_lock(&graph_lock); |
6042 | |
6043 | if (fgd->type == GRAPH_FILTER_FUNCTION) |
6044 | fgd->hash = rcu_dereference_protected(ftrace_graph_hash, |
6045 | lockdep_is_held(&graph_lock)); |
6046 | else |
6047 | fgd->hash = rcu_dereference_protected(ftrace_graph_notrace_hash, |
6048 | lockdep_is_held(&graph_lock)); |
6049 | |
	/* Nothing to show; tell g_show to print that all functions are enabled */
	if (ftrace_hash_empty(fgd->hash) && !*pos)
6052 | return FTRACE_GRAPH_EMPTY; |
6053 | |
6054 | fgd->idx = 0; |
6055 | fgd->entry = NULL; |
6056 | return __g_next(m, pos); |
6057 | } |
6058 | |
6059 | static void g_stop(struct seq_file *m, void *p) |
6060 | { |
	mutex_unlock(&graph_lock);
6062 | } |
6063 | |
6064 | static int g_show(struct seq_file *m, void *v) |
6065 | { |
6066 | struct ftrace_func_entry *entry = v; |
6067 | |
6068 | if (!entry) |
6069 | return 0; |
6070 | |
6071 | if (entry == FTRACE_GRAPH_EMPTY) { |
6072 | struct ftrace_graph_data *fgd = m->private; |
6073 | |
6074 | if (fgd->type == GRAPH_FILTER_FUNCTION) |
			seq_puts(m, "#### all functions enabled ####\n");
		else
			seq_puts(m, "#### no functions disabled ####\n");
6078 | return 0; |
6079 | } |
6080 | |
	seq_printf(m, "%ps\n", (void *)entry->ip);
6082 | |
6083 | return 0; |
6084 | } |
6085 | |
6086 | static const struct seq_operations ftrace_graph_seq_ops = { |
6087 | .start = g_start, |
6088 | .next = g_next, |
6089 | .stop = g_stop, |
6090 | .show = g_show, |
6091 | }; |
6092 | |
6093 | static int |
6094 | __ftrace_graph_open(struct inode *inode, struct file *file, |
6095 | struct ftrace_graph_data *fgd) |
6096 | { |
6097 | int ret; |
6098 | struct ftrace_hash *new_hash = NULL; |
6099 | |
	ret = security_locked_down(LOCKDOWN_TRACEFS);
6101 | if (ret) |
6102 | return ret; |
6103 | |
6104 | if (file->f_mode & FMODE_WRITE) { |
6105 | const int size_bits = FTRACE_HASH_DEFAULT_BITS; |
6106 | |
		if (trace_parser_get_init(&fgd->parser, FTRACE_BUFF_MAX))
6108 | return -ENOMEM; |
6109 | |
6110 | if (file->f_flags & O_TRUNC) |
6111 | new_hash = alloc_ftrace_hash(size_bits); |
6112 | else |
			new_hash = alloc_and_copy_ftrace_hash(size_bits,
							      fgd->hash);
6115 | if (!new_hash) { |
6116 | ret = -ENOMEM; |
6117 | goto out; |
6118 | } |
6119 | } |
6120 | |
6121 | if (file->f_mode & FMODE_READ) { |
6122 | ret = seq_open(file, &ftrace_graph_seq_ops); |
6123 | if (!ret) { |
6124 | struct seq_file *m = file->private_data; |
6125 | m->private = fgd; |
6126 | } else { |
6127 | /* Failed */ |
			free_ftrace_hash(new_hash);
6129 | new_hash = NULL; |
6130 | } |
6131 | } else |
6132 | file->private_data = fgd; |
6133 | |
6134 | out: |
6135 | if (ret < 0 && file->f_mode & FMODE_WRITE) |
		trace_parser_put(&fgd->parser);
6137 | |
6138 | fgd->new_hash = new_hash; |
6139 | |
6140 | /* |
6141 | * All uses of fgd->hash must be taken with the graph_lock |
6142 | * held. The graph_lock is going to be released, so force |
6143 | * fgd->hash to be reinitialized when it is taken again. |
6144 | */ |
6145 | fgd->hash = NULL; |
6146 | |
6147 | return ret; |
6148 | } |
6149 | |
6150 | static int |
6151 | ftrace_graph_open(struct inode *inode, struct file *file) |
6152 | { |
6153 | struct ftrace_graph_data *fgd; |
6154 | int ret; |
6155 | |
6156 | if (unlikely(ftrace_disabled)) |
6157 | return -ENODEV; |
6158 | |
	fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
6160 | if (fgd == NULL) |
6161 | return -ENOMEM; |
6162 | |
6163 | mutex_lock(&graph_lock); |
6164 | |
6165 | fgd->hash = rcu_dereference_protected(ftrace_graph_hash, |
6166 | lockdep_is_held(&graph_lock)); |
6167 | fgd->type = GRAPH_FILTER_FUNCTION; |
6168 | fgd->seq_ops = &ftrace_graph_seq_ops; |
6169 | |
6170 | ret = __ftrace_graph_open(inode, file, fgd); |
6171 | if (ret < 0) |
		kfree(fgd);

	mutex_unlock(&graph_lock);
6175 | return ret; |
6176 | } |
6177 | |
6178 | static int |
6179 | ftrace_graph_notrace_open(struct inode *inode, struct file *file) |
6180 | { |
6181 | struct ftrace_graph_data *fgd; |
6182 | int ret; |
6183 | |
6184 | if (unlikely(ftrace_disabled)) |
6185 | return -ENODEV; |
6186 | |
	fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
6188 | if (fgd == NULL) |
6189 | return -ENOMEM; |
6190 | |
6191 | mutex_lock(&graph_lock); |
6192 | |
6193 | fgd->hash = rcu_dereference_protected(ftrace_graph_notrace_hash, |
6194 | lockdep_is_held(&graph_lock)); |
6195 | fgd->type = GRAPH_FILTER_NOTRACE; |
6196 | fgd->seq_ops = &ftrace_graph_seq_ops; |
6197 | |
6198 | ret = __ftrace_graph_open(inode, file, fgd); |
6199 | if (ret < 0) |
		kfree(fgd);

	mutex_unlock(&graph_lock);
6203 | return ret; |
6204 | } |
6205 | |
6206 | static int |
6207 | ftrace_graph_release(struct inode *inode, struct file *file) |
6208 | { |
6209 | struct ftrace_graph_data *fgd; |
6210 | struct ftrace_hash *old_hash, *new_hash; |
6211 | struct trace_parser *parser; |
6212 | int ret = 0; |
6213 | |
6214 | if (file->f_mode & FMODE_READ) { |
6215 | struct seq_file *m = file->private_data; |
6216 | |
6217 | fgd = m->private; |
6218 | seq_release(inode, file); |
6219 | } else { |
6220 | fgd = file->private_data; |
6221 | } |
6222 | |
6223 | |
6224 | if (file->f_mode & FMODE_WRITE) { |
6225 | |
6226 | parser = &fgd->parser; |
6227 | |
6228 | if (trace_parser_loaded((parser))) { |
			ret = ftrace_graph_set_hash(fgd->new_hash,
						    parser->buffer);
6231 | } |
6232 | |
6233 | trace_parser_put(parser); |
6234 | |
		new_hash = __ftrace_hash_move(fgd->new_hash);
6236 | if (!new_hash) { |
6237 | ret = -ENOMEM; |
6238 | goto out; |
6239 | } |
6240 | |
6241 | mutex_lock(&graph_lock); |
6242 | |
6243 | if (fgd->type == GRAPH_FILTER_FUNCTION) { |
6244 | old_hash = rcu_dereference_protected(ftrace_graph_hash, |
6245 | lockdep_is_held(&graph_lock)); |
6246 | rcu_assign_pointer(ftrace_graph_hash, new_hash); |
6247 | } else { |
6248 | old_hash = rcu_dereference_protected(ftrace_graph_notrace_hash, |
6249 | lockdep_is_held(&graph_lock)); |
6250 | rcu_assign_pointer(ftrace_graph_notrace_hash, new_hash); |
6251 | } |
6252 | |
		mutex_unlock(&graph_lock);
6254 | |
6255 | /* |
6256 | * We need to do a hard force of sched synchronization. |
6257 | * This is because we use preempt_disable() to do RCU, but |
6258 | * the function tracers can be called where RCU is not watching |
6259 | * (like before user_exit()). We can not rely on the RCU |
6260 | * infrastructure to do the synchronization, thus we must do it |
6261 | * ourselves. |
6262 | */ |
6263 | if (old_hash != EMPTY_HASH) |
6264 | synchronize_rcu_tasks_rude(); |
6265 | |
		free_ftrace_hash(old_hash);
6267 | } |
6268 | |
6269 | out: |
	free_ftrace_hash(fgd->new_hash);
	kfree(fgd);
6272 | |
6273 | return ret; |
6274 | } |
6275 | |
6276 | static int |
6277 | ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer) |
6278 | { |
6279 | struct ftrace_glob func_g; |
6280 | struct dyn_ftrace *rec; |
6281 | struct ftrace_page *pg; |
6282 | struct ftrace_func_entry *entry; |
6283 | int fail = 1; |
6284 | int not; |
6285 | |
6286 | /* decode regex */ |
	func_g.type = filter_parse_regex(buffer, strlen(buffer),
					 &func_g.search, &not);
6289 | |
6290 | func_g.len = strlen(func_g.search); |
6291 | |
6292 | mutex_lock(&ftrace_lock); |
6293 | |
6294 | if (unlikely(ftrace_disabled)) { |
		mutex_unlock(&ftrace_lock);
6296 | return -ENODEV; |
6297 | } |
6298 | |
6299 | do_for_each_ftrace_rec(pg, rec) { |
6300 | |
6301 | if (rec->flags & FTRACE_FL_DISABLED) |
6302 | continue; |
6303 | |
		if (ftrace_match_record(rec, &func_g, NULL, 0)) {
			entry = ftrace_lookup_ip(hash, rec->ip);
6306 | |
6307 | if (!not) { |
6308 | fail = 0; |
6309 | |
6310 | if (entry) |
6311 | continue; |
				if (add_hash_entry(hash, rec->ip) < 0)
6313 | goto out; |
6314 | } else { |
6315 | if (entry) { |
6316 | free_hash_entry(hash, entry); |
6317 | fail = 0; |
6318 | } |
6319 | } |
6320 | } |
6321 | } while_for_each_ftrace_rec(); |
6322 | out: |
	mutex_unlock(&ftrace_lock);
6324 | |
6325 | if (fail) |
6326 | return -EINVAL; |
6327 | |
6328 | return 0; |
6329 | } |
6330 | |
6331 | static ssize_t |
6332 | ftrace_graph_write(struct file *file, const char __user *ubuf, |
6333 | size_t cnt, loff_t *ppos) |
6334 | { |
6335 | ssize_t read, ret = 0; |
6336 | struct ftrace_graph_data *fgd = file->private_data; |
6337 | struct trace_parser *parser; |
6338 | |
6339 | if (!cnt) |
6340 | return 0; |
6341 | |
6342 | /* Read mode uses seq functions */ |
6343 | if (file->f_mode & FMODE_READ) { |
6344 | struct seq_file *m = file->private_data; |
6345 | fgd = m->private; |
6346 | } |
6347 | |
6348 | parser = &fgd->parser; |
6349 | |
6350 | read = trace_get_user(parser, ubuf, cnt, ppos); |
6351 | |
6352 | if (read >= 0 && trace_parser_loaded(parser) && |
6353 | !trace_parser_cont(parser)) { |
6354 | |
		ret = ftrace_graph_set_hash(fgd->new_hash,
					    parser->buffer);
6357 | trace_parser_clear(parser); |
6358 | } |
6359 | |
6360 | if (!ret) |
6361 | ret = read; |
6362 | |
6363 | return ret; |
6364 | } |
6365 | |
6366 | static const struct file_operations ftrace_graph_fops = { |
6367 | .open = ftrace_graph_open, |
6368 | .read = seq_read, |
6369 | .write = ftrace_graph_write, |
6370 | .llseek = tracing_lseek, |
6371 | .release = ftrace_graph_release, |
6372 | }; |
6373 | |
6374 | static const struct file_operations ftrace_graph_notrace_fops = { |
6375 | .open = ftrace_graph_notrace_open, |
6376 | .read = seq_read, |
6377 | .write = ftrace_graph_write, |
6378 | .llseek = tracing_lseek, |
6379 | .release = ftrace_graph_release, |
6380 | }; |
6381 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ |
6382 | |
6383 | void ftrace_create_filter_files(struct ftrace_ops *ops, |
6384 | struct dentry *parent) |
6385 | { |
6386 | |
6387 | trace_create_file(name: "set_ftrace_filter" , TRACE_MODE_WRITE, parent, |
6388 | data: ops, fops: &ftrace_filter_fops); |
6389 | |
6390 | trace_create_file(name: "set_ftrace_notrace" , TRACE_MODE_WRITE, parent, |
6391 | data: ops, fops: &ftrace_notrace_fops); |
6392 | } |
6393 | |
6394 | /* |
6395 | * The name "destroy_filter_files" is really a misnomer. Although |
6396 | * in the future, it may actually delete the files, but this is |
6397 | * really intended to make sure the ops passed in are disabled |
6398 | * and that when this function returns, the caller is free to |
6399 | * free the ops. |
6400 | * |
6401 | * The "destroy" name is only to match the "create" name that this |
6402 | * should be paired with. |
6403 | */ |
6404 | void ftrace_destroy_filter_files(struct ftrace_ops *ops) |
6405 | { |
6406 | mutex_lock(&ftrace_lock); |
6407 | if (ops->flags & FTRACE_OPS_FL_ENABLED) |
		ftrace_shutdown(ops, 0);
	ops->flags |= FTRACE_OPS_FL_DELETED;
	ftrace_free_filter(ops);
	mutex_unlock(&ftrace_lock);
6412 | } |
6413 | |
6414 | static __init int ftrace_init_dyn_tracefs(struct dentry *d_tracer) |
6415 | { |
6416 | |
6417 | trace_create_file(name: "available_filter_functions" , TRACE_MODE_READ, |
6418 | parent: d_tracer, NULL, fops: &ftrace_avail_fops); |
6419 | |
6420 | trace_create_file(name: "available_filter_functions_addrs" , TRACE_MODE_READ, |
6421 | parent: d_tracer, NULL, fops: &ftrace_avail_addrs_fops); |
6422 | |
6423 | trace_create_file(name: "enabled_functions" , TRACE_MODE_READ, |
6424 | parent: d_tracer, NULL, fops: &ftrace_enabled_fops); |
6425 | |
6426 | trace_create_file(name: "touched_functions" , TRACE_MODE_READ, |
6427 | parent: d_tracer, NULL, fops: &ftrace_touched_fops); |
6428 | |
6429 | ftrace_create_filter_files(ops: &global_ops, parent: d_tracer); |
6430 | |
6431 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
6432 | trace_create_file(name: "set_graph_function" , TRACE_MODE_WRITE, parent: d_tracer, |
6433 | NULL, |
6434 | fops: &ftrace_graph_fops); |
6435 | trace_create_file(name: "set_graph_notrace" , TRACE_MODE_WRITE, parent: d_tracer, |
6436 | NULL, |
6437 | fops: &ftrace_graph_notrace_fops); |
6438 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ |
6439 | |
6440 | return 0; |
6441 | } |
6442 | |
6443 | static int ftrace_cmp_ips(const void *a, const void *b) |
6444 | { |
6445 | const unsigned long *ipa = a; |
6446 | const unsigned long *ipb = b; |
6447 | |
6448 | if (*ipa > *ipb) |
6449 | return 1; |
6450 | if (*ipa < *ipb) |
6451 | return -1; |
6452 | return 0; |
6453 | } |
6454 | |
6455 | #ifdef CONFIG_FTRACE_SORT_STARTUP_TEST |
6456 | static void test_is_sorted(unsigned long *start, unsigned long count) |
6457 | { |
6458 | int i; |
6459 | |
6460 | for (i = 1; i < count; i++) { |
6461 | if (WARN(start[i - 1] > start[i], |
6462 | "[%d] %pS at %lx is not sorted with %pS at %lx\n" , i, |
6463 | (void *)start[i - 1], start[i - 1], |
6464 | (void *)start[i], start[i])) |
6465 | break; |
6466 | } |
6467 | if (i == count) |
6468 | pr_info("ftrace section at %px sorted properly\n" , start); |
6469 | } |
6470 | #else |
6471 | static void test_is_sorted(unsigned long *start, unsigned long count) |
6472 | { |
6473 | } |
6474 | #endif |
6475 | |
6476 | static int ftrace_process_locs(struct module *mod, |
6477 | unsigned long *start, |
6478 | unsigned long *end) |
6479 | { |
6480 | struct ftrace_page *pg_unuse = NULL; |
6481 | struct ftrace_page *start_pg; |
6482 | struct ftrace_page *pg; |
6483 | struct dyn_ftrace *rec; |
6484 | unsigned long skipped = 0; |
6485 | unsigned long count; |
6486 | unsigned long *p; |
6487 | unsigned long addr; |
6488 | unsigned long flags = 0; /* Shut up gcc */ |
6489 | int ret = -ENOMEM; |
6490 | |
6491 | count = end - start; |
6492 | |
6493 | if (!count) |
6494 | return 0; |
6495 | |
6496 | /* |
	 * Sorting the mcount locations in vmlinux at build time depends on
	 * CONFIG_BUILDTIME_MCOUNT_SORT, while the mcount locations in
	 * modules cannot be sorted at build time.
6500 | */ |
6501 | if (!IS_ENABLED(CONFIG_BUILDTIME_MCOUNT_SORT) || mod) { |
		sort(start, count, sizeof(*start),
		     ftrace_cmp_ips, NULL);
6504 | } else { |
6505 | test_is_sorted(start, count); |
6506 | } |
6507 | |
	start_pg = ftrace_allocate_pages(count);
6509 | if (!start_pg) |
6510 | return -ENOMEM; |
6511 | |
6512 | mutex_lock(&ftrace_lock); |
6513 | |
6514 | /* |
	 * The core kernel and each module need their own pages, as
6516 | * modules will free them when they are removed. |
6517 | * Force a new page to be allocated for modules. |
6518 | */ |
6519 | if (!mod) { |
6520 | WARN_ON(ftrace_pages || ftrace_pages_start); |
6521 | /* First initialization */ |
6522 | ftrace_pages = ftrace_pages_start = start_pg; |
6523 | } else { |
6524 | if (!ftrace_pages) |
6525 | goto out; |
6526 | |
6527 | if (WARN_ON(ftrace_pages->next)) { |
6528 | /* Hmm, we have free pages? */ |
6529 | while (ftrace_pages->next) |
6530 | ftrace_pages = ftrace_pages->next; |
6531 | } |
6532 | |
6533 | ftrace_pages->next = start_pg; |
6534 | } |
6535 | |
6536 | p = start; |
6537 | pg = start_pg; |
6538 | while (p < end) { |
6539 | unsigned long end_offset; |
		addr = ftrace_call_adjust(*p++);
6541 | /* |
6542 | * Some architecture linkers will pad between |
6543 | * the different mcount_loc sections of different |
6544 | * object files to satisfy alignments. |
6545 | * Skip any NULL pointers. |
6546 | */ |
6547 | if (!addr) { |
6548 | skipped++; |
6549 | continue; |
6550 | } |
6551 | |
6552 | end_offset = (pg->index+1) * sizeof(pg->records[0]); |
6553 | if (end_offset > PAGE_SIZE << pg->order) { |
6554 | /* We should have allocated enough */ |
6555 | if (WARN_ON(!pg->next)) |
6556 | break; |
6557 | pg = pg->next; |
6558 | } |
6559 | |
6560 | rec = &pg->records[pg->index++]; |
6561 | rec->ip = addr; |
6562 | } |
6563 | |
6564 | if (pg->next) { |
6565 | pg_unuse = pg->next; |
6566 | pg->next = NULL; |
6567 | } |
6568 | |
6569 | /* Assign the last page to ftrace_pages */ |
6570 | ftrace_pages = pg; |
6571 | |
6572 | /* |
6573 | * We only need to disable interrupts on start up |
6574 | * because we are modifying code that an interrupt |
6575 | * may execute, and the modification is not atomic. |
6576 | * But for modules, nothing runs the code we modify |
6577 | * until we are finished with it, and there's no |
6578 | * reason to cause large interrupt latencies while we do it. |
6579 | */ |
6580 | if (!mod) |
6581 | local_irq_save(flags); |
	ftrace_update_code(mod, start_pg);
6583 | if (!mod) |
6584 | local_irq_restore(flags); |
6585 | ret = 0; |
6586 | out: |
	mutex_unlock(&ftrace_lock);
6588 | |
6589 | /* We should have used all pages unless we skipped some */ |
6590 | if (pg_unuse) { |
6591 | WARN_ON(!skipped); |
		ftrace_free_pages(pg_unuse);
6593 | } |
6594 | return ret; |
6595 | } |
6596 | |
6597 | struct ftrace_mod_func { |
6598 | struct list_head list; |
6599 | char *name; |
6600 | unsigned long ip; |
6601 | unsigned int size; |
6602 | }; |
6603 | |
6604 | struct ftrace_mod_map { |
6605 | struct rcu_head rcu; |
6606 | struct list_head list; |
6607 | struct module *mod; |
6608 | unsigned long start_addr; |
6609 | unsigned long end_addr; |
6610 | struct list_head funcs; |
6611 | unsigned int num_funcs; |
6612 | }; |
6613 | |
6614 | static int ftrace_get_trampoline_kallsym(unsigned int symnum, |
6615 | unsigned long *value, char *type, |
6616 | char *name, char *module_name, |
6617 | int *exported) |
6618 | { |
6619 | struct ftrace_ops *op; |
6620 | |
6621 | list_for_each_entry_rcu(op, &ftrace_ops_trampoline_list, list) { |
6622 | if (!op->trampoline || symnum--) |
6623 | continue; |
6624 | *value = op->trampoline; |
6625 | *type = 't'; |
		strscpy(name, FTRACE_TRAMPOLINE_SYM, KSYM_NAME_LEN);
		strscpy(module_name, FTRACE_TRAMPOLINE_MOD, MODULE_NAME_LEN);
6628 | *exported = 0; |
6629 | return 0; |
6630 | } |
6631 | |
6632 | return -ERANGE; |
6633 | } |
6634 | |
6635 | #if defined(CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS) || defined(CONFIG_MODULES) |
6636 | /* |
6637 | * Check if the current ops references the given ip. |
6638 | * |
6639 | * If the ops traces all functions, then it was already accounted for. |
6640 | * If the ops does not trace the current record function, skip it. |
6641 | * If the ops ignores the function via notrace filter, skip it. |
6642 | */ |
6643 | static bool |
6644 | ops_references_ip(struct ftrace_ops *ops, unsigned long ip) |
6645 | { |
6646 | /* If ops isn't enabled, ignore it */ |
6647 | if (!(ops->flags & FTRACE_OPS_FL_ENABLED)) |
6648 | return false; |
6649 | |
6650 | /* If ops traces all then it includes this function */ |
6651 | if (ops_traces_mod(ops)) |
6652 | return true; |
6653 | |
6654 | /* The function must be in the filter */ |
	if (!ftrace_hash_empty(ops->func_hash->filter_hash) &&
	    !__ftrace_lookup_ip(ops->func_hash->filter_hash, ip))
		return false;

	/* If in notrace hash, we ignore it too */
	if (ftrace_lookup_ip(ops->func_hash->notrace_hash, ip))
6661 | return false; |
6662 | |
6663 | return true; |
6664 | } |
6665 | #endif |
6666 | |
6667 | #ifdef CONFIG_MODULES |
6668 | |
6669 | #define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next) |
6670 | |
6671 | static LIST_HEAD(ftrace_mod_maps); |
6672 | |
6673 | static int referenced_filters(struct dyn_ftrace *rec) |
6674 | { |
6675 | struct ftrace_ops *ops; |
6676 | int cnt = 0; |
6677 | |
6678 | for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) { |
		if (ops_references_ip(ops, rec->ip)) {
6680 | if (WARN_ON_ONCE(ops->flags & FTRACE_OPS_FL_DIRECT)) |
6681 | continue; |
6682 | if (WARN_ON_ONCE(ops->flags & FTRACE_OPS_FL_IPMODIFY)) |
6683 | continue; |
6684 | cnt++; |
6685 | if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) |
6686 | rec->flags |= FTRACE_FL_REGS; |
6687 | if (cnt == 1 && ops->trampoline) |
6688 | rec->flags |= FTRACE_FL_TRAMP; |
6689 | else |
6690 | rec->flags &= ~FTRACE_FL_TRAMP; |
6691 | } |
6692 | } |
6693 | |
6694 | return cnt; |
6695 | } |
6696 | |
6697 | static void |
6698 | clear_mod_from_hash(struct ftrace_page *pg, struct ftrace_hash *hash) |
6699 | { |
6700 | struct ftrace_func_entry *entry; |
6701 | struct dyn_ftrace *rec; |
6702 | int i; |
6703 | |
6704 | if (ftrace_hash_empty(hash)) |
6705 | return; |
6706 | |
6707 | for (i = 0; i < pg->index; i++) { |
6708 | rec = &pg->records[i]; |
		entry = __ftrace_lookup_ip(hash, rec->ip);
6710 | /* |
6711 | * Do not allow this rec to match again. |
6712 | * Yeah, it may waste some memory, but will be removed |
6713 | * if/when the hash is modified again. |
6714 | */ |
6715 | if (entry) |
6716 | entry->ip = 0; |
6717 | } |
6718 | } |
6719 | |
6720 | /* Clear any records from hashes */ |
6721 | static void clear_mod_from_hashes(struct ftrace_page *pg) |
6722 | { |
6723 | struct trace_array *tr; |
6724 | |
6725 | mutex_lock(&trace_types_lock); |
6726 | list_for_each_entry(tr, &ftrace_trace_arrays, list) { |
6727 | if (!tr->ops || !tr->ops->func_hash) |
6728 | continue; |
6729 | mutex_lock(&tr->ops->func_hash->regex_lock); |
		clear_mod_from_hash(pg, tr->ops->func_hash->filter_hash);
		clear_mod_from_hash(pg, tr->ops->func_hash->notrace_hash);
		mutex_unlock(&tr->ops->func_hash->regex_lock);
6733 | } |
	mutex_unlock(&trace_types_lock);
6735 | } |
6736 | |
6737 | static void ftrace_free_mod_map(struct rcu_head *rcu) |
6738 | { |
6739 | struct ftrace_mod_map *mod_map = container_of(rcu, struct ftrace_mod_map, rcu); |
6740 | struct ftrace_mod_func *mod_func; |
6741 | struct ftrace_mod_func *n; |
6742 | |
6743 | /* All the contents of mod_map are now not visible to readers */ |
6744 | list_for_each_entry_safe(mod_func, n, &mod_map->funcs, list) { |
		kfree(mod_func->name);
		list_del(&mod_func->list);
		kfree(mod_func);
	}

	kfree(mod_map);
6751 | } |
6752 | |
6753 | void ftrace_release_mod(struct module *mod) |
6754 | { |
6755 | struct ftrace_mod_map *mod_map; |
6756 | struct ftrace_mod_map *n; |
6757 | struct dyn_ftrace *rec; |
6758 | struct ftrace_page **last_pg; |
6759 | struct ftrace_page *tmp_page = NULL; |
6760 | struct ftrace_page *pg; |
6761 | |
6762 | mutex_lock(&ftrace_lock); |
6763 | |
6764 | if (ftrace_disabled) |
6765 | goto out_unlock; |
6766 | |
6767 | list_for_each_entry_safe(mod_map, n, &ftrace_mod_maps, list) { |
6768 | if (mod_map->mod == mod) { |
			list_del_rcu(&mod_map->list);
			call_rcu(&mod_map->rcu, ftrace_free_mod_map);
6771 | break; |
6772 | } |
6773 | } |
6774 | |
6775 | /* |
6776 | * Each module has its own ftrace_pages, remove |
6777 | * them from the list. |
6778 | */ |
6779 | last_pg = &ftrace_pages_start; |
6780 | for (pg = ftrace_pages_start; pg; pg = *last_pg) { |
6781 | rec = &pg->records[0]; |
		if (within_module(rec->ip, mod)) {
6783 | /* |
6784 | * As core pages are first, the first |
6785 | * page should never be a module page. |
6786 | */ |
6787 | if (WARN_ON(pg == ftrace_pages_start)) |
6788 | goto out_unlock; |
6789 | |
6790 | /* Check if we are deleting the last page */ |
6791 | if (pg == ftrace_pages) |
6792 | ftrace_pages = next_to_ftrace_page(last_pg); |
6793 | |
6794 | ftrace_update_tot_cnt -= pg->index; |
6795 | *last_pg = pg->next; |
6796 | |
6797 | pg->next = tmp_page; |
6798 | tmp_page = pg; |
6799 | } else |
6800 | last_pg = &pg->next; |
6801 | } |
6802 | out_unlock: |
	mutex_unlock(&ftrace_lock);
6804 | |
6805 | for (pg = tmp_page; pg; pg = tmp_page) { |
6806 | |
6807 | /* Needs to be called outside of ftrace_lock */ |
6808 | clear_mod_from_hashes(pg); |
6809 | |
6810 | if (pg->records) { |
			free_pages((unsigned long)pg->records, pg->order);
6812 | ftrace_number_of_pages -= 1 << pg->order; |
6813 | } |
6814 | tmp_page = pg->next; |
		kfree(pg);
6816 | ftrace_number_of_groups--; |
6817 | } |
6818 | } |
6819 | |
6820 | void ftrace_module_enable(struct module *mod) |
6821 | { |
6822 | struct dyn_ftrace *rec; |
6823 | struct ftrace_page *pg; |
6824 | |
6825 | mutex_lock(&ftrace_lock); |
6826 | |
6827 | if (ftrace_disabled) |
6828 | goto out_unlock; |
6829 | |
6830 | /* |
6831 | * If the tracing is enabled, go ahead and enable the record. |
6832 | * |
6833 | * The reason not to enable the record immediately is the |
6834 | * inherent check of ftrace_make_nop/ftrace_make_call for |
	 * correct previous instructions. Doing the NOP conversion
	 * first puts the module into the correct state, thus passing
	 * the ftrace_make_call check.
6838 | * |
6839 | * We also delay this to after the module code already set the |
6840 | * text to read-only, as we now need to set it back to read-write |
6841 | * so that we can modify the text. |
6842 | */ |
6843 | if (ftrace_start_up) |
6844 | ftrace_arch_code_modify_prepare(); |
6845 | |
6846 | do_for_each_ftrace_rec(pg, rec) { |
6847 | int cnt; |
6848 | /* |
6849 | * do_for_each_ftrace_rec() is a double loop. |
6850 | * module text shares the pg. If a record is |
6851 | * not part of this module, then skip this pg, |
6852 | * which the "break" will do. |
6853 | */ |
		if (!within_module(rec->ip, mod))
6855 | break; |
6856 | |
6857 | /* Weak functions should still be ignored */ |
6858 | if (!test_for_valid_rec(rec)) { |
6859 | /* Clear all other flags. Should not be enabled anyway */ |
6860 | rec->flags = FTRACE_FL_DISABLED; |
6861 | continue; |
6862 | } |
6863 | |
6864 | cnt = 0; |
6865 | |
6866 | /* |
6867 | * When adding a module, we need to check if tracers are |
6868 | * currently enabled and if they are, and can trace this record, |
6869 | * we need to enable the module functions as well as update the |
6870 | * reference counts for those function records. |
6871 | */ |
6872 | if (ftrace_start_up) |
6873 | cnt += referenced_filters(rec); |
6874 | |
6875 | rec->flags &= ~FTRACE_FL_DISABLED; |
6876 | rec->flags += cnt; |
6877 | |
6878 | if (ftrace_start_up && cnt) { |
			int failed = __ftrace_replace_code(rec, 1);
6880 | if (failed) { |
6881 | ftrace_bug(failed, rec); |
6882 | goto out_loop; |
6883 | } |
6884 | } |
6885 | |
6886 | } while_for_each_ftrace_rec(); |
6887 | |
6888 | out_loop: |
6889 | if (ftrace_start_up) |
6890 | ftrace_arch_code_modify_post_process(); |
6891 | |
6892 | out_unlock: |
	mutex_unlock(&ftrace_lock);

	process_cached_mods(mod->name);
6896 | } |
6897 | |
6898 | void ftrace_module_init(struct module *mod) |
6899 | { |
6900 | int ret; |
6901 | |
6902 | if (ftrace_disabled || !mod->num_ftrace_callsites) |
6903 | return; |
6904 | |
	ret = ftrace_process_locs(mod, mod->ftrace_callsites,
				  mod->ftrace_callsites + mod->num_ftrace_callsites);
	if (ret)
		pr_warn("ftrace: failed to allocate entries for module '%s' functions\n",
			mod->name);
6910 | } |
6911 | |
6912 | static void save_ftrace_mod_rec(struct ftrace_mod_map *mod_map, |
6913 | struct dyn_ftrace *rec) |
6914 | { |
6915 | struct ftrace_mod_func *mod_func; |
6916 | unsigned long symsize; |
6917 | unsigned long offset; |
6918 | char str[KSYM_SYMBOL_LEN]; |
6919 | char *modname; |
6920 | const char *ret; |
6921 | |
	ret = kallsyms_lookup(rec->ip, &symsize, &offset, &modname, str);
6923 | if (!ret) |
6924 | return; |
6925 | |
	mod_func = kmalloc(sizeof(*mod_func), GFP_KERNEL);
6927 | if (!mod_func) |
6928 | return; |
6929 | |
	mod_func->name = kstrdup(str, GFP_KERNEL);
	if (!mod_func->name) {
		kfree(mod_func);
6933 | return; |
6934 | } |
6935 | |
6936 | mod_func->ip = rec->ip - offset; |
6937 | mod_func->size = symsize; |
6938 | |
6939 | mod_map->num_funcs++; |
6940 | |
	list_add_rcu(&mod_func->list, &mod_map->funcs);
6942 | } |
6943 | |
6944 | static struct ftrace_mod_map * |
6945 | allocate_ftrace_mod_map(struct module *mod, |
6946 | unsigned long start, unsigned long end) |
6947 | { |
6948 | struct ftrace_mod_map *mod_map; |
6949 | |
	mod_map = kmalloc(sizeof(*mod_map), GFP_KERNEL);
6951 | if (!mod_map) |
6952 | return NULL; |
6953 | |
6954 | mod_map->mod = mod; |
6955 | mod_map->start_addr = start; |
6956 | mod_map->end_addr = end; |
6957 | mod_map->num_funcs = 0; |
6958 | |
	INIT_LIST_HEAD_RCU(&mod_map->funcs);

	list_add_rcu(&mod_map->list, &ftrace_mod_maps);
6962 | |
6963 | return mod_map; |
6964 | } |
6965 | |
6966 | static const char * |
6967 | ftrace_func_address_lookup(struct ftrace_mod_map *mod_map, |
6968 | unsigned long addr, unsigned long *size, |
6969 | unsigned long *off, char *sym) |
6970 | { |
6971 | struct ftrace_mod_func *found_func = NULL; |
6972 | struct ftrace_mod_func *mod_func; |
6973 | |
6974 | list_for_each_entry_rcu(mod_func, &mod_map->funcs, list) { |
6975 | if (addr >= mod_func->ip && |
6976 | addr < mod_func->ip + mod_func->size) { |
6977 | found_func = mod_func; |
6978 | break; |
6979 | } |
6980 | } |
6981 | |
6982 | if (found_func) { |
6983 | if (size) |
6984 | *size = found_func->size; |
6985 | if (off) |
6986 | *off = addr - found_func->ip; |
6987 | if (sym) |
			strscpy(sym, found_func->name, KSYM_NAME_LEN);
6989 | |
6990 | return found_func->name; |
6991 | } |
6992 | |
6993 | return NULL; |
6994 | } |
6995 | |
6996 | const char * |
6997 | ftrace_mod_address_lookup(unsigned long addr, unsigned long *size, |
6998 | unsigned long *off, char **modname, char *sym) |
6999 | { |
7000 | struct ftrace_mod_map *mod_map; |
7001 | const char *ret = NULL; |
7002 | |
7003 | /* mod_map is freed via call_rcu() */ |
7004 | preempt_disable(); |
7005 | list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) { |
7006 | ret = ftrace_func_address_lookup(mod_map, addr, size, off, sym); |
7007 | if (ret) { |
7008 | if (modname) |
7009 | *modname = mod_map->mod->name; |
7010 | break; |
7011 | } |
7012 | } |
7013 | preempt_enable(); |
7014 | |
7015 | return ret; |
7016 | } |
7017 | |
7018 | int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value, |
7019 | char *type, char *name, |
7020 | char *module_name, int *exported) |
7021 | { |
7022 | struct ftrace_mod_map *mod_map; |
7023 | struct ftrace_mod_func *mod_func; |
7024 | int ret; |
7025 | |
7026 | preempt_disable(); |
7027 | list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) { |
7028 | |
7029 | if (symnum >= mod_map->num_funcs) { |
7030 | symnum -= mod_map->num_funcs; |
7031 | continue; |
7032 | } |
7033 | |
7034 | list_for_each_entry_rcu(mod_func, &mod_map->funcs, list) { |
7035 | if (symnum > 1) { |
7036 | symnum--; |
7037 | continue; |
7038 | } |
7039 | |
7040 | *value = mod_func->ip; |
7041 | *type = 'T'; |
			strscpy(name, mod_func->name, KSYM_NAME_LEN);
			strscpy(module_name, mod_map->mod->name, MODULE_NAME_LEN);
7044 | *exported = 1; |
7045 | preempt_enable(); |
7046 | return 0; |
7047 | } |
7048 | WARN_ON(1); |
7049 | break; |
7050 | } |
7051 | ret = ftrace_get_trampoline_kallsym(symnum, value, type, name, |
7052 | module_name, exported); |
7053 | preempt_enable(); |
7054 | return ret; |
7055 | } |
7056 | |
7057 | #else |
7058 | static void save_ftrace_mod_rec(struct ftrace_mod_map *mod_map, |
7059 | struct dyn_ftrace *rec) { } |
7060 | static inline struct ftrace_mod_map * |
7061 | allocate_ftrace_mod_map(struct module *mod, |
7062 | unsigned long start, unsigned long end) |
7063 | { |
7064 | return NULL; |
7065 | } |
7066 | int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value, |
7067 | char *type, char *name, char *module_name, |
7068 | int *exported) |
7069 | { |
7070 | int ret; |
7071 | |
7072 | preempt_disable(); |
7073 | ret = ftrace_get_trampoline_kallsym(symnum, value, type, name, |
7074 | module_name, exported); |
7075 | preempt_enable(); |
7076 | return ret; |
7077 | } |
7078 | #endif /* CONFIG_MODULES */ |
7079 | |
7080 | struct ftrace_init_func { |
7081 | struct list_head list; |
7082 | unsigned long ip; |
7083 | }; |
7084 | |
7085 | /* Clear any init ips from hashes */ |
7086 | static void |
7087 | clear_func_from_hash(struct ftrace_init_func *func, struct ftrace_hash *hash) |
7088 | { |
7089 | struct ftrace_func_entry *entry; |
7090 | |
	entry = ftrace_lookup_ip(hash, func->ip);
7092 | /* |
7093 | * Do not allow this rec to match again. |
7094 | * Yeah, it may waste some memory, but will be removed |
7095 | * if/when the hash is modified again. |
7096 | */ |
7097 | if (entry) |
7098 | entry->ip = 0; |
7099 | } |
7100 | |
7101 | static void |
7102 | clear_func_from_hashes(struct ftrace_init_func *func) |
7103 | { |
7104 | struct trace_array *tr; |
7105 | |
7106 | mutex_lock(&trace_types_lock); |
7107 | list_for_each_entry(tr, &ftrace_trace_arrays, list) { |
7108 | if (!tr->ops || !tr->ops->func_hash) |
7109 | continue; |
7110 | mutex_lock(&tr->ops->func_hash->regex_lock); |
		clear_func_from_hash(func, tr->ops->func_hash->filter_hash);
		clear_func_from_hash(func, tr->ops->func_hash->notrace_hash);
		mutex_unlock(&tr->ops->func_hash->regex_lock);
7114 | } |
	mutex_unlock(&trace_types_lock);
7116 | } |
7117 | |
7118 | static void add_to_clear_hash_list(struct list_head *clear_list, |
7119 | struct dyn_ftrace *rec) |
7120 | { |
7121 | struct ftrace_init_func *func; |
7122 | |
	func = kmalloc(sizeof(*func), GFP_KERNEL);
	if (!func) {
		MEM_FAIL(1, "alloc failure, ftrace filter could be stale\n");
7126 | return; |
7127 | } |
7128 | |
7129 | func->ip = rec->ip; |
	list_add(&func->list, clear_list);
7131 | } |
7132 | |
7133 | void ftrace_free_mem(struct module *mod, void *start_ptr, void *end_ptr) |
7134 | { |
7135 | unsigned long start = (unsigned long)(start_ptr); |
7136 | unsigned long end = (unsigned long)(end_ptr); |
7137 | struct ftrace_page **last_pg = &ftrace_pages_start; |
7138 | struct ftrace_page *pg; |
7139 | struct dyn_ftrace *rec; |
7140 | struct dyn_ftrace key; |
7141 | struct ftrace_mod_map *mod_map = NULL; |
7142 | struct ftrace_init_func *func, *func_next; |
7143 | LIST_HEAD(clear_hash); |
7144 | |
7145 | key.ip = start; |
7146 | key.flags = end; /* overload flags, as it is unsigned long */ |
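
	/*
	 * ftrace_cmp_recs() treats a key whose flags value is larger than
	 * its ip as an address range, so this single key matches any
	 * record whose ip falls within [start, end].
	 */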
7147 | |
7148 | mutex_lock(&ftrace_lock); |
7149 | |
7150 | /* |
7151 | * If we are freeing module init memory, then check if |
7152 | * any tracer is active. If so, we need to save a mapping of |
7153 | * the module functions being freed with the address. |
7154 | */ |
7155 | if (mod && ftrace_ops_list != &ftrace_list_end) |
7156 | mod_map = allocate_ftrace_mod_map(mod, start, end); |
7157 | |
7158 | for (pg = ftrace_pages_start; pg; last_pg = &pg->next, pg = *last_pg) { |
7159 | if (end < pg->records[0].ip || |
7160 | start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE)) |
7161 | continue; |
7162 | again: |
		rec = bsearch(&key, pg->records, pg->index,
			      sizeof(struct dyn_ftrace),
			      ftrace_cmp_recs);
7166 | if (!rec) |
7167 | continue; |
7168 | |
7169 | /* rec will be cleared from hashes after ftrace_lock unlock */ |
		add_to_clear_hash_list(&clear_hash, rec);
7171 | |
7172 | if (mod_map) |
7173 | save_ftrace_mod_rec(mod_map, rec); |
7174 | |
7175 | pg->index--; |
7176 | ftrace_update_tot_cnt--; |
7177 | if (!pg->index) { |
7178 | *last_pg = pg->next; |
7179 | if (pg->records) { |
				free_pages((unsigned long)pg->records, pg->order);
7181 | ftrace_number_of_pages -= 1 << pg->order; |
7182 | } |
7183 | ftrace_number_of_groups--; |
			kfree(pg);
7185 | pg = container_of(last_pg, struct ftrace_page, next); |
7186 | if (!(*last_pg)) |
7187 | ftrace_pages = pg; |
7188 | continue; |
7189 | } |
7190 | memmove(rec, rec + 1, |
7191 | (pg->index - (rec - pg->records)) * sizeof(*rec)); |
7192 | /* More than one function may be in this block */ |
7193 | goto again; |
7194 | } |
	mutex_unlock(&ftrace_lock);
7196 | |
7197 | list_for_each_entry_safe(func, func_next, &clear_hash, list) { |
7198 | clear_func_from_hashes(func); |
		kfree(func);
7200 | } |
7201 | } |
7202 | |
7203 | void __init ftrace_free_init_mem(void) |
7204 | { |
7205 | void *start = (void *)(&__init_begin); |
7206 | void *end = (void *)(&__init_end); |
7207 | |
7208 | ftrace_boot_snapshot(); |
7209 | |
	ftrace_free_mem(NULL, start, end);
7211 | } |
7212 | |
7213 | int __init __weak ftrace_dyn_arch_init(void) |
7214 | { |
7215 | return 0; |
7216 | } |
7217 | |
7218 | void __init ftrace_init(void) |
7219 | { |
7220 | extern unsigned long __start_mcount_loc[]; |
7221 | extern unsigned long __stop_mcount_loc[]; |
7222 | unsigned long count, flags; |
7223 | int ret; |
7224 | |
7225 | local_irq_save(flags); |
7226 | ret = ftrace_dyn_arch_init(); |
7227 | local_irq_restore(flags); |
7228 | if (ret) |
7229 | goto failed; |
7230 | |
7231 | count = __stop_mcount_loc - __start_mcount_loc; |
7232 | if (!count) { |
7233 | pr_info("ftrace: No functions to be traced?\n" ); |
7234 | goto failed; |
7235 | } |
7236 | |
7237 | pr_info("ftrace: allocating %ld entries in %ld pages\n" , |
7238 | count, DIV_ROUND_UP(count, ENTRIES_PER_PAGE)); |
7239 | |
	ret = ftrace_process_locs(NULL,
				  __start_mcount_loc,
				  __stop_mcount_loc);
	if (ret) {
		pr_warn("ftrace: failed to allocate entries for functions\n");
7245 | goto failed; |
7246 | } |
7247 | |
7248 | pr_info("ftrace: allocated %ld pages with %ld groups\n" , |
7249 | ftrace_number_of_pages, ftrace_number_of_groups); |
7250 | |
7251 | last_ftrace_enabled = ftrace_enabled = 1; |
7252 | |
7253 | set_ftrace_early_filters(); |
7254 | |
7255 | return; |
7256 | failed: |
7257 | ftrace_disabled = 1; |
7258 | } |
7259 | |
7260 | /* Do nothing if arch does not support this */ |
7261 | void __weak arch_ftrace_update_trampoline(struct ftrace_ops *ops) |
7262 | { |
7263 | } |
7264 | |
7265 | static void ftrace_update_trampoline(struct ftrace_ops *ops) |
7266 | { |
7267 | unsigned long trampoline = ops->trampoline; |
7268 | |
7269 | arch_ftrace_update_trampoline(ops); |
7270 | if (ops->trampoline && ops->trampoline != trampoline && |
7271 | (ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP)) { |
7272 | /* Add to kallsyms before the perf events */ |
7273 | ftrace_add_trampoline_to_kallsyms(ops); |
		perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL,
				   ops->trampoline, ops->trampoline_size, false,
				   FTRACE_TRAMPOLINE_SYM);
7277 | /* |
7278 | * Record the perf text poke event after the ksymbol register |
7279 | * event. |
7280 | */ |
		perf_event_text_poke((void *)ops->trampoline, NULL, 0,
				     (void *)ops->trampoline,
				     ops->trampoline_size);
7284 | } |
7285 | } |
7286 | |
7287 | void ftrace_init_trace_array(struct trace_array *tr) |
7288 | { |
	INIT_LIST_HEAD(&tr->func_probes);
	INIT_LIST_HEAD(&tr->mod_trace);
	INIT_LIST_HEAD(&tr->mod_notrace);
7292 | } |
7293 | #else |
7294 | |
7295 | struct ftrace_ops global_ops = { |
7296 | .func = ftrace_stub, |
7297 | .flags = FTRACE_OPS_FL_INITIALIZED | |
7298 | FTRACE_OPS_FL_PID, |
7299 | }; |
7300 | |
7301 | static int __init ftrace_nodyn_init(void) |
7302 | { |
7303 | ftrace_enabled = 1; |
7304 | return 0; |
7305 | } |
7306 | core_initcall(ftrace_nodyn_init); |
7307 | |
7308 | static inline int ftrace_init_dyn_tracefs(struct dentry *d_tracer) { return 0; } |
7309 | static inline void ftrace_startup_all(int command) { } |
7310 | |
7311 | static void ftrace_update_trampoline(struct ftrace_ops *ops) |
7312 | { |
7313 | } |
7314 | |
7315 | #endif /* CONFIG_DYNAMIC_FTRACE */ |
7316 | |
7317 | __init void ftrace_init_global_array_ops(struct trace_array *tr) |
7318 | { |
7319 | tr->ops = &global_ops; |
7320 | tr->ops->private = tr; |
7321 | ftrace_init_trace_array(tr); |
7322 | } |
7323 | |
7324 | void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func) |
7325 | { |
7326 | /* If we filter on pids, update to use the pid function */ |
7327 | if (tr->flags & TRACE_ARRAY_FL_GLOBAL) { |
7328 | if (WARN_ON(tr->ops->func != ftrace_stub)) |
7329 | printk("ftrace ops had %pS for function\n" , |
7330 | tr->ops->func); |
7331 | } |
7332 | tr->ops->func = func; |
7333 | tr->ops->private = tr; |
7334 | } |
7335 | |
7336 | void ftrace_reset_array_ops(struct trace_array *tr) |
7337 | { |
7338 | tr->ops->func = ftrace_stub; |
7339 | } |
7340 | |
7341 | static nokprobe_inline void |
7342 | __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip, |
7343 | struct ftrace_ops *ignored, struct ftrace_regs *fregs) |
7344 | { |
7345 | struct pt_regs *regs = ftrace_get_regs(fregs); |
7346 | struct ftrace_ops *op; |
7347 | int bit; |
7348 | |
7349 | /* |
7350 | * The ftrace_test_and_set_recursion() will disable preemption, |
7351 | * which is required since some of the ops may be dynamically |
7352 | * allocated, they must be freed after a synchronize_rcu(). |
7353 | */ |
7354 | bit = trace_test_and_set_recursion(ip, pip: parent_ip, TRACE_LIST_START); |
7355 | if (bit < 0) |
7356 | return; |
7357 | |
7358 | do_for_each_ftrace_op(op, ftrace_ops_list) { |
7359 | /* Stub functions don't need to be called nor tested */ |
7360 | if (op->flags & FTRACE_OPS_FL_STUB) |
7361 | continue; |
7362 | /* |
7363 | * Check the following for each ops before calling their func: |
7364 | * if RCU flag is set, then rcu_is_watching() must be true |
7365 | * Otherwise test if the ip matches the ops filter |
7366 | * |
7367 | * If any of the above fails then the op->func() is not executed. |
7368 | */ |
7369 | if ((!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching()) && |
7370 | ftrace_ops_test(ops: op, ip, regs)) { |
7371 | if (FTRACE_WARN_ON(!op->func)) { |
7372 | pr_warn("op=%p %pS\n" , op, op); |
7373 | goto out; |
7374 | } |
7375 | op->func(ip, parent_ip, op, fregs); |
7376 | } |
7377 | } while_for_each_ftrace_op(op); |
7378 | out: |
7379 | trace_clear_recursion(bit); |
7380 | } |
7381 | |
7382 | /* |
7383 | * Some archs only support passing ip and parent_ip. Even though |
7384 | * the list function ignores the op parameter, we do not want any |
7385 | * C side effects, where a function is called without the caller |
7386 | * sending a third parameter. |
7387 | * Archs are to support both the regs and ftrace_ops at the same time. |
7388 | * If they support ftrace_ops, it is assumed they support regs. |
7389 | * If call backs want to use regs, they must either check for regs |
7390 | * being NULL, or CONFIG_DYNAMIC_FTRACE_WITH_REGS. |
7391 | * Note, CONFIG_DYNAMIC_FTRACE_WITH_REGS expects a full regs to be saved. |
7392 | * An architecture can pass partial regs with ftrace_ops and still |
7393 | * set the ARCH_SUPPORTS_FTRACE_OPS. |
7394 | * |
7395 | * In vmlinux.lds.h, ftrace_ops_list_func() is defined to be |
7396 | * arch_ftrace_ops_list_func. |
7397 | */ |
7398 | #if ARCH_SUPPORTS_FTRACE_OPS |
7399 | void arch_ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip, |
7400 | struct ftrace_ops *op, struct ftrace_regs *fregs) |
7401 | { |
7402 | __ftrace_ops_list_func(ip, parent_ip, NULL, fregs); |
7403 | } |
7404 | #else |
7405 | void arch_ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip) |
7406 | { |
7407 | __ftrace_ops_list_func(ip, parent_ip, NULL, NULL); |
7408 | } |
7409 | #endif |
7410 | NOKPROBE_SYMBOL(arch_ftrace_ops_list_func); |
7411 | |
7412 | /* |
7413 | * If there's only one function registered but it does not support |
7414 | * recursion, needs RCU protection, then this function will be called |
7415 | * by the mcount trampoline. |
7416 | */ |
7417 | static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip, |
7418 | struct ftrace_ops *op, struct ftrace_regs *fregs) |
7419 | { |
7420 | int bit; |
7421 | |
	bit = trace_test_and_set_recursion(ip, parent_ip, TRACE_LIST_START);
7423 | if (bit < 0) |
7424 | return; |
7425 | |
7426 | if (!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching()) |
7427 | op->func(ip, parent_ip, op, fregs); |
7428 | |
7429 | trace_clear_recursion(bit); |
7430 | } |
7431 | NOKPROBE_SYMBOL(ftrace_ops_assist_func); |
7432 | |
7433 | /** |
7434 | * ftrace_ops_get_func - get the function a trampoline should call |
7435 | * @ops: the ops to get the function for |
7436 | * |
7437 | * Normally the mcount trampoline will call the ops->func, but there |
7438 | * are times that it should not. For example, if the ops does not |
7439 | * have its own recursion protection, then it should call the |
7440 | * ftrace_ops_assist_func() instead. |
7441 | * |
7442 | * Returns the function that the trampoline should call for @ops. |
7443 | */ |
7444 | ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops) |
7445 | { |
7446 | /* |
7447 | * If the function does not handle recursion or needs to be RCU safe, |
7448 | * then we need to call the assist handler. |
7449 | */ |
7450 | if (ops->flags & (FTRACE_OPS_FL_RECURSION | |
7451 | FTRACE_OPS_FL_RCU)) |
7452 | return ftrace_ops_assist_func; |
7453 | |
7454 | return ops->func; |
7455 | } |
7456 | |
7457 | static void |
7458 | ftrace_filter_pid_sched_switch_probe(void *data, bool preempt, |
7459 | struct task_struct *prev, |
7460 | struct task_struct *next, |
7461 | unsigned int prev_state) |
7462 | { |
7463 | struct trace_array *tr = data; |
7464 | struct trace_pid_list *pid_list; |
7465 | struct trace_pid_list *no_pid_list; |
7466 | |
7467 | pid_list = rcu_dereference_sched(tr->function_pids); |
7468 | no_pid_list = rcu_dereference_sched(tr->function_no_pids); |
7469 | |
	if (trace_ignore_this_task(pid_list, no_pid_list, next))
7471 | this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid, |
7472 | FTRACE_PID_IGNORE); |
7473 | else |
7474 | this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid, |
7475 | next->pid); |
7476 | } |
7477 | |
7478 | static void |
7479 | ftrace_pid_follow_sched_process_fork(void *data, |
7480 | struct task_struct *self, |
7481 | struct task_struct *task) |
7482 | { |
7483 | struct trace_pid_list *pid_list; |
7484 | struct trace_array *tr = data; |
7485 | |
7486 | pid_list = rcu_dereference_sched(tr->function_pids); |
7487 | trace_filter_add_remove_task(pid_list, self, task); |
7488 | |
7489 | pid_list = rcu_dereference_sched(tr->function_no_pids); |
7490 | trace_filter_add_remove_task(pid_list, self, task); |
7491 | } |
7492 | |
7493 | static void |
7494 | ftrace_pid_follow_sched_process_exit(void *data, struct task_struct *task) |
7495 | { |
7496 | struct trace_pid_list *pid_list; |
7497 | struct trace_array *tr = data; |
7498 | |
7499 | pid_list = rcu_dereference_sched(tr->function_pids); |
7500 | trace_filter_add_remove_task(pid_list, NULL, task); |
7501 | |
7502 | pid_list = rcu_dereference_sched(tr->function_no_pids); |
7503 | trace_filter_add_remove_task(pid_list, NULL, task); |
7504 | } |
7505 | |
7506 | void ftrace_pid_follow_fork(struct trace_array *tr, bool enable) |
7507 | { |
7508 | if (enable) { |
		register_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork,
						  tr);
		register_trace_sched_process_free(ftrace_pid_follow_sched_process_exit,
						  tr);
	} else {
		unregister_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork,
						    tr);
		unregister_trace_sched_process_free(ftrace_pid_follow_sched_process_exit,
						    tr);
7518 | } |
7519 | } |
7520 | |
7521 | static void clear_ftrace_pids(struct trace_array *tr, int type) |
7522 | { |
7523 | struct trace_pid_list *pid_list; |
7524 | struct trace_pid_list *no_pid_list; |
7525 | int cpu; |
7526 | |
7527 | pid_list = rcu_dereference_protected(tr->function_pids, |
7528 | lockdep_is_held(&ftrace_lock)); |
7529 | no_pid_list = rcu_dereference_protected(tr->function_no_pids, |
7530 | lockdep_is_held(&ftrace_lock)); |
7531 | |
7532 | /* Make sure there's something to do */ |
7533 | if (!pid_type_enabled(type, pid_list, no_pid_list)) |
7534 | return; |
7535 | |
7536 | /* See if the pids still need to be checked after this */ |
7537 | if (!still_need_pid_events(type, pid_list, no_pid_list)) { |
		unregister_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr);
7539 | for_each_possible_cpu(cpu) |
7540 | per_cpu_ptr(tr->array_buffer.data, cpu)->ftrace_ignore_pid = FTRACE_PID_TRACE; |
7541 | } |
7542 | |
7543 | if (type & TRACE_PIDS) |
7544 | rcu_assign_pointer(tr->function_pids, NULL); |
7545 | |
7546 | if (type & TRACE_NO_PIDS) |
7547 | rcu_assign_pointer(tr->function_no_pids, NULL); |
7548 | |
7549 | /* Wait till all users are no longer using pid filtering */ |
7550 | synchronize_rcu(); |
7551 | |
7552 | if ((type & TRACE_PIDS) && pid_list) |
7553 | trace_pid_list_free(pid_list); |
7554 | |
7555 | if ((type & TRACE_NO_PIDS) && no_pid_list) |
		trace_pid_list_free(no_pid_list);
7557 | } |
7558 | |
7559 | void ftrace_clear_pids(struct trace_array *tr) |
7560 | { |
7561 | mutex_lock(&ftrace_lock); |
7562 | |
	clear_ftrace_pids(tr, TRACE_PIDS | TRACE_NO_PIDS);

	mutex_unlock(&ftrace_lock);
7566 | } |
7567 | |
7568 | static void ftrace_pid_reset(struct trace_array *tr, int type) |
7569 | { |
7570 | mutex_lock(&ftrace_lock); |
7571 | clear_ftrace_pids(tr, type); |
7572 | |
7573 | ftrace_update_pid_func(); |
	ftrace_startup_all(0);

	mutex_unlock(&ftrace_lock);
7577 | } |
7578 | |
7579 | /* Greater than any max PID */ |
7580 | #define FTRACE_NO_PIDS (void *)(PID_MAX_LIMIT + 1) |
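
/*
 * FTRACE_NO_PIDS also doubles as the sentinel that the fpid/fnpid
 * seq_file iterators below return when no pid list is set, which
 * fpid_show() renders as "no pid".
 */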
7581 | |
7582 | static void *fpid_start(struct seq_file *m, loff_t *pos) |
7583 | __acquires(RCU) |
7584 | { |
7585 | struct trace_pid_list *pid_list; |
7586 | struct trace_array *tr = m->private; |
7587 | |
7588 | mutex_lock(&ftrace_lock); |
7589 | rcu_read_lock_sched(); |
7590 | |
7591 | pid_list = rcu_dereference_sched(tr->function_pids); |
7592 | |
7593 | if (!pid_list) |
7594 | return !(*pos) ? FTRACE_NO_PIDS : NULL; |
7595 | |
7596 | return trace_pid_start(pid_list, pos); |
7597 | } |
7598 | |
7599 | static void *fpid_next(struct seq_file *m, void *v, loff_t *pos) |
7600 | { |
7601 | struct trace_array *tr = m->private; |
7602 | struct trace_pid_list *pid_list = rcu_dereference_sched(tr->function_pids); |
7603 | |
7604 | if (v == FTRACE_NO_PIDS) { |
7605 | (*pos)++; |
7606 | return NULL; |
7607 | } |
7608 | return trace_pid_next(pid_list, v, pos); |
7609 | } |
7610 | |
7611 | static void fpid_stop(struct seq_file *m, void *p) |
7612 | __releases(RCU) |
7613 | { |
7614 | rcu_read_unlock_sched(); |
	mutex_unlock(&ftrace_lock);
7616 | } |
7617 | |
7618 | static int fpid_show(struct seq_file *m, void *v) |
7619 | { |
7620 | if (v == FTRACE_NO_PIDS) { |
		seq_puts(m, "no pid\n");
7622 | return 0; |
7623 | } |
7624 | |
7625 | return trace_pid_show(m, v); |
7626 | } |
7627 | |
7628 | static const struct seq_operations ftrace_pid_sops = { |
7629 | .start = fpid_start, |
7630 | .next = fpid_next, |
7631 | .stop = fpid_stop, |
7632 | .show = fpid_show, |
7633 | }; |
7634 | |
7635 | static void *fnpid_start(struct seq_file *m, loff_t *pos) |
7636 | __acquires(RCU) |
7637 | { |
7638 | struct trace_pid_list *pid_list; |
7639 | struct trace_array *tr = m->private; |
7640 | |
7641 | mutex_lock(&ftrace_lock); |
7642 | rcu_read_lock_sched(); |
7643 | |
7644 | pid_list = rcu_dereference_sched(tr->function_no_pids); |
7645 | |
7646 | if (!pid_list) |
7647 | return !(*pos) ? FTRACE_NO_PIDS : NULL; |
7648 | |
7649 | return trace_pid_start(pid_list, pos); |
7650 | } |
7651 | |
7652 | static void *fnpid_next(struct seq_file *m, void *v, loff_t *pos) |
7653 | { |
7654 | struct trace_array *tr = m->private; |
7655 | struct trace_pid_list *pid_list = rcu_dereference_sched(tr->function_no_pids); |
7656 | |
7657 | if (v == FTRACE_NO_PIDS) { |
7658 | (*pos)++; |
7659 | return NULL; |
7660 | } |
7661 | return trace_pid_next(pid_list, v, pos); |
7662 | } |
7663 | |
7664 | static const struct seq_operations ftrace_no_pid_sops = { |
7665 | .start = fnpid_start, |
7666 | .next = fnpid_next, |
7667 | .stop = fpid_stop, |
7668 | .show = fpid_show, |
7669 | }; |
7670 | |
7671 | static int pid_open(struct inode *inode, struct file *file, int type) |
7672 | { |
7673 | const struct seq_operations *seq_ops; |
7674 | struct trace_array *tr = inode->i_private; |
7675 | struct seq_file *m; |
7676 | int ret = 0; |
7677 | |
7678 | ret = tracing_check_open_get_tr(tr); |
7679 | if (ret) |
7680 | return ret; |
7681 | |
7682 | if ((file->f_mode & FMODE_WRITE) && |
7683 | (file->f_flags & O_TRUNC)) |
7684 | ftrace_pid_reset(tr, type); |
7685 | |
7686 | switch (type) { |
7687 | case TRACE_PIDS: |
7688 | seq_ops = &ftrace_pid_sops; |
7689 | break; |
7690 | case TRACE_NO_PIDS: |
7691 | seq_ops = &ftrace_no_pid_sops; |
7692 | break; |
7693 | default: |
7694 | trace_array_put(tr); |
7695 | WARN_ON_ONCE(1); |
7696 | return -EINVAL; |
7697 | } |
7698 | |
7699 | ret = seq_open(file, seq_ops); |
7700 | if (ret < 0) { |
7701 | trace_array_put(tr); |
7702 | } else { |
7703 | m = file->private_data; |
7704 | /* copy tr over to seq ops */ |
7705 | m->private = tr; |
7706 | } |
7707 | |
7708 | return ret; |
7709 | } |
7710 | |
7711 | static int |
7712 | ftrace_pid_open(struct inode *inode, struct file *file) |
7713 | { |
	return pid_open(inode, file, TRACE_PIDS);
7715 | } |
7716 | |
7717 | static int |
7718 | ftrace_no_pid_open(struct inode *inode, struct file *file) |
7719 | { |
	return pid_open(inode, file, TRACE_NO_PIDS);
7721 | } |
7722 | |
7723 | static void ignore_task_cpu(void *data) |
7724 | { |
7725 | struct trace_array *tr = data; |
7726 | struct trace_pid_list *pid_list; |
7727 | struct trace_pid_list *no_pid_list; |
7728 | |
7729 | /* |
7730 | * This function is called by on_each_cpu() while the |
7731 | * event_mutex is held. |
7732 | */ |
7733 | pid_list = rcu_dereference_protected(tr->function_pids, |
7734 | mutex_is_locked(&ftrace_lock)); |
7735 | no_pid_list = rcu_dereference_protected(tr->function_no_pids, |
7736 | mutex_is_locked(&ftrace_lock)); |
7737 | |
7738 | if (trace_ignore_this_task(filtered_pids: pid_list, filtered_no_pids: no_pid_list, current)) |
7739 | this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid, |
7740 | FTRACE_PID_IGNORE); |
7741 | else |
7742 | this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid, |
7743 | current->pid); |
7744 | } |
7745 | |
7746 | static ssize_t |
7747 | pid_write(struct file *filp, const char __user *ubuf, |
7748 | size_t cnt, loff_t *ppos, int type) |
7749 | { |
7750 | struct seq_file *m = filp->private_data; |
7751 | struct trace_array *tr = m->private; |
7752 | struct trace_pid_list *filtered_pids; |
7753 | struct trace_pid_list *other_pids; |
7754 | struct trace_pid_list *pid_list; |
7755 | ssize_t ret; |
7756 | |
7757 | if (!cnt) |
7758 | return 0; |
7759 | |
7760 | mutex_lock(&ftrace_lock); |
7761 | |
7762 | switch (type) { |
7763 | case TRACE_PIDS: |
7764 | filtered_pids = rcu_dereference_protected(tr->function_pids, |
7765 | lockdep_is_held(&ftrace_lock)); |
7766 | other_pids = rcu_dereference_protected(tr->function_no_pids, |
7767 | lockdep_is_held(&ftrace_lock)); |
7768 | break; |
7769 | case TRACE_NO_PIDS: |
7770 | filtered_pids = rcu_dereference_protected(tr->function_no_pids, |
7771 | lockdep_is_held(&ftrace_lock)); |
7772 | other_pids = rcu_dereference_protected(tr->function_pids, |
7773 | lockdep_is_held(&ftrace_lock)); |
7774 | break; |
7775 | default: |
7776 | ret = -EINVAL; |
7777 | WARN_ON_ONCE(1); |
7778 | goto out; |
7779 | } |
7780 | |
	ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt);
7782 | if (ret < 0) |
7783 | goto out; |
7784 | |
7785 | switch (type) { |
7786 | case TRACE_PIDS: |
7787 | rcu_assign_pointer(tr->function_pids, pid_list); |
7788 | break; |
7789 | case TRACE_NO_PIDS: |
7790 | rcu_assign_pointer(tr->function_no_pids, pid_list); |
7791 | break; |
7792 | } |
7793 | |
7794 | |
	if (filtered_pids) {
		synchronize_rcu();
		trace_pid_list_free(filtered_pids);
	} else if (pid_list && !other_pids) {
		/* Register a probe to set whether to ignore the tracing of a task */
		register_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr);
7801 | } |
7802 | |
7803 | /* |
7804 | * Ignoring of pids is done at task switch. But we have to |
7805 | * check for those tasks that are currently running. |
7806 | * Always do this in case a pid was appended or removed. |
7807 | */ |
	on_each_cpu(ignore_task_cpu, tr, 1);

	ftrace_update_pid_func();
	ftrace_startup_all(0);
 out:
	mutex_unlock(&ftrace_lock);
7814 | |
7815 | if (ret > 0) |
7816 | *ppos += ret; |
7817 | |
7818 | return ret; |
7819 | } |
7820 | |
7821 | static ssize_t |
7822 | ftrace_pid_write(struct file *filp, const char __user *ubuf, |
7823 | size_t cnt, loff_t *ppos) |
7824 | { |
	return pid_write(filp, ubuf, cnt, ppos, TRACE_PIDS);
7826 | } |
7827 | |
7828 | static ssize_t |
7829 | ftrace_no_pid_write(struct file *filp, const char __user *ubuf, |
7830 | size_t cnt, loff_t *ppos) |
7831 | { |
	return pid_write(filp, ubuf, cnt, ppos, TRACE_NO_PIDS);
7833 | } |
7834 | |
7835 | static int |
7836 | ftrace_pid_release(struct inode *inode, struct file *file) |
7837 | { |
7838 | struct trace_array *tr = inode->i_private; |
7839 | |
7840 | trace_array_put(tr); |
7841 | |
7842 | return seq_release(inode, file); |
7843 | } |
7844 | |
7845 | static const struct file_operations ftrace_pid_fops = { |
7846 | .open = ftrace_pid_open, |
7847 | .write = ftrace_pid_write, |
7848 | .read = seq_read, |
7849 | .llseek = tracing_lseek, |
7850 | .release = ftrace_pid_release, |
7851 | }; |
7852 | |
7853 | static const struct file_operations ftrace_no_pid_fops = { |
7854 | .open = ftrace_no_pid_open, |
7855 | .write = ftrace_no_pid_write, |
7856 | .read = seq_read, |
7857 | .llseek = tracing_lseek, |
7858 | .release = ftrace_pid_release, |
7859 | }; |
7860 | |
7861 | void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer) |
7862 | { |
7863 | trace_create_file(name: "set_ftrace_pid" , TRACE_MODE_WRITE, parent: d_tracer, |
7864 | data: tr, fops: &ftrace_pid_fops); |
7865 | trace_create_file(name: "set_ftrace_notrace_pid" , TRACE_MODE_WRITE, |
7866 | parent: d_tracer, data: tr, fops: &ftrace_no_pid_fops); |
7867 | } |
7868 | |
7869 | void __init ftrace_init_tracefs_toplevel(struct trace_array *tr, |
7870 | struct dentry *d_tracer) |
7871 | { |
7872 | /* Only the top level directory has the dyn_tracefs and profile */ |
7873 | WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL)); |
7874 | |
7875 | ftrace_init_dyn_tracefs(d_tracer); |
7876 | ftrace_profile_tracefs(d_tracer); |
7877 | } |
7878 | |
7879 | /** |
7880 | * ftrace_kill - kill ftrace |
7881 | * |
 * This function should be used by panic code. It stops ftrace
 * but in a not so nice way: nothing is cleaned up and tracing cannot
 * be re-enabled afterwards. Since it only sets a few flags, it is
 * safe to call from atomic context.
7885 | */ |
7886 | void ftrace_kill(void) |
7887 | { |
7888 | ftrace_disabled = 1; |
7889 | ftrace_enabled = 0; |
7890 | ftrace_trace_function = ftrace_stub; |
7891 | } |
7892 | |
7893 | /** |
7894 | * ftrace_is_dead - Test if ftrace is dead or not. |
7895 | * |
7896 | * Returns 1 if ftrace is "dead", zero otherwise. |
7897 | */ |
7898 | int ftrace_is_dead(void) |
7899 | { |
7900 | return ftrace_disabled; |
7901 | } |
7902 | |
7903 | #ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS |
7904 | /* |
 * When registering ftrace_ops with IPMODIFY, it is necessary to make sure
 * it doesn't conflict with any direct ftrace_ops. If there is an existing
 * direct ftrace_ops on a kernel function being patched, call
 * FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_PEER on it to enable sharing.
7909 | * |
7910 | * @ops: ftrace_ops being registered. |
7911 | * |
7912 | * Returns: |
7913 | * 0 on success; |
7914 | * Negative on failure. |
7915 | */ |
7916 | static int prepare_direct_functions_for_ipmodify(struct ftrace_ops *ops) |
7917 | { |
7918 | struct ftrace_func_entry *entry; |
7919 | struct ftrace_hash *hash; |
7920 | struct ftrace_ops *op; |
7921 | int size, i, ret; |
7922 | |
7923 | lockdep_assert_held_once(&direct_mutex); |
7924 | |
7925 | if (!(ops->flags & FTRACE_OPS_FL_IPMODIFY)) |
7926 | return 0; |
7927 | |
7928 | hash = ops->func_hash->filter_hash; |
7929 | size = 1 << hash->size_bits; |
7930 | for (i = 0; i < size; i++) { |
7931 | hlist_for_each_entry(entry, &hash->buckets[i], hlist) { |
7932 | unsigned long ip = entry->ip; |
7933 | bool found_op = false; |
7934 | |
7935 | mutex_lock(&ftrace_lock); |
7936 | do_for_each_ftrace_op(op, ftrace_ops_list) { |
7937 | if (!(op->flags & FTRACE_OPS_FL_DIRECT)) |
7938 | continue; |
			if (ops_references_ip(op, ip)) {
7940 | found_op = true; |
7941 | break; |
7942 | } |
7943 | } while_for_each_ftrace_op(op); |
			mutex_unlock(&ftrace_lock);
7945 | |
7946 | if (found_op) { |
7947 | if (!op->ops_func) |
7948 | return -EBUSY; |
7949 | |
7950 | ret = op->ops_func(op, FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_PEER); |
7951 | if (ret) |
7952 | return ret; |
7953 | } |
7954 | } |
7955 | } |
7956 | |
7957 | return 0; |
7958 | } |
7959 | |
7960 | /* |
 * Similar to prepare_direct_functions_for_ipmodify(): clean up when an
 * ops with IPMODIFY is unregistered. The cleanup is optional for most
 * DIRECT ops.
7964 | */ |
7965 | static void cleanup_direct_functions_after_ipmodify(struct ftrace_ops *ops) |
7966 | { |
7967 | struct ftrace_func_entry *entry; |
7968 | struct ftrace_hash *hash; |
7969 | struct ftrace_ops *op; |
7970 | int size, i; |
7971 | |
7972 | if (!(ops->flags & FTRACE_OPS_FL_IPMODIFY)) |
7973 | return; |
7974 | |
7975 | mutex_lock(&direct_mutex); |
7976 | |
7977 | hash = ops->func_hash->filter_hash; |
7978 | size = 1 << hash->size_bits; |
7979 | for (i = 0; i < size; i++) { |
7980 | hlist_for_each_entry(entry, &hash->buckets[i], hlist) { |
7981 | unsigned long ip = entry->ip; |
7982 | bool found_op = false; |
7983 | |
7984 | mutex_lock(&ftrace_lock); |
7985 | do_for_each_ftrace_op(op, ftrace_ops_list) { |
7986 | if (!(op->flags & FTRACE_OPS_FL_DIRECT)) |
7987 | continue; |
			if (ops_references_ip(op, ip)) {
7989 | found_op = true; |
7990 | break; |
7991 | } |
7992 | } while_for_each_ftrace_op(op); |
			mutex_unlock(&ftrace_lock);
7994 | |
7995 | /* The cleanup is optional, ignore any errors */ |
7996 | if (found_op && op->ops_func) |
7997 | op->ops_func(op, FTRACE_OPS_CMD_DISABLE_SHARE_IPMODIFY_PEER); |
7998 | } |
7999 | } |
	mutex_unlock(&direct_mutex);
8001 | } |
8002 | |
8003 | #define lock_direct_mutex() mutex_lock(&direct_mutex) |
8004 | #define unlock_direct_mutex() mutex_unlock(&direct_mutex) |
8005 | |
8006 | #else /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */ |
8007 | |
8008 | static int prepare_direct_functions_for_ipmodify(struct ftrace_ops *ops) |
8009 | { |
8010 | return 0; |
8011 | } |
8012 | |
8013 | static void cleanup_direct_functions_after_ipmodify(struct ftrace_ops *ops) |
8014 | { |
8015 | } |
8016 | |
8017 | #define lock_direct_mutex() do { } while (0) |
8018 | #define unlock_direct_mutex() do { } while (0) |
8019 | |
8020 | #endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */ |
8021 | |
8022 | /* |
8023 | * Similar to register_ftrace_function, except we don't lock direct_mutex. |
8024 | */ |
8025 | static int register_ftrace_function_nolock(struct ftrace_ops *ops) |
8026 | { |
8027 | int ret; |
8028 | |
8029 | ftrace_ops_init(ops); |
8030 | |
8031 | mutex_lock(&ftrace_lock); |
8032 | |
	ret = ftrace_startup(ops, 0);

	mutex_unlock(&ftrace_lock);
8036 | |
8037 | return ret; |
8038 | } |
8039 | |
8040 | /** |
8041 | * register_ftrace_function - register a function for profiling |
8042 | * @ops: ops structure that holds the function for profiling. |
8043 | * |
8044 | * Register a function to be called by all functions in the |
8045 | * kernel. |
8046 | * |
8047 | * Note: @ops->func and all the functions it calls must be labeled |
8048 | * with "notrace", otherwise it will go into a |
8049 | * recursive loop. |
8050 | */ |
8051 | int register_ftrace_function(struct ftrace_ops *ops) |
8052 | { |
8053 | int ret; |
8054 | |
8055 | lock_direct_mutex(); |
8056 | ret = prepare_direct_functions_for_ipmodify(ops); |
8057 | if (ret < 0) |
8058 | goto out_unlock; |
8059 | |
8060 | ret = register_ftrace_function_nolock(ops); |
8061 | |
8062 | out_unlock: |
8063 | unlock_direct_mutex(); |
8064 | return ret; |
8065 | } |
8066 | EXPORT_SYMBOL_GPL(register_ftrace_function); |
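
/*
 * Example usage (a minimal sketch; my_callback and my_ops are
 * hypothetical names): register a callback for every traced function,
 * then remove it again.
 *
 *	static void notrace my_callback(unsigned long ip,
 *					unsigned long parent_ip,
 *					struct ftrace_ops *op,
 *					struct ftrace_regs *fregs)
 *	{
 *		// runs at the entry of every function that passes the filters
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func = my_callback,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 *	...
 *	unregister_ftrace_function(&my_ops);
 */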
8067 | |
8068 | /** |
8069 | * unregister_ftrace_function - unregister a function for profiling. |
8070 | * @ops: ops structure that holds the function to unregister |
8071 | * |
8072 | * Unregister a function that was added to be called by ftrace profiling. |
8073 | */ |
8074 | int unregister_ftrace_function(struct ftrace_ops *ops) |
8075 | { |
8076 | int ret; |
8077 | |
8078 | mutex_lock(&ftrace_lock); |
	ret = ftrace_shutdown(ops, 0);
	mutex_unlock(&ftrace_lock);
8081 | |
8082 | cleanup_direct_functions_after_ipmodify(ops); |
8083 | return ret; |
8084 | } |
8085 | EXPORT_SYMBOL_GPL(unregister_ftrace_function); |
8086 | |
8087 | static int symbols_cmp(const void *a, const void *b) |
8088 | { |
8089 | const char **str_a = (const char **) a; |
8090 | const char **str_b = (const char **) b; |
8091 | |
8092 | return strcmp(*str_a, *str_b); |
8093 | } |
8094 | |
8095 | struct kallsyms_data { |
8096 | unsigned long *addrs; |
8097 | const char **syms; |
8098 | size_t cnt; |
8099 | size_t found; |
8100 | }; |
8101 | |
8102 | /* This function gets called for all kernel and module symbols |
8103 | * and returns 1 in case we resolved all the requested symbols, |
8104 | * 0 otherwise. |
8105 | */ |
8106 | static int kallsyms_callback(void *data, const char *name, unsigned long addr) |
8107 | { |
8108 | struct kallsyms_data *args = data; |
8109 | const char **sym; |
8110 | int idx; |
8111 | |
	sym = bsearch(&name, args->syms, args->cnt, sizeof(*args->syms), symbols_cmp);
8113 | if (!sym) |
8114 | return 0; |
8115 | |
8116 | idx = sym - args->syms; |
8117 | if (args->addrs[idx]) |
8118 | return 0; |
8119 | |
	if (!ftrace_location(addr))
8121 | return 0; |
8122 | |
8123 | args->addrs[idx] = addr; |
8124 | args->found++; |
8125 | return args->found == args->cnt ? 1 : 0; |
8126 | } |
8127 | |
8128 | /** |
8129 | * ftrace_lookup_symbols - Lookup addresses for array of symbols |
8130 | * |
 * @sorted_syms: array of pointers to symbol names to resolve;
 *	must be alphabetically sorted
 * @cnt: number of symbols/addresses in the @sorted_syms/@addrs arrays
 * @addrs: array for storing resulting addresses
 *
 * This function looks up addresses for the array of symbols provided in
 * @sorted_syms (which must be alphabetically sorted) and stores them in
 * the @addrs array, which needs to be big enough to store at least @cnt
 * addresses.
8140 | * |
8141 | * This function returns 0 if all provided symbols are found, |
8142 | * -ESRCH otherwise. |
8143 | */ |
8144 | int ftrace_lookup_symbols(const char **sorted_syms, size_t cnt, unsigned long *addrs) |
8145 | { |
8146 | struct kallsyms_data args; |
8147 | int found_all; |
8148 | |
8149 | memset(addrs, 0, sizeof(*addrs) * cnt); |
8150 | args.addrs = addrs; |
8151 | args.syms = sorted_syms; |
8152 | args.cnt = cnt; |
8153 | args.found = 0; |
8154 | |
	found_all = kallsyms_on_each_symbol(kallsyms_callback, &args);
	if (found_all)
		return 0;
	found_all = module_kallsyms_on_each_symbol(NULL, kallsyms_callback, &args);
8159 | return found_all ? 0 : -ESRCH; |
8160 | } |
8161 | |
8162 | #ifdef CONFIG_SYSCTL |
8163 | |
8164 | #ifdef CONFIG_DYNAMIC_FTRACE |
8165 | static void ftrace_startup_sysctl(void) |
8166 | { |
8167 | int command; |
8168 | |
8169 | if (unlikely(ftrace_disabled)) |
8170 | return; |
8171 | |
8172 | /* Force update next time */ |
8173 | saved_ftrace_func = NULL; |
8174 | /* ftrace_start_up is true if we want ftrace running */ |
8175 | if (ftrace_start_up) { |
8176 | command = FTRACE_UPDATE_CALLS; |
8177 | if (ftrace_graph_active) |
8178 | command |= FTRACE_START_FUNC_RET; |
8179 | ftrace_startup_enable(command); |
8180 | } |
8181 | } |
8182 | |
8183 | static void ftrace_shutdown_sysctl(void) |
8184 | { |
8185 | int command; |
8186 | |
8187 | if (unlikely(ftrace_disabled)) |
8188 | return; |
8189 | |
8190 | /* ftrace_start_up is true if ftrace is running */ |
8191 | if (ftrace_start_up) { |
8192 | command = FTRACE_DISABLE_CALLS; |
8193 | if (ftrace_graph_active) |
8194 | command |= FTRACE_STOP_FUNC_RET; |
8195 | ftrace_run_update_code(command); |
8196 | } |
8197 | } |
8198 | #else |
8199 | # define ftrace_startup_sysctl() do { } while (0) |
8200 | # define ftrace_shutdown_sysctl() do { } while (0) |
8201 | #endif /* CONFIG_DYNAMIC_FTRACE */ |
8202 | |
8203 | static bool is_permanent_ops_registered(void) |
8204 | { |
8205 | struct ftrace_ops *op; |
8206 | |
8207 | do_for_each_ftrace_op(op, ftrace_ops_list) { |
8208 | if (op->flags & FTRACE_OPS_FL_PERMANENT) |
8209 | return true; |
8210 | } while_for_each_ftrace_op(op); |
8211 | |
8212 | return false; |
8213 | } |
8214 | |
8215 | static int |
8216 | ftrace_enable_sysctl(struct ctl_table *table, int write, |
8217 | void *buffer, size_t *lenp, loff_t *ppos) |
8218 | { |
8219 | int ret = -ENODEV; |
8220 | |
8221 | mutex_lock(&ftrace_lock); |
8222 | |
8223 | if (unlikely(ftrace_disabled)) |
8224 | goto out; |
8225 | |
8226 | ret = proc_dointvec(table, write, buffer, lenp, ppos); |
8227 | |
8228 | if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled)) |
8229 | goto out; |
8230 | |
8231 | if (ftrace_enabled) { |
8232 | |
8233 | /* we are starting ftrace again */ |
8234 | if (rcu_dereference_protected(ftrace_ops_list, |
8235 | lockdep_is_held(&ftrace_lock)) != &ftrace_list_end) |
8236 | update_ftrace_function(); |
8237 | |
8238 | ftrace_startup_sysctl(); |
8239 | |
8240 | } else { |
8241 | if (is_permanent_ops_registered()) { |
8242 | ftrace_enabled = true; |
8243 | ret = -EBUSY; |
8244 | goto out; |
8245 | } |
8246 | |
8247 | /* stopping ftrace calls (just send to ftrace_stub) */ |
8248 | ftrace_trace_function = ftrace_stub; |
8249 | |
8250 | ftrace_shutdown_sysctl(); |
8251 | } |
8252 | |
8253 | last_ftrace_enabled = !!ftrace_enabled; |
8254 | out: |
	mutex_unlock(&ftrace_lock);
8256 | return ret; |
8257 | } |
8258 | |
8259 | static struct ctl_table ftrace_sysctls[] = { |
8260 | { |
8261 | .procname = "ftrace_enabled" , |
8262 | .data = &ftrace_enabled, |
8263 | .maxlen = sizeof(int), |
8264 | .mode = 0644, |
8265 | .proc_handler = ftrace_enable_sysctl, |
8266 | }, |
8267 | {} |
8268 | }; |
8269 | |
8270 | static int __init ftrace_sysctl_init(void) |
8271 | { |
8272 | register_sysctl_init("kernel" , ftrace_sysctls); |
8273 | return 0; |
8274 | } |
8275 | late_initcall(ftrace_sysctl_init); |
8276 | #endif |
8277 | |